code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
X = r/Rs alpha_r = 2*sigma0 * Rs * X * (1-self._F(X)) / (X**2-1) mass_2d = alpha_r * r * np.pi return mass_2d
def mass_2d_lens(self, r, sigma0, Rs)
mass enclosed within a projected (2d) circle of radius r :param r: projected radius :param sigma0: characteristic convergence (sigma0/sigma_crit) :param Rs: scale radius :return: projected mass within radius r
7.050253
7.493682
0.940826
m_tot = 2*np.pi*rho0*Rs**3 return m_tot
def mass_tot(self, rho0, Rs)
total mass of the profile :param rho0: density normalization :param Rs: scale radius :return: total mass
3.861999
5.989996
0.644741
x_ = x - center_x y_ = y - center_y r = np.sqrt(x_**2 + y_**2) M = self.mass_tot(rho0, Rs) pot = M / (r + Rs) return pot
def grav_pot(self, x, y, rho0, Rs, center_x=0, center_y=0)
gravitational potential (modulo 4 pi G and rho0 in appropriate units) :param x: x-coordinate :param y: y-coordinate :param rho0: density normalization :param Rs: scale radius :param center_x: profile center :param center_y: profile center :return: gravitational potential
3.140466
3.598899
0.872619
x_ = x - center_x y_ = y - center_y r = np.sqrt(x_**2 + y_**2) if isinstance(r, int) or isinstance(r, float): r = max(self._s, r) else: r[r < self._s] = self._s X = r / Rs f_ = sigma0 * Rs ** 2 * (np.log(X ** 2 / 4.) + 2 * self._F(X)) return f_
def function(self, x, y, sigma0, Rs, center_x=0, center_y=0)
lensing potential :param x: x-coordinate :param y: y-coordinate :param sigma0: sigma0/sigma_crit :param Rs: scale radius :param center_x: profile center :param center_y: profile center :return: lensing potential
3.261909
3.437788
0.948839
return self.lens_model.ray_shooting(x, y, kwargs, k=k)
def ray_shooting(self, x, y, kwargs, k=None)
maps image to source position (inverse deflection) :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param k: only evaluate the k-th lens model :return: source plane positions corresponding to (x, y) in the image plane
3.766662
4.187458
0.89951
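For context (standard gravitational lensing convention, not something defined by this routine), ray shooting evaluates the lens equation that maps an image-plane position theta to its source-plane position beta: beta = theta - alpha(theta), where alpha is the deflection returned by the alpha() method of the same class.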
if hasattr(self.lens_model, 'fermat_potential'): return self.lens_model.fermat_potential(x_image, y_image, x_source, y_source, kwargs_lens) else: raise ValueError("Fermat potential is not defined in multi-plane lensing. Please use single plane lens models.")
def fermat_potential(self, x_image, y_image, x_source, y_source, kwargs_lens)
fermat potential (negative sign means earlier arrival time) :param x_image: image position :param y_image: image position :param x_source: source position :param y_source: source position :param kwargs_lens: list of keyword arguments of lens model parameters matching the lens model classes :return: fermat potential in arcsec**2 without geometry term (second part of Eqn 1 in Suyu et al. 2013) as a list
2.900986
2.986901
0.971236
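For reference, the standard Fermat potential quoted by the docstring (e.g. Suyu et al. 2013, eq. 1, without the time-delay distance prefactor) is phi(theta, beta) = (theta - beta)^2 / 2 - psi(theta), with psi the lensing potential; this is a textbook relation given here as context, not extracted from the code above.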
return self.lens_model.potential(x, y, kwargs, k=k)
def potential(self, x, y, kwargs, k=None)
lensing potential :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param k: only evaluate the k-th lens model :return: lensing potential in units of arcsec^2
4.932783
5.044162
0.977919
return self.lens_model.alpha(x, y, kwargs, k=k)
def alpha(self, x, y, kwargs, k=None)
deflection angles :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param k: only evaluate the k-th lens model :return: deflection angles in units of arcsec
4.915803
5.700484
0.862348
return self.lens_model.hessian(x, y, kwargs, k=k)
def hessian(self, x, y, kwargs, k=None)
hessian matrix :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param k: only evaluate the k-th lens model :return: f_xx, f_xy, f_yy components
4.387041
6.029773
0.727563
f_xx, f_xy, f_yx, f_yy = self.hessian(x, y, kwargs) f_xx_dx, f_xy_dx, f_yx_dx, f_yy_dx = self.hessian(x + diff, y, kwargs) f_xx_dy, f_xy_dy, f_yx_dy, f_yy_dy = self.hessian(x, y + diff, kwargs) f_xxx = (f_xx_dx - f_xx) / diff f_xxy = (f_xx_dy - f_xx) / diff f_xyy = (f_xy_dy - f_xy) / diff f_yyy = (f_yy_dy - f_yy) / diff return f_xxx, f_xxy, f_xyy, f_yyy
def flexion(self, x, y, kwargs, diff=0.000001)
third derivatives (flexion) :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param diff: numerical differential length of Hessian :return: f_xxx, f_xxy, f_xyy, f_yyy
1.549438
1.500086
1.0329
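The scheme above is a one-sided finite difference of the Hessian components, e.g. f_xxx ≈ (f_xx(x+Δ, y) - f_xx(x, y)) / Δ with step Δ = diff; the mixed components f_xxy and f_xyy are obtained analogously from shifts in y.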
#extract parameters H0 = args[0] omega_m = self.omega_m_fixed Ode0 = self._omega_lambda_fixed logL, bool = self.prior_H0(H0) if bool is True: logL += self.LCDM_lensLikelihood(H0, omega_m, Ode0) return logL, None
def X2_chain_H0(self, args)
routine to compute X^2 given variable parameters for an MCMC/PSO chain
7.437995
7.79828
0.9538
H0 = args[0] h = H0/100. omega_m = self.omega_mh2_fixed / h**2 Ode0 = self._omega_lambda_fixed logL, bool = self.prior_omega_mh2(h, omega_m) if bool is True: logL += self.LCDM_lensLikelihood(H0, omega_m, Ode0) return logL, None
def X2_chain_omega_mh2(self, args)
routine to compute the log likelihood with a fixed omega_m * h**2 prior :param args: sampled parameters (H0) :return: log likelihood, None
7.032342
6.613122
1.063392
#extract parameters [H0, omega_m] = args Ode0 = self._omega_lambda_fixed logL_H0, bool_H0 = self.prior_H0(H0) logL_omega_m, bool_omega_m = self.prior_omega_m(omega_m) logL = logL_H0 + logL_omega_m if bool_H0 is True and bool_omega_m is True: logL += self.LCDM_lensLikelihood(H0, omega_m, Ode0) return logL, None  # the prior terms are already included in logL; adding logL_H0 + logL_omega_m again would double-count them
def X2_chain_H0_omgega_m(self, args)
routine to compute X^2 :param args: :return:
3.835141
4.044709
0.948187
if H0 < H0_min or H0 > H0_max: penalty = -10**15 return penalty, False else: return 0, True
def prior_H0(self, H0, H0_min=0, H0_max=200)
checks whether H0 is within the given bounds; if not, returns a large negative penalty and False
3.907453
3.573469
1.093462
if omega_m < omega_m_min or omega_m > omega_m_max: penalty = -10**15 return penalty, False else: return 0, True
def prior_omega_m(self, omega_m, omega_m_min=0, omega_m_max=1)
checks whether the parameter omega_m is within the given bounds :param omega_m: :param omega_m_min: :param omega_m_max: :return:
3.543282
3.930902
0.901392
sampler = emcee.EnsembleSampler(n_walkers, self.cosmoParam.numParam, self.chain.likelihood) p0 = emcee.utils.sample_ball(mean_start, sigma_start, n_walkers) new_pos, _, _, _ = sampler.run_mcmc(p0, n_burn) sampler.reset() store = InMemoryStorageUtil() for pos, prob, _, _ in sampler.sample(new_pos, iterations=n_run): store.persistSamplingValues(pos, prob, None) return store.samples
def mcmc_emcee(self, n_walkers, n_run, n_burn, mean_start, sigma_start)
runs the emcee MCMC sampler and returns the samples of the parameter space
3.813495
3.972319
0.960017
mean = 0. # background mean flux (default zero) # 1d list of coordinates (x,y) of a numPix x numPix square grid, centered at zero x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(numPix=numPix, deltapix=deltaPix, subgrid_res=1, inverse=inverse) # mask (1= model this pixel, 0= leave blank) exposure_map = np.ones((numPix, numPix)) * exposure_time # individual exposure time/weight per pixel kwargs_data = { 'background_rms': sigma_bkg, 'exposure_map': exposure_map , 'ra_at_xy_0': ra_at_xy_0, 'dec_at_xy_0': dec_at_xy_0, 'transform_pix2angle': Mpix2coord , 'image_data': np.zeros((numPix, numPix)) } return kwargs_data
def data_configure_simple(numPix, deltaPix, exposure_time=1, sigma_bkg=1, inverse=False)
configures the data keyword arguments with a coordinate grid centered at zero. :param numPix: number of pixels (numPix x numPix) :param deltaPix: pixel size (in angular units) :param exposure_time: exposure time :param sigma_bkg: background noise (Gaussian sigma) :param inverse: if True, the RA axis points to the left; if False, to the right :return: keyword arguments that can be used to construct a Data() class instance of lenstronomy
4.114022
3.831971
1.073605
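A minimal usage sketch of data_configure_simple (the numerical values are illustrative only, and the exact constructor of the lenstronomy Data() class may differ between versions):

kwargs_data = data_configure_simple(numPix=100, deltaPix=0.05, exposure_time=900., sigma_bkg=0.01)
# kwargs_data now holds an empty 100x100 image, a constant exposure map and the
# pixel-to-angle transform, ready to be passed on to a Data()-type class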
if psf_type == 'GAUSSIAN': sigma = util.fwhm2sigma(fwhm) sigma_axis = sigma gaussian = Gaussian() x_grid, y_grid = util.make_grid(kernelsize, deltaPix) kernel_large = gaussian.function(x_grid, y_grid, amp=1., sigma_x=sigma_axis, sigma_y=sigma_axis, center_x=0, center_y=0) kernel_large /= np.sum(kernel_large) kernel_large = util.array2image(kernel_large) kernel_pixel = kernel_util.pixel_kernel(kernel_large) kwargs_psf = {'psf_type': psf_type, 'fwhm': fwhm, 'truncation': truncate*fwhm, 'kernel_point_source': kernel_large, 'kernel_pixel': kernel_pixel, 'pixel_size': deltaPix} elif psf_type == 'PIXEL': kernel_large = copy.deepcopy(kernel) kernel_large = kernel_util.cut_psf(kernel_large, psf_size=kernelsize) kwargs_psf = {'psf_type': "PIXEL", 'kernel_point_source': kernel_large} elif psf_type == 'NONE': kwargs_psf = {'psf_type': 'NONE'} else: raise ValueError("psf type %s not supported!" % psf_type) return kwargs_psf
def psf_configure_simple(psf_type="GAUSSIAN", fwhm=1, kernelsize=11, deltaPix=1, truncate=6, kernel=None)
this routine generates keyword arguments to initialize a PSF() class in lenstronomy. Have a look at the PSF class documentation to see the full possibilities. :param psf_type: string, type of PSF model :param fwhm: Full width at half maximum of PSF (if GAUSSIAN psf) :param kernelsize: size of kernel in pixels (use odd numbers), only applicable for PIXEL kernels :param deltaPix: pixel size in angular units (only needed for GAUSSIAN kernel) :param truncate: how many sigmas out the truncation happens :param kernel: 2d numpy array, centered PSF (odd number of pixels per axis) :return: keyword arguments
2.277591
2.23847
1.017477
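For reference, the util.fwhm2sigma helper used above presumably implements the standard Gaussian FWHM-to-sigma conversion; a minimal stand-alone sketch (hypothetical, for illustration only):

import numpy as np

def fwhm2sigma(fwhm):
    # FWHM = 2*sqrt(2*ln 2) * sigma ~= 2.355 * sigma for a Gaussian
    return fwhm / (2. * np.sqrt(2. * np.log(2.)))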
if not w_t > w_c: w_t, w_c = w_c, w_t s_scale_1 = w_c s_scale_2 = w_t f_x_1, f_y_1 = self.nie.derivatives(1, 0, theta_E=1, e1=0, e2=0, s_scale=s_scale_1) f_x_2, f_y_2 = self.nie.derivatives(1, 0, theta_E=1, e1=0, e2=0, s_scale=s_scale_2) f_x = f_x_1 - f_x_2 theta_E_convert = theta_E / f_x return theta_E_convert, w_c, w_t
def _theta_E_convert(self, theta_E, w_c, w_t)
convert the parameter theta_E (deflection angle one arcsecond from the center) into the "Einstein radius" scale parameter of the two NIE profiles :param theta_E: :param w_c: :param w_t: :return:
2.583258
2.542026
1.01622
assert 'norm' in kwargs.keys(), "key word arguments must contain 'norm', " \ "the normalization of deflection angle in units of arcsec." x_ = x - center_x y_ = y - center_y R = np.sqrt(x_**2 + y_**2) alpha = self._interp(x_, y_, **kwargs) cos_theta = x_ * R ** -1 sin_theta = y_ * R ** -1 f_x, f_y = alpha * cos_theta, alpha * sin_theta return f_x, f_y
def derivatives(self, x, y, center_x = 0, center_y = 0, **kwargs)
returns df/dx and df/dy (un-normalized!!!) interpolated from the numerical deflection table
3.95142
3.760461
1.050781
diff = 1e-6 alpha_ra, alpha_dec = self.derivatives(x, y, center_x = center_x, center_y = center_y, **kwargs) alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, center_x = center_x, center_y = center_y, **kwargs) alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, center_x = center_x, center_y = center_y, **kwargs) dalpha_rara = (alpha_ra_dx - alpha_ra) / diff dalpha_radec = (alpha_ra_dy - alpha_ra) / diff dalpha_decdec = (alpha_dec_dy - alpha_dec) / diff f_xx = dalpha_rara f_yy = dalpha_decdec f_xy = dalpha_radec return f_xx, f_yy, f_xy
def hessian(self, x, y, center_x = 0, center_y = 0, **kwargs)
returns Hessian matrix of function d^2f/dx^2, d^2f/dy^2, d^2f/dxdy (un-normalized!!!) interpolated from the numerical deflection table
1.854856
1.875613
0.988933
phi_G, q = param_util.ellipticity2phi_q(e1, e2) x_, y_ = self._coord_transf(x, y, q, phi_G, center_x, center_y) f_ = self.sersic.function(x_, y_, n_sersic, R_sersic, k_eff) return f_
def function(self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, center_y=0)
returns the elliptical Sersic profile function
2.585207
2.769356
0.933505
phi_G, q = param_util.ellipticity2phi_q(e1, e2) e = abs(1. - q) cos_phi = np.cos(phi_G) sin_phi = np.sin(phi_G) x_, y_ = self._coord_transf(x, y, q, phi_G, center_x, center_y) f_x_prim, f_y_prim = self.sersic.derivatives(x_, y_, n_sersic, R_sersic, k_eff) f_x_prim *= np.sqrt(1 - e) f_y_prim *= np.sqrt(1 + e) f_x = cos_phi*f_x_prim-sin_phi*f_y_prim f_y = sin_phi*f_x_prim+cos_phi*f_y_prim return f_x, f_y
def derivatives(self, x, y, n_sersic, R_sersic, k_eff, e1, e2, center_x=0, center_y=0)
returns df/dx and df/dy of the function
2.168045
2.168925
0.999594
x_int = int(round(x_pos)) y_int = int(round(y_pos)) shift_x = x_int - x_pos shift_y = y_int - y_pos kernel_shifted = interp.shift(kernel, [-shift_y, -shift_x], order=order) return add_layer2image_int(grid2d, x_int, y_int, kernel_shifted)
def add_layer2image(grid2d, x_pos, y_pos, kernel, order=1)
adds a kernel on the grid2d image at position x_pos, y_pos with an interpolated subgrid pixel shift of order=order :param grid2d: 2d pixel grid (i.e. image) :param x_pos: x-position center (pixel coordinate) of the layer to be added :param y_pos: y-position center (pixel coordinate) of the layer to be added :param kernel: the layer to be added to the image :param order: interpolation order for sub-pixel shift of the kernel to be added :return: image with added layer, cut to original size
2.335222
2.157511
1.082368
nx, ny = np.shape(kernel) if nx % 2 == 0: raise ValueError("kernel needs odd numbers of pixels") num_x, num_y = np.shape(grid2d) x_int = int(round(x_pos)) y_int = int(round(y_pos)) k_x, k_y = np.shape(kernel) k_l2_x = int((k_x - 1) / 2) k_l2_y = int((k_y - 1) / 2) min_x = np.maximum(0, x_int-k_l2_x) min_y = np.maximum(0, y_int-k_l2_y) max_x = np.minimum(num_x, x_int+k_l2_x + 1) max_y = np.minimum(num_y, y_int+k_l2_y + 1) min_xk = np.maximum(0, -x_int + k_l2_x) min_yk = np.maximum(0, -y_int + k_l2_y) max_xk = np.minimum(k_x, -x_int + k_l2_x + num_x) max_yk = np.minimum(k_y, -y_int + k_l2_y + num_y) if min_x >= max_x or min_y >= max_y or min_xk >= max_xk or min_yk >= max_yk or (max_x-min_x != max_xk-min_xk) or (max_y-min_y != max_yk-min_yk): return grid2d kernel_re_sized = kernel[min_yk:max_yk, min_xk:max_xk] new = grid2d.copy() new[min_y:max_y, min_x:max_x] += kernel_re_sized return new
def add_layer2image_int(grid2d, x_pos, y_pos, kernel)
adds a kernel on the grid2d image at position x_pos, y_pos at integer positions of pixel :param grid2d: 2d pixel grid (i.e. image) :param x_pos: x-position center (pixel coordinate) of the layer to be added :param y_pos: y-position center (pixel coordinate) of the layer to be added :param kernel: the layer to be added to the image :return: image with added layer
1.706137
1.727423
0.987677
if sigma_bkd < 0: raise ValueError("Sigma background is smaller than zero! Please use positive values.") nx, ny = np.shape(image) background = np.random.randn(nx, ny) * sigma_bkd return background
def add_background(image, sigma_bkd)
adds background noise to image :param image: pixel values of image :param sigma_bkd: background noise (sigma) :return: a realisation of Gaussian noise of the same size as image
4.04166
4.234795
0.954393
if isinstance(exp_time, int) or isinstance(exp_time, float): if exp_time <= 0: exp_time = 1 else: mean_exp_time = np.mean(exp_time) exp_time[exp_time < mean_exp_time/10] = mean_exp_time/10 sigma = np.sqrt(np.abs(image)/exp_time) # Gaussian approximation for Poisson distribution, normalized to exposure time nx, ny = np.shape(image) poisson = np.random.randn(nx, ny) * sigma return poisson
def add_poisson(image, exp_time)
adds Poisson (approximated as Gaussian) distributed noise with mean given by the surface brightness :param image: pixel values (photon counts per unit exposure time) :param exp_time: exposure time :return: Poisson noise realization of input image
2.92498
2.846336
1.02763
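As a quick sanity check of the Gaussian approximation in add_poisson (numbers are purely illustrative): a pixel with a surface brightness of 100 counts/s observed for 900 s gets sigma = sqrt(100 / 900) ≈ 0.33 counts/s, i.e. the relative Poisson error decreases as 1/sqrt(exposure time).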
interp_2d = scipy.interpolate.interp2d(x_in, y_in, input_values, kind='linear') #interp_2d = scipy.interpolate.RectBivariateSpline(x_in, y_in, input_values, kx=1, ky=1) out_values = interp_2d.__call__(x_out, y_out) return out_values
def re_size_array(x_in, y_in, input_values, x_out, y_out)
resizes 2d array (i.e. image) to new coordinates. So far only works with square output aligned with coordinate axis. :param x_in: :param y_in: :param input_values: :param x_out: :param y_out: :return:
1.960757
2.008989
0.975992
img_sym = np.zeros_like(image) angle = 360./symmetry for i in range(symmetry): img_sym += rotateImage(image, angle*i) img_sym /= symmetry return img_sym
def symmetry_average(image, symmetry)
symmetry averaged image :param image: :param symmetry: :return:
2.715977
2.980118
0.911365
n = len(x_mins) idex = [] for i in range(n): if i == 0: pass else: for j in range(0, i): if (abs(x_mins[i] - x_mins[j]) < min_distance and abs(y_mins[i] - y_mins[j]) < min_distance): idex.append(i) break x_mins = np.delete(x_mins, idex, axis=0) y_mins = np.delete(y_mins, idex, axis=0) return x_mins, y_mins
def findOverlap(x_mins, y_mins, min_distance)
finds overlapping solutions and deletes multiples that lie within min_distance of each other
1.651169
1.633506
1.010813
idex=[] min = -deltapix*numPix/2 max = deltapix*numPix/2 for i in range(len(x_coord)): #sum over image positions if (x_coord[i] < min or x_coord[i] > max or y_coord[i] < min or y_coord[i] > max): idex.append(i) x_coord = np.delete(x_coord, idex, axis=0) y_coord = np.delete(y_coord, idex, axis=0) return x_coord, y_coord
def coordInImage(x_coord, y_coord, numPix, deltapix)
checks whether image positions are within the pixel image (in units of arcsec); if not, they are removed :param x_coord: x-coordinates of image positions (in units of angles) :param y_coord: y-coordinates of image positions (in units of angles) :param numPix: number of pixels per axis :param deltapix: pixel size (angular units) :returns: image positions within the pixel image
2.102196
2.288234
0.918698
if factor < 1: raise ValueError('scaling factor in re-sizing %s < 1' %factor) f = int(factor) nx, ny = np.shape(image) if int(nx/f) == nx/f and int(ny/f) == ny/f: small = image.reshape([int(nx/f), f, int(ny/f), f]).mean(3).mean(1) return small else: raise ValueError("scaling with factor %s is not possible with grid size %s, %s" %(f, nx, ny))
def re_size(image, factor=1)
resizes image with nx x ny to nx/factor x ny/factor :param image: 2d image with shape (nx,ny) :param factor: integer >=1 :return:
3.570786
3.542216
1.008066
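A short illustration of the reshape-based binning used in re_size (hypothetical 4x4 input, factor 2):

import numpy as np
image = np.arange(16, dtype=float).reshape(4, 4)
# each output pixel is the mean of a 2x2 block of the input
small = image.reshape(2, 2, 2, 2).mean(3).mean(1)
print(small)  # [[ 2.5  4.5]  [10.5 12.5]]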
numPix = int(len(image)/bin_size) numPix_precut = numPix * bin_size factor = int(len(image)/numPix) if not numPix * bin_size == len(image): image_precut = image[0:numPix_precut, 0:numPix_precut] else: image_precut = image image_resized = re_size(image_precut, factor) image_resized *= bin_size**2 wht_map_resized = re_size(wht_map[0:numPix_precut, 0:numPix_precut], factor) sigma_bkg_resized = bin_size*sigma_bkg ra_coords_resized = re_size(ra_coords[0:numPix_precut, 0:numPix_precut], factor) dec_coords_resized = re_size(dec_coords[0:numPix_precut, 0:numPix_precut], factor) idex_mask_resized = re_size(idex_mask[0:numPix_precut, 0:numPix_precut], factor) idex_mask_resized[idex_mask_resized > 0] = 1 return image_resized, wht_map_resized, sigma_bkg_resized, ra_coords_resized, dec_coords_resized, idex_mask_resized
def rebin_image(bin_size, image, wht_map, sigma_bkg, ra_coords, dec_coords, idex_mask)
rebins pixels and updates the cutout image, wht_map, sigma_bkg, coordinate grids and index mask accordingly :param bin_size: number of pixels (per axis) to merge :return: re-binned image, wht_map, sigma_bkg, ra_coords, dec_coords, idex_mask
1.727761
1.727467
1.00017
factor = int(factor) Mcoord2pix_resized = Mcoord2pix / factor Mpix2coord_resized = Mpix2coord * factor x_at_radec_0_resized = (x_at_radec_0 + 0.5) / factor - 0.5 y_at_radec_0_resized = (y_at_radec_0 + 0.5) / factor - 0.5 ra_at_xy_0_resized, dec_at_xy_0_resized = util.map_coord2pix(-x_at_radec_0_resized, -y_at_radec_0_resized, 0, 0, Mpix2coord_resized) return ra_at_xy_0_resized, dec_at_xy_0_resized, x_at_radec_0_resized, y_at_radec_0_resized, Mpix2coord_resized, Mcoord2pix_resized
def rebin_coord_transform(factor, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix)
adapts the coordinate system and the transformation between angular and pixel coordinates to a re-binned image :param factor: re-binning factor :param x_at_radec_0: pixel x-coordinate of the RA/DEC origin :param y_at_radec_0: pixel y-coordinate of the RA/DEC origin :param Mpix2coord: pixel-to-angle transformation matrix :param Mcoord2pix: angle-to-pixel transformation matrix :return: ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix of the re-binned image
1.567266
1.664886
0.941366
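The half-pixel shifts in rebin_coord_transform follow the usual pixel-center convention for block averaging: under a re-binning factor f, an original pixel index x maps to x' = (x + 0.5)/f - 0.5, so for f = 2 the original pixel centers 0 and 1 map to x' = -0.25 and +0.25, i.e. both fall inside the new pixel 0. This is a consistency note on the code above, not an addition to it.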
image_stacked = np.zeros_like(image_list[0]) wht_stacked = np.zeros_like(image_stacked) sigma_stacked = 0. for i in range(len(image_list)): image_stacked += image_list[i]*wht_list[i] sigma_stacked += sigma_list[i]**2 * np.median(wht_list[i]) wht_stacked += wht_list[i] image_stacked /= wht_stacked sigma_stacked /= np.median(wht_stacked) wht_stacked /= len(wht_list) return image_stacked, wht_stacked, np.sqrt(sigma_stacked)
def stack_images(image_list, wht_list, sigma_list)
stacks weighted images :param image_list: list of images to be stacked :param wht_list: list of weight maps :param sigma_list: list of background noise values :return: stacked image, stacked weight map, stacked background noise
1.781241
1.925744
0.924963
nx, ny = image.shape if nx < numPix or ny < numPix: print('WARNING: image can not be resized, in routine cut_edges.') return image if nx % 2 == 0 or ny % 2 == 0 or numPix % 2 == 0: #pass print("WARNING: image or cutout side are even number. This routine only works for odd numbers %s %s %s" % (nx, ny, numPix)) cx = int((nx-1)/2) cy = int((ny-1)/2) d = int((numPix-1)/2) if nx % 2 == 0: cx += 1 if ny % 2 == 0: cy += 1 resized = image[cx-d:cx+d+1, cy-d:cy+d+1] return copy.deepcopy(resized)
def cut_edges(image, numPix)
cuts out the edges of a 2d image and returns a re-sized image of side length numPix; the center is well defined for odd pixel sizes. :param image: 2d numpy array :param numPix: square size of cut out image :return: cutout image with size numPix
3.134343
3.113955
1.006547
H0_range = np.linspace(10, 100, 90) omega_m_range = np.linspace(0.05, 1, 95) grid2d = np.dstack(np.meshgrid(H0_range, omega_m_range)).reshape(-1, 2) H0_grid = grid2d[:, 0] omega_m_grid = grid2d[:, 1] Dd_grid = np.zeros_like(H0_grid) Ds_Dds_grid = np.zeros_like(H0_grid) for i in range(len(H0_grid)): Dd, Ds_Dds = self.cosmo2Dd_Ds_Dds(H0_grid[i], omega_m_grid[i]) Dd_grid[i] = Dd Ds_Dds_grid[i] = Ds_Dds self._f_H0 = interpolate.interp2d(Dd_grid, Ds_Dds_grid, H0_grid, kind='linear', copy=False, bounds_error=False, fill_value=-1) print("H0 interpolation done") self._f_omega_m = interpolate.interp2d(Dd_grid, Ds_Dds_grid, omega_m_grid, kind='linear', copy=False, bounds_error=False, fill_value=-1) print("omega_m interpolation done")
def _make_interpolation(self)
creates an interpolation grid in H_0, omega_m and computes quantities in Dd and Ds_Dds :return:
2.034923
1.699367
1.19746
if not hasattr(self, '_f_H0') or not hasattr(self, '_f_omega_m'): self._make_interpolation() H0 = self._f_H0(Dd, Ds_Dds) print(H0, 'H0') omega_m = self._f_omega_m(Dd, Ds_Dds) Dd_new, Ds_Dds_new = self.cosmo2Dd_Ds_Dds(H0[0], omega_m[0]) if abs(Dd - Dd_new)/Dd > 0.01 or abs(Ds_Dds - Ds_Dds_new)/Ds_Dds > 0.01: return [-1], [-1] else: return H0[0], omega_m[0]
def get_cosmo(self, Dd, Ds_Dds)
returns the values of H0 and omega_m computed with an interpolation :param Dd: float :param Ds_Dds: float :return: H0, omega_m
2.634826
2.393266
1.100933
kwargs = copy.deepcopy(kwargs_profile) try: del kwargs['center_x'] del kwargs['center_y'] except: pass # integral of self._profile.density(x)* 4*np.pi * x^2 *dx, 0,r out = integrate.quad(lambda x: self._profile.density(x, **kwargs)*4*np.pi*x**2, 0, r) return out[0]
def mass_enclosed_3d(self, r, kwargs_profile)
computes the mass enclosed within a sphere of radius r :param r: radius (arcsec) :param kwargs_profile: keyword argument list with lens model parameters :return: 3d mass enclosed of r
4.321038
4.614032
0.936499
kwargs = copy.deepcopy(kwargs_profile) try: del kwargs['center_x'] del kwargs['center_y'] except: pass # integral of self._profile.density(np.sqrt(x^2+r^2))* dx, 0, infty out = integrate.quad(lambda x: 2*self._profile.density(np.sqrt(x**2+r**2), **kwargs), 0, 100) return out[0]
def density_2d(self, r, kwargs_profile)
computes the projected density along the line-of-sight :param r: radius (arcsec) :param kwargs_profile: keyword argument list with lens model parameters :return: 2d projected density at projected radius r
4.560195
5.048294
0.903314
kwargs = copy.deepcopy(kwargs_profile) try: del kwargs['center_x'] del kwargs['center_y'] except: pass # integral of self.density_2d(x)* 2*np.pi * x *dx, 0, r out = integrate.quad(lambda x: self.density_2d(x, kwargs)*2*np.pi*x, 0, r) return out[0]
def mass_enclosed_2d(self, r, kwargs_profile)
computes the projected mass enclosed within a circle of radius r :param r: radius (arcsec) :param kwargs_profile: keyword argument list with lens model parameters :return: projected mass enclosed within radius r
3.999103
4.392184
0.910504
self._coords.shift_coordinate_grid(x_shift, y_shift, pixel_unit=pixel_unit) self._x_grid, self._y_grid = self._coords.coordinate_grid(self.nx)
def shift_coordinate_grid(self, x_shift, y_shift, pixel_unit=False)
shifts the coordinate system :param x_shift: shift in x (or RA) :param y_shift: shift in y (or DEC) :param pixel_unit: bool, if True, units of pixels in input, otherwise RA/DEC :return: updated data class with change in coordinate system
3.249908
3.739346
0.869111
if not hasattr(self, '_C_D'): if self._noise_map is not None: self._C_D = self._noise_map**2 else: self._C_D = self.covariance_matrix(self.data, self.background_rms, self.exposure_map) return self._C_D
def C_D(self)
Covariance matrix of all pixel values in 2d numpy array (only diagonal component) The covariance matrix is estimated from the data. WARNING: For low count statistics, the noise in the data may lead to biased estimates of the covariance matrix. :return: covariance matrix of all pixel values in 2d numpy array (only diagonal component).
3.385515
3.168031
1.06865
if noise_map is not None: return noise_map**2 if isinstance(exposure_map, int) or isinstance(exposure_map, float): if exposure_map <= 0: exposure_map = 1 else: mean_exp_time = np.mean(exposure_map) exposure_map[exposure_map < mean_exp_time / 10] = mean_exp_time / 10 if verbose: if background_rms * np.max(exposure_map) < 1: print("WARNING! sigma_b*f %s < 1 count may introduce unstable error estimates" % (background_rms * np.max(exposure_map))) d_pos = np.zeros_like(data) #threshold = 1.5*sigma_b d_pos[data >= 0] = data[data >= 0] #d_pos[d < threshold] = 0 sigma = d_pos / exposure_map + background_rms ** 2 return sigma
def covariance_matrix(self, data, background_rms=1, exposure_map=1, noise_map=None, verbose=False)
returns a diagonal matrix for the covariance estimation which describes the error. Notes: - the exposure map must be strictly positive. Values that deviate too much from the mean exposure time will be given a lower limit to not under-predict the Poisson component of the noise. - the data must be positive semi-definite for the Poisson noise estimate. Values < 0 (possible after mean subtraction) will not have a Poisson component in their noise estimate. :param data: data array, e.g. in units of photons/second :param background_rms: background noise rms, e.g. in units of photons/second :param exposure_map: exposure time per pixel, e.g. in units of seconds :return: diagonal of the covariance matrix (same shape as data) combining background and Poisson components, in units of (photons/second)^2
3.492133
3.247554
1.075312
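In equation form, the per-pixel variance estimated by covariance_matrix is sigma_i^2 = max(d_i, 0) / t_i + background_rms^2, where d_i is the data value and t_i the (clipped) exposure time; this restates the code above.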
x_solve, y_solve = [], [] for i in range(num_random): x_init = np.random.uniform(-search_window / 2., search_window / 2) + x_center y_init = np.random.uniform(-search_window / 2., search_window / 2) + y_center xinitial = np.array([x_init, y_init]) result = minimize(self._root, xinitial, args=(kwargs_lens, source_x, source_y), tol=precision_limit ** 2, method='Nelder-Mead') if self._root(result.x, kwargs_lens, source_x, source_y) < precision_limit**2: x_solve.append(result.x[0]) y_solve.append(result.x[1]) x_mins, y_mins = image_util.findOverlap(x_solve, y_solve, precision_limit) if arrival_time_sort is True: x_mins, y_mins = self.sort_arrival_times(x_mins, y_mins, kwargs_lens) return x_mins, y_mins
def image_position_stochastic(self, source_x, source_y, kwargs_lens, search_window=10, precision_limit=10**(-10), arrival_time_sort=True, x_center=0, y_center=0, num_random=1000, verbose=False)
Solves the lens equation stochastically with the scipy minimization routine on the quadratic distance between the backwards ray-shooted proposed image position and the source position. Credits to Giulia Pagano :param source_x: source position :param source_y: source position :param kwargs_lens: lens model list of keyword arguments :param search_window: angular size of search window :param precision_limit: limit required on the precision in the source plane :param arrival_time_sort: bool, if True sorts according to arrival time :param x_center: center of search window :param y_center: center of search window :param num_random: number of random starting points of the non-linear solver in the search window :param verbose: bool, if True, prints performance information :return: x_image, y_image
2.14242
2.13296
1.004435
if hasattr(self.lensModel, '_no_potential'): raise Exception('Instance of lensModel passed to this class does not compute the lensing potential, ' 'and therefore cannot compute time delays.') if len(x_mins) <= 1: return x_mins, y_mins x_source, y_source = self.lensModel.ray_shooting(x_mins, y_mins, kwargs_lens) x_source = np.mean(x_source) y_source = np.mean(y_source) if self.lensModel.multi_plane is True: arrival_time = self.lensModel.arrival_time(x_mins, y_mins, kwargs_lens) else: fermat_pot = self.lensModel.fermat_potential(x_mins, y_mins, x_source, y_source, kwargs_lens) arrival_time = fermat_pot idx = np.argsort(arrival_time) x_mins = np.array(x_mins)[idx] y_mins = np.array(y_mins)[idx] return x_mins, y_mins
def sort_arrival_times(self, x_mins, y_mins, kwargs_lens)
sort arrival times (fermat potential) of image positions in increasing order of light travel time :param x_mins: ra position of images :param y_mins: dec position of images :param kwargs_lens: keyword arguments of lens model :return: sorted lists of x_mins and y_mins
2.52574
2.445354
1.032873
pos_bool = True for kwargs in kwargs_ps: point_amp = kwargs['point_amp'] for amp in point_amp: if amp < 0: pos_bool = False break return pos_bool
def check_positive_flux(cls, kwargs_ps)
check whether inferred linear parameters are positive :param kwargs_ps: :return: bool
3.592515
3.701601
0.97053
kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo = self.best_fit(bijective=False) param_class = self._param_class likelihoodModule = self.likelihoodModule logL, _ = likelihoodModule.logL(param_class.kwargs2args(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo)) return logL
def best_fit_likelihood(self)
returns the log likelihood of the best fit model of the current state of this class :return: log likelihood, float
4.201858
4.529857
0.927592
param_class = self._param_class # run PSO mcmc_class = Sampler(likelihoodModule=self.likelihoodModule) mean_start = param_class.kwargs2args(self._lens_temp, self._source_temp, self._lens_light_temp, self._ps_temp, self._cosmo_temp) lens_sigma, source_sigma, lens_light_sigma, ps_sigma, cosmo_sigma = self._updateManager.sigma_kwargs sigma_start = param_class.kwargs2args(lens_sigma, source_sigma, lens_light_sigma, ps_sigma, cosmo_sigma) num_param, param_list = param_class.num_param() # run MCMC if not init_samples is None and re_use_samples is True: print("test that you are here!") num_samples, num_param_prev = np.shape(init_samples) print(num_samples, num_param_prev, num_param, 'shape of init_sample') if num_param_prev == num_param: print("re-using previous samples to initialize the next MCMC run.") initpos = ReusePositionGenerator(init_samples) else: print("Can not re-use previous MCMC samples due to change in option") initpos = None else: initpos = None samples, dist = mcmc_class.mcmc_CH(walkerRatio, n_run, n_burn, mean_start, np.array(sigma_start) * sigma_scale, threadCount=threadCount, mpi=self._mpi, init_pos=initpos) return samples, param_list, dist
def mcmc(self, n_burn, n_run, walkerRatio, sigma_scale=1, threadCount=1, init_samples=None, re_use_samples=True)
MCMC routine :param n_burn: number of burn-in iterations (will not be saved) :param n_run: number of MCMC iterations that are saved :param walkerRatio: ratio of walkers to the number of free parameters :param sigma_scale: scaling of the initial parameter spread relative to the width in the initial settings :param threadCount: number of CPU threads. If MPI option is set, threadCount=1 :param init_samples: initial sample from where to start the MCMC process :param re_use_samples: bool, if True, re-uses the samples described in init_samples. Otherwise starts from scratch. :return: MCMC samples, parameter names, logL values of all samples
4.536407
4.540643
0.999067
param_class = self._param_class init_pos = param_class.kwargs2args(self._lens_temp, self._source_temp, self._lens_light_temp, self._ps_temp, self._cosmo_temp) lens_sigma, source_sigma, lens_light_sigma, ps_sigma, cosmo_sigma = self._updateManager.sigma_kwargs sigma_start = param_class.kwargs2args(lens_sigma, source_sigma, lens_light_sigma, ps_sigma, cosmo_sigma) lowerLimit = np.array(init_pos) - np.array(sigma_start) * sigma_scale upperLimit = np.array(init_pos) + np.array(sigma_start) * sigma_scale num_param, param_list = param_class.num_param() # run PSO sampler = Sampler(likelihoodModule=self.likelihoodModule) result, chain = sampler.pso(n_particles, n_iterations, lowerLimit, upperLimit, init_pos=init_pos, threadCount=threadCount, mpi=self._mpi, print_key=print_key) lens_result, source_result, lens_light_result, ps_result, cosmo_result = param_class.args2kwargs(result, bijective=True) return lens_result, source_result, lens_light_result, ps_result, cosmo_result, chain, param_list
def pso(self, n_particles, n_iterations, sigma_scale=1, print_key='PSO', threadCount=1)
Particle Swarm Optimization :param n_particles: number of particles in the Particle Swarm Optimization :param n_iterations: number of iterations in the optimization process :param sigma_scale: scaling of the initial parameter spread relative to the width in the initial settings :param print_key: string, printed text when executing this routine :param threadCount: number of CPU threads. If MPI option is set, threadCount=1 :return: result of the best fit, the chain of the best fit parameter after each iteration, list of parameters in same order
3.015376
2.999834
1.005181
#lens_temp = copy.deepcopy(lens_input) kwargs_model = self._updateManager.kwargs_model param_class = self._param_class lens_updated = param_class.update_lens_scaling(self._cosmo_temp, self._lens_temp) source_updated = param_class.image2source_plane(self._source_temp, lens_updated) if compute_bands is None: compute_bands = [True] * len(self.multi_band_list) for i in range(len(self.multi_band_list)): if compute_bands[i] is True: kwargs_data = self.multi_band_list[i][0] kwargs_psf = self.multi_band_list[i][1] kwargs_numerics = self.multi_band_list[i][2] image_model = class_creator.create_image_model(kwargs_data=kwargs_data, kwargs_psf=kwargs_psf, kwargs_numerics=kwargs_numerics, kwargs_model=kwargs_model) psf_iter = PsfFitting(image_model_class=image_model) kwargs_psf = psf_iter.update_iterative(kwargs_psf, lens_updated, source_updated, self._lens_light_temp, self._ps_temp, num_iter=num_iter, no_break=no_break, stacking_method=stacking_method, block_center_neighbour=block_center_neighbour, keep_psf_error_map=keep_psf_error_map, psf_symmetry=psf_symmetry, psf_iter_factor=psf_iter_factor, verbose=verbose) self.multi_band_list[i][1] = kwargs_psf return 0
def psf_iteration(self, num_iter=10, no_break=True, stacking_method='median', block_center_neighbour=0, keep_psf_error_map=True, psf_symmetry=1, psf_iter_factor=1, verbose=True, compute_bands=None)
iterative PSF reconstruction :param num_iter: number of iterations in the process :param no_break: bool, if False the process is stopped as soon as one step leads to a worse reconstruction than the previous step :param stacking_method: string, 'median' and 'mean' supported :param block_center_neighbour: radius of neighbouring point source to be blocked in the reconstruction :param keep_psf_error_map: bool, whether or not to keep the previous psf_error_map :param psf_symmetry: int, number of invariant rotations in the reconstructed PSF :param psf_iter_factor: factor of the new estimated PSF relative to the old one: PSF_updated = (1-psf_iter_factor) * PSF_old + psf_iter_factor * PSF_new :param verbose: bool, print statements :param compute_bands: bool list, if multiple bands, this process can be limited to a subset of bands :return: 0, updated PSF is stored in self.multi_band_list
2.593966
2.57874
1.005904
kwargs_model = self._updateManager.kwargs_model param_class = self._updateManager.param_class(self._lens_temp) lens_updated = param_class.update_lens_scaling(self._cosmo_temp, self._lens_temp) source_updated = param_class.image2source_plane(self._source_temp, lens_updated) if compute_bands is None: compute_bands = [True] * len(self.multi_band_list) for i in range(len(self.multi_band_list)): if compute_bands[i] is True: kwargs_data = self.multi_band_list[i][0] kwargs_psf = self.multi_band_list[i][1] kwargs_numerics = self.multi_band_list[i][2] alignmentFitting = AlignmentFitting(kwargs_data, kwargs_psf, kwargs_numerics, kwargs_model, lens_updated, source_updated, self._lens_light_temp, self._ps_temp) kwargs_data, chain = alignmentFitting.pso(n_particles=n_particles, n_iterations=n_iterations, lowerLimit=lowerLimit, upperLimit=upperLimit, threadCount=threadCount, mpi=self._mpi, print_key='Alignment fitting for band %s ...' % i) print('Align completed for band %s.' % i) print('ra_shift: %s, dec_shift: %s' %(kwargs_data['ra_shift'], kwargs_data['dec_shift'])) self.multi_band_list[i][0] = kwargs_data return 0
def align_images(self, n_particles=10, n_iterations=10, lowerLimit=-0.2, upperLimit=0.2, threadCount=1, compute_bands=None)
aligns the coordinate systems of different exposures within a fixed model parameterisation by executing a PSO with relative coordinate shifts as free parameters :param n_particles: number of particles in the Particle Swarm Optimization :param n_iterations: number of iterations in the optimization process :param lowerLimit: lower limit of relative shift :param upperLimit: upper limit of relative shift :param verbose: bool, print statements :param compute_bands: bool list, if multiple bands, this process can be limited to a subset of bands :return:
3.268951
3.219561
1.015341
self._updateManager.update_options(kwargs_model, kwargs_constraints, kwargs_likelihood) self._updateManager.update_fixed(self._lens_temp, self._source_temp, self._lens_light_temp, self._ps_temp, self._cosmo_temp, lens_add_fixed, source_add_fixed, lens_light_add_fixed, ps_add_fixed, cosmo_add_fixed, lens_remove_fixed, source_remove_fixed, lens_light_remove_fixed, ps_remove_fixed, cosmo_remove_fixed) self._updateManager.update_limits(change_source_lower_limit, change_source_upper_limit) return 0
def update_settings(self, kwargs_model={}, kwargs_constraints={}, kwargs_likelihood={}, lens_add_fixed=[], source_add_fixed=[], lens_light_add_fixed=[], ps_add_fixed=[], cosmo_add_fixed=[], lens_remove_fixed=[], source_remove_fixed=[], lens_light_remove_fixed=[], ps_remove_fixed=[], cosmo_remove_fixed=[], change_source_lower_limit=None, change_source_upper_limit=None)
updates lenstronomy settings "on the fly" :param kwargs_model: kwargs, specified keyword arguments overwrite the existing ones :param kwargs_constraints: kwargs, specified keyword arguments overwrite the existing ones :param kwargs_likelihood: kwargs, specified keyword arguments overwrite the existing ones :param lens_add_fixed: [[i_model, ['param1', 'param2',...], [...]] :param source_add_fixed: [[i_model, ['param1', 'param2',...], [...]] :param lens_light_add_fixed: [[i_model, ['param1', 'param2',...], [...]] :param ps_add_fixed: [[i_model, ['param1', 'param2',...], [...]] :param cosmo_add_fixed: ['param1', 'param2',...] :param lens_remove_fixed: [[i_model, ['param1', 'param2',...], [...]] :param source_remove_fixed: [[i_model, ['param1', 'param2',...], [...]] :param lens_light_remove_fixed: [[i_model, ['param1', 'param2',...], [...]] :param ps_remove_fixed: [[i_model, ['param1', 'param2',...], [...]] :param cosmo_remove_fixed: ['param1', 'param2',...] :return: 0, the settings are overwritten for the next fitting step to come
1.699925
1.721447
0.987497
M = A.T.dot(np.multiply(C_D_inv, A.T).T) if inv_bool: if np.linalg.cond(M) < 5/sys.float_info.epsilon: try: M_inv = np.linalg.inv(M) except: M_inv = np.zeros_like(M) else: M_inv = np.zeros_like(M) R = A.T.dot(np.multiply(C_D_inv, d)) B = M_inv.dot(R) else: if np.linalg.cond(M) < 5/sys.float_info.epsilon: R = A.T.dot(np.multiply(C_D_inv, d)) try: B = np.linalg.solve(M, R).T except: B = np.zeros(len(A.T)) else: B = np.zeros(len(A.T)) M_inv = None image = A.dot(B) return B, M_inv, image
def get_param_WLS(A, C_D_inv, d, inv_bool=True)
returns the parameter values given :param A: response matrix Nd x Ns (Nd = # data points, Ns = # parameters) :param C_D_inv: inverse covariance matrix of the data, Nd x Nd, diagonal form :param d: data array, 1-d Nd :param inv_bool: boolean, wheter returning also the inverse matrix or just solve the linear system :return: 1-d array of parameter values
2.089428
2.214168
0.943663
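The linear algebra in get_param_WLS is the standard weighted-least-squares solution written out as M = A^T C_D^{-1} A, B = M^{-1} A^T C_D^{-1} d and image = A B, with the condition-number check guarding against a (near-)singular M; this restates the code rather than adding behaviour.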
u = r / R if np.min(u) < 1: raise ValueError("3d radius is smaller than projected radius! Does not make sense.") if self._type == 'const_wrong': beta = kwargs['beta'] k = 1./2. * u**(2*beta - 1.) * ((3./2 - beta) * np.sqrt(np.pi) * special.gamma(beta - 1./2)/special.gamma(beta) + beta * self._B(x=1./u**2, a=beta+1./2, b=1./2) - self._B(x=1./u**2, a=beta-1./2, b=1./2)) elif self._type == 'const': beta = kwargs['beta'] k = np.sqrt(1 - 1./u**2) / (1. - 2*beta) + np.sqrt(np.pi)/2 * special.gamma(beta - 1./2)/special.gamma(beta)\ * (3./2 - beta) * u**(2*beta - 1.) * (1 - special.betainc(beta+1./2, 1./2, 1./u**2)) elif self._type == 'isotropic': k = np.sqrt(1 - 1./u**2) elif self._type == 'radial': k = np.pi/4 * u - 1./2 * np.sqrt(1 - 1./u**2) - u/2. * np.arcsin(1./u) elif self._type == 'OsipkovMerritt': r_ani = kwargs['r_ani'] ua = r_ani / R k = (ua**2 + 1./2) / (ua**2 + 1)**(3./2) * (u**2 + ua**2) / u * np.arctan(np.sqrt((u**2 - 1) / (ua**2 + 1))) \ - 1./2/(ua**2 + 1) * np.sqrt(1 -1./u**2) elif self._type == 'Colin': r_ani = kwargs['r_ani'] ua = r_ani / R if ua == 1: k = (1 + 1./u) * np.arccosh(u) - 1./6 * (8./u + 7) * np.sqrt((u-1.)/(u+1.)) else: k = 0.5 / (ua**2 - 1) * np.sqrt(1 - 1./u**2) + (1. + ua/u) * np.cosh(u) - np.sign(ua - 1) * ua * \ (ua**2 - 0.5) / np.abs(ua**2-1)**(3./2) * (1. + ua/u) * np.arccosh((ua*u + 1)/(u + ua)) else: raise ValueError('anisotropy type %s not supported!' % self._type) return k
def K(self, r, R, kwargs)
equation A16 in Mamon & Lokas :param r: 3d radius :param R: projected 2d radius :return:
3.312943
3.314782
0.999445
if self._type == 'const': return self.const_beta(kwargs) elif self._type == 'OsipkovMerritt': return self.ospikov_meritt(r, kwargs) elif self._type == 'Colin': return self.colin(r, kwargs) elif self._type == 'isotropic': return self.isotropic() elif self._type == 'radial': return self.radial() else: raise ValueError('anisotropy type %s not supported!' % self._type)
def beta_r(self, r, kwargs)
returns the anisotropy parameter at a given radius :param r: 3d radius :param kwargs: anisotropy model keyword arguments :return:
3.346062
3.341001
1.001515
return special.betainc(a, b, x) * special.beta(a, b)
def _B(self, x, a, b)
incomplete Beta function as described in Mamon & Lokas A13 :param x: :param a: :param b: :return:
4.770155
5.842515
0.816456
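For reference, scipy.special.betainc(a, b, x) is the regularized incomplete beta function I_x(a, b), so the helper above returns the non-regularized incomplete beta function B(x; a, b) = int_0^x t^(a-1) (1-t)^(b-1) dt = I_x(a, b) * B(a, b), consistent with the Mamon & Lokas A13 reference in the docstring.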
if self._type == 'const': return self.const_beta(kwargs) elif self._type == 'r_ani': return self.beta_r_ani(r, kwargs) else: raise ValueError('anisotropy type %s not supported!' % self._type)
def beta_r(self, r, kwargs)
returns the anisotropy parameter at a given radius :param r: 3d radius :param kwargs: anisotropy model keyword arguments :return:
3.709016
3.769331
0.983999
x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform( numPix=self.numpix, deltapix=self.pixel_scale, subgrid_res=1, left_lower=False, inverse=False) kwargs_data = {'numPix': self.numpix, 'ra_at_xy_0': ra_at_xy_0, 'dec_at_xy_0': dec_at_xy_0, 'transform_pix2angle': Mpix2coord, 'background_rms': self.background_noise, 'exp_time': self.scaled_exposure_time} data_class = Data(kwargs_data) return data_class
def data_class(self)
creates a Data() instance of lenstronomy based on knowledge of the observation :return: instance of Data() class
3.304709
2.942489
1.1231
if self._psf_type == 'GAUSSIAN': psf_type = "GAUSSIAN" fwhm = self._seeing kwargs_psf = {'psf_type': psf_type, 'fwhm': fwhm} elif self._psf_type == 'PIXEL': if self._psf_model is not None: kwargs_psf = {'psf_type': "PIXEL", 'kernel_point_source': self._psf_model} else: raise ValueError("You need to create the class instance with a psf_model!") else: raise ValueError("psf_type %s not supported!" % self._psf_type) psf_class = PSF(kwargs_psf) return psf_class
def psf_class(self)
creates instance of PSF() class based on knowledge of the observations For the full possibility of how to create such an instance, see the PSF() class documentation :return: instance of PSF() class
2.45812
2.474599
0.993341
self.kwargs_model.update(kwargs_model) self.kwargs_constraints.update(kwargs_constraints) self.kwargs_likelihood.update(kwargs_likelihood) return self.kwargs_model, self.kwargs_constraints, self.kwargs_likelihood  # dict.update() returns None, so the dictionaries are updated in place and returned explicitly
def update_options(self, kwargs_model, kwargs_constraints, kwargs_likelihood)
updates the options by overwriting the kwargs with the new ones being added/changed WARNING: some updates may not be valid depending on the model options. Use carefully! :param kwargs_model: :param kwargs_constraints: :param kwargs_likelihood: :return:
1.633454
1.623457
1.006158
if not change_source_lower_limit is None: self._source_lower = self._update_limit(change_source_lower_limit, self._source_lower) if not change_source_upper_limit is None: self._source_upper = self._update_limit(change_source_upper_limit, self._source_upper)
def update_limits(self, change_source_lower_limit=None, change_source_upper_limit=None)
updates the limits (lower and upper) of the update manager instance :param change_source_lower_limit: [[i_model, ['param_name', ...], [value1, value2, ...]]] :return: updates internal state of lower and upper limits accessible from outside
1.790138
1.946956
0.919455
lens_fixed = self._add_fixed(kwargs_lens, self._lens_fixed, lens_add_fixed) lens_fixed = self._remove_fixed(lens_fixed, lens_remove_fixed) source_fixed = self._add_fixed(kwargs_source, self._source_fixed, source_add_fixed) source_fixed = self._remove_fixed(source_fixed, source_remove_fixed) lens_light_fixed = self._add_fixed(kwargs_lens_light, self._lens_light_fixed, lens_light_add_fixed) lens_light_fixed = self._remove_fixed(lens_light_fixed, lens_light_remove_fixed) ps_fixed = self._add_fixed(kwargs_ps, self._ps_fixed, ps_add_fixed) ps_fixed = self._remove_fixed(ps_fixed, ps_remove_fixed) cosmo_fixed = copy.deepcopy(self._cosmo_fixed) for param_name in cosmo_add_fixed: if param_name in cosmo_fixed: pass else: cosmo_fixed[param_name] = kwargs_cosmo[param_name] for param_name in cosmo_remove_fixed: if param_name in cosmo_fixed: del cosmo_fixed[param_name] self._lens_fixed, self._source_fixed, self._lens_light_fixed, self._ps_fixed, self._cosmo_fixed = lens_fixed, source_fixed, lens_light_fixed, ps_fixed, cosmo_fixed
def update_fixed(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo, lens_add_fixed=[], source_add_fixed=[], lens_light_add_fixed=[], ps_add_fixed=[], cosmo_add_fixed=[], lens_remove_fixed=[], source_remove_fixed=[], lens_light_remove_fixed=[], ps_remove_fixed=[], cosmo_remove_fixed=[])
adds the values of the keyword arguments that are stated in the _add_fixed to the existing fixed arguments. :param kwargs_lens: :param kwargs_source: :param kwargs_lens_light: :param kwargs_ps: :param kwargs_cosmo: :param lens_add_fixed: :param source_add_fixed: :param lens_light_add_fixed: :param ps_add_fixed: :param cosmo_add_fixed: :return: updated kwargs fixed
1.291617
1.3534
0.95435
sigma_s2_sum = 0 rho0_r0_gamma = self._rho0_r0_gamma(theta_E, gamma) for i in range(0, rendering_number): sigma_s2_draw = self.vel_disp_one(gamma, rho0_r0_gamma, r_eff, r_ani, R_slit, dR_slit, FWHM) sigma_s2_sum += sigma_s2_draw sigma_s2_average = sigma_s2_sum / rendering_number return np.sqrt(sigma_s2_average)
def vel_disp(self, gamma, theta_E, r_eff, r_ani, R_slit, dR_slit, FWHM, rendering_number=1000)
computes the averaged LOS velocity dispersion in the slit (convolved) :param gamma: power-law slope of the mass profile (isothermal = 2) :param theta_E: Einstein radius of the lens (in arcseconds) :param r_eff: half light radius of the Hernquist profile (or as an approximation of any other profile to be described as a Hernquist profile) :param r_ani: anisotropy radius :param R_slit: length of the slit/box :param dR_slit: width of the slit/box :param FWHM: full width at half maximum of the seeing conditions, described as a Gaussian :param rendering_number: number of spectral renderings drawn from the light distribution that go through the slit of the observations :return: LOS integrated velocity dispersion in units [km/s]
2.498556
2.713642
0.920739
a = 0.551 * r_eff while True: r = self.P_r(a) # draw r R, x, y = self.R_r(r) # draw projected R x_, y_ = self.displace_PSF(x, y, FWHM) # displace via PSF bool = self.check_in_slit(x_, y_, R_slit, dR_slit) if bool is True: break sigma_s2 = self.sigma_s2(r, R, r_ani, a, gamma, rho0_r0_gamma) return sigma_s2
def vel_disp_one(self, gamma, rho0_r0_gamma, r_eff, r_ani, R_slit, dR_slit, FWHM)
computes one realisation of the velocity dispersion in the slit :param gamma: power-law slope of the mass profile (isothermal = 2) :param rho0_r0_gamma: combination of Einstein radius and power-law slope as equation (14) in Suyu+ 2010 :param r_eff: half light radius of the Hernquist profile (or as an approximation of any other profile to be described as a Hernquist profile) :param r_ani: anisotropy radius :param R_slit: length of the slit/box :param dR_slit: width of the slit/box :param FWHM: full width at half maximum of the seeing conditions, described as a Gaussian :return: projected velocity dispersion of a single drawn position in the potential [km/s]
4.757994
4.841959
0.982659
phi = np.random.uniform(0, 2*np.pi) theta = np.random.uniform(0, np.pi) x = r * np.sin(theta) * np.cos(phi) y = r * np.sin(theta) * np.sin(phi) R = np.sqrt(x**2 + y**2) return R, x, y
def R_r(self, r)
draws a random projection from radius r in 2d and 1d :param r: 3d radius :return: R, x, y
1.985911
1.669441
1.189567
beta = self._beta_ani(r, r_ani) return (1 - beta * R**2/r**2) * self.sigma_r2(r, a, gamma, rho0_r0_gamma, r_ani)
def sigma_s2(self, r, R, r_ani, a, gamma, rho0_r0_gamma)
projected velocity dispersion :param r: 3d radius :param R: projected radius :param r_ani: anisotropy radius :param a: Hernquist scale radius :param gamma: power-law slope :param rho0_r0_gamma: combination of Einstein radius and power-law slope :return: projected velocity dispersion
3.931681
4.292385
0.915967
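In equation form, sigma_s2 projects the radial dispersion onto the line of sight via sigma_s^2(r, R) = (1 - beta(r) * R^2 / r^2) * sigma_r^2(r), with beta the anisotropy profile; this restates the return statement of the code above.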
# first term prefac1 = 4*np.pi * const.G * a**(-gamma) * rho0_r0_gamma / (3-gamma) prefac2 = r * (r + a)**3/(r**2 + r_ani**2) hyp1 = vel_util.hyp_2F1(a=2+gamma, b=gamma, c=3+gamma, z=1./(1+r/a)) hyp2 = vel_util.hyp_2F1(a=3, b=gamma, c=1+gamma, z=-a/r) fac = r_ani**2/a**2 * hyp1 / ((2+gamma) * (r/a + 1)**(2+gamma)) + hyp2 / (gamma*(r/a)**gamma) return prefac1 * prefac2 * fac * (self._cosmo.arcsec2phys_lens(1.) * const.Mpc / 1000) ** 2
def sigma_r2(self, r, a, gamma, rho0_r0_gamma, r_ani)
equation (19) in Suyu+ 2010
4.813793
4.775516
1.008015
if model_bool_list is None: model_bool_list = [True] * len(kwargs_lens_light) if numPix is None: numPix = 100 if deltaPix is None: deltaPix = 0.05 x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix) x_grid += center_x y_grid += center_y I_xy = self._lens_light_internal(x_grid, y_grid, kwargs_lens_light, model_bool_list=model_bool_list) e1, e2 = analysis_util.ellipticities(I_xy, x_grid, y_grid) return e1, e2
def ellipticity_lens_light(self, kwargs_lens_light, center_x=0, center_y=0, model_bool_list=None, deltaPix=None, numPix=None)
computes the ellipticity of the lens light; make sure that the window covers all the light, otherwise the moments may give too low answers. :param kwargs_lens_light: :param center_x: :param center_y: :param model_bool_list: :param deltaPix: :param numPix: :return: e1, e2
1.911525
2.047226
0.933714
if model_bool_list is None: model_bool_list = [True] * len(kwargs_lens_light) if numPix is None: numPix = 1000 if deltaPix is None: deltaPix = 0.05 x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix) x_grid += center_x y_grid += center_y lens_light = self._lens_light_internal(x_grid, y_grid, kwargs_lens_light, model_bool_list=model_bool_list) R_h = analysis_util.half_light_radius(lens_light, x_grid, y_grid, center_x, center_y) return R_h
def half_light_radius_lens(self, kwargs_lens_light, center_x=0, center_y=0, model_bool_list=None, deltaPix=None, numPix=None)
computes numerically the half-light-radius of the deflector light :param kwargs_lens_light: :return: half-light radius
1.869171
1.982148
0.943003
if numPix is None: numPix = 1000 if deltaPix is None: deltaPix = 0.005 x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix) x_grid += center_x y_grid += center_y source_light = self.SourceModel.surface_brightness(x_grid, y_grid, kwargs_source) R_h = analysis_util.half_light_radius(source_light, x_grid, y_grid, center_x=center_x, center_y=center_y) return R_h
def half_light_radius_source(self, kwargs_source, center_x=0, center_y=0, deltaPix=None, numPix=None)
computes numerically the half-light-radius of the source light :param kwargs_source: :return: half-light radius
2.148122
2.233968
0.961572
if model_bool_list is None: model_bool_list = [True] * len(kwargs_lens_light) lens_light = np.zeros_like(x_grid) for i, bool in enumerate(model_bool_list): if bool is True: lens_light_i = self.LensLightModel.surface_brightness(x_grid, y_grid, kwargs_lens_light, k=i) lens_light += lens_light_i return lens_light
def _lens_light_internal(self, x_grid, y_grid, kwargs_lens_light, model_bool_list=None)
evaluates only part of the light profiles :param x_grid: :param y_grid: :param kwargs_lens_light: :return:
2.01105
2.205737
0.911736
if 'center_x' in kwargs_lens_light[0]: center_x = kwargs_lens_light[0]['center_x'] center_y = kwargs_lens_light[0]['center_y'] else: center_x, center_y = 0, 0 r_h = self.half_light_radius_lens(kwargs_lens_light, center_x=center_x, center_y=center_y, model_bool_list=model_bool_list, deltaPix=deltaPix, numPix=numPix) r_array = np.logspace(-3, 2, 200) * r_h * 2 x_coords, y_coords = param_util.transform_e1e2(r_array, np.zeros_like(r_array), e1=-e1, e2=-e2) x_coords += center_x y_coords += center_y #r_array = np.logspace(-2, 1, 50) * r_h flux_r = self._lens_light_internal(x_coords, y_coords, kwargs_lens_light, model_bool_list=model_bool_list) amplitudes, sigmas, norm = mge.mge_1d(r_array, flux_r, N=n_comp) return amplitudes, sigmas, center_x, center_y
def multi_gaussian_lens_light(self, kwargs_lens_light, model_bool_list=None, e1=0, e2=0, n_comp=20, deltaPix=None, numPix=None)
multi-gaussian decomposition of the lens light profile (in 1-dimension) :param kwargs_lens_light: :param n_comp: :return:
2.488765
2.570602
0.968164
if 'center_x' in kwargs_lens[0]: center_x = kwargs_lens[0]['center_x'] center_y = kwargs_lens[0]['center_y'] else: raise ValueError('no keyword center_x defined!') theta_E = self._lensModelExtensions.effective_einstein_radius(kwargs_lens) r_array = np.logspace(-4, 2, 200) * theta_E x_coords, y_coords = param_util.transform_e1e2(r_array, np.zeros_like(r_array), e1=-e1, e2=-e2) x_coords += center_x y_coords += center_y #r_array = np.logspace(-2, 1, 50) * theta_E if model_bool_list is None: model_bool_list = [True] * len(kwargs_lens) kappa_s = np.zeros_like(r_array) for i in range(len(kwargs_lens)): if model_bool_list[i] is True: kappa_s += self.LensModel.kappa(x_coords, y_coords, kwargs_lens, k=i) amplitudes, sigmas, norm = mge.mge_1d(r_array, kappa_s, N=n_comp) return amplitudes, sigmas, center_x, center_y
def multi_gaussian_lens(self, kwargs_lens, model_bool_list=None, e1=0, e2=0, n_comp=20)
multi-gaussian lens model in convergence space :param kwargs_lens: :param n_comp: :return:
2.663901
2.744787
0.970531
flux_list = [] R_h_list = [] x_grid, y_grid = util.make_grid(numPix=n_grid, deltapix=delta_grid) kwargs_copy = copy.deepcopy(kwargs_light) for k, kwargs in enumerate(kwargs_light): if 'center_x' in kwargs_copy[k]: kwargs_copy[k]['center_x'] = 0 kwargs_copy[k]['center_y'] = 0 if type == 'lens': light = self.LensLightModel.surface_brightness(x_grid, y_grid, kwargs_copy, k=k) elif type == 'source': light = self.SourceModel.surface_brightness(x_grid, y_grid, kwargs_copy, k=k) else: raise ValueError("type %s not supported!" % type) flux = np.sum(light)*delta_grid**2 / deltaPix**2 R_h = analysis_util.half_light_radius(light, x_grid, y_grid) flux_list.append(flux) R_h_list.append(R_h) return flux_list, R_h_list
def flux_components(self, kwargs_light, n_grid=400, delta_grid=0.01, deltaPix=0.05, type="lens")
computes the total flux in each component of the model
:param kwargs_light:
:param n_grid:
:param delta_grid:
:return:
2.320759
2.353904
0.985919
error_map = np.zeros_like(x_grid)
basis_functions, n_source = self.SourceModel.functions_split(x_grid, y_grid, kwargs_source)
basis_functions = np.array(basis_functions)
if cov_param is not None:
    for i in range(len(error_map)):
        error_map[i] = basis_functions[:, i].T.dot(cov_param[:n_source, :n_source]).dot(basis_functions[:, i])
return error_map
def error_map_source(self, kwargs_source, x_grid, y_grid, cov_param)
variance of the linear source reconstruction in the source plane coordinates, computed by the diagonal elements of the covariance matrix of the source reconstruction as a sum of the errors of the basis set.
:param kwargs_source: keyword arguments of source model
:param x_grid: x-axis of positions to compute error map
:param y_grid: y-axis of positions to compute error map
:param cov_param: covariance matrix of linear inversion parameters
:return: diagonal covariance errors at the positions (x_grid, y_grid)
3.161902
3.272127
0.966314
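The error map above is the diagonal of B^T C B evaluated per position, with B the matrix of basis-function responses and C the covariance of the linear amplitudes. A vectorized numpy sketch of the same propagation (names and toy numbers are illustrative, not lenstronomy's code):

import numpy as np

def error_map_from_covariance(basis_functions, cov_param):
    """Per-position variance of a linear reconstruction sum_k a_k * b_k(x),
    given the covariance cov_param of the amplitudes a_k.
    basis_functions has shape (n_basis, n_positions)."""
    return np.einsum('ki,kl,li->i', basis_functions, cov_param, basis_functions)

# toy example: two basis functions on three positions, uncorrelated amplitude errors
basis = np.array([[1.0, 0.5, 0.0],
                  [0.0, 0.5, 1.0]])
cov = np.diag([0.04, 0.01])
print(error_map_from_covariance(basis, cov))  # [0.04, 0.0125, 0.01]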
# make subgrid
x_grid_sub, y_grid_sub = util.make_grid(numPix=numPix * 5, deltapix=deltaPix, subgrid_res=subgrid_res)
import lenstronomy.Util.mask as mask_util
mask = mask_util.mask_sphere(x_grid_sub, y_grid_sub, center_x, center_y, r=1)
x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix)
# compute light on the subgrid
lightModel = LightModel(light_model_list=lens_light_model_list)
flux = lightModel.surface_brightness(x_grid_sub, y_grid_sub, kwargs_lens_light)
flux_norm = np.sum(flux[mask == 1]) / np.sum(mask)
flux /= flux_norm
from lenstronomy.LensModel.numerical_profile_integrals import ConvergenceIntegrals
integral = ConvergenceIntegrals()
# compute lensing quantities with subgrid
convergence_sub = flux
f_x_sub, f_y_sub = integral.deflection_from_kappa(convergence_sub, x_grid_sub, y_grid_sub, deltaPix=deltaPix / float(subgrid_res))
f_sub = integral.potential_from_kappa(convergence_sub, x_grid_sub, y_grid_sub, deltaPix=deltaPix / float(subgrid_res))
# interpolation function on lensing quantities
x_axes_sub, y_axes_sub = util.get_axes(x_grid_sub, y_grid_sub)
from lenstronomy.LensModel.Profiles.interpol import Interpol
interp_func = Interpol()
interp_func.do_interp(x_axes_sub, y_axes_sub, f_sub, f_x_sub, f_y_sub)
# compute lensing quantities on sparser grid
x_axes, y_axes = util.get_axes(x_grid, y_grid)
f_ = interp_func.function(x_grid, y_grid)
f_x, f_y = interp_func.derivatives(x_grid, y_grid)
# numerical differentials for second order differentials
from lenstronomy.LensModel.numeric_lens_differentials import NumericLens
lens_differential = NumericLens(lens_model_list=['INTERPOL'])
kwargs = [{'grid_interp_x': x_axes_sub, 'grid_interp_y': y_axes_sub, 'f_': f_sub, 'f_x': f_x_sub, 'f_y': f_y_sub}]
f_xx, f_xy, f_yx, f_yy = lens_differential.hessian(x_grid, y_grid, kwargs)
kwargs_interpol = {'grid_interp_x': x_axes, 'grid_interp_y': y_axes, 'f_': util.array2image(f_),
                   'f_x': util.array2image(f_x), 'f_y': util.array2image(f_y),
                   'f_xx': util.array2image(f_xx), 'f_xy': util.array2image(f_xy), 'f_yy': util.array2image(f_yy)}
return kwargs_interpol
def light2mass_interpol(lens_light_model_list, kwargs_lens_light, numPix=100, deltaPix=0.05, subgrid_res=5, center_x=0, center_y=0)
takes a lens light model and turns it numerically into a lens model (with all lens model quantities computed on a grid), then provides an interpolated grid of the quantities.
:param kwargs_lens_light: lens light keyword argument list
:param numPix: number of pixels per axis for the return interpolation
:param deltaPix: interpolation/pixel size
:param center_x: center of the grid
:param center_y: center of the grid
:param subgrid_res: subgrid resolution for the numerical integrals
:return:
2.45956
2.492282
0.986871
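light2mass_interpol relies on lenstronomy's ConvergenceIntegrals to turn the convergence map into deflections. A brute-force sketch of the underlying integral alpha(theta) = (1/pi) * integral of kappa(theta') (theta - theta') / |theta - theta'|^2 d^2theta', checked against the analytic deflection of a uniform-convergence disk (numpy only; O(N^2), so keep the grid small; all names and grid parameters are illustrative):

import numpy as np

def deflection_from_kappa_direct(kappa, x_grid, y_grid, deltaPix):
    """Brute-force pixel sum of the convergence-to-deflection integral."""
    dA = deltaPix ** 2
    f_x = np.zeros_like(x_grid)
    f_y = np.zeros_like(y_grid)
    for j in range(len(x_grid)):
        dx = x_grid - x_grid[j]
        dy = y_grid - y_grid[j]
        r2 = dx ** 2 + dy ** 2
        r2[j] = 1  # avoid 0/0; the self-term contributes nothing since dx = dy = 0
        f_x += kappa[j] * dx / r2
        f_y += kappa[j] * dy / r2
    return f_x * dA / np.pi, f_y * dA / np.pi

# toy check: a uniform convergence disk kappa0 inside radius R deflects as alpha_r = kappa0 * r inside the disk
numPix, deltaPix, kappa0, R = 60, 0.1, 0.5, 2.0
coords = (np.arange(numPix) - numPix / 2. + 0.5) * deltaPix
x_grid, y_grid = [a.ravel() for a in np.meshgrid(coords, coords)]
kappa = np.where(x_grid ** 2 + y_grid ** 2 < R ** 2, kappa0, 0.)
f_x, f_y = deflection_from_kappa_direct(kappa, x_grid, y_grid, deltaPix)
i = np.argmin(np.abs(x_grid - 1.0) + np.abs(y_grid))  # pixel near (1, 0)
print(f_x[i], kappa0 * x_grid[i])  # should agree at the few-percent level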
x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=2. * theta_E / numPix)
x_grid += center_x
y_grid += center_y
mask = mask_util.mask_sphere(x_grid, y_grid, center_x, center_y, theta_E)
kappa_list = []
for i in range(len(kwargs_lens)):
    kappa = self.LensModel.kappa(x_grid, y_grid, kwargs_lens, k=i)
    kappa_mean = np.sum(kappa * mask) / np.sum(mask)
    kappa_list.append(kappa_mean)
return kappa_list
def mass_fraction_within_radius(self, kwargs_lens, center_x, center_y, theta_E, numPix=100)
computes the mean convergence of all the different lens model components within a spherical aperture
:param kwargs_lens: lens model keyword argument list
:param center_x: center of the aperture
:param center_y: center of the aperture
:param theta_E: radius of aperture
:return: list of average convergences for all the model components
2.11166
2.28432
0.924415
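A stand-alone version of the aperture average used above, checked against a singular isothermal sphere, for which the mean convergence inside radius R is theta_E / R (numpy only; names and grid parameters are illustrative):

import numpy as np

def mean_convergence_in_aperture(kappa, x_grid, y_grid, center_x, center_y, radius):
    """Average of a pixelized convergence map inside a circular aperture."""
    mask = (x_grid - center_x) ** 2 + (y_grid - center_y) ** 2 <= radius ** 2
    return np.sum(kappa[mask]) / np.sum(mask)

# toy check with a singular isothermal sphere: kappa = theta_E / (2 r), so <kappa>(<R) = theta_E / R
theta_E, numPix, deltaPix = 1.2, 400, 0.02
coords = (np.arange(numPix) - numPix / 2. + 0.5) * deltaPix
x_grid, y_grid = [a.ravel() for a in np.meshgrid(coords, coords)]
r = np.sqrt(x_grid ** 2 + y_grid ** 2)
kappa_sis = theta_E / (2 * r)
print(mean_convergence_in_aperture(kappa_sis, x_grid, y_grid, 0, 0, theta_E))  # close to 1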
self._search_window, self._x_center, self._y_center = search_window, x_center, y_center
def update_search_window(self, search_window, x_center, y_center)
update the search area for the lens equation solver
:param search_window: window size of the image position search with the lens equation solver
:param x_center: center of search window
:param y_center: center of search window
:return: updated self instances
2.840273
2.977685
0.953853
ra_list, dec_list = self.image_position(kwargs_ps, kwargs_lens, k=k)
amp_list = self.image_amplitude(kwargs_ps, kwargs_lens)
ra_array, dec_array, amp_array = [], [], []
for i, ra in enumerate(ra_list):
    for j in range(len(ra)):
        ra_array.append(ra_list[i][j])
        dec_array.append(dec_list[i][j])
        amp_array.append(amp_list[i][j])
return ra_array, dec_array, amp_array
def point_source_list(self, kwargs_ps, kwargs_lens, k=None)
returns the coordinates and amplitudes of all point sources in a single array
:param kwargs_ps:
:param kwargs_lens:
:return:
1.856506
1.901672
0.976249
amp_list = []
for i, model in enumerate(self._point_source_list):
    if k is None or k == i:
        amp_list.append(model.image_amplitude(kwargs_ps=kwargs_ps[i], kwargs_lens=kwargs_lens,
                                              min_distance=self._min_distance, search_window=self._search_window,
                                              precision_limit=self._precision_limit, num_iter_max=self._num_iter_max,
                                              x_center=self._x_center, y_center=self._y_center))
return amp_list
def image_amplitude(self, kwargs_ps, kwargs_lens, k=None)
returns the image amplitudes
:param kwargs_ps:
:param kwargs_lens:
:return:
2.687915
2.894913
0.928496
amp_list = []
for i, model in enumerate(self._point_source_list):
    amp_list.append(model.source_amplitude(kwargs_ps=kwargs_ps[i], kwargs_lens=kwargs_lens))
return amp_list
def source_amplitude(self, kwargs_ps, kwargs_lens)
returns the source amplitudes
:param kwargs_ps:
:param kwargs_lens:
:return:
2.703993
3.099487
0.8724
x_image_list, y_image_list = self.image_position(kwargs_ps, kwargs_lens)
for i, model in enumerate(self._point_source_list):
    if model in ['LENSED_POSITION', 'SOURCE_POSITION']:
        x_pos = x_image_list[i]
        y_pos = y_image_list[i]
        x_source, y_source = self._lensModel.ray_shooting(x_pos, y_pos, kwargs_lens)
        dist = np.sqrt((x_source - x_source[0]) ** 2 + (y_source - y_source[0]) ** 2)
        if np.max(dist) > tolerance:
            return False
return True
def check_image_positions(self, kwargs_ps, kwargs_lens, tolerance=0.001)
checks whether the point sources in kwargs_ps satisfy the lens equation with a tolerance (computed by ray-tracing in the source plane)
:param kwargs_ps:
:param kwargs_lens:
:param tolerance:
:return: bool: True, if requirement on tolerance is fulfilled, False if not.
2.270944
2.30075
0.987045
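The consistency test above ray-shoots all image positions to the source plane and requires them to coincide within a tolerance. A sketch of the same test for a point-mass lens, whose deflection alpha = theta_E^2 * theta / |theta|^2 and image positions are known analytically (numpy only; function names are hypothetical):

import numpy as np

def check_images_point_mass(x_image, y_image, theta_E, tolerance=1e-3):
    """Ray-shoot candidate image positions through a point-mass lens and test whether
    they all map back to the same source position within a tolerance."""
    x_image, y_image = np.asarray(x_image, dtype=float), np.asarray(y_image, dtype=float)
    r2 = x_image ** 2 + y_image ** 2
    beta_x = x_image - theta_E ** 2 * x_image / r2
    beta_y = y_image - theta_E ** 2 * y_image / r2
    dist = np.sqrt((beta_x - beta_x[0]) ** 2 + (beta_y - beta_y[0]) ** 2)
    return np.max(dist) <= tolerance

# the two images of a point mass for a source at beta on the x-axis: theta = (beta +- sqrt(beta^2 + 4 theta_E^2)) / 2
theta_E, beta = 1.0, 0.3
theta_plus = (beta + np.sqrt(beta ** 2 + 4 * theta_E ** 2)) / 2
theta_minus = (beta - np.sqrt(beta ** 2 + 4 * theta_E ** 2)) / 2
print(check_images_point_mass([theta_plus, theta_minus], [0.0, 0.0], theta_E))         # True
print(check_images_point_mass([theta_plus, theta_minus + 0.05], [0.0, 0.0], theta_E))  # False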
for i, model in enumerate(self.point_source_type_list):
    if model == 'UNLENSED':
        kwargs_ps[i]['point_amp'] *= norm_factor
    elif model in ['LENSED_POSITION', 'SOURCE_POSITION']:
        if self._fixed_magnification_list[i] is True:
            kwargs_ps[i]['source_amp'] *= norm_factor
        else:
            kwargs_ps[i]['point_amp'] *= norm_factor
return kwargs_ps
def re_normalize_flux(self, kwargs_ps, norm_factor)
renormalizes the point source amplitude keywords by a factor
:param kwargs_ps: point source keyword argument list
:param norm_factor:
:return:
3.047189
3.210499
0.949133
mag_finite = np.zeros_like(x_pos)
deltaPix = float(window_size) / grid_number
if shape == 'GAUSSIAN':
    from lenstronomy.LightModel.Profiles.gaussian import Gaussian
    quasar = Gaussian()
elif shape == 'TORUS':
    import lenstronomy.LightModel.Profiles.torus as quasar
else:
    raise ValueError("shape %s not valid for finite magnification computation!" % shape)
x_grid, y_grid = util.make_grid(numPix=grid_number, deltapix=deltaPix, subgrid_res=1)
if polar_grid:
    a = window_size * 0.5
    b = window_size * 0.5 * aspect_ratio
    ellipse_inds = (x_grid * a ** -1) ** 2 + (y_grid * b ** -1) ** 2 <= 1
    x_grid, y_grid = x_grid[ellipse_inds], y_grid[ellipse_inds]
for i in range(len(x_pos)):
    ra, dec = x_pos[i], y_pos[i]
    center_x, center_y = self._lensModel.ray_shooting(ra, dec, kwargs_lens)
    if polar_grid:
        theta = np.arctan2(dec, ra)
        xcoord, ycoord = util.rotate(x_grid, y_grid, theta)
    else:
        xcoord, ycoord = x_grid, y_grid
    betax, betay = self._lensModel.ray_shooting(xcoord + ra, ycoord + dec, kwargs_lens)
    I_image = quasar.function(betax, betay, 1., source_sigma, source_sigma, center_x, center_y)
    mag_finite[i] = np.sum(I_image) * deltaPix ** 2
return mag_finite
def magnification_finite(self, x_pos, y_pos, kwargs_lens, source_sigma=0.003, window_size=0.1, grid_number=100, shape="GAUSSIAN", polar_grid=False, aspect_ratio=0.5)
returns the magnification of an extended source with a Gaussian light profile
:param x_pos: x-axis positions of point sources
:param y_pos: y-axis positions of point sources
:param kwargs_lens: lens model kwargs
:param source_sigma: Gaussian sigma in arcsec in the source plane
:param window_size: size of the window to compute the finite flux
:param grid_number: number of grid cells per axis in the window to numerically compute the flux
:return: numerically computed brightness of the sources
2.449976
2.511309
0.975577
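A self-contained version of the finite-source magnification for a point-mass lens: ray-shoot a window around the image, evaluate a unit-flux Gaussian source at the mapped positions and sum the flux; for a small source this approaches the analytic point-source magnification (numpy only; parameters and names are illustrative):

import numpy as np

def magnification_finite_point_mass(x_img, y_img, theta_E, source_sigma=0.003,
                                    window_size=0.1, grid_number=100):
    """Finite-source magnification for a point-mass lens via ray shooting a grid around the image."""
    deltaPix = float(window_size) / grid_number
    coords = (np.arange(grid_number) - grid_number / 2. + 0.5) * deltaPix
    xx, yy = np.meshgrid(coords, coords)
    x, y = xx.ravel() + x_img, yy.ravel() + y_img
    r2 = x ** 2 + y ** 2
    beta_x = x - theta_E ** 2 * x / r2  # lens equation of the point mass
    beta_y = y - theta_E ** 2 * y / r2
    beta_x0 = x_img - theta_E ** 2 * x_img / (x_img ** 2 + y_img ** 2)  # source center
    beta_y0 = y_img - theta_E ** 2 * y_img / (x_img ** 2 + y_img ** 2)
    I = np.exp(-((beta_x - beta_x0) ** 2 + (beta_y - beta_y0) ** 2) / (2 * source_sigma ** 2))
    I /= 2 * np.pi * source_sigma ** 2  # normalize the source to unit total flux
    return np.sum(I) * deltaPix ** 2

# for a small source this approaches the point-source magnification |1 - theta_E^4 / theta^4|^-1
theta_E, theta = 1.0, 1.2
mu_point = 1. / abs(1 - (theta_E / theta) ** 4)
print(magnification_finite_point_mass(theta, 0.0, theta_E), mu_point)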
ra_1, dec_1, mag_1 = edge1
ra_2, dec_2, mag_2 = edge2
ra_3, dec_3, mag_3 = edge_90
sign_list = np.sign([mag_1, mag_2, mag_3])
if sign_list[0] == sign_list[1] and sign_list[0] == sign_list[2]:  # if all signs are the same
    return [], []
else:
    # split triangle along the long axis
    # execute tiling twice
    # add ra_crit and dec_crit together
    # if max depth has been reached, return the mean value in the triangle
    max_order -= 1
    if max_order <= 0:
        return [(ra_1 + ra_2 + ra_3) / 3], [(dec_1 + dec_2 + dec_3) / 3]
    else:
        # split triangle
        ra_90_ = (ra_1 + ra_2) / 2  # find point in the middle of the long axis to split triangle
        dec_90_ = (dec_1 + dec_2) / 2
        mag_90_ = self._lensModel.magnification(ra_90_, dec_90_, kwargs_lens)
        edge_90_ = [ra_90_, dec_90_, mag_90_]
        ra_crit, dec_crit = self._tiling_crit(edge1=edge_90, edge2=edge1, edge_90=edge_90_, max_order=max_order, kwargs_lens=kwargs_lens)
        ra_crit_2, dec_crit_2 = self._tiling_crit(edge1=edge_90, edge2=edge2, edge_90=edge_90_, max_order=max_order, kwargs_lens=kwargs_lens)
        ra_crit += ra_crit_2
        dec_crit += dec_crit_2
        return ra_crit, dec_crit
def _tiling_crit(self, edge1, edge2, edge_90, max_order, kwargs_lens)
tiles a rectangular triangle and compares the signs of the magnification
:param edge1: [ra_coord, dec_coord, magnification]
:param edge2: [ra_coord, dec_coord, magnification]
:param edge_90: [ra_coord, dec_coord, magnification]
:param max_order: maximal order to fold triangle
:return:
2.313112
2.199791
1.051514
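The recursion above locates critical curves by bisecting triangles wherever the magnification changes sign between corners. A standalone sketch of the same idea using the point-mass magnification 1 / (1 - theta_E^4 / r^4), whose critical curve is the circle r = theta_E (numpy only; a simplified variant, not the exact routine above):

import numpy as np

def mag_point_mass(ra, dec, theta_E=1.0):
    """Signed magnification of a point-mass lens; changes sign across the critical curve r = theta_E."""
    r2 = ra ** 2 + dec ** 2
    return 1. / (1. - theta_E ** 4 / r2 ** 2)

def tiling_crit(edge1, edge2, edge_90, max_order):
    """Recursively bisect a right triangle along its hypotenuse; return candidate critical-curve
    points wherever the magnification changes sign among the three corners."""
    (ra_1, dec_1, mag_1), (ra_2, dec_2, mag_2), (ra_3, dec_3, mag_3) = edge1, edge2, edge_90
    signs = np.sign([mag_1, mag_2, mag_3])
    if signs[0] == signs[1] == signs[2]:
        return [], []
    if max_order <= 1:
        return [(ra_1 + ra_2 + ra_3) / 3.], [(dec_1 + dec_2 + dec_3) / 3.]
    ra_m, dec_m = (ra_1 + ra_2) / 2., (dec_1 + dec_2) / 2.  # midpoint of the hypotenuse
    mid = (ra_m, dec_m, mag_point_mass(ra_m, dec_m))
    ra_a, dec_a = tiling_crit(edge_90, edge1, mid, max_order - 1)
    ra_b, dec_b = tiling_crit(edge_90, edge2, mid, max_order - 1)
    return ra_a + ra_b, dec_a + dec_b

corner = lambda ra, dec: (ra, dec, mag_point_mass(ra, dec))
ra_crit, dec_crit = tiling_crit(corner(0.1, 0.1), corner(2.0, 2.0), corner(2.0, 0.1), max_order=12)
radii = np.sqrt(np.array(ra_crit) ** 2 + np.array(dec_crit) ** 2)
print(radii.min(), radii.max())  # all points close to the Einstein radius of 1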
if 'center_x' in kwargs_lens_list[0]:
    center_x = kwargs_lens_list[0]['center_x']
    center_y = kwargs_lens_list[0]['center_y']
elif self._lensModel.lens_model_list[0] in ['INTERPOL', 'INTERPOL_SCALED']:
    center_x, center_y = 0, 0
else:
    center_x, center_y = 0, 0
numPix = 200
deltaPix = 0.05
x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix)
x_grid += center_x
y_grid += center_y
kappa = self._lensModel.kappa(x_grid, y_grid, kwargs_lens_list, k=k)
if self._lensModel.lens_model_list[0] in ['INTERPOL', 'INTERPOL_SCALED']:
    center_x = x_grid[kappa == np.max(kappa)]
    center_y = y_grid[kappa == np.max(kappa)]
kappa = util.array2image(kappa)
r_array = np.linspace(0.0001, numPix * deltaPix / 2., spacing)
for r in r_array:
    mask = np.array(1 - mask_util.mask_center_2d(center_x, center_y, r, x_grid, y_grid))
    sum_mask = np.sum(mask)
    if sum_mask > 0:
        kappa_mean = np.sum(kappa * mask) / np.sum(mask)
        if kappa_mean < 1:
            return r
print(kwargs_lens_list, "Warning, no Einstein radius computed!")
return r_array[-1]
def effective_einstein_radius(self, kwargs_lens_list, k=None, spacing=1000)
computes the radius with mean convergence = 1
:param kwargs_lens_list: lens model keyword argument list
:param spacing: number of annular bins to compute the convergence (resolution of the Einstein radius estimate)
:return:
2.276795
2.281907
0.99776
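A compact sketch of the Einstein-radius estimate: sweep circular apertures outwards and return the first radius where the mean enclosed convergence drops below 1; a singular isothermal sphere, with mean enclosed convergence theta_E / r, recovers theta_E (numpy only; illustrative grid and names):

import numpy as np

def effective_einstein_radius_sketch(kappa, x_grid, y_grid, center_x=0, center_y=0, spacing=500):
    """First radius at which the mean enclosed convergence drops below 1."""
    r_pix = np.sqrt((x_grid - center_x) ** 2 + (y_grid - center_y) ** 2)
    r_max = np.max(np.abs(x_grid))
    for r in np.linspace(1e-4, r_max, spacing):
        inside = r_pix <= r
        if np.sum(inside) > 0 and np.mean(kappa[inside]) < 1:
            return r
    return None

# toy check: for a singular isothermal sphere <kappa>(<r) = theta_E / r, so the estimate recovers theta_E
theta_E, numPix, deltaPix = 1.5, 500, 0.02
coords = (np.arange(numPix) - numPix / 2. + 0.5) * deltaPix
x_grid, y_grid = [a.ravel() for a in np.meshgrid(coords, coords)]
kappa_sis = theta_E / (2 * np.sqrt(x_grid ** 2 + y_grid ** 2))
print(effective_einstein_radius_sketch(kappa_sis, x_grid, y_grid))  # close to 1.5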
alpha0_x, alpha0_y = 0, 0
kappa_ext = 0
shear1, shear2 = 0, 0
if lens_model_internal_bool is None:
    lens_model_internal_bool = [True] * len(kwargs_lens)
for i, kwargs in enumerate(kwargs_lens):
    if not lens_model_internal_bool[i] is True:
        f_x, f_y = self._lensModel.alpha(0, 0, kwargs_lens, k=i)
        f_xx, f_xy, f_yx, f_yy = self._lensModel.hessian(0, 0, kwargs_lens, k=i)
        alpha0_x += f_x
        alpha0_y += f_y
        kappa_ext += (f_xx + f_yy) / 2.
        shear1 += 1. / 2 * (f_xx - f_yy)
        shear2 += f_xy
return alpha0_x, alpha0_y, kappa_ext, shear1, shear2
def external_lensing_effect(self, kwargs_lens, lens_model_internal_bool=None)
computes deflection, shear and convergence at (0, 0) for the parts of the lens model not included in the main deflector
:param kwargs_lens:
:return:
2.088442
2.085339
1.001488
x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix)
x_grid += center_x_init
y_grid += center_y_init
if bool_list is None:
    kappa = self._lensModel.kappa(x_grid, y_grid, kwargs_lens, k=k)
else:
    kappa = np.zeros_like(x_grid)
    for k in range(len(kwargs_lens)):
        if bool_list[k] is True:
            kappa += self._lensModel.kappa(x_grid, y_grid, kwargs_lens, k=k)
center_x = x_grid[kappa == np.max(kappa)]
center_y = y_grid[kappa == np.max(kappa)]
return center_x, center_y
def lens_center(self, kwargs_lens, k=None, bool_list=None, numPix=200, deltaPix=0.01, center_x_init=0, center_y_init=0)
computes the convergence-weighted center of a lens model
:param kwargs_lens: lens model keyword argument list
:param bool_list: bool list (optional) to include certain models or not
:return: center_x, center_y
1.75485
1.864329
0.941277
theta_E = self.effective_einstein_radius(kwargs_lens_list)
x0 = kwargs_lens_list[0]['center_x']
y0 = kwargs_lens_list[0]['center_y']
x, y = util.points_on_circle(theta_E, num_points)
dr = 0.01
x_dr, y_dr = util.points_on_circle(theta_E + dr, num_points)
if lens_model_internal_bool is None:
    lens_model_internal_bool = [True] * len(kwargs_lens_list)
alpha_E_x_i, alpha_E_y_i = self._lensModel.alpha(x0 + x, y0 + y, kwargs_lens_list, k=lens_model_internal_bool)
alpha_E_r = np.sqrt(alpha_E_x_i ** 2 + alpha_E_y_i ** 2)
alpha_E_dr_x_i, alpha_E_dr_y_i = self._lensModel.alpha(x0 + x_dr, y0 + y_dr, kwargs_lens_list, k=lens_model_internal_bool)
alpha_E_dr = np.sqrt(alpha_E_dr_x_i ** 2 + alpha_E_dr_y_i ** 2)
slope = np.mean(np.log(alpha_E_dr / alpha_E_r) / np.log((theta_E + dr) / theta_E))
gamma = -slope + 2
return gamma
def profile_slope(self, kwargs_lens_list, lens_model_internal_bool=None, num_points=10)
computes the logarithmic power-law slope of a profile
:param kwargs_lens_list: lens model keyword argument list
:param lens_model_internal_bool: bool list, indicate which part of the model to consider
:param num_points: number of estimates around the Einstein radius
:return:
1.975724
1.984356
0.99565
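The slope estimate above uses the logarithmic derivative of the deflection at the Einstein radius: for a circular power law alpha(theta) = theta_E * (theta / theta_E)^(2 - gamma), the slope of log(alpha) versus log(theta) is 2 - gamma. A sketch that recovers gamma from a toy deflection field (numpy only; names are illustrative):

import numpy as np

def profile_slope_from_alpha(alpha_func, theta_E, dr=0.01, num_points=10):
    """Estimate gamma = 2 - dlog(alpha)/dlog(theta), averaged over points around the Einstein radius."""
    phi = np.linspace(0, 2 * np.pi, num_points, endpoint=False)
    alpha_E = alpha_func(theta_E * np.cos(phi), theta_E * np.sin(phi))
    alpha_dr = alpha_func((theta_E + dr) * np.cos(phi), (theta_E + dr) * np.sin(phi))
    slope = np.mean(np.log(alpha_dr / alpha_E) / np.log((theta_E + dr) / theta_E))
    return 2 - slope

# toy check: circular power law with alpha(theta) = theta_E * (theta / theta_E)**(2 - gamma_true)
theta_E, gamma_true = 1.0, 2.3
def alpha_power_law(x, y):
    r = np.sqrt(x ** 2 + y ** 2)
    return theta_E * (r / theta_E) ** (2 - gamma_true)

print(profile_slope_from_alpha(alpha_power_law, theta_E))  # recovers ~2.3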
f_xx, f_xy, f_yx, f_yy = self.hessian(x, y, kwargs, diff=diff)
kappa = 1. / 2 * (f_xx + f_yy)
return kappa
def kappa(self, x, y, kwargs, diff=diff)
computes the convergence :return: kappa
2.730575
3.015474
0.905521
f_xx, f_xy, f_yx, f_yy = self.hessian(x, y, kwargs, diff=diff)
gamma1 = 1. / 2 * (f_xx - f_yy)
gamma2 = f_xy
return gamma1, gamma2
def gamma(self, x, y, kwargs, diff=diff)
computes the shear :return: gamma1, gamma2
2.797839
2.56732
1.08979
f_xx, f_xy, f_yx, f_yy = self.hessian(x, y, kwargs, diff=diff)
det_A = (1 - f_xx) * (1 - f_yy) - f_xy * f_yx
return 1 / det_A
def magnification(self, x, y, kwargs, diff=diff)
computes the magnification
:return: magnification
3.067538
3.394014
0.903808
alpha_ra, alpha_dec = self.alpha(x, y, kwargs)
alpha_ra_dx, alpha_dec_dx = self.alpha(x + diff, y, kwargs)
alpha_ra_dy, alpha_dec_dy = self.alpha(x, y + diff, kwargs)
dalpha_rara = (alpha_ra_dx - alpha_ra) / diff
dalpha_radec = (alpha_ra_dy - alpha_ra) / diff
dalpha_decra = (alpha_dec_dx - alpha_dec) / diff
dalpha_decdec = (alpha_dec_dy - alpha_dec) / diff
f_xx = dalpha_rara
f_yy = dalpha_decdec
f_xy = dalpha_radec
f_yx = dalpha_decra
return f_xx, f_xy, f_yx, f_yy
def hessian(self, x, y, kwargs, diff=diff)
computes the differentials f_xx, f_yy, f_xy, f_yx from f_x and f_y
:return: f_xx, f_xy, f_yx, f_yy
1.794903
1.697983
1.057079
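The four entries above (kappa, gamma, magnification, hessian) all derive from forward finite differences of the deflection field. A standalone sketch using the deflection of a singular isothermal sphere, for which kappa = theta_E / (2 r) provides an analytic check (numpy only; function names are illustrative):

import numpy as np

def alpha_sis(x, y, theta_E=1.0):
    """Deflection of a singular isothermal sphere: alpha = theta_E * theta / |theta|."""
    r = np.sqrt(x ** 2 + y ** 2)
    return theta_E * x / r, theta_E * y / r

def hessian_numerical(alpha_func, x, y, diff=1e-6):
    """Forward finite differences of the deflection field, as in the hessian routine above."""
    a_x, a_y = alpha_func(x, y)
    a_x_dx, a_y_dx = alpha_func(x + diff, y)
    a_x_dy, a_y_dy = alpha_func(x, y + diff)
    f_xx = (a_x_dx - a_x) / diff
    f_xy = (a_x_dy - a_x) / diff
    f_yx = (a_y_dx - a_y) / diff
    f_yy = (a_y_dy - a_y) / diff
    return f_xx, f_xy, f_yx, f_yy

x, y = 1.2, 0.5
f_xx, f_xy, f_yx, f_yy = hessian_numerical(alpha_sis, x, y)
kappa = 0.5 * (f_xx + f_yy)                        # convergence
gamma1, gamma2 = 0.5 * (f_xx - f_yy), f_xy         # shear components
mu = 1. / ((1 - f_xx) * (1 - f_yy) - f_xy * f_yx)  # magnification
print(kappa, 0.5 / np.sqrt(x ** 2 + y ** 2))       # SIS: kappa = theta_E / (2 r), here theta_E = 1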
return self.D_xy(0, z_lens) * self.D_xy(0, z_source) / self.D_xy(z_lens, z_source) * (1 + z_lens)
def D_dt(self, z_lens, z_source)
time-delay distance
:param z_lens: redshift of lens
:param z_source: redshift of source
:return: time-delay distance in units of Mpc
3.565712
3.814447
0.934791
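The time-delay distance is D_dt = (1 + z_lens) * D_l * D_s / D_ls with angular diameter distances. A sketch assuming astropy's FlatLambdaCDM as the cosmology backend (the cosmology class used by the code above may differ):

from astropy.cosmology import FlatLambdaCDM

def time_delay_distance(z_lens, z_source, cosmo):
    """D_dt = (1 + z_lens) * D_l * D_s / D_ls with angular diameter distances, in Mpc."""
    D_l = cosmo.angular_diameter_distance(z_lens).value
    D_s = cosmo.angular_diameter_distance(z_source).value
    D_ls = cosmo.angular_diameter_distance_z1z2(z_lens, z_source).value
    return (1 + z_lens) * D_l * D_s / D_ls

cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
print(time_delay_distance(0.5, 2.0, cosmo))  # a few thousand Mpc for typical lens/source redshifts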
h = self.cosmo.H(0).value / 100.
return 3 * h ** 2 / (8 * np.pi * const.G) * 10 ** 10 * const.Mpc / const.M_sun
def rho_crit(self)
critical density :return: value in M_sol/Mpc^3
4.650388
4.073923
1.141501
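The critical density is rho_crit = 3 H_0^2 / (8 pi G). A unit-checked sketch assuming astropy constants and units (the constants module used in the code above may differ); for H0 = 70 km/s/Mpc the value is about 1.4e11 M_sun / Mpc^3:

import numpy as np
from astropy import constants as const
from astropy import units as u

def rho_crit(H0=70 * u.km / u.s / u.Mpc):
    """Critical density 3 H0^2 / (8 pi G), returned in M_sun / Mpc^3."""
    rho = 3 * H0 ** 2 / (8 * np.pi * const.G)
    return rho.to(u.M_sun / u.Mpc ** 3)

print(rho_crit())  # roughly 1.4e11 M_sun / Mpc^3 for H0 = 70 km/s/Mpc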
if not self._stable_cut:
    return hermite.hermval(x, n_array)
else:
    n_max = len(n_array)
    x_cut = np.sqrt(n_max + 1) * self._cut_scale
    if isinstance(x, int) or isinstance(x, float):
        if x >= x_cut:
            return 0
        else:
            return hermite.hermval(x, n_array)
    else:
        out = np.zeros_like(x)
        out[x < x_cut] = hermite.hermval(x[x < x_cut], n_array, tensor=tensor)
        return out
def hermval(self, x, n_array, tensor=True)
computes the Hermite polynomial as numpy.polynomial.hermite.hermval
difference: for values larger than sqrt(n_max + 1) * cut_scale, the value is set to zero
this should be faster and numerically stable
:param x: array of values
:param n_array: list of coeffs in H_n
:param cut_scale: scale beyond which the polynomial is set to zero
:return: see numpy.polynomial.hermite.hermval
2.649246
2.155106
1.229288
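A sketch of the stabilized Hermite evaluation: the same numpy.polynomial.hermite.hermval call, but the output is forced to zero beyond sqrt(n_max + 1) * cut_scale, where the Gaussian weight of a shapelet has decayed anyway. The cut here is applied symmetrically in |x|, a simplification of the routine above:

import numpy as np
from numpy.polynomial import hermite

def hermval_cut(x, n_array, cut_scale=5):
    """hermval with the output set to zero for |x| >= sqrt(n_max + 1) * cut_scale."""
    x = np.atleast_1d(np.asarray(x, dtype=float))
    x_cut = np.sqrt(len(n_array) + 1) * cut_scale
    out = np.zeros_like(x)
    inside = np.abs(x) < x_cut
    out[inside] = hermite.hermval(x[inside], n_array)
    return out

x = np.linspace(-30, 30, 7)
print(hermval_cut(x, [0, 0, 0, 1]))  # H_3(x), zeroed for |x| >= sqrt(5) * 5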
if not self._interpolation:
    n_array = np.zeros(n + 1)
    n_array[n] = 1
    # attention, this routine calculates every single Hermite polynomial and multiplies it with zero (except the right one)
    return self.hermval(x, n_array, tensor=False)
else:
    return np.interp(x, self.x_grid, self.H_interp[n])
def H_n(self, n, x)
constructs the Hermite polynomial of order n at position x (dimensionless)
:param n: the n-th basis function
:type n: int
:param x: 1-dim position (dimensionless)
:type x: float or numpy array
:returns: array -- H_n(x)
:raises: AttributeError, KeyError
9.537766
9.105321
1.047494
x_ = x - center_x
y_ = y - center_y
n = len(np.atleast_1d(x))
H_x = np.empty((n_order + 1, n))
H_y = np.empty((n_order + 1, n))
if n_order > 170:
    raise ValueError('polynomial order too large', n_order)
for n in range(0, n_order + 1):
    prefactor = 1. / np.sqrt(2 ** n * np.sqrt(np.pi) * math.factorial(n))
    n_array = np.zeros(n + 1)
    n_array[n] = 1
    H_x[n] = self.hermval(x_ / beta, n_array, tensor=False) * prefactor * np.exp(-(x_ / beta) ** 2 / 2.)
    H_y[n] = self.hermval(y_ / beta, n_array, tensor=False) * prefactor * np.exp(-(y_ / beta) ** 2 / 2.)
return H_x, H_y
def pre_calc(self, x, y, beta, n_order, center_x, center_y)
calculates the H_n(x) and H_n(y) for a given x-array and y-array
:param x:
:param y:
:param beta:
:param n_order:
:param center_x:
:param center_y:
:return: list of H_n(x) and H_n(y)
2.747572
2.673935
1.027539
num_param = int((n_max + 1) * (n_max + 2) / 2)
param_list = np.zeros(num_param)
amp_norm = 1. / beta ** 2 * deltaPix ** 2
n1 = 0
n2 = 0
H_x, H_y = self.shapelets.pre_calc(x, y, beta, n_max, center_x, center_y)
for i in range(num_param):
    kwargs_source_shapelet = {'center_x': center_x, 'center_y': center_y, 'n1': n1, 'n2': n2, 'beta': beta, 'amp': amp_norm}
    base = self.shapelets.function(H_x, H_y, **kwargs_source_shapelet)
    param = np.sum(image * base)
    param_list[i] = param
    if n1 == 0:
        n1 = n2 + 1
        n2 = 0
    else:
        n1 -= 1
        n2 += 1
return param_list
def decomposition(self, image, x, y, n_max, beta, deltaPix, center_x=0, center_y=0)
decomposes an image into the shapelet coefficients in the same order as for the function call
:param image:
:param x:
:param y:
:param n_max:
:param beta:
:param deltaPix:
:param center_x:
:param center_y:
:return:
2.932053
2.867177
1.022627
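The decomposition above exploits that the Gauss-Hermite shapelet basis is orthonormal, so coefficients are weighted sums of the image against each basis function. A 1-dimensional round-trip sketch of the same projection (numpy only; phi_n and decompose_1d are illustrative names):

import numpy as np
from numpy.polynomial import hermite
from math import factorial

def phi_n(n, x, beta):
    """1d Cartesian shapelet basis function: normalized Hermite polynomial times a Gaussian."""
    coeffs = np.zeros(n + 1)
    coeffs[n] = 1
    norm = 1. / np.sqrt(2 ** n * np.sqrt(np.pi) * factorial(n) * beta)
    return norm * hermite.hermval(x / beta, coeffs) * np.exp(-(x / beta) ** 2 / 2.)

def decompose_1d(signal, x, n_max, beta, deltaPix):
    """Project a 1d signal onto the shapelet basis; orthonormality makes the
    coefficients simple weighted sums, the same idea as the 2d decomposition above."""
    return np.array([np.sum(signal * phi_n(n, x, beta)) * deltaPix for n in range(n_max + 1)])

# round-trip check: decompose a Gaussian and reconstruct it
deltaPix = 0.01
x = np.arange(-10, 10, deltaPix)
signal = np.exp(-x ** 2 / 2.)
coeffs = decompose_1d(signal, x, n_max=10, beta=1.0, deltaPix=deltaPix)
recon = sum(c * phi_n(n, x, 1.0) for n, c in enumerate(coeffs))
print(np.max(np.abs(recon - signal)))  # tiny residual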