| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
x_shift = x - center_x
y_shift = y - center_y
return amp * (1. + (x_shift**2+y_shift**2)/alpha**2)**(-beta)
|
def function(self, x, y, amp, alpha, beta, center_x, center_y)
|
returns Moffat profile
| 2.475056
| 2.513353
| 0.984763
|
if theta_E < 0:
theta_E = 0
if s_scale < 0.00000001:
s_scale = 0.00000001
if gamma < 1.2:
gamma = 1.2
theta_E = 0
if gamma > 2.9:
gamma = 2.9
theta_E = 0
if q < 0.01:
q = 0.01
theta_E = 0
if q > 1:
q = 1.
theta_E = 0
return theta_E, gamma, q, phi_G, s_scale
|
def _parameter_constraints(self, theta_E, gamma, q, phi_G, s_scale)
|
sets bounds on the parameters for numerical stability
:param theta_E:
:param gamma:
:param q:
:param phi_G:
:param s_scale:
:return:
| 1.800551
| 1.784077
| 1.009234
|
if num_sigma > 3:
raise ValueError("Number of sigma-constraints restircted to three. %s not valid" % num_sigma)
num = len(sample)
num_threshold1 = int(round((num-1)*0.833))
num_threshold2 = int(round((num-1)*0.977249868))
num_threshold3 = int(round((num-1)*0.998650102))
median = np.median(sample)
sorted_sample = np.sort(sample)
if num_sigma > 0:
upper_sigma1 = sorted_sample[num_threshold1-1]
lower_sigma1 = sorted_sample[num-num_threshold1-1]
else:
return median, [[]]
if num_sigma > 1:
upper_sigma2 = sorted_sample[num_threshold2-1]
lower_sigma2 = sorted_sample[num-num_threshold2-1]
else:
return median, [[median-lower_sigma1, upper_sigma1-median]]
if num_sigma > 2:
upper_sigma3 = sorted_sample[num_threshold3-1]
lower_sigma3 = sorted_sample[num-num_threshold3-1]
return median, [[median-lower_sigma1, upper_sigma1-median], [median-lower_sigma2, upper_sigma2-median],
[median-lower_sigma3, upper_sigma3-median]]
else:
return median, [[median-lower_sigma1, upper_sigma1-median], [median-lower_sigma2, upper_sigma2-median]]
|
def compute_lower_upper_errors(sample, num_sigma=1)
|
computes the upper and lower sigma from the median value.
This function gives good error estimates for skewed pdf's
:param sample: 1-D sample
:return: median, lower_sigma, upper_sigma
| 2.087714
| 2.063923
| 1.011527
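A minimal usage sketch for the compute_lower_upper_errors row above (the function as defined there is assumed in scope; the sample is synthetic):

import numpy as np

sample = np.random.lognormal(mean=0., sigma=0.5, size=10000)  # skewed 1-D sample
median, intervals = compute_lower_upper_errors(sample, num_sigma=1)
low1, up1 = intervals[0]  # asymmetric 1-sigma interval below/above the median
print(median, low1, up1)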
|
t = (x-e) / w
return 2. / w * stats.norm.pdf(t) * stats.norm.cdf(a*t)
|
def pdf(self, x, e=0., w=1., a=0.)
|
probability density function
see: https://en.wikipedia.org/wiki/Skew_normal_distribution
:param x: input value
:param e:
:param w:
:param a:
:return:
| 5.021868
| 6.660064
| 0.754027
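A standalone cross-check sketch: the expression in the pdf row above is the standard skew-normal density, so it can be compared against scipy.stats.skewnorm (the test values are arbitrary):

import numpy as np
from scipy import stats

x, e, w, a = 0.3, 0.0, 1.0, 2.0
t = (x - e) / w
manual = 2. / w * stats.norm.pdf(t) * stats.norm.cdf(a * t)
print(np.isclose(manual, stats.skewnorm.pdf(x, a, loc=e, scale=w)))  # True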
|
if skw > 1 or skw < -1:
print("skewness %s out of range" % skw)
skw = 1.
e, w, a = self.map_mu_sigma_skw(mu, sigma, skw)
pdf = self.pdf(x, e, w, a)
return pdf
|
def pdf_new(self, x, mu, sigma, skw)
|
function with different parameterisation
:param x:
:param mu: mean
:param sigma: sigma
:param skw: skewness
:return:
| 4.015105
| 4.182294
| 0.960025
|
sigma2=sigma**2
w2 = sigma2/(1-2*delta**2/np.pi)
w = np.sqrt(w2)
return w
|
def _w_sigma_delta(self, sigma, delta)
|
inverts the variance relation sigma^2 = w^2 * (1 - 2*delta^2/pi) to obtain w
:param sigma:
:param delta:
:return: w parameter
| 5.51817
| 5.914146
| 0.933046
|
delta = self._delta_skw(skw)
a = self._alpha_delta(delta)
w = self._w_sigma_delta(sigma, delta)
e = self._e_mu_w_delta(mu, w, delta)
return e, w, a
|
def map_mu_sigma_skw(self, mu, sigma, skw)
|
map to parameters e, w, a
:param mu: mean
:param sigma: standard deviation
:param skw: skewness
:return: e, w, a
| 4.13241
| 3.201078
| 1.290943
|
if lower_start is None or upper_start is None:
lower_start, upper_start = np.array(self.lower_limit), np.array(self.upper_limit)
print("PSO initialises its particles with default values")
else:
lower_start = np.maximum(lower_start, self.lower_limit)
upper_start = np.minimum(upper_start, self.upper_limit)
if mpi is True:
pso = MpiParticleSwarmOptimizer(self.chain, lower_start, upper_start, n_particles, threads=1)
if pso.isMaster():
print('MPI option chosen')
else:
pso = ParticleSwarmOptimizer(self.chain, lower_start, upper_start, n_particles, threads=threadCount)
if init_pos is None:
init_pos = (upper_start - lower_start) / 2 + lower_start
if init_pos is not None:
pso.gbest.position = init_pos
pso.gbest.velocity = [0]*len(init_pos)
pso.gbest.fitness, _ = self.chain.likelihood(init_pos)
X2_list = []
vel_list = []
pos_list = []
time_start = time.time()
if pso.isMaster():
print('Computing the %s ...' % print_key)
num_iter = 0
for swarm in pso.sample(n_iterations):
X2_list.append(pso.gbest.fitness*2)
vel_list.append(pso.gbest.velocity)
pos_list.append(pso.gbest.position)
num_iter += 1
if pso.isMaster():
if num_iter % 10 == 0:
print(num_iter)
if not mpi:
result = pso.gbest.position
else:
result = MpiUtil.mpiBCast(pso.gbest.position)
#if (pso.isMaster() and mpi is True) or self.chain.sampling_option == 'X2_catalogue':
if mpi is True and not pso.isMaster():
pass
else:
lens_dict, source_dict, lens_light_dict, ps_dict, kwargs_cosmo = self.chain.param.args2kwargs(result)
print(pso.gbest.fitness * 2 / (self.chain.effectiv_num_data_points(lens_dict, source_dict, lens_light_dict, ps_dict)), 'reduced X^2 of best position')
print(pso.gbest.fitness, 'logL')
print(self.chain.effectiv_num_data_points(lens_dict, source_dict, lens_light_dict, ps_dict), 'effective number of data points')
print(lens_dict, 'lens result')
print(source_dict, 'source result')
print(lens_light_dict, 'lens light result')
print(ps_dict, 'point source result')
print(kwargs_cosmo, 'cosmo result')
time_end = time.time()
print(time_end - time_start, 'time used for PSO', print_key)
print('===================')
return result, [X2_list, pos_list, vel_list, []]
|
def pso(self, n_particles, n_iterations, lower_start=None, upper_start=None, threadCount=1, init_pos=None,
mpi=False, print_key='PSO')
|
returns the best fit for the lens model on a catalogue basis with the particle swarm optimizer
| 2.92947
| 2.86003
| 1.024279
|
lowerLimit, upperLimit = self.lower_limit, self.upper_limit
mean_start = np.maximum(lowerLimit, mean_start)
mean_start = np.minimum(upperLimit, mean_start)
low_start = mean_start - sigma_start
high_start = mean_start + sigma_start
low_start = np.maximum(lowerLimit, low_start)
high_start = np.minimum(upperLimit, high_start)
sigma_start = (high_start - low_start) / 2
mean_start = (high_start + low_start) / 2
params = np.array([mean_start, lowerLimit, upperLimit, sigma_start]).T
chain = LikelihoodComputationChain(
min=lowerLimit,
max=upperLimit)
temp_dir = tempfile.mkdtemp("Hammer")
file_prefix = os.path.join(temp_dir, "logs")
#file_prefix = "./lenstronomy_debug"
# chain.addCoreModule(CambCoreModule())
chain.addLikelihoodModule(self.chain)
chain.setup()
store = InMemoryStorageUtil()
#store = None
if mpi is True:
sampler = MpiCosmoHammerSampler(
params=params,
likelihoodComputationChain=chain,
filePrefix=file_prefix,
walkersRatio=walkerRatio,
burninIterations=n_burn,
sampleIterations=n_run,
threadCount=1,
initPositionGenerator=init_pos,
storageUtil=store)
else:
sampler = CosmoHammerSampler(
params=params,
likelihoodComputationChain=chain,
filePrefix=file_prefix,
walkersRatio=walkerRatio,
burninIterations=n_burn,
sampleIterations=n_run,
threadCount=threadCount,
initPositionGenerator=init_pos,
storageUtil=store)
time_start = time.time()
if sampler.isMaster():
print('Computing the MCMC...')
print('Number of walkers = ', len(mean_start)*walkerRatio)
print('Burn-in iterations: ', n_burn)
print('Sampling iterations:', n_run)
sampler.startSampling()
if sampler.isMaster():
time_end = time.time()
print(time_end - time_start, 'time taken for MCMC sampling')
# if sampler._sampler.pool is not None:
# sampler._sampler.pool.close()
try:
shutil.rmtree(temp_dir)
except Exception as ex:
print(ex, 'shutil.rmtree did not work')
pass
#samples = np.loadtxt(file_prefix+".out")
#prob = np.loadtxt(file_prefix+"prob.out")
return store.samples, store.prob
|
def mcmc_CH(self, walkerRatio, n_run, n_burn, mean_start, sigma_start, threadCount=1, init_pos=None, mpi=False)
|
runs mcmc on the parameter space given parameter bounds with CosmoHammerSampler
returns the chain
| 2.867338
| 2.852039
| 1.005364
|
exposure_time_tot = num_exposures * exposure_time
readout_noise_tot = num_exposures * readout_noise ** 2
sky_per_pixel = sky_brightness * pixel_scael ** 2
sigma_bkg = np.sqrt(readout_noise_tot + exposure_time_tot * sky_per_pixel ** 2) / exposure_time_tot
return sigma_bkg
|
def bkg_noise(readout_noise, exposure_time, sky_brightness, pixel_scael, num_exposures=1)
|
computes the expected Gaussian background noise of a pixel in units of counts/second
:param readout_noise: noise added per readout
:param exposure_time: exposure time per exposure (in seconds)
:param sky_brightness: counts per second per unit arcsecond square
:param pixel_scael: size of pixel in units of arcseconds
:param num_exposures: number of exposures (with same exposure time) to be co-added
:return: estimated Gaussian noise sqrt(variance)
| 2.26455
| 2.376397
| 0.952934
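A minimal call sketch for the bkg_noise row above (the function as defined there and numpy as np are assumed in scope; all numbers are placeholders):

sigma_bkg = bkg_noise(readout_noise=4., exposure_time=100., sky_brightness=0.1, pixel_scael=0.05, num_exposures=2)
print(sigma_bkg)  # Gaussian background rms per pixel, in counts/s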
|
if self.PointSource is not None:
self.PointSource.delete_lens_model_cach()
self.PointSource.set_save_cache(bool)
|
def reset_point_source_cache(self, bool=True)
|
deletes all the cache in the point source class and saves it from then on
:return:
| 7.926554
| 7.900605
| 1.003284
|
A = self._response_matrix(self.ImageNumerics.ra_grid_ray_shooting, self.ImageNumerics.dec_grid_ray_shooting,
kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, self.ImageNumerics.mask)
return A
|
def linear_response_matrix(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None)
|
computes the linear response matrix (m x n), with n being the data size and m being the number of coefficients
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:return:
| 4.18527
| 4.970788
| 0.841973
|
d = self.ImageNumerics.image2array(self.Data.data * self.ImageNumerics.mask)
return d
|
def data_response(self)
|
returns the 1d array of the data element that is fitted for (including masking)
:return: 1d numpy array
| 20.897778
| 15.961881
| 1.30923
|
model_error = self.error_map(kwargs_lens, kwargs_ps)
error_map_1d = self.ImageNumerics.image2array(model_error)
C_D_response = self.ImageNumerics.C_D_response + error_map_1d
return C_D_response, model_error
|
def error_response(self, kwargs_lens, kwargs_ps)
|
returns the 1d array of the error estimate corresponding to the data response
:return: 1d numpy array of response, 2d array of additional errors (e.g. point source uncertainties)
| 4.889143
| 5.054728
| 0.967242
|
chi2 = self.reduced_residuals(model, error_map)
return np.sum(chi2**2) / self.num_data_evaluate()
|
def reduced_chi2(self, model, error_map=0)
|
returns reduced chi2
:param model:
:param error_map:
:return:
| 5.075542
| 6.194157
| 0.819408
|
args = self.lensParams.setParams(kwargs_lens)
args += self.souceParams.setParams(kwargs_source)
args += self.lensLightParams.setParams(kwargs_lens_light)
args += self.pointSourceParams.setParams(kwargs_ps)
args += self.cosmoParams.setParams(kwargs_cosmo)
return args
|
def kwargs2args(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None, kwargs_cosmo=None)
|
inverse of getParam function
:param kwargs_lens: keyword arguments depending on model options
:param kwargs_source: keyword arguments depending on model options
:return: tuple of parameters
| 2.002854
| 2.139736
| 0.936029
|
kwargs_source_copy = copy.deepcopy(kwargs_source)
for i, kwargs in enumerate(kwargs_source_copy):
if self._image_plane_source_list[i] is True and not image_plane:
if 'center_x' in kwargs:
x_mapped, y_mapped = self._image2SourceMapping.image2source(kwargs['center_x'], kwargs['center_y'],
kwargs_lens, idex_source=i)
kwargs['center_x'] = x_mapped
kwargs['center_y'] = y_mapped
return kwargs_source_copy
|
def image2source_plane(self, kwargs_source, kwargs_lens, image_plane=False)
|
maps the image-plane position definition of the source models to the source plane
:param kwargs_source:
:param kwargs_lens:
:return:
| 3.201626
| 3.273851
| 0.977939
|
kwargs_lens_updated = copy.deepcopy(kwargs_lens)
if self._mass_scaling is False:
return kwargs_lens_updated
scale_factor_list = np.array(kwargs_cosmo['scale_factor'])
if inverse is True:
scale_factor_list = 1. / np.array(kwargs_cosmo['scale_factor'])
for i, kwargs in enumerate(kwargs_lens_updated):
if self._mass_scaling_list[i] is not False:
scale_factor = scale_factor_list[self._mass_scaling_list[i] - 1]
if 'theta_E' in kwargs:
kwargs['theta_E'] *= scale_factor
elif 'theta_Rs' in kwargs:
kwargs['theta_Rs'] *= scale_factor
elif 'sigma0' in kwargs:
kwargs['sigma0'] *= scale_factor
elif 'k_eff' in kwargs:
kwargs['k_eff'] *= scale_factor
return kwargs_lens_updated
|
def update_lens_scaling(self, kwargs_cosmo, kwargs_lens, inverse=False)
|
multiplies the scaling parameters of the profiles
:param args:
:param kwargs_lens:
:param i:
:param inverse:
:return:
| 2.156718
| 2.175265
| 0.991474
|
if self._solver is True:
image_x, image_y = kwargs_ps[0]['ra_image'], kwargs_ps[0]['dec_image']
image_x, image_y = self.real_image_positions(image_x, image_y, kwargs_cosmo)
dist = self._solver_module.check_solver(image_x, image_y, kwargs_lens)
return np.max(dist)
else:
return 0
|
def check_solver(self, kwargs_lens, kwargs_ps, kwargs_cosmo={})
|
test whether the image positions map back to the same source position
:param kwargs_lens:
:param kwargs_ps:
:return: Euclidean distance between the rayshooting of the image positions
| 3.262689
| 3.165092
| 1.030836
|
num, param_list = self.num_param()
num_linear = self.num_param_linear()
print("The following model options are chosen:")
print("Lens models:", self._lens_model_list)
print("Source models:", self._source_light_model_list)
print("Lens light models:", self._lens_light_model_list)
print("Point source models:", self._point_source_model_list)
print("===================")
print("The following parameters are being fixed:")
print("Lens:", self.lensParams.kwargs_fixed)
print("Source:", self.souceParams.kwargs_fixed)
print("Lens light:", self.lensLightParams.kwargs_fixed)
print("Point source:", self.pointSourceParams.kwargs_fixed)
print("===================")
print("Joint parameters for different models")
print("Joint lens with lens:", self._joint_lens_with_lens)
print("Joint lens with lens light:", self._joint_lens_light_with_lens_light)
print("Joint source with source:", self._joint_source_with_source)
print("Joint lens with light:", self._joint_lens_with_light)
print("Joint source with point source:", self._joint_source_with_point_source)
print("===================")
print("Number of non-linear parameters being sampled: ", num)
print("Parameters being sampled: ", param_list)
print("Number of linear parameters being solved for: ", num_linear)
|
def print_setting(self)
|
prints the settings of the parameter class
:return:
| 2.771905
| 2.734357
| 1.013732
|
rho0 = self._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs, r_core=r_core)
if Rs < 0.0000001:
Rs = 0.0000001
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_ ** 2 + y_ ** 2)
dx, dy = self.coreBurkAlpha(R, Rs, rho0, r_core, x_, y_)
return dx, dy
|
def derivatives(self, x, y, Rs, theta_Rs, r_core, center_x=0, center_y=0)
|
deflection angles
:param x: x coordinate
:param y: y coordinate
:param Rs: scale radius
:param rho0: central core density
:param r_core: core radius
:param center_x:
:param center_y:
:return:
| 3.425037
| 3.46141
| 0.989492
|
if Rs < 0.0001:
Rs = 0.0001
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_ ** 2 + y_ ** 2)
rho0 = self._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs, r_core=r_core)
kappa = self.density_2d(x_, y_, Rs, rho0, r_core)
gamma1, gamma2 = self.cBurkGamma(R, Rs, rho0, r_core, x_, y_)
f_xx = kappa + gamma1
f_yy = kappa - gamma1
f_xy = gamma2
return f_xx, f_yy, f_xy
|
def hessian(self, x, y, Rs, theta_Rs, r_core, center_x=0, center_y=0)
|
:param x: x coordinate
:param y: y coordinate
:param Rs: scale radius
:param rho0: central core density
:param r_core: core radius
:param center_x:
:param center_y:
:return:
| 3.154614
| 3.099449
| 1.017798
|
x = R * Rs ** -1
p = Rs * r_core ** -1
gx = self._G(x, p)
m_2d = 2 * np.pi * rho0 * Rs ** 3 * gx
return m_2d
|
def mass_2d(self, R, Rs, rho0, r_core)
|
analytic solution of the projection integral
(convergence)
:param R: projected distance
:param Rs: scale radius
:param rho0: central core density
:param r_core: core radius
| 4.663474
| 5.217732
| 0.893774
|
x = R * Rs ** -1
p = Rs * r_core ** -1
gx = self._G(x, p)
a = 2 * rho0 * Rs ** 2 * gx / x
return a * ax_x / R, a * ax_y / R
|
def coreBurkAlpha(self, R, Rs, rho0, r_core, ax_x, ax_y)
|
deflection angle
:param R:
:param Rs:
:param rho0:
:param r_core:
:param ax_x:
:param ax_y:
:return:
| 6.569632
| 7.469386
| 0.879541
|
Rs = float(Rs)
b = r_core * Rs ** -1
c = R * Rs ** -1
M0 = 4*np.pi*Rs**3 * rho0
return M0 * (1+b**2) ** -1 * (0.5*np.log(1+c**2) + b**2*np.log(c*b**-1 + 1) - b*np.arctan(c))
|
def mass_3d(self, R, Rs, rho0, r_core)
|
:param R: projected distance
:param Rs: scale radius
:param rho0: central core density
:param r_core: core radius
| 4.372849
| 4.507954
| 0.97003
|
x = R * Rs ** -1
p = Rs * r_core ** -1
hx = self._H(x, p)
return 2 * rho0 * Rs ** 3 * hx
|
def cBurkPot(self, R, Rs, rho0, r_core)
|
:param R: projected distance
:param Rs: scale radius
:param rho0: central core density
:param r_core: core radius
| 7.077143
| 6.85438
| 1.032499
|
prefactor = 0.5 * (1 + p ** 2) ** -1 * p
if isinstance(x, np.ndarray):
inds0 = np.where(x * p == 1)
inds1 = np.where(x * p < 1)
inds2 = np.where(x * p > 1)
func = np.ones_like(x)
func[inds0] = self._u(x[inds0]) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x[inds0]) ** -1))
func[inds1] = self._u(x[inds1]) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x[inds1]) ** -1)) - \
(2 * p * self._g(x[inds1], p) ** -1 * np.arctanh(self._g(x[inds1], p)))
func[inds2] = self._u(x[inds2]) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x[inds2]) ** -1)) - \
(2 * p * self._f(x[inds2], p) ** -1 * np.arctan(self._f(x[inds2], p)))
return prefactor * func
else:
if x * p == 1:
func = self._u(x) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x) ** -1))
elif x * p < 1:
func = self._u(x) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x) ** -1)) - \
(2 * p * self._g(x, p) ** -1 * np.arctanh(self._g(x, p)))
else:
func = self._u(x) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x) ** -1)) - \
(2 * p * self._f(x, p) ** -1 * np.arctan(self._f(x, p)))
return prefactor * func
|
def _F(self, x, p)
|
solution of the projection integral (kappa)
arctanh / arctan function
:param x: r/Rs
:param p: r_core / Rs
:return:
| 1.55789
| 1.56003
| 0.998628
|
prefactor = (p + p ** 3) ** -1 * p
if isinstance(x, np.ndarray):
inds0 = np.where(x * p == 1)
inds1 = np.where(x * p < 1)
inds2 = np.where(x * p > 1)
func = np.ones_like(x)
func[inds0] = np.log(0.25 * x[inds0] ** 2 * p ** 2) + np.pi * p * (self._u(x[inds0]) - 1) + \
2 * p ** 2 * (self._u(x[inds0]) * np.arctanh(self._u(x[inds0]) ** -1) +
np.log(0.5 * x[inds0]))
func[inds1] = np.log(0.25 * x[inds1] ** 2 * p ** 2) + np.pi * p * (self._u(x[inds1]) - 1) + \
2 * p ** 2 * (self._u(x[inds1]) * np.arctanh(self._u(x[inds1]) ** -1) +
np.log(0.5 * x[inds1])) + 2 * self._g(x[inds1], p) * np.arctanh(
self._g(x[inds1], p))
func[inds2] = np.log(0.25 * x[inds2] ** 2 * p ** 2) + np.pi * p * (self._u(x[inds2]) - 1) + \
2 * p ** 2 * (self._u(x[inds2]) * np.arctanh(self._u(x[inds2]) ** -1) +
np.log(0.5 * x[inds2])) - 2 * self._f(x[inds2], p) * np.arctan(
self._f(x[inds2], p))
else:
if x * p == 1:
func = np.log(0.25 * x ** 2 * p ** 2) + np.pi * p * (self._u(x) - 1) + \
2 * p ** 2 * (self._u(x) * np.arctanh(self._u(x) ** -1) +
np.log(0.5 * x))
elif x * p < 1:
func = np.log(0.25 * x ** 2 * p ** 2) + np.pi * p * (self._u(x) - 1) + \
2 * p ** 2 * (self._u(x) * np.arctanh(self._u(x) ** -1) +
np.log(0.5 * x)) + 2 * self._g(x, p) * np.arctanh(self._g(x, p))
else:
func = np.log(0.25 * x ** 2 * p ** 2) + np.pi * p * (self._u(x) - 1) + \
2 * p ** 2 * (self._u(x) * np.arctanh(self._u(x) ** -1) +
np.log(0.5 * x)) - 2 * self._f(x, p) * np.arctan(self._f(x, p))
return func * prefactor
|
def _G(self, x, p)
|
analytic solution of the 2d projected mass integral
integral: 2 * pi * x * kappa * dx
:param x:
:param p:
:return:
| 1.47926
| 1.475303
| 1.002682
|
A = []
for i in range(self._num_bands):
if self._compute_bool[i] is True:
A_i = self._imageModel_list[i].linear_response_matrix(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
if len(A) == 0:
A = A_i
else:
A = np.append(A, A_i, axis=1)
return A
|
def linear_response_matrix(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None)
|
computes the linear response matrix (m x n), with n being the data size and m being the number of coefficients
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:return:
| 2.716209
| 2.924547
| 0.928762
|
d = []
for i in range(self._num_bands):
if self._compute_bool[i] is True:
d_i = self._imageModel_list[i].data_response
if len(d) == 0:
d = d_i
else:
d = np.append(d, d_i)
return d
|
def data_response(self)
|
returns the 1d array of the data element that is fitted for (including masking)
:return: 1d numpy array
| 4.996149
| 4.38412
| 1.139601
|
image_list = []
k = 0
for i in range(self._num_bands):
if self._compute_bool[i] is True:
num_data = self.num_response_list[i]
array_i = array[k:k + num_data]
image_i = self._imageModel_list[i].ImageNumerics.array2image(array_i)
image_list.append(image_i)
k += num_data
return image_list
|
def _array2image_list(self, array)
|
maps the 1d vector of joint exposures into a list of 2d images of the single exposures
:param array: 1d numpy array
:return: list of 2d numpy arrays of size of exposures
| 4.139485
| 4.450428
| 0.930132
|
# generate image
im_sim_list, model_error_list, cov_matrix, param = self.image_linear_solve(kwargs_lens, kwargs_source,
kwargs_lens_light, kwargs_ps,
inv_bool=source_marg)
# compute X^2
logL = 0
index = 0
for i in range(self._num_bands):
if self._compute_bool[i] is True:
logL += self._imageModel_list[i].Data.log_likelihood(im_sim_list[index], self._imageModel_list[i].ImageNumerics.mask, model_error_list[index])
index += 1
if cov_matrix is not None and source_marg:
marg_const = de_lens.marginalisation_const(cov_matrix)
logL += marg_const
return logL
|
def likelihood_data_given_model(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, source_marg=False)
|
computes the likelihood of the data given a model
This is specified with the non-linear parameters and a linear inversion and prior marginalisation.
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:return: log likelihood (natural logarithm) (sum of the log likelihoods of the individual images)
| 4.637409
| 4.525506
| 1.024727
|
wls_list, error_map_list, cov_param_list, param_list = [], [], [], []
for i in range(self._num_bands):
if self._compute_bool[i] is True:
kwargs_source_i = [kwargs_source[k] for k in self._index_source_list[i]]
kwargs_lens_light_i = [kwargs_lens_light[k] for k in self._index_lens_light_list[i]]
wls_model, error_map, cov_param, param = self._imageModel_list[i].image_linear_solve(kwargs_lens,
kwargs_source_i,
kwargs_lens_light_i,
kwargs_ps,
inv_bool=inv_bool)
wls_list.append(wls_model)
error_map_list.append(error_map)
cov_param_list.append(cov_param)
param_list.append(param)
return wls_list, error_map_list, cov_param_list, param_list
|
def image_linear_solve(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, inv_bool=False)
|
computes the image (lens and source surface brightness) with a given lens model.
The linear parameters are computed with a weighted linear least square optimization (i.e. flux normalization of the brightness profiles)
:param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles
:param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles
:param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles
:param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions
:param inv_bool: if True, invert the full linear solver Matrix Ax = y for the purpose of the covariance matrix.
:return: 1d array of surface brightness pixels of the optimal solution of the linear parameters to match the data
| 1.94082
| 1.969807
| 0.985284
|
# generate image
logL = 0
for i in range(self._num_bands):
if self._compute_bool[i] is True:
kwargs_source_i = [kwargs_source[k] for k in self._index_source_list[i]]
kwargs_lens_light_i = [kwargs_lens_light[k] for k in self._index_lens_light_list[i]]
logL += self._imageModel_list[i].likelihood_data_given_model(kwargs_lens, kwargs_source_i,
kwargs_lens_light_i, kwargs_ps,
source_marg=source_marg)
return logL
|
def likelihood_data_given_model(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, source_marg=False)
|
computes the likelihood of the data given a model
This is specified with the non-linear parameters and a linear inversion and prior marginalisation.
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:return: log likelihood (natural logarithm) (sum of the log likelihoods of the individual images)
| 2.363994
| 2.417532
| 0.977854
|
source_x, source_y = self._lensModel.ray_shooting(image_x, image_y, kwargs_lens)
dist = np.sqrt((source_x - source_x[0]) ** 2 + (source_y - source_y[0]) ** 2)
return dist
|
def check_solver(self, image_x, image_y, kwargs_lens)
|
returns the precision of the solver to match the image position
:param kwargs_lens: full lens model (including solved parameters)
:param image_x: point source in image
:param image_y: point source in image
:return: precision of Euclidean distances between the different rays arriving at the image positions
| 2.103826
| 2.263464
| 0.929472
|
kwargs_fixed_lens = self._solver.add_fixed_lens(kwargs_fixed_lens, kwargs_lens_init)
return kwargs_fixed_lens
|
def add_fixed_lens(self, kwargs_fixed_lens, kwargs_lens_init)
|
returns kwargs that are kept fixed during run, depending on options
:param kwargs_options:
:param kwargs_lens:
:return:
| 2.809613
| 3.119948
| 0.900532
|
if not hasattr(self, '_Epsilon_Crit'):
const_SI = const.c ** 2 / (4 * np.pi * const.G) #c^2/(4*pi*G) in units of [kg/m]
conversion = const.Mpc / const.M_sun # converts [kg/m] to [M_sun/Mpc]
factor = const_SI*conversion #c^2/(4*pi*G) in units of [M_sun/Mpc]
self._Epsilon_Crit = self.D_s/(self.D_d*self.D_ds) * factor #[M_sun/Mpc^2]
return self._Epsilon_Crit
|
def epsilon_crit(self)
|
returns the critical projected mass density in units of M_sun/Mpc^2 (physical units)
:return: critical projected mass density
| 4.464348
| 3.95301
| 1.129354
|
mass = self.arcsec2phys_lens(theta_E) ** 2 * np.pi * self.epsilon_crit
return mass
|
def mass_in_theta_E(self, theta_E)
|
mass within Einstein radius (area * epsilon crit) [M_sun]
:param theta_E: Einstein radius [arcsec]
:return: mass within Einstein radius [M_sun]
| 8.545235
| 7.714655
| 1.107663
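Written out, the epsilon_crit and mass_in_theta_E rows above implement Sigma_crit = c^2/(4 pi G) * D_s/(D_d * D_ds) in units of M_sun/Mpc^2 and M(theta_E) = pi * (theta_E * D_d)^2 * Sigma_crit with theta_E converted from arcsec to radians (assuming arcsec2phys_lens(theta_E) = theta_E * arcsec * D_d, as its name and usage suggest): the mass inside the Einstein radius is the critical surface density times the physical area the Einstein ring subtends at the deflector.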
|
Rs = Rs_angle * const.arcsec * self.D_d
theta_scaled = theta_Rs * self.epsilon_crit * self.D_d * const.arcsec
rho0 = theta_scaled / (4 * Rs ** 2 * (1 + np.log(1. / 2.)))
rho0_com = rho0 / self.h**2 * self.a_z(self.z_lens)**3
c = self.nfw_param.c_rho0(rho0_com)
r200 = c * Rs
M200 = self.nfw_param.M_r200(r200 * self.h / self.a_z(self.z_lens)) / self.h
return rho0, Rs, c, r200, M200
|
def nfw_angle2physical(self, Rs_angle, theta_Rs)
|
converts the angular parameters into the physical ones for an NFW profile
:param theta_Rs: observed bending angle at the scale radius in units of arcsec
:param Rs_angle: scale radius in units of arcsec
:return: rho0, Rs (physical), c, r200, M200
| 4.476482
| 4.383677
| 1.02117
|
rho0, Rs, r200 = self.nfwParam_physical(M, c)
Rs_angle = Rs / self.D_d / const.arcsec # Rs in arcsec
theta_Rs = rho0 * (4 * Rs ** 2 * (1 + np.log(1. / 2.)))
return Rs_angle, theta_Rs / self.epsilon_crit / self.D_d / const.arcsec
|
def nfw_physical2angle(self, M, c)
|
converts the physical mass and concentration parameter of an NFW profile into the lensing quantities
:param M: mass enclosed 200 rho_crit in units of M_sun
:param c: NFW concentration parameter (r200/r_s)
:return: Rs_angle (angle at the scale radius) and theta_Rs (observed bending angle at the scale radius), both in units of arcsec
| 6.860722
| 6.154288
| 1.114787
|
r200 = self.nfw_param.r200_M(M * self.h) / self.h * self.a_z(self.z_lens) # physical radius r200
rho0 = self.nfw_param.rho0_c(c) * self.h**2 / self.a_z(self.z_lens)**3 # physical density in M_sun/Mpc**3
Rs = r200/c
return rho0, Rs, r200
|
def nfwParam_physical(self, M, c)
|
returns the NFW parameters in physical units
:param M: physical mass in M_sun
:param c: concentration
:return:
| 3.995978
| 3.8558
| 1.036355
|
v_sigma_c2 = theta_E * const.arcsec / (4*np.pi) * self.D_s / self.D_ds
return np.sqrt(v_sigma_c2)*const.c / 1000
|
def sis_theta_E2sigma_v(self, theta_E)
|
converts the lensing Einstein radius into a physical velocity dispersion
:param theta_E: Einstein radius (in arcsec)
:return: velocity dispersion in units (km/s)
| 5.614827
| 5.822703
| 0.964299
|
theta_E = 4 * np.pi * (v_sigma * 1000./const.c)**2 * self.D_ds / self.D_s / const.arcsec
return theta_E
|
def sis_sigma_v2theta_E(self, v_sigma)
|
converts the velocity dispersion into an Einstein radius for a SIS profile
:param v_sigma: velocity dispersion (km/s)
:return: theta_E (arcsec)
| 6.495914
| 6.223675
| 1.043742
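A standalone numeric sketch of the SIS relation used in the two rows above; the helper name, the distance ratio and the velocity dispersion are made up for illustration:

import numpy as np

c_kms = 299792.458                   # speed of light in km/s
arcsec = np.pi / (180. * 3600.)      # one arcsec in radians

def sis_sigma_v2theta_E_sketch(v_sigma_kms, D_ds_over_D_s):
    # theta_E = 4 pi (sigma_v / c)^2 * D_ds / D_s, converted to arcsec
    return 4 * np.pi * (v_sigma_kms / c_kms) ** 2 * D_ds_over_D_s / arcsec

print(sis_sigma_v2theta_E_sketch(250., D_ds_over_D_s=0.5))  # ~0.9 arcsec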
|
lensCosmo = self._get_cosom(H_0, Om0, Ode0)
return lensCosmo.D_d
|
def D_d(self, H_0, Om0, Ode0=None)
|
angular diameter distance to the deflector
:param H_0: Hubble parameter [km/s/Mpc]
:param Om0: normalized matter density at present time
:return: float [Mpc]
| 6.099614
| 7.422866
| 0.821733
|
lensCosmo = self._get_cosom(H_0, Om0, Ode0)
return lensCosmo.D_s
|
def D_s(self, H_0, Om0, Ode0=None)
|
angular diameter distance to the source
:param H_0: Hubble parameter [km/s/Mpc]
:param Om0: normalized matter density at present time
:return: float [Mpc]
| 6.058127
| 7.389849
| 0.81979
|
lensCosmo = self._get_cosom(H_0, Om0, Ode0)
return lensCosmo.D_ds
|
def D_ds(self, H_0, Om0, Ode0=None)
|
angular diameter distance from deflector to source
:param H_0: Hubble parameter [km/s/Mpc]
:param Om0: normalized matter density at present time
:return: float [Mpc]
| 6.036915
| 7.236716
| 0.834206
|
lensCosmo = self._get_cosom(H_0, Om0, Ode0)
return lensCosmo.D_dt
|
def D_dt(self, H_0, Om0, Ode0=None)
|
time delay distance
:param H_0: Hubble parameter [km/s/Mpc]
:param Om0: normalized matter density at present time
:return: float [Mpc]
| 6.277036
| 7.365797
| 0.852187
|
shapelets = self._createShapelet(coeffs)
n_order = self._get_num_n(len(coeffs))
dx_shapelets = self._dx_shapelets(shapelets, beta)
dy_shapelets = self._dy_shapelets(shapelets, beta)
n = len(np.atleast_1d(x))
if n <= 1:
f_x = self._shapeletOutput(x, y, beta, dx_shapelets, precalc=False)
f_y = self._shapeletOutput(x, y, beta, dy_shapelets, precalc=False)
else:
H_x, H_y = self.pre_calc(x, y, beta, n_order+1, center_x, center_y)
f_x = self._shapeletOutput(H_x, H_y, beta, dx_shapelets)
f_y = self._shapeletOutput(H_x, H_y, beta, dy_shapelets)
return f_x, f_y
|
def derivatives(self, x, y, coeffs, beta, center_x=0, center_y=0)
|
returns df/dx and df/dy of the function
| 2.889927
| 2.810729
| 1.028177
|
shapelets = self._createShapelet(coeffs)
n_order = self._get_num_n(len(coeffs))
dxx_shapelets = self._dxx_shapelets(shapelets, beta)
dyy_shapelets = self._dyy_shapelets(shapelets, beta)
dxy_shapelets = self._dxy_shapelets(shapelets, beta)
n = len(np.atleast_1d(x))
if n <= 1:
f_xx = self._shapeletOutput(x, y, beta, dxx_shapelets, precalc=False)
f_yy = self._shapeletOutput(x, y, beta, dyy_shapelets, precalc=False)
f_xy = self._shapeletOutput(x, y, beta, dxy_shapelets, precalc=False)
else:
H_x, H_y = self.pre_calc(x, y, beta, n_order+2, center_x, center_y)
f_xx = self._shapeletOutput(H_x, H_y, beta, dxx_shapelets)
f_yy = self._shapeletOutput(H_x, H_y, beta, dyy_shapelets)
f_xy = self._shapeletOutput(H_x, H_y, beta, dxy_shapelets)
return f_xx, f_yy, f_xy
|
def hessian(self, x, y, coeffs, beta, center_x=0, center_y=0)
|
returns Hessian matrix of function: d^2f/dx^2, d^2f/dy^2, d^2f/dxdy
| 2.233428
| 2.210883
| 1.010197
|
n_coeffs = len(coeffs)
num_n = self._get_num_n(n_coeffs)
shapelets=np.zeros((num_n+1, num_n+1))
n = 0
k = 0
for coeff in coeffs:
shapelets[n-k][k] = coeff
k += 1
if k == n + 1:
n += 1
k = 0
return shapelets
|
def _createShapelet(self, coeffs)
|
returns a shapelet array out of the coefficients *a, up to order l
:param num_l: order of shapelets
:type num_l: int.
:param coeff: shapelet coefficients
:type coeff: floats
:returns: complex array
:raises: AttributeError, KeyError
| 2.883511
| 3.003345
| 0.9601
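A standalone sketch of the coefficient ordering used in the _createShapelet row above: the flat list fills the triangle of indices (n1, n2) with n1 + n2 = n row by row (the coefficient values and the fixed 3x3 size are illustrative):

import numpy as np

coeffs = [1, 2, 3, 4, 5, 6]      # orders n = 0, 1, 2
shapelets = np.zeros((3, 3))     # large enough for orders up to n = 2
n, k = 0, 0
for coeff in coeffs:
    shapelets[n - k][k] = coeff
    k += 1
    if k == n + 1:
        n += 1
        k = 0
print(shapelets)  # [[1, 3, 6], [2, 5, 0], [4, 0, 0]] as floats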
|
n = len(np.atleast_1d(x))
if n <= 1:
values = 0.
else:
values = np.zeros(len(x[0]))
n = 0
k = 0
i = 0
num_n = len(shapelets)
while i < num_n * (num_n+1)/2:
values += self._function(x, y, shapelets[n-k][k], beta, n-k, k, precalc=precalc)
k += 1
if k == n + 1:
n += 1
k = 0
i += 1
return values
|
def _shapeletOutput(self, x, y, beta, shapelets, precalc=True)
|
returns the numerical values of a set of shapelets at polar coordinates
:param shapelets: set of shapelets [l=,r=,a_lr=]
:type shapelets: array of size (n,3)
:param coordPolar: set of coordinates in polar units
:type coordPolar: array of size (n,2)
:returns: array of same size with coords [r,phi]
:raises: AttributeError, KeyError
| 3.495043
| 3.890432
| 0.898369
|
num_n = len(shapelets)
dx = np.zeros((num_n+1, num_n+1))
for n1 in range(num_n):
for n2 in range(num_n):
amp = shapelets[n1][n2]
dx[n1+1][n2] -= np.sqrt((n1+1)/2.) * amp
if n1 > 0:
dx[n1-1][n2] += np.sqrt(n1/2.) * amp
return dx/beta
|
def _dx_shapelets(self, shapelets, beta)
|
computes the derivative d/dx of the shapelet coeffs
:param shapelets:
:param beta:
:return:
| 2.839108
| 2.844248
| 0.998193
|
num_n = len(shapelets)
dy = np.zeros((num_n+1, num_n+1))
for n1 in range(num_n):
for n2 in range(num_n):
amp = shapelets[n1][n2]
dy[n1][n2+1] -= np.sqrt((n2+1)/2.) * amp
if n2 > 0:
dy[n1][n2-1] += np.sqrt(n2/2.) * amp
return dy/beta
|
def _dy_shapelets(self, shapelets, beta)
|
computes the derivative d/dy of the shapelet coeffs
:param shapelets:
:param beta:
:return:
| 2.761715
| 2.757558
| 1.001508
|
n_array = np.zeros(n+1)
n_array[n] = 1
return hermite.hermval(x, n_array, tensor=False)
|
def H_n(self, n, x)
|
constructs the Hermite polynomial of order n at position x (dimensionless)
:param n: The n'th basis function.
:type n: int.
:param x: 1-dim position (dimensionless)
:type x: float or numpy array.
:returns: array-- H_n(x).
:raises: AttributeError, KeyError
| 4.747534
| 5.565606
| 0.853013
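A standalone sketch of the construction in the H_n row above: a unit coefficient at index n passed to numpy's Hermite evaluator selects the physicists' polynomial H_n (the test point is arbitrary):

import numpy as np
from numpy.polynomial import hermite

def H_n_sketch(n, x):
    coeffs = np.zeros(n + 1)
    coeffs[n] = 1
    return hermite.hermval(x, coeffs, tensor=False)

print(H_n_sketch(2, 1.0))  # H_2(x) = 4x^2 - 2, so 2.0 at x = 1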
|
prefactor = 1./np.sqrt(2**n*np.sqrt(np.pi)*math.factorial(n))
return prefactor*self.H_n(n,x)*np.exp(-x**2/2.)
|
def phi_n(self,n,x)
|
constructs the 1-dim basis function (formula (1) in Refregier et al. 2001)
:param n: The n'th basis function.
:type n: int.
:param x: 1-dim position (dimensionless)
:type x: float or numpy array.
:returns: array-- phi_n(x).
:raises: AttributeError, KeyError
| 3.444309
| 4.112904
| 0.83744
|
lens_model = self._lens_mode_list[0]
if self._solver_type == 'CENTER':
center_x = kwargs_list[0]['center_x']
center_y = kwargs_list[0]['center_y']
x = [center_x, center_y]
elif self._solver_type == 'ELLIPSE':
e1 = kwargs_list[0]['e1']
e2 = kwargs_list[0]['e2']
x = [e1, e2]
elif self._solver_type == 'SHAPELETS':
coeffs = list(kwargs_list[0]['coeffs'])
[c10, c01] = coeffs[1: 3]
x = [c10, c01]
elif self._solver_type == 'THETA_E_PHI':
theta_E = kwargs_list[0]['theta_E']
e1 = kwargs_list[1]['e1']
e2 = kwargs_list[1]['e2']
phi_ext, gamma_ext = param_util.ellipticity2phi_gamma(e1, e2)
x = [theta_E, phi_ext]
else:
raise ValueError("Solver type %s not supported for 2-point solver!" % self._solver_type)
return x
|
def _extract_array(self, kwargs_list)
|
inverse of _update_kwargs
:param kwargs_list:
:return:
| 2.58854
| 2.582156
| 1.002472
|
if self._background_noise is None:
return data_util.bkg_noise(self.read_noise, self._exposure_time, self.sky_brightness, self.pixel_scale,
num_exposures=self._num_exposures)
else:
return self._background_noise
|
def background_noise(self)
|
Gaussian sigma of noise level per pixel (in counts per second)
:return: sqrt(variance) of background noise level
| 4.273021
| 4.669784
| 0.915036
|
if self._data_count_unit == 'ADU':
exp_time = self.ccd_gain * self.exposure_time
else:
exp_time = self.exposure_time
return exp_time
|
def scaled_exposure_time(self)
|
scaled "effective" exposure time of IID counts. This can be used by lenstronomy to estimate the Poisson errors
keeping the assumption that the counts are IIDs (even if they are not).
:return: scaled exposure time
| 3.977553
| 4.030285
| 0.986916
|
# compute counts in units of ADS (as magnitude zero point is defined)
cps = data_util.magnitude2cps(magnitude, magnitude_zero_point=self._magnitude_zero_point)
if self._data_count_unit == 'e-':
cps *= self.ccd_gain
return cps
|
def magnitude2cps(self, magnitude)
|
converts an apparent magnitude to counts per second (in units of the data)
The zero point of an instrument, by definition, is the magnitude of an object that produces one count
(or data number, DN) per second. The magnitude of an arbitrary object producing DN counts in an observation of
length EXPTIME is therefore:
m = -2.5 x log10(DN / EXPTIME) + ZEROPOINT
:param magnitude: magnitude of object
:return: counts per second of object
| 10.503045
| 9.673745
| 1.085727
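Inverting the zero-point definition quoted in the docstring above gives the conversion explicitly (the magnitude and zero point are made-up numbers):

magnitude, magnitude_zero_point = 22.5, 25.0
cps = 10 ** (-0.4 * (magnitude - magnitude_zero_point))
print(cps)  # 10.0 counts per second at the ADU zero point (times ccd_gain for 'e-' units)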
|
Ra, Rs = self._sort_ra_rs(Ra, Rs)
rho = rho0 / ((1 + (r / Ra) ** 2) * (1 + (r / Rs) ** 2))
return rho
|
def density(self, r, rho0, Ra, Rs)
|
computes the density
:param x:
:param y:
:param rho0:
:param Ra:
:param Rs:
:return:
| 3.363724
| 3.935128
| 0.854794
|
Ra, Rs = self._sort_ra_rs(Ra, Rs)
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
sigma0 = self.rho2sigma(rho0, Ra, Rs)
sigma = sigma0 * Ra * Rs / (Rs - Ra) * (1 / np.sqrt(Ra ** 2 + r ** 2) - 1 / np.sqrt(Rs ** 2 + r ** 2))
return sigma
|
def density_2d(self, x, y, rho0, Ra, Rs, center_x=0, center_y=0)
|
projected density
:param x:
:param y:
:param rho0:
:param Ra:
:param Rs:
:param center_x:
:param center_y:
:return:
| 2.817644
| 3.016779
| 0.933991
|
m_3d = 4 * np.pi * rho0 * Ra ** 2 * Rs ** 2 / (Rs ** 2 - Ra ** 2) * (Rs * np.arctan(r / Rs) - Ra * np.arctan(r / Ra))
return m_3d
|
def mass_3d(self, r, rho0, Ra, Rs)
|
mass enclosed within a 3d sphere of radius r
:param r:
:param Ra:
:param Rs:
:return:
| 2.896625
| 3.249695
| 0.891353
|
Ra, Rs = self._sort_ra_rs(Ra, Rs)
sigma0 = self.rho2sigma(rho0, Ra, Rs)
m_2d = 2 * np.pi * sigma0 * Ra * Rs / (Rs - Ra) * (np.sqrt(Ra ** 2 + r ** 2) - Ra - np.sqrt(Rs ** 2 + r ** 2) + Rs)
return m_2d
|
def mass_2d(self, r, rho0, Ra, Rs)
|
projected (2d) mass enclosed within a circle of radius r
:param r:
:param rho0:
:param Ra:
:param Rs:
:return:
| 3.50246
| 3.599039
| 0.973165
|
Ra, Rs = self._sort_ra_rs(Ra, Rs)
sigma0 = self.rho2sigma(rho0, Ra, Rs)
m_tot = 2 * np.pi * sigma0 * Ra * Rs
return m_tot
|
def mass_tot(self, rho0, Ra, Rs)
|
total mass within the profile
:param rho0:
:param Ra:
:param Rs:
:return:
| 3.959361
| 4.327614
| 0.914906
|
Ra, Rs = self._sort_ra_rs(Ra, Rs)
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
pot = 4 * np.pi * rho0 * Ra ** 2 * Rs ** 2 / (Rs ** 2 - Ra ** 2) * (Rs / r * np.arctan(r / Rs) - Ra / r * np.arctan(r / Ra)
+ 1. / 2 * np.log((Rs ** 2 + r ** 2) / (Ra ** 2 + r ** 2)))
return pot
|
def grav_pot(self, x, y, rho0, Ra, Rs, center_x=0, center_y=0)
|
gravitational potential (modulo 4 pi G and rho0 in appropriate units)
:param x:
:param y:
:param rho0:
:param Ra:
:param Rs:
:param center_x:
:param center_y:
:return:
| 2.831388
| 2.933557
| 0.965172
|
return r_a/(1+np.sqrt(1 + r_a**2)) - r_s/(1+np.sqrt(1 + r_s**2))
|
def _f_A20(self, r_a, r_s)
|
equation A20 in Eliasdottir (2013)
:param r_a: r/Ra
:param r_s: r/Rs
:return:
| 3.133049
| 3.629546
| 0.863207
|
return np.pi * rho0 * Ra * Rs / (Rs + Ra)
|
def rho2sigma(self, rho0, Ra, Rs)
|
converts 3d density into 2d projected density parameter
:param rho0:
:param Ra:
:param Rs:
:return:
| 7.505322
| 9.38016
| 0.800127
|
imageData = np.array(inputArray, copy=True)
if scale_min is None:
scale_min = imageData.min()
if scale_max is None:
scale_max = imageData.max()
imageData = imageData.clip(min=scale_min, max=scale_max)
imageData = imageData - scale_min
indices = np.where(imageData < 0)
imageData[indices] = 0.0
imageData = np.sqrt(imageData)
imageData = imageData / math.sqrt(scale_max - scale_min)
return imageData
|
def sqrt(inputArray, scale_min=None, scale_max=None)
|
Performs sqrt scaling of the input numpy array.
@type inputArray: numpy array
@param inputArray: image data array
@type scale_min: float
@param scale_min: minimum data value
@type scale_max: float
@param scale_max: maximum data value
@rtype: numpy array
@return: image data array
| 1.975582
| 2.031999
| 0.972236
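A usage sketch for the sqrt scaling row above (the function as defined there, numpy as np and math are assumed in scope; the image is synthetic):

image = np.random.poisson(lam=50., size=(64, 64)).astype(float)
scaled = sqrt(image)               # scale_min/scale_max default to the data min/max
print(scaled.min(), scaled.max())  # 0.0 and 1.0: the stretch maps the data onto [0, 1]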
|
#extract parameters
kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo = self.param.args2kwargs(args)
#generate image and computes likelihood
self._reset_point_source_cache(bool=True)
logL = 0
if self._check_bounds is True:
penalty, bound_hit = self.check_bounds(args, self._lower_limit, self._upper_limit)
logL -= penalty
if bound_hit:
return logL, None
if self._image_likelihood is True:
logL += self.image_likelihood.logL(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
if self._time_delay_likelihood is True:
logL += self.time_delay_likelihood.logL(kwargs_lens, kwargs_ps, kwargs_cosmo)
if self._check_positive_flux is True:
bool = self.param.check_positive_flux(kwargs_source, kwargs_lens_light, kwargs_ps)
if bool is False:
logL -= 10**10
if self._flux_ratio_likelihood is True:
logL += self.flux_ratio_likelihood.logL(kwargs_lens, kwargs_ps, kwargs_cosmo)
logL += self._position_likelihood.logL(kwargs_lens, kwargs_ps, kwargs_cosmo)
logL += self._prior_likelihood.logL(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo)
self._reset_point_source_cache(bool=False)
return logL, None
|
def logL(self, args)
|
routine to compute X2 given variable parameters for an MCMC/PSO chain
| 2.667423
| 2.68378
| 0.993905
|
penalty = 0
bound_hit = False
for i in range(0, len(args)):
if args[i] < lowerLimit[i] or args[i] > upperLimit[i]:
penalty = 10**15
bound_hit = True
return penalty, bound_hit
|
def check_bounds(args, lowerLimit, upperLimit)
|
checks whether the parameter vector has left its bounds; if so, returns a large penalty
| 2.848909
| 2.605716
| 1.09333
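A minimal sketch for the check_bounds row above (the function is assumed in scope; the bounds and the argument vector are made up):

penalty, bound_hit = check_bounds(args=[0.5, 2.0], lowerLimit=[0., 0.], upperLimit=[1., 1.])
print(penalty, bound_hit)  # 10**15 and True: the second parameter exceeds its upper limit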
|
num_linear = 0
if self._image_likelihood is True:
num_linear = self.image_likelihood.num_param_linear(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
num_param, _ = self.param.num_param()
return self.num_data - num_param - num_linear
|
def effectiv_num_data_points(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
|
returns the effective number of data points considered in the X2 estimation to compute the reduced X2 value
| 3.854848
| 3.680401
| 1.047399
|
if self._multi_source_plane is False:
x_source, y_source = self._lensModel.ray_shooting(x, y, kwargs_lens)
else:
if self._multi_lens_plane is False:
x_alpha, y_alpha = self._lensModel.alpha(x, y, kwargs_lens)
scale_factor = self._deflection_scaling_list[idex_source]
x_source = x - x_alpha * scale_factor
y_source = y - y_alpha * scale_factor
else:
z_stop = self._source_redshift_list[idex_source]
x_comov, y_comov, alpha_x, alpha_y = self._lensModel.lens_model.ray_shooting_partial(0, 0, x, y,
0, z_stop,
kwargs_lens,
keep_range=False,
include_z_start=False)
T_z = self._bkg_cosmo.T_xy(0, z_stop)
x_source = x_comov / T_z
y_source = y_comov / T_z
return x_source, y_source
|
def image2source(self, x, y, kwargs_lens, idex_source)
|
mapping of image plane to source plane coordinates
WARNING: for multi lens plane computations and multi source planes, this computation can be slow and should be
used as rarely as possible.
:param x: image plane coordinate
:param y: image plane coordinate
:param kwargs_lens: lens model kwargs list
:param idex_source: int, index of source model
:return: source plane coordinate corresponding to the source model of index idex_source
| 3.02999
| 3.011349
| 1.00619
|
alpha_ra, alpha_dec = self.derivatives(x, y, amp, sigma, e1, e2, center_x, center_y)
diff = self._diff
alpha_ra_dx, alpha_dec_dx = self.derivatives(x + diff, y, amp, sigma, e1, e2, center_x, center_y)
alpha_ra_dy, alpha_dec_dy = self.derivatives(x, y + diff, amp, sigma, e1, e2, center_x, center_y)
f_xx = (alpha_ra_dx - alpha_ra) / diff
f_xy = (alpha_ra_dy - alpha_ra) / diff
# f_yx = (alpha_dec_dx - alpha_dec)/diff
f_yy = (alpha_dec_dy - alpha_dec) / diff
return f_xx, f_yy, f_xy
|
def hessian(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0)
|
returns Hessian matrix of function: d^2f/dx^2, d^2f/dy^2, d^2f/dxdy
| 1.76195
| 1.745195
| 1.009601
|
sigma_s2_sum = 0
for i in range(0, num):
sigma_s2_draw = self._vel_disp_one(kwargs_profile, kwargs_aperture, kwargs_light, kwargs_anisotropy)
sigma_s2_sum += sigma_s2_draw
sigma_s2_average = sigma_s2_sum/num
return np.sqrt(sigma_s2_average)
|
def vel_disp(self, kwargs_profile, kwargs_aperture, kwargs_light, kwargs_anisotropy, num=1000)
|
computes the averaged LOS velocity dispersion in the slit (convolved)
:param gamma:
:param phi_E:
:param r_eff:
:param r_ani:
:param R_slit:
:param FWHM:
:return:
| 2.179057
| 2.446546
| 0.890667
|
while True:
r = self.lightProfile.draw_light(kwargs_light) # draw r
R, x, y = util.R_r(r) # draw projected R
x_, y_ = util.displace_PSF(x, y, self.FWHM) # displace via PSF
bool = self.aperture.aperture_select(x_, y_, kwargs_aperture)
if bool is True:
break
sigma_s2 = self.sigma_s2(r, R, kwargs_profile, kwargs_anisotropy, kwargs_light)
return sigma_s2
|
def _vel_disp_one(self, kwargs_profile, kwargs_aperture, kwargs_light, kwargs_anisotropy)
|
computes one realisation of the velocity dispersion realized in the slit
:param gamma:
:param rho0_r0_gamma:
:param r_eff:
:param r_ani:
:param R_slit:
:param dR_slit:
:param FWHM:
:return:
| 5.475137
| 5.641561
| 0.9705
|
beta = self.anisotropy.beta_r(r, kwargs_anisotropy)
return (1 - beta * R**2/r**2) * self.sigma_r2(r, kwargs_profile, kwargs_anisotropy, kwargs_light)
|
def sigma_s2(self, r, R, kwargs_profile, kwargs_anisotropy, kwargs_light)
|
projected velocity dispersion
:param r:
:param R:
:param r_ani:
:param a:
:param gamma:
:param phi_E:
:return:
| 3.400757
| 3.595625
| 0.945804
|
return self.jeans_solver.sigma_r2(r, kwargs_profile, kwargs_anisotropy, kwargs_light)
|
def sigma_r2(self, r, kwargs_profile, kwargs_anisotropy, kwargs_light)
|
computes the radial velocity dispersion at radius r (solving the Jeans equation)
:param r:
:return:
| 3.698301
| 3.719888
| 0.994197
|
# first term
theta_E = kwargs_profile['theta_E']
gamma = kwargs_profile['gamma']
r_ani = kwargs_anisotropy['r_ani']
a = 0.551 * kwargs_light['r_eff']
rho0_r0_gamma = self._rho0_r0_gamma(theta_E, gamma)
prefac1 = 4*np.pi * const.G * a**(-gamma) * rho0_r0_gamma / (3-gamma)
prefac2 = r * (r + a)**3/(r**2 + r_ani**2)
hyp1 = vel_util.hyp_2F1(a=2+gamma, b=gamma, c=3+gamma, z=1./(1+r/a))
hyp2 = vel_util.hyp_2F1(a=3, b=gamma, c=1+gamma, z=-a/r)
fac = r_ani**2/a**2 * hyp1 / ((2+gamma) * (r/a + 1)**(2+gamma)) + hyp2 / (gamma*(r/a)**gamma)
sigma2_dim_less = prefac1 * prefac2 * fac
return sigma2_dim_less * (self.cosmo.arcsec2phys_lens(1.) * const.Mpc / 1000)**2
|
def power_law_anisotropy(self, r, kwargs_profile, kwargs_anisotropy, kwargs_light)
|
equation (19) in Suyu+ 2010
:param r:
:return:
| 4.435073
| 4.429794
| 1.001192
|
if self._mass_profile == 'power_law':
if self._anisotropy_type == 'r_ani':
if self._light_profile == 'Hernquist':
sigma_r = self.power_law_anisotropy(r, kwargs_profile, kwargs_anisotropy, kwargs_light)
else:
raise ValueError('light profile %s not supported for Jeans solver' % self._light_profile)
else:
raise ValueError('anisotropy type %s not implemented in Jeans equation modelling' % self._anisotropy_type)
else:
raise ValueError('mass profile type %s not implemented in Jeans solver' % self._mass_profile)
return sigma_r
|
def sigma_r2(self, r, kwargs_profile, kwargs_anisotropy, kwargs_light)
|
solves radial Jeans equation
| 3.134186
| 2.85741
| 1.096862
|
if self._aperture_type == 'shell':
bool_list = self.shell_select(ra, dec, **kwargs_aperture)
elif self._aperture_type == 'slit':
bool_list = self.slit_select(ra, dec, **kwargs_aperture)
else:
raise ValueError("aperture type %s not implemented!" % self._aperture_type)
return bool_list
|
def aperture_select(self, ra, dec, kwargs_aperture)
|
returns a bool list indicating whether the coordinates are within the aperture
:param ra:
:param dec:
:return:
| 2.207493
| 2.156239
| 1.02377
|
wls_list, error_map_list, cov_param_list, param_list = [], [], [], []
for i in range(self._num_bands):
wls_model, error_map, cov_param, param = self._imageModel_list[i].image_linear_solve(kwargs_lens,
kwargs_source,
kwargs_lens_light,
kwargs_else,
inv_bool=inv_bool)
wls_list.append(wls_model)
error_map_list.append(error_map)
cov_param_list.append(cov_param)
param_list.append(param)
return wls_list, error_map_list, cov_param_list, param_list
|
def image_linear_solve(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_else, inv_bool=False)
|
computes the image (lens and source surface brightness) with a given lens model.
The linear parameters are computed with a weighted linear least square optimization (i.e. flux normalization of the brightness profiles)
:param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles
:param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles
:param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles
:param kwargs_else: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions
:param inv_bool: if True, invert the full linear solver Matrix Ax = y for the purpose of the covariance matrix.
:return: 1d array of surface brightness pixels of the optimal solution of the linear parameters to match the data
| 1.929742
| 2.018726
| 0.955921
|
# generate image
logL = 0
for i in range(self._num_bands):
if self._compute_bool[i] is True:
logL += self._imageModel_list[i].likelihood_data_given_model(kwargs_lens, kwargs_source,
kwargs_lens_light, kwargs_ps,
source_marg=source_marg)
return logL
|
def likelihood_data_given_model(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, source_marg=False)
|
computes the likelihood of the data given a model
This is specified with the non-linear parameters and a linear inversion and prior marginalisation.
:param kwargs_lens:
:param kwargs_source:
:param kwargs_lens_light:
:param kwargs_ps:
:return: log likelihood (natural logarithm) (sum of the log likelihoods of the individual images)
| 3.074005
| 3.199445
| 0.960793
|
A = self.linear_response_matrix(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
C_D_response, model_error_list = self.error_response(kwargs_lens, kwargs_ps)
d = self.data_response
param, cov_param, wls_model = de_lens.get_param_WLS(A.T, 1 / C_D_response, d, inv_bool=inv_bool)
kwargs_lens_0 = [kwargs_lens[k] for k in self._idex_lens_list[0]]
_, _, _, _ = self._imageModel_list[0]._update_linear_kwargs(param, kwargs_lens_0, kwargs_source, kwargs_lens_light, kwargs_ps)
wls_list = self._array2image_list(wls_model)
return wls_list, model_error_list, cov_param, param
|
def image_linear_solve(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, inv_bool=False)
|
computes the image (lens and source surface brightness) with a given lens model.
The linear parameters are computed with a weighted linear least square optimization (i.e. flux normalization of the brightness profiles)
:param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles
:param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles
:param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles
:param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions
:param inv_bool: if True, invert the full linear solver Matrix Ax = y for the purpose of the covariance matrix.
:return: 1d array of surface brightness pixels of the optimal solution of the linear parameters to match the data
| 4.890826
| 5.011773
| 0.975867
|
C_D_response, model_error = [], []
for i in range(self._num_bands):
if self._compute_bool[i] is True:
kwargs_lens_i = [kwargs_lens[k] for k in self._idex_lens_list[i]]
C_D_response_i, model_error_i = self._imageModel_list[i].error_response(kwargs_lens_i, kwargs_ps)
model_error.append(model_error_i)
if len(C_D_response) == 0:
C_D_response = C_D_response_i
else:
C_D_response = np.append(C_D_response, C_D_response_i)
return C_D_response, model_error
|
def error_response(self, kwargs_lens, kwargs_ps)
|
returns the 1d array of the error estimate corresponding to the data response
:return: 1d numpy array of response, 2d array of additional errors (e.g. point source uncertainties)
| 2.588604
| 2.630931
| 0.983912
|
c = amp/(2*np.pi*sigma_x*sigma_y)
delta_x = x - center_x
delta_y = y - center_y
exponent = -((delta_x/sigma_x)**2+(delta_y/sigma_y)**2)/2.
return c * np.exp(exponent)
|
def function(self, x, y, amp, sigma_x, sigma_y, center_x=0, center_y=0)
|
returns Gaussian
| 2.225957
| 2.303566
| 0.966309
|
f_ = self.function(x, y, amp, sigma_x, sigma_y, center_x, center_y)
return f_ * (center_x-x)/sigma_x**2, f_ * (center_y-y)/sigma_y**2
|
def derivatives(self, x, y, amp, sigma_x, sigma_y, center_x=0, center_y=0)
|
returns df/dx and df/dy of the function
| 2.2167
| 2.172975
| 1.020122
|
f_ = self.function(x, y, amp, sigma_x, sigma_y, center_x, center_y)
f_xx = f_ * ( (-1./sigma_x**2) + (center_x-x)**2/sigma_x**4 )
f_yy = f_ * ( (-1./sigma_y**2) + (center_y-y)**2/sigma_y**4 )
f_xy = f_ * (center_x-x)/sigma_x**2 * (center_y-y)/sigma_y**2
return f_xx, f_yy, f_xy
|
def hessian(self, x, y, amp, sigma_x, sigma_y, center_x = 0, center_y = 0)
|
returns Hessian matrix of function: d^2f/dx^2, d^2f/dy^2, d^2f/dxdy
| 1.848102
| 1.838537
| 1.005203
|
return 4*np.pi*rho0*Rs**3*(np.log(1.+c)-c/(1.+c))
|
def M200(self, Rs, rho0, c)
|
M(R_200) calculation for NFW profile
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:param c: concentration
:type c: float [4,40]
:return: M(R_200) density
| 4.630567
| 5.399677
| 0.857564
|
return 200./3*self.rhoc*c**3/(np.log(1.+c)-c/(1.+c))
|
def rho0_c(self, c)
|
computes density normalization as a function of concentration parameter
:return: density normalization in h^2/Mpc^3 (comoving)
| 8.013542
| 7.839578
| 1.02219
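A standalone consistency sketch combining the M200 and rho0_c rows above: with rho0 = rho0_c(c) and Rs = r200/c, the enclosed mass is exactly 200 times the critical density times the volume of the r200 sphere (rhoc, c and r200 are placeholder numbers):

import numpy as np

rhoc, c, r200 = 1.0, 5.0, 1.0
mu = np.log(1. + c) - c / (1. + c)
rho0 = 200. / 3. * rhoc * c ** 3 / mu             # rho0_c(c)
M200 = 4 * np.pi * rho0 * (r200 / c) ** 3 * mu    # M200(Rs=r200/c, rho0, c)
print(np.isclose(M200, 200. * rhoc * 4. / 3. * np.pi * r200 ** 3))  # True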
|
if not hasattr(self, '_c_rho0_interp'):
c_array = np.linspace(0.1, 10, 100)
rho0_array = self.rho0_c(c_array)
from scipy import interpolate
self._c_rho0_interp = interpolate.InterpolatedUnivariateSpline(rho0_array, c_array, w=None, bbox=[None, None], k=3)
return self._c_rho0_interp(rho0)
|
def c_rho0(self, rho0)
|
computes the concentration given a comoving overdensity rho0 (inverse of function rho0_c)
:param rho0: density normalization in h^2/Mpc^3 (comoving)
:return: concentration parameter c
| 2.6278
| 2.489365
| 1.055611
|
# fitted parameter values
A = 5.22
B = -0.072
C = -0.42
M_pivot = 2.*10**12
return A*(M/M_pivot)**B*(1+z)**C
|
def c_M_z(self, M, z)
|
fitting function of http://moriond.in2p3.fr/J08/proceedings/duffy.pdf for the mass and redshift dependence of the concentration parameter
:param M: halo mass in M_sun/h
:type M: float or numpy array
:param z: redshift
:type z: float >0
:return: concentration parameter as float
| 8.083947
| 7.078096
| 1.142108
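A worked number for the Duffy et al. fit in the c_M_z row above (the halo mass and redshift are arbitrary):

A, B, C, M_pivot = 5.22, -0.072, -0.42, 2.e12
M, z = 1.e13, 0.5                             # M_sun/h and redshift
print(A * (M / M_pivot) ** B * (1 + z) ** C)  # ~3.9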
|
c = self.c_M_z(M, z)
r200 = self.r200_M(M)
rho0 = self.rho0_c(c)
Rs = r200/c
return r200, rho0, c, Rs
|
def profileMain(self, M, z)
|
returns all needed parameters (in comoving units modulo h) to draw the profile of the main halo
r200 in co-moving Mpc/h
rho_s in h^2/Mpc^3 (co-moving)
Rs in Mpc/h co-moving
c unit less
| 3.58345
| 3.065794
| 1.168849
|
# reconstructed model with given psf
model, error_map, cov_param, param = image_model_class.image_linear_solve(kwargs_lens, kwargs_source,
kwargs_lens_light, kwargs_ps)
#model = image_model_class.image(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
data = image_model_class.Data.data
mask = image_model_class.ImageNumerics.mask
point_source_list = image_model_class.point_sources_list(kwargs_ps, kwargs_lens)
n = len(point_source_list)
model_single_source_list = []
for i in range(n):
model_single_source = (data - model + point_source_list[i]) * mask
model_single_source_list.append(model_single_source)
return model_single_source_list
|
def image_single_point_source(self, image_model_class, kwargs_lens, kwargs_source, kwargs_lens_light,
kwargs_ps)
|
return model without including the point source contributions as a list (for each point source individually)
:param image_model_class: ImageModel class instance
:param kwargs_lens: lens model kwargs list
:param kwargs_source: source model kwargs list
:param kwargs_lens_light: lens light model kwargs list
:param kwargs_ps: point source model kwargs list
:return: list of images with point source isolated
| 3.133347
| 3.135006
| 0.999471
|
n = int(len(kernel_list_new) * symmetry)
angle = 360. / symmetry
kernelsize = len(kernel_old)
kernel_list = np.zeros((n, kernelsize, kernelsize))
i = 0
for kernel_new in kernel_list_new:
for k in range(symmetry):
kernel_rotated = image_util.rotateImage(kernel_new, angle * k)
kernel_norm = kernel_util.kernel_norm(kernel_rotated)
kernel_list[i, :, :] = kernel_norm
i += 1
kernel_old_rotated = np.zeros((symmetry, kernelsize, kernelsize))
for i in range(symmetry):
kernel_old_rotated[i, :, :] = kernel_old
kernel_list_new = np.append(kernel_list, kernel_old_rotated, axis=0)
if stacking_option == 'median':
kernel_new = np.median(kernel_list_new, axis=0)
elif stacking_option == 'mean':
kernel_new = np.mean(kernel_list_new, axis=0)
else:
raise ValueError(" stack_option must be 'median' or 'mean', %s is not supported." % stacking_option)
kernel_new[kernel_new < 0] = 0
kernel_new = kernel_util.kernel_norm(kernel_new)
kernel_return = factor * kernel_new + (1.-factor)* kernel_old
kernel_bkg = copy.deepcopy(kernel_return)
kernel_bkg[kernel_bkg < sigma_bkg] = sigma_bkg
error_map = np.var(kernel_list_new, axis=0) / kernel_bkg**2 / 2.
return kernel_return, error_map
|
def combine_psf(kernel_list_new, kernel_old, sigma_bkg, factor=1, stacking_option='median', symmetry=1)
|
updates psf estimate based on old kernel and several new estimates
:param kernel_list_new: list of new PSF kernels estimated from the point sources in the image
:param kernel_old: old PSF kernel
:param sigma_bkg: estimated background noise in the image
:param factor: weight of updated estimate based on new and old estimate, factor=1 means new estimate,
factor=0 means old estimate
:param stacking_option: option of stacking, mean or median
:param symmetry: imposed symmetry of PSF estimate
:return: updated PSF estimate and error_map associated with it
| 2.295998
| 2.19236
| 1.047272
|
if self._kde_type == 'scipy_gaussian':
density = self._PDF_kernel([D_d, D_delta_t])
logL = np.log(density)
else:
x = np.array([[D_d], [D_delta_t]])
logL = self._kde.score_samples(x.T)
return logL
|
def logLikelihood(self, D_d, D_delta_t)
|
likelihood of the data (represented in the distribution of this class) given a model with predicted angular
diameter distances.
:param D_d: model predicted angular diameter distance
:param D_delta_t: model predicted time-delay distance
:return: loglikelihood (log of KDE value)
| 3.455985
| 3.402915
| 1.015596
|
rho = rho0 / (r/Rs * (1 + (r/Rs))**3)
return rho
|
def density(self, r, rho0, Rs)
|
computes the density
:param x:
:param y:
:param rho0:
:param a:
:param s:
:return:
| 5.951176
| 11.608481
| 0.512658
|
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
X = r/Rs
sigma0 = self.rho2sigma(rho0, Rs)
if isinstance(X, int) or isinstance(X, float):
if X == 1:
X = 1.000001
else:
X[X == 1] = 1.000001
sigma = sigma0 / (X**2-1)**2 * (-3 + (2+X**2)*self._F(X))
return sigma
|
def density_2d(self, x, y, rho0, Rs, center_x=0, center_y=0)
|
projected density
:param x:
:param y:
:param rho0:
:param a:
:param s:
:param center_x:
:param center_y:
:return:
| 3.225533
| 3.395915
| 0.949827
|
mass_3d = 2*np.pi*Rs**3*rho0 * r**2/(r + Rs)**2
return mass_3d
|
def mass_3d(self, r, rho0, Rs)
|
mass enclosed within a 3d sphere of radius r
:param r:
:param a:
:param s:
:return:
| 4.584394
| 5.971422
| 0.767722
|
rho0 = self.sigma2rho(sigma0, Rs)
return self.mass_3d(r, rho0, Rs)
|
def mass_3d_lens(self, r, sigma0, Rs)
|
mass enclosed within a 3d sphere of radius r, for the lens parameterisation
:param sigma0:
:param Rs:
:return:
| 3.688619
| 4.514276
| 0.817101
|
sigma0 = self.rho2sigma(rho0, Rs)
return self.mass_2d_lens(r, sigma0, Rs)
|
def mass_2d(self, r, rho0, Rs)
|
projected (2d) mass enclosed within a circle of radius r
:param r:
:param rho0:
:param a:
:param s:
:return:
| 5.550016
| 5.991891
| 0.926255
|