repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
UltraNest | UltraNest-master/examples/evaluate_evolution.py | """
* same nearest neighbor is rare (in rosen, asymgauss, multishell for <=8 steps)
* likelihood rank difference between chain start and end is
typically ~ 117 (expectation from 400 live points)
in a converged chain (>= 8 steps in rosen, >= 64 steps in asymgauss, >= 8 in multishell)
* distance from start to end > maxradius
in a converged chain (>= 64 steps in rosen, >= 64 steps in asymgauss, >= 64 in multishell)
* angle from start to end > 80° (dot product is near zero)
specifically, 1 sigma lower uncertainty is 90-55 / (ndims-1)**0.5 degrees
in a converged chain (>= 64 steps in rosen, > 64 steps in asymgauss, >= 64 in multishell)
Possible strategies:
* if above(below) the critical value, increase(decrease) nsteps
--> when median is reached by nsteps, there is an equilibrium of increases and decreases
* evaluate convergence at 50% of chain completeness
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import scipy.signal
# Diagnostic plots for step-sampler chain evolution logs.
# Reads whitespace-separated log files (one per run) given on the command line
# and produces five summary figures (index/dist/nsteps/angle/dL PDFs).
# NOTE: indentation was reconstructed from a flattened dump.

Nmax = None  # optional common x-axis upper limit for all figures (None = autoscale)

# One figure per diagnostic; gca() gives the axes we plot into below.
figdL = plt.figure("dL").gca()
figdist = plt.figure("dist").gca()
figangle = plt.figure("angle").gca()
figindex = plt.figure("index").gca()
fignsteps = plt.figure("nsteps").gca()

for filename in sys.argv[1:]:
    print("loading '%s'... " % filename)
    data = np.loadtxt(filename)[:]
    # column layout: Lmin, then 4 blocks of ndim coordinates
    # (ustart, ufinal, tstart, tfinal), then 7 scalar summary columns
    ndim = (data.shape[1] - 1 - 3 - 4) // 4
    Lmin = data[:,0]
    nsteps, maxradius, mean_pair_distance, iLstart, iLfinal, itstart, itfinal = data[:,1+ndim*4:].transpose()
    ustart = data[:,1+ndim*0:1+ndim*1]
    ufinal = data[:,1+ndim*1:1+ndim*2]
    tstart = data[:,1+ndim*2:1+ndim*3]
    tfinal = data[:,1+ndim*3:1+ndim*4]
    # directory name (e.g. the run configuration) serves as plot label
    label = filename.split('/')[-2]
    #figdist.plot(((tfinal - tstart)**2).sum(axis=1)**0.5, label='dist %s' % label)
    # median-filtered start-to-end distance vs. region radius
    l, = figdist.plot(scipy.signal.medfilt(((tfinal - tstart)**2).sum(axis=1)**0.5, 401), label='dist %s' % label)
    figdist.plot(maxradius, ls='--', label='radius %s' % label, color=l.get_color())
    #figdL.plot(mean_pair_distance, label='pair %s' % label)
    # angle between start and end vectors, in degrees (raw + median-filtered)
    angle = np.arccos( (tfinal * tstart).sum(axis=1) / (((tstart**2).sum(axis=1) * (tfinal**2).sum(axis=1)))**0.5) / np.pi * 180
    l, = figangle.plot(angle, label='%s' % label, alpha=0.2)
    figangle.plot(scipy.signal.medfilt(angle, 41), ls='--', color=l.get_color())
    #l, = figdL.plot(Lmin[1:]-Lmin[:-1], label='dL %s' % label, lw=1, alpha=0.25)
    #figdL.plot(scipy.signal.medfilt(Lmin[1:]-Lmin[:-1], 401), color=l.get_color())
    figdL.plot(scipy.signal.medfilt(Lmin[1:]-Lmin[:-1], 401)*400, label='dL %s' % label)
    figdL.plot(Lmin[1:]-Lmin[0], ls='--', label='L %s' % label)
    #figindex.plot(iLstart, ls='-', label='Lstart %s' % label, alpha=0.2)
    #figindex.plot(iLfinal, ls='-', label='Lfinal %s' % label, alpha=0.2)
    #figindex.plot(np.abs(iLfinal-iLstart), ls='-', label='Ldiff %s' % label, alpha=0.2)
    # likelihood-rank difference between chain start and end, and
    # indicator for the (rare) case that the nearest neighbour is unchanged
    l, = figindex.plot(scipy.signal.medfilt(np.abs(iLfinal-iLstart), 401), ls='--', label='Ldiff %s' % label)
    figindex.plot((itstart == itfinal)*400, ls='-', label='same NN %s' % label, alpha=0.2, color=l.get_color())
    #figindex.plot(scipy.signal.medfilt((itstart == itfinal)*400, 401), ls='--', label='same NN %s' % label)
    #figindex.plot(scipy.ndimage.filters.gaussian_filter((itstart == itfinal)*400, 400)*400, ls='--', label='same NN %s' % label)
    fignsteps.plot(nsteps, ls='-', label='%s' % label)

# Monte Carlo reference: expected rank difference / coincidence rate for
# two independent uniform draws out of 400 live points.
a = np.random.randint(0, 400, size=100000)
b = np.random.randint(0, 400, size=100000)
print('delta_index = %.1f, same=%.2f%%' % (np.median(np.abs(a - b)), (a==b).mean()*100), 1/400.)

# finalize and save each figure; the axes are styled first, then the figure
# object is fetched by name for saving
figindex.set_xlim(0, Nmax)
figindex.set_yscale('log')
figindex.hlines(117, *figindex.get_xlim())  # expected rank difference for 400 live points
figindex = plt.figure("index")
plt.legend(loc='best')
figindex.savefig("evolution_index.pdf", bbox_inches='tight')
plt.close()

figdist.set_xlim(0, Nmax)
#figdist.set_yscale('log')
figdist.set_ylabel('Distance')
figdist = plt.figure("dist")
plt.legend(loc='best')
figdist.savefig("evolution_dist.pdf", bbox_inches='tight')
plt.close()

fignsteps.set_xlim(0, Nmax)
#fignsteps.set_yscale('log')
fignsteps.set_ylabel('Number of steps')
fignsteps = plt.figure("nsteps")
plt.legend(loc='best')
fignsteps.savefig("evolution_nsteps.pdf", bbox_inches='tight')
plt.close()

figangle.set_xlim(0, Nmax)
figangle.set_ylim(0, 180)
figangle.set_ylabel('Angle [deg]')
# 1-sigma lower expectation for the angle (ndim from the last file loaded)
figangle.hlines(90-55 / (ndim-1)**0.5, *figangle.get_xlim())
figangle = plt.figure("angle")
plt.legend(loc='best')
figangle.savefig("evolution_angle.pdf", bbox_inches='tight')
plt.close()

figdL.set_xlim(0, Nmax)
figdL.set_ylim(0.1, None)
figdL.set_ylabel('Likelihood difference')
#figdL.set_yscale('log')
figdL = plt.figure("dL")
plt.legend(loc='best')
figdL.savefig("evolution_dL.pdf", bbox_inches='tight')
plt.close()
| 4,618 | 35.370079 | 126 | py |
UltraNest | UltraNest-master/examples/testeggbox.py | import os
import sys
import argparse
import numpy as np
from numpy import cos, pi
def main(args):
    """Run the eggbox toy problem with UltraNest.

    Uses the reactive sampler when ``args.reactive`` is set, otherwise the
    classic NestedSampler. Results and plots go to ``args.log_dir``.
    """
    def loglike(z):
        # eggbox likelihood: product of cosines, strongly multimodal
        chi = (cos(z / 2.)).prod(axis=1)
        return (2. + chi)**5

    def transform(x):
        # map unit cube to [0, 10*pi] per dimension
        return x * 10 * pi

    import string
    paramnames = list(string.ascii_lowercase)[:args.x_dim]

    if args.reactive:
        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
                                        log_dir=args.log_dir, resume='overwrite',
                                        draw_multiple=False, vectorized=True,
                                        )
        sampler.run(log_interval=20,
                    max_num_improvement_loops=10, min_num_live_points=args.num_live_points,)
        sampler.print_results()
        sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform,
                                num_live_points=args.num_live_points, vectorized=True,
                                log_dir=args.log_dir, resume='overwrite')
        #log_dir=None)
        sampler.run(log_interval=20)
        sampler.print_results()
        sampler.plot()
if __name__ == '__main__':
    # command-line interface for the eggbox example
    parser = argparse.ArgumentParser()
    parser.add_argument('--x_dim', type=int, default=2,
                        help="Dimensionality")
    parser.add_argument("--num_live_points", type=int, default=1000)
    parser.add_argument('--noise', type=float, default=-1)
    parser.add_argument('--log_dir', type=str, default='logs/eggbox')
    parser.add_argument('--reactive', action='store_true')
    args = parser.parse_args()
    main(args)
| 1,622 | 29.622642 | 84 | py |
UltraNest | UltraNest-master/examples/test_popsampler.py | #!/usr/bin/env python
# coding: utf-8
import numpy as np
from ultranest import ReactiveNestedSampler
from ultranest.mlfriends import RobustEllipsoidRegion, SimpleRegion, ScalingLayer
import ultranest.popstepsampler
import matplotlib.pyplot as plt
import sys
import argparse
def main(generate_direction_method, ndim, nsteps, popsize, log_dir=None, verbose=False):
    """Benchmark the population random-walk step sampler on an asymmetric Gaussian.

    Parameters
    ----------
    generate_direction_method : str
        Name of a direction-proposal function in ``ultranest.popstepsampler``.
    ndim : int
        Problem dimensionality.
    nsteps : int
        Number of steps per walker.
    popsize : int
        Population size of the step sampler.
    log_dir : str, optional
        UltraNest output directory.
    verbose : bool
        Enable step-sampler logging.
    """
    np.random.seed(1)
    # per-dimension widths span 10^-1 .. 10^-5; centers kept away from edges
    logsigma = -5
    sigma = np.logspace(-1, logsigma, ndim)
    width = 1 - 5 * sigma
    width[width < 1e-20] = 1e-20
    centers = (np.sin(np.arange(ndim)/2.) * width + 1.) / 2.
    #sigma[:] = 0.01
    #centers[:] = 0.5
    norm = -0.5 * np.log(2 * np.pi * sigma**2).sum()

    def loglike(theta):
        return -0.5 * (((theta - centers) / sigma)**2).sum(axis=1) + norm

    def transform(x):
        return x

    paramnames = ['param%d' % (i+1) for i in range(ndim)]
    sampler = ReactiveNestedSampler(
        paramnames, loglike, transform=transform,
        vectorized=True, log_dir=log_dir, resume=True)
    # ellipsoidal:
    region_class = RobustEllipsoidRegion
    # ellipsoidal axis-aligned:
    #sampler.transform_layer_class = ScalingLayer
    #region_class = SimpleRegion
    sampler.stepsampler = ultranest.popstepsampler.PopulationRandomWalkSampler(
        popsize=popsize, nsteps=nsteps, scale=1. / len(paramnames),
        generate_direction=getattr(ultranest.popstepsampler, generate_direction_method), log=verbose,
        #logfile=sys.stderr
    )
    results = sampler.run(
        frac_remain=0.01, update_interval_volume_fraction=0.01,
        max_num_improvement_loops=0, min_num_live_points=400,
        viz_callback=None, region_class=region_class
    )
    sampler.print_results()
    # plot recovered means (offset from truth, in units of sigma)
    stats = results['posterior']
    plt.errorbar(x=np.arange(ndim), y=stats['mean'] - centers, yerr=stats['stdev'] / sigma, color='k')
    plt.savefig('populationstepsampler_%d.pdf' % ndim)
    plt.close()
    #sampler.plot_trace()
if __name__ == '__main__':
    # command-line interface for the population step sampler benchmark
    parser = argparse.ArgumentParser()
    parser.add_argument('--x_dim', type=int, default=2,
                        help="Dimensionality")
    parser.add_argument("--num_live_points", type=int, default=400)
    parser.add_argument("--generate_direction_method", type=str, required=True)
    parser.add_argument("--num_steps", type=int, required=True)
    parser.add_argument("--popsize", type=int, required=True)
    parser.add_argument('--log_dir', type=str)
    parser.add_argument('--verbose', action='store_true')
    args = parser.parse_args()
    main(args.generate_direction_method, args.x_dim, args.num_steps, args.popsize, args.log_dir, verbose=args.verbose)
| 2,654 | 34.878378 | 118 | py |
UltraNest | UltraNest-master/examples/testloggamma.py | import argparse
import numpy as np
from numpy import log
import scipy.stats
from ultranest.utils import verify_gradient
def main(args):
    """Run the LogGamma toy problem with the selected sampler backend.

    The first two dimensions are bimodal mixtures (loggamma+loggamma and
    normal+normal); the remaining dimensions are unimodal loggamma or
    normal distributions. The true log-evidence is 0.
    """
    ndim = args.x_dim
    scale = args.scale
    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False

    # mixture components for the first two (bimodal) dimensions
    rv1a = scipy.stats.loggamma(1, loc=2./3, scale=scale)
    rv1b = scipy.stats.loggamma(1, loc=1./3, scale=scale)
    rv2a = scipy.stats.norm(2./3, scale)
    rv2b = scipy.stats.norm(1./3, scale)
    # remaining dimensions: first half loggamma, second half normal
    rv_rest = []
    for i in range(2, ndim):
        if i <= (ndim+2)/2:
            rv = scipy.stats.loggamma(1, loc=2./3., scale=scale)
        else:
            rv = scipy.stats.norm(2./3, scale)
        rv_rest.append(rv)
        del rv

    def loglike(theta):
        # 1e-300 guards the log against exact zeros in the mixture pdfs
        L1 = log(0.5 * rv1a.pdf(theta[:,0]) + 0.5 * rv1b.pdf(theta[:,0]) + 1e-300)
        L2 = log(0.5 * rv2a.pdf(theta[:,1]) + 0.5 * rv2b.pdf(theta[:,1]) + 1e-300)
        Lrest = np.sum([rv.logpdf(t) for rv, t in zip(rv_rest, theta[:,2:].transpose())], axis=0)
        #assert L1.shape == (len(theta),)
        #assert L2.shape == (len(theta),)
        #assert Lrest.shape == (len(theta),), Lrest.shape
        like = L1 + L2 + Lrest
        # replace -inf plateaus with a gentle slope towards the center
        like = np.where(like < -1e300, -1e300 - ((np.asarray(theta) - 0.5)**2).sum(), like)
        assert like.shape == (len(theta),), (like.shape, theta.shape)
        return like

    def transform(x):
        return x

    paramnames = ['param%d' % (i+1) for i in range(ndim)]

    if args.pymultinest:
        from pymultinest.solve import solve

        def flat_loglike(theta):
            # MultiNest passes one point at a time; wrap for the vectorized loglike
            return loglike(theta.reshape((1, -1)))

        result = solve(LogLikelihood=flat_loglike, Prior=transform,
                       n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
                       verbose=True, resume=True, importance_nested_sampling=False)
        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
    elif args.reactive:
        # output directory encodes the chosen step sampler and its settings
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim, args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim, args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim, args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim, args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)

        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
                                        log_dir=log_dir, resume=True,
                                        vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            # only MPI rank 0 writes the step sampler log
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w') if sampler.mpi_rank == 0 else False)
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w') if sampler.mpi_rank == 0 else False)
        #if args.dyhmc:
        #    import ultranest.dyhmc
        #    verify_gradient(ndim, transform, loglike, transform_loglike_gradient, combination=True)
        #    sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(ndim=ndim, nsteps=args.slice_steps,
        #        transform_loglike_gradient=transform_loglike_gradient, adaptive_nsteps=adaptive_nsteps)
        #if args.dychmc:
        #    import ultranest.dychmc
        #    verify_gradient(ndim, transform, loglike, gradient)
        #    sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(ndim=ndim, nsteps=args.slice_steps,
        #        transform=transform, loglike=loglike, gradient=gradient, adaptive_nsteps=adaptive_nsteps)
        sampler.run(frac_remain=0.5, min_num_live_points=args.num_live_points, max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename = log_dir + '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            # corner plots become impractically large beyond ~20 dimensions
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform,
                                num_live_points=args.num_live_points, vectorized=True,
                                log_dir=args.log_dir + '-%dd' % ndim, resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()
if __name__ == '__main__':
    # command-line interface for the loggamma benchmark
    parser = argparse.ArgumentParser()
    parser.add_argument('--x_dim', type=int, default=2,
                        help="Dimensionality")
    parser.add_argument('--scale', type=float, default=1/30., help="Peak widths")
    parser.add_argument("--num_live_points", type=int, default=400)
    parser.add_argument('--log_dir', type=str, default='logs/loggamma')
    parser.add_argument('--pymultinest', action='store_true')
    parser.add_argument('--reactive', action='store_true')
    parser.add_argument('--slice', action='store_true')
    parser.add_argument('--harm', action='store_true')
    parser.add_argument('--dyhmc', action='store_true')
    parser.add_argument('--dychmc', action='store_true')
    parser.add_argument('--slice_steps', type=int, default=100)
    parser.add_argument('--adapt_steps', type=str)
    args = parser.parse_args()
    main(args)
| 5,829 | 43.166667 | 136 | py |
UltraNest | UltraNest-master/examples/testsine.py | import argparse
import numpy as np
from numpy import pi, sin, log
import matplotlib.pyplot as plt
def main(args):
    """Fit a noisy sine curve (amplitude, jitter, phase, period) with nested sampling.

    Simulates ``args.ndata`` points from a sine model, then recovers the
    parameters with the selected backend (PyMultiNest, reactive, or classic).
    """
    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False
    np.random.seed(2)

    # ground-truth model parameters for the simulation
    Ndata = args.ndata
    jitter_true = 0.1
    phase_true = 0.
    period_true = 180
    amplitude_true = args.contrast / Ndata * jitter_true

    paramnames = ['amplitude', 'jitter', 'phase', 'period']
    ndim = 4
    derivednames = [] #'frequency']
    # phase is periodic -> wrapped
    wrapped_params = [False, False, True, False]
    #wrapped_params = None

    x = np.linspace(0, 360, 1000)
    y = amplitude_true * sin(x / period_true * 2 * pi + phase_true)
    if True:
        # plot the true curve, then overwrite (x, y) with the noisy mock data set
        plt.plot(x, y)
        x = np.random.uniform(0, 360, Ndata)
        y = np.random.normal(amplitude_true * sin(x / period_true * 2 * pi + phase_true), jitter_true)
        plt.errorbar(x, y, yerr=jitter_true, marker='x', ls=' ')
        plt.savefig('testsine.pdf', bbox_inches='tight')
        plt.close()

    def loglike(params):
        # vectorized Gaussian likelihood; params has shape (nsamples, ndim)
        amplitude, jitter, phase, period = params.transpose()[:4]
        predicty = amplitude * sin(x.reshape((-1,1)) / period * 2 * pi + phase)
        logl = (-0.5 * log(2 * pi * jitter**2) - 0.5 * ((predicty - y.reshape((-1,1))) / jitter)**2).sum(axis=0)
        assert logl.shape == jitter.shape
        return logl

    def transform(x):
        # log-uniform priors for amplitude, jitter, period; uniform phase
        z = np.empty((len(x), 4))
        z[:,0] = 10**(x[:,0] * 4 - 2)
        z[:,1] = 10**(x[:,1] * 1 - 1.5)
        z[:,2] = 2 * pi * x[:,2]
        z[:,3] = 10**(x[:,3] * 4 - 1)
        #z[:,4] = 2 * pi / x[:,3]
        return z

    # smoke-test the likelihood at the prior center
    loglike(transform(np.ones((2, ndim))*0.5))

    if args.pymultinest:
        from pymultinest.solve import solve
        global Lmax
        Lmax = -np.inf

        def flat_loglike(theta):
            # track and report the running best likelihood
            L = loglike(theta.reshape((1, -1)))[0]
            global Lmax
            if L > Lmax:
                print("Like: %.2f" % L)
                Lmax = L
            return L

        def flat_transform(cube):
            return transform(cube.reshape((1, -1)))[0]

        result = solve(LogLikelihood=flat_loglike, Prior=flat_transform,
                       n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
                       n_live_points=args.num_live_points,
                       verbose=True, resume=False, importance_nested_sampling=False)
        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
        return
    elif args.reactive:
        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
                                        log_dir=args.log_dir, vectorized=True,
                                        derived_param_names=derivednames, wrapped_params=wrapped_params,
                                        resume='overwrite')
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps)
        if args.slice:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps)
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform,
                                log_dir=args.log_dir, vectorized=True,
                                derived_param_names=derivednames, wrapped_params=wrapped_params,
                                resume='overwrite')

    sampler.run(min_num_live_points=args.num_live_points)
    print()
    sampler.print_results()
    sampler.plot()
    for i, p in enumerate(paramnames + derivednames):
        v = sampler.results['samples'][:,i]
        print('%20s: %5.3f +- %5.3f' % (p, v.mean(), v.std()))
if __name__ == '__main__':
    # command-line interface for the sine-fit example
    parser = argparse.ArgumentParser()
    parser.add_argument('--contrast', type=int, default=100,
                        help="Signal-to-Noise level")
    parser.add_argument('--ndata', type=int, default=40,
                        help="Number of simulated data points")
    parser.add_argument("--num_live_points", type=int, default=1000)
    parser.add_argument('--log_dir', type=str, default='logs/testsine')
    parser.add_argument('--reactive', action='store_true', default=False)
    parser.add_argument('--pymultinest', action='store_true')
    parser.add_argument('--slice', action='store_true')
    parser.add_argument('--harm', action='store_true')
    parser.add_argument('--dyhmc', action='store_true')
    parser.add_argument('--dychmc', action='store_true')
    parser.add_argument('--slice_steps', type=int, default=100)
    parser.add_argument('--adapt_steps', type=str)
    args = parser.parse_args()
    main(args)
| 4,970 | 36.946565 | 136 | py |
UltraNest | UltraNest-master/examples/testfeatures.py | import numpy as np
import shutil
import scipy.stats
import traceback
import json
import random
import sys
import os
import signal
import string
import hashlib
def get_arg_hash(runargs):
    """Return a short, stable identifier for a run-settings dict.

    Hashes the dict's string representation, so equal dicts (same insertion
    order) map to the same 10-character hex string.
    """
    return hashlib.md5(str(runargs).encode()).hexdigest()[:10]
def main(args):
    """Run one feature-test configuration of the ReactiveNestedSampler.

    Selects a toy problem (gauss / slantedeggbox / funnel / loggamma), builds
    its likelihood and prior transform, then runs the sampler with the many
    options carried in ``args``. Returns the sampler results dict, or None
    when the configuration is skipped (incompatible option combination).
    """
    ndim = args.x_dim
    paramnames = list(string.ascii_lowercase)[:ndim]
    np.random.seed(args.seed)
    if args.wrapped_dims:
        wrapped_params = [True] * ndim
    else:
        wrapped_params = None

    true_Z = None
    if args.log_dir is None:
        if args.delete_dir:
            # nothing to delete without a log dir; skip this configuration
            return
        log_dir = None
    else:
        log_dir = args.log_dir + '-%s' % args.problem
        log_dir += '-%dd' % ndim
        if args.wrapped_dims:
            log_dir += '-wrapped'
        if args.delete_dir:
            shutil.rmtree(log_dir, ignore_errors=True)

    if ndim >= 20 and args.num_live_points >= 1000:
        print("skipping, probably too slow to run")
        return

    if args.problem == 'gauss':
        sigma = 0.01
        if args.wrapped_dims:
            centers = (np.sin(np.arange(ndim) / 2.) + 1.) / 2.
        else:
            # keep centers away from the unit cube boundary
            centers = (np.sin(np.arange(ndim) / 2.) / 2. + 1.) / 2.
        true_Z = 0

        def loglike(theta):
            like = -0.5 * (((theta - centers) / sigma)**2).sum(axis=1) - 0.5 * np.log(2 * np.pi * sigma**2) * ndim
            return like

        def transform(x):
            return x
    elif args.problem == 'slantedeggbox':
        if not args.pass_transform:
            # this problem only makes sense with its transform
            return

        def loglike(z):
            chi = (2. + (np.cos(z[:,:2] / 2.)).prod(axis=1))**5
            chi2 = -np.abs((z - 5 * np.pi) / 0.5).sum(axis=1)
            return chi + chi2

        def transform(x):
            return x * 100
    elif args.problem == 'funnel':
        if args.wrapped_dims:
            return
        if not args.pass_transform:
            return
        sigma = 0.01
        centers = np.sin(np.arange(ndim) / 2.)
        data = np.random.normal(centers, sigma).reshape((1, -1))

        def loglike(theta):
            # first parameter sets the (log-)width of the Gaussian
            sigma = 10**theta[:,0]
            like = -0.5 * (((theta[:,1:] - data) / sigma.reshape((-1, 1)))**2).sum(axis=1) - 0.5 * np.log(2 * np.pi * sigma**2) * ndim
            return like

        def transform(x):
            z = x * 20 - 10
            z[:,0] = x[:,0] * 6 - 3
            return z

        paramnames.insert(0, 'sigma')
    elif args.problem == 'loggamma':
        true_Z = 0.0
        if args.wrapped_dims:
            return
        # bimodal mixtures in the first two dimensions
        rv1a = scipy.stats.loggamma(1, loc=2. / 3, scale=1. / 30)
        rv1b = scipy.stats.loggamma(1, loc=1. / 3, scale=1. / 30)
        rv2a = scipy.stats.norm(2. / 3, 1. / 30)
        rv2b = scipy.stats.norm(1. / 3, 1. / 30)
        rv_rest = []
        for i in range(2, ndim):
            if i <= (ndim + 2) / 2:
                rv = scipy.stats.loggamma(1, loc=2. / 3., scale=1. / 30)
            else:
                rv = scipy.stats.norm(2. / 3, 1. / 30)
            rv_rest.append(rv)
            del rv

        def loglike(theta):
            L1 = np.log(0.5 * rv1a.pdf(theta[:,0]) + 0.5 * rv1b.pdf(theta[:,0]))
            L2 = np.log(0.5 * rv2a.pdf(theta[:,1]) + 0.5 * rv2b.pdf(theta[:,1]))
            Lrest = np.sum([rv.logpdf(t) for rv, t in zip(rv_rest, theta[:,2:].transpose())], axis=0)
            like = L1 + L2 + Lrest
            # replace -inf plateaus with a gentle slope towards the center
            like = np.where(like < -1e100, -1e100 - ((np.asarray(theta) - 0.5)**2).sum(), like)
            assert like.shape == (len(theta),), (like.shape, theta.shape)
            return like

        def transform(x):
            return x

    from ultranest import ReactiveNestedSampler
    from ultranest.mlfriends import MLFriends, RobustEllipsoidRegion, SimpleRegion, ScalingLayer
    sampler = ReactiveNestedSampler(
        paramnames, loglike,
        transform=transform if args.pass_transform else None,
        log_dir=log_dir, vectorized=True,
        resume='resume' if args.resume else 'overwrite',
        wrapped_params=wrapped_params,
    )
    # region construction strategy; hasattr guards older configuration dicts
    if hasattr(args, 'axis_aligned') and args.axis_aligned:
        sampler.transform_layer_class = ScalingLayer
        region_class = SimpleRegion
    else:
        region_class = RobustEllipsoidRegion if hasattr(args, 'ellipsoidal') and args.ellipsoidal else MLFriends
    print("MPI:", sampler.mpi_size, sampler.mpi_rank)

    for result in sampler.run_iter(
        update_interval_volume_fraction=args.update_interval_iter_fraction,
        dlogz=args.dlogz,
        dKL=args.dKL,
        frac_remain=args.frac_remain,
        min_ess=args.min_ess,
        max_iters=args.max_iters,
        cluster_num_live_points=args.cluster_num_live_points,
        min_num_live_points=args.num_live_points,
        max_ncalls=int(args.max_ncalls),
        region_class=region_class,
    ):
        sampler.print_results()
        print(
            " (remember, we are trying to achive: %s ) " % (
                dict(
                    dlogz=args.dlogz,
                    dKL=args.dKL,
                    frac_remain=args.frac_remain,
                    min_ess=args.min_ess,
                )))

    results = sampler.results
    try:
        sampler.plot()
    except AssertionError as e:
        # corner plotting legitimately fails when the effective sample size
        # is below the dimensionality; anything else is a real error
        if "I don't believe that you want more dimensions than samples" in str(e) and results['ess'] <= ndim + 1:
            pass
        else:
            raise e
    sampler.pointstore.close()
    # sanity check: recovered evidence must bracket the truth within 3 sigma
    if results['logzerr_tail'] < 0.5 and results['logzerr'] < 1.0 and true_Z is not None and args.num_live_points > 50:
        assert results['logz'] - results['logzerr'] * 3 < true_Z < results['logz'] + results['logzerr'] * 3
    return results
def run_safely(runargs):
    """Run ``main`` for a settings dict with a timeout and failure bookkeeping.

    Skips configurations already marked done. On any exception (including the
    SIGALRM-driven timeout) the settings are dumped to an ``-error.json`` file
    and the process exits with status 1; on success a ``.done`` marker file
    is created.
    """
    # renamed from `id` to avoid shadowing the builtin
    run_id = get_arg_hash(runargs)
    if os.path.exists('testfeatures/%s.done' % run_id):
        print("not rerunning %s" % run_id)
        return
    print("Running %s with options:" % run_id, runargs)

    def timeout_handler(signum, frame):
        raise Exception("Timeout")

    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(60 * (1 + runargs['x_dim']))  # give a few minutes
    try:
        main(AttrDict(runargs))
    except Exception:
        traceback.print_exc()
        filename = 'testfeatures/runsettings-%s-error.json' % run_id
        print("Storing configuration as '%s'. Options were:" % filename, runargs)
        with open(filename, 'w') as f:
            json.dump(runargs, f, indent=2)
        sys.exit(1)
    signal.alarm(0)  # cancel the pending alarm
    with open('testfeatures/%s.done' % run_id, 'w'):
        pass
class AttrDict(dict):
    """Dict whose keys are also readable/writable as attributes.

    Used to turn a plain settings dict into an argparse-Namespace-like object.
    """

    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # alias the attribute namespace to the dict itself, so d.key == d['key']
        self.__dict__ = self
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--random', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--timeout', action='store_true')
parser.add_argument('--nrounds', type=int, default=1,
help="Number of random configurations to generate")
parser.add_argument('conf', nargs='*', help='config files')
progargs = parser.parse_args()
if len(progargs.conf) > 0:
for filename in progargs.conf:
print("loading configuration from file '%s'..." % filename)
runargs = json.load(open(filename))
print("Running with options:", runargs)
main(AttrDict(runargs))
if progargs.timeout:
run_safely(runargs)
sys.exit(0)
if progargs.random:
random.seed(progargs.seed)
def choose(myargs):
# pick first (default) option most of the time
if random.random() < 0.25:
return myargs[0]
else:
return random.choice(myargs)
else:
def choose(myargs):
return myargs
Nrounds = progargs.nrounds
i = 0
while True:
print("generating a random configuration...")
runargs = dict(
problem = choose(['gauss', 'slantedeggbox', 'funnel', 'loggamma']),
x_dim = choose([2, 1, 6, 20]),
seed = choose([1, 2, 3]),
wrapped_dims = choose([False, True]),
log_dir = choose(['logs/features', None]),
delete_dir = choose([False, False, False, True]),
pass_transform = choose([True, False]),
num_live_points = choose([100, 50, 400, 1000]),
resume = choose([False, True]),
cluster_num_live_points = choose([50, 0]),
update_interval_iter_fraction=choose([0.2, 1.0]),
dlogz = choose([2.0, 0.5]),
dKL = choose([1.0, 0.1]),
frac_remain = choose([0.5, 0.001]),
min_ess = choose([0, 4000]),
max_iters = choose([None, 10000]),
max_ncalls = choose([10000000., 10000., 100000.]),
axis_aligned = choose([False, True]),
ellipsoidal = choose([False, True]),
)
if not progargs.random:
key = i
nkeys = len(runargs.keys())
for k, v in runargs.items():
if 0 <= key <= len(v):
j = key % len(v)
runargs[k] = v[j]
key -= len(v)
else:
runargs[k] = v[0]
key -= len(v)
filename = 'testfeatures/runsettings-%s-iterated.json' % get_arg_hash(runargs)
print("Storing configuration as '%s'. Options were:" % filename, runargs)
with open(filename, 'w') as f:
json.dump(runargs, f, indent=2)
# run_safely(runargs)
if key > 0:
break
else:
run_safely(runargs)
if i + 1 >= progargs.nrounds:
break
i = i + 1
| 9,824 | 32.762887 | 134 | py |
UltraNest | UltraNest-master/examples/testasymgauss.py | import argparse
import numpy as np
def main(args):
    """Run the asymmetric-Gaussian toy problem with the selected backend.

    Per-dimension sigmas span logarithmically from 0.1 down to ``args.sigma``;
    centers are placed away from the unit-cube edges.
    """
    ndim = args.x_dim
    sigma = np.logspace(-1, np.log10(args.sigma), ndim)
    width = 1 - 5 * sigma
    width[width < 1e-20] = 1e-20
    centers = (np.sin(np.arange(ndim)/2.) * width + 1.) / 2.
    #centers = np.ones(ndim) * 0.5
    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False

    def loglike(theta):
        like = -0.5 * (((theta - centers)/sigma)**2).sum(axis=1) - 0.5 * np.log(2 * np.pi * sigma**2).sum()
        return like

    def transform(x):
        return x

    def transform_loglike_gradient(u):
        # combined transform + likelihood + gradient, for the HMC sampler
        theta = u
        like = -0.5 * (((theta - centers)/sigma)**2).sum(axis=1) - 0.5 * np.log(2 * np.pi * sigma**2).sum()
        grad = (theta - centers)/sigma
        return u, like, grad

    def gradient(theta):
        return (theta - centers) / sigma

    paramnames = ['param%d' % (i+1) for i in range(ndim)]

    if args.pymultinest:
        from pymultinest.solve import solve

        def flat_loglike(theta):
            # MultiNest passes one point at a time; wrap for the vectorized loglike
            return loglike(theta.reshape((1, -1)))

        result = solve(LogLikelihood=flat_loglike, Prior=transform,
                       n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
                       verbose=True, resume=True, importance_nested_sampling=False)
        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
    elif args.reactive:
        # output directory encodes the chosen step sampler and its settings
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim, args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim, args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim, args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim, args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)

        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
                                        log_dir=log_dir, resume=True,
                                        vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.dyhmc:
            import ultranest.dyhmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, transform_loglike_gradient, combination=True)
            sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(ndim=ndim, nsteps=args.slice_steps,
                transform_loglike_gradient=transform_loglike_gradient, adaptive_nsteps=adaptive_nsteps)
        if args.dychmc:
            import ultranest.dychmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, gradient)
            sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(ndim=ndim, nsteps=args.slice_steps,
                transform=transform, loglike=loglike, gradient=gradient, adaptive_nsteps=adaptive_nsteps)
        sampler.run(frac_remain=0.5, min_num_live_points=args.num_live_points, max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename = log_dir + '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            # corner plots become impractically large beyond ~20 dimensions
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform,
                                num_live_points=args.num_live_points, vectorized=True,
                                log_dir=args.log_dir + '-%dd' % ndim, resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()
if __name__ == '__main__':
    # command-line interface for the asymmetric Gaussian benchmark
    parser = argparse.ArgumentParser()
    parser.add_argument('--x_dim', type=int, default=2,
                        help="Dimensionality")
    parser.add_argument('--sigma', type=float, default=0.01)
    parser.add_argument("--num_live_points", type=int, default=1000)
    parser.add_argument('--log_dir', type=str, default='logs/asymgauss')
    parser.add_argument('--pymultinest', action='store_true')
    parser.add_argument('--reactive', action='store_true')
    parser.add_argument('--slice', action='store_true')
    parser.add_argument('--harm', action='store_true')
    parser.add_argument('--dyhmc', action='store_true')
    parser.add_argument('--dychmc', action='store_true')
    parser.add_argument('--slice_steps', type=int, default=100)
    parser.add_argument('--adapt_steps', type=str)
    args = parser.parse_args()
    main(args)
| 5,372 | 41.984 | 136 | py |
UltraNest | UltraNest-master/examples/evaluate_scaling.py | import matplotlib.pyplot as plt
import numpy as np
from numpy import pi, exp, log
import sys
import json
import scipy.stats, scipy.special
# Collect (dimensionality, number-of-calls) pairs per sampling method from
# UltraNest results.json files given on the command line, then plot the
# scaling of model evaluations with dimensionality.
# NOTE: indentation was reconstructed from a flattened dump.

methods = {}
for filename in sys.argv[1:]:
    info = json.load(open(filename))
    ncall = info['ncall']
    logz = info['logz']
    logzerr = info['logzerr']
    ndim = len(info['paramnames'])
    # method name is encoded in the run directory path
    path = filename.replace('/info/results.json', '')
    adaptation = ''
    if '-adapt' in path:
        path, adaptation = path.split('-adapt')
        adaptation = '-' + adaptation
    parts = path.split('-')
    if len(parts) == 3:
        # strip the trailing step count from the method name
        method = parts[-1].strip('1234567890') + adaptation
    else:
        method = 'MLFriends'
    method_data = methods.get(method, [])
    method_data.append([ndim, ncall])
    methods[method] = method_data
    print(ncall, logz, path)

# reference scaling curves to overlay, collected while plotting the data
lines = []
for method, data in sorted(methods.items()):
    x, y = np.transpose(data)
    i = np.argsort(x)
    x, y = x[i], y[i]
    print(method, x, y)
    plt.plot(x, y, marker='o', mfc='w', label=method)
    if method == 'harm-move-distance-midway':
        xgrid = np.linspace(x.max() / 20, x.max()*3, 50)
        #plt.plot(xgrid, (xgrid / x.max())**2 * y.max(), '--', color='k', label='quadratic scaling', alpha=0.4)
        lines.append(('quadratic scaling', xgrid, (xgrid / x.max())**2 * y.max(), dict(color='k', ls='--')))
        lines.append(('cubic scaling', xgrid, (xgrid / x.max())**3 * y.max(), dict(color='k', ls='-.')))
        #plt.plot(xgrid, (xgrid / x.max())**3 * y.max(), '--', color='gray', label='cubic scaling', alpha=0.4)
    if method == 'MLFriends':
        #xgrid = np.linspace(x.max() / 3, x.max(), 50)
        #plt.plot(xgrid, (xgrid / xgrid.max())**4 * y.max(), '-', color='k', label='quadratic scaling')
        #xgrid = np.linspace(x.max()/2, x.max(), 50)
        #plt.plot(xgrid, np.exp(xgrid - x.max()) * y.max(), '-', color='k', label='exponential scaling')
        # extrapolate exponential scaling from the last two data points
        xgrid = np.linspace(x.min(), x.max() * 3, 50)
        lines.append(('exponential', xgrid, np.exp((xgrid - x[-2]) / (x[-1] - x[-2]) * np.log(y[-1] / y[-2])) * y[-2],
                      dict(color='gray', ls=':')))
        #plt.plot(xgrid, np.exp((xgrid - x[-2]) / (x[-1] - x[-2]) * np.log(y[-1] / y[-2])) * y[-2], '--', color='k', label='exponential scaling', alpha=0.4)

# derive the problem name from the last filename processed
if 'rosen' in filename:
    problemname = 'rosenbrock'
elif 'multishell' in filename:
    problemname = 'multishell'
elif 'asymgauss' in filename:
    problemname = 'asymgauss'
elif 'loggamma' in filename:
    problemname = 'loggamma'
else:
    assert False, filename

plt.xscale('log')
plt.yscale('log')
# freeze the autoscaled limits before drawing the reference curves
ylo, yhi = plt.ylim()
xlo, xhi = plt.xlim()
plt.ylim(ylo, yhi)
plt.xlim(xlo, xhi)
for name, x, y, opts in lines:
    plt.plot(x, y, label=name, alpha=0.4, **opts)
plt.legend(loc='best')
plt.ylabel('Number of model evaluations')
plt.xlabel('Dimensionality')
print("writing to '%s_scaling.pdf'" % (problemname))
plt.savefig(problemname + '_scaling.pdf', bbox_inches='tight')
plt.close()
| 2,985 | 35.414634 | 156 | py |
UltraNest | UltraNest-master/examples/testhyperrect.py | import argparse
import numpy as np
def main(args):
    """Benchmark nested samplers on a hyper-rectangle likelihood.

    The likelihood increases towards the unit-cube centre (0.5, ..., 0.5):
    it is minus the log-volume of the smallest centred hyper-cube enclosing
    the point, capped at +100. Backend (pymultinest / ReactiveNestedSampler /
    NestedSampler) and step sampler are chosen via ``args`` flags.
    """
    ndim = args.x_dim
    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False

    def flat_loglike(theta):
        # scalar variant (for pymultinest): largest coordinate deviation
        # from the centre defines the enclosing cube
        delta = np.max(np.abs(theta - 0.5))
        volume_enclosed = ndim * np.log(delta + 1e-15)
        if volume_enclosed > -100:
            return -volume_enclosed
        else:
            return +100

    def loglike(theta):
        # vectorized variant: theta has shape (nsamples, ndim)
        delta = np.max(np.abs(theta - 0.5), axis=1)
        volume_enclosed = ndim * np.log(delta + 1e-15)
        like = -volume_enclosed
        # cap at +100; the negated comparison also maps NaN onto the cap
        like[~(like < +100)] = 100
        return like

    def flat_transform(x):
        # flat prior on the unit cube: identity transform
        return x

    def transform(x):
        return x

    paramnames = ['param%d' % (i+1) for i in range(ndim)]

    if args.pymultinest:
        from pymultinest.solve import solve
        result = solve(LogLikelihood=flat_loglike, Prior=flat_transform,
            n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
            verbose=True, resume=True, importance_nested_sampling=False)
        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
    elif args.reactive:
        # encode sampler configuration in the log directory name
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim, args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim, args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim, args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim, args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)

        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
            log_dir=log_dir, resume=True,
            vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        # disabled gradient-based samplers, kept as a string literal:
        """
        if args.dyhmc:
            import ultranest.dyhmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, transform_loglike_gradient, combination=True)
            sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(ndim=ndim, nsteps=args.slice_steps,
                transform_loglike_gradient=transform_loglike_gradient)
        if args.dychmc:
            import ultranest.dychmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, gradient, verbose=True)
            sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(ndim=ndim, nsteps=args.slice_steps,
                transform=transform, loglike=loglike, gradient=gradient)
        """
        sampler.run(frac_remain=0.5, min_num_live_points=args.num_live_points, max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename = log_dir + '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            # corner plots become unwieldy in high dimensions
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform,
            num_live_points=args.num_live_points, vectorized=True,
            log_dir=args.log_dir + '-%dd' % ndim, resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()
    #print("expected Z=%.3f (analytic solution)" % Z_analytic)
if __name__ == '__main__':
    # Command-line interface: problem dimensionality, sampler backend and
    # step sampler configuration.
    parser = argparse.ArgumentParser()
    parser.add_argument('--x_dim', type=int, default=2,
        help="Dimensionality")
    parser.add_argument("--num_live_points", type=int, default=400)
    parser.add_argument('--log_dir', type=str, default='logs/hyperrect')
    parser.add_argument('--pymultinest', action='store_true')
    parser.add_argument('--reactive', action='store_true')
    parser.add_argument('--slice', action='store_true')
    parser.add_argument('--harm', action='store_true')
    parser.add_argument('--dyhmc', action='store_true')
    parser.add_argument('--dychmc', action='store_true')
    parser.add_argument('--slice_steps', type=int, default=100)
    parser.add_argument('--adapt_steps', type=str)

    args = parser.parse_args()
    main(args)
| 5,061 | 40.834711 | 136 | py |
UltraNest | UltraNest-master/examples/testmultishell.py | import argparse
import numpy as np
from numpy import exp, log, pi
import scipy.stats
# analytic solution:
def shell_vol(ndim, r, w):
    """Analytic volume of a Gaussian shell of radius *r* and width *w* in *ndim* dimensions.

    The radial integral is the (ndim-1)-th moment of a normal distribution
    centred at r with scale w; the angular integral is the surface area of
    the unit hyper-ball (ndim times its volume).
    NOTE(review): relies on ``import scipy.stats`` pulling in scipy.special
    transitively — confirm, or import scipy.special explicitly.
    """
    radial_part = scipy.stats.norm.moment(ndim - 1, loc=r, scale=w)
    unit_ball_volume = pi**(ndim / 2.) / scipy.special.gamma(ndim / 2. + 1)
    angular_part = ndim * unit_ball_volume
    return radial_part * angular_part
"""
for ndim in [2, 4, 8, 16, 32, 64, 128, 256]:
r = 0.2
C = 0.01
#r = (C * scipy.special.gamma((ndim+3)/2)*ndim*pi**(-(ndim+1)/2) / (
# scipy.special.gamma((ndim+2)/2) * pi**(-ndim/2)))**(1 / (ndim+1))
w = (r**(ndim+1) + C * scipy.special.gamma((ndim+3)/2)*ndim*pi**(-(ndim+1)/2) / (
scipy.special.gamma((ndim+2)/2) * pi**(-ndim/2)))**(1 / (ndim+1)) - r
vol_sphere = pi**((ndim)/2.) / scipy.special.gamma((ndim)/2. + 1)
surf_shell = pi**((ndim+1)/2.) / scipy.special.gamma((ndim+1)/2. + 1)
vol_shell = surf_shell * ((w+r)**(ndim+1) - r**(ndim+1)) / ndim
#vol_shell = surf_shell * (r**(ndim+1)) / ndim
print('%4d %.3f %.4e %.4e %.4e' % (ndim, w, vol_sphere, vol_shell, vol_shell / vol_sphere))
#import sys; sys.exit()
"""
def main(args):
    """Benchmark nested samplers on a two-Gaussian-shell likelihood.

    Two shells of radius 0.2 and width 0.001/ndim are centred left and
    right of the unit-cube centre; the analytic evidence is computed via
    ``shell_vol``. Backend and step sampler are chosen via ``args`` flags.
    """
    ndim = args.x_dim
    adaptive_nsteps = args.adapt_steps
    if adaptive_nsteps is None:
        adaptive_nsteps = False
    #C = 0.01
    r = 0.2
    # the shell thickness is
    #w = (r**(ndim+1) + C * scipy.special.gamma((ndim+3)/2)*ndim*pi**(-(ndim+1)/2) / (
    #    scipy.special.gamma((ndim+2)/2) * pi**(-ndim/2)))**(1 / (ndim+1)) - r
    w = 0.001 / ndim

    r1, r2 = r, r
    w1, w2 = w, w
    # shell centres: offset along the first axis only
    c1, c2 = np.zeros(ndim) + 0.5, np.zeros(ndim) + 0.5
    c1[0] -= r1 / 2
    c2[0] += r2 / 2
    # Gaussian normalisation constants of the radial profiles
    N1 = -0.5 * log(2 * pi * w1**2)
    N2 = -0.5 * log(2 * pi * w2**2)
    Z_analytic = log(shell_vol(ndim, r1, w1) + shell_vol(ndim, r2, w2))

    def loglike(theta):
        # vectorized: distance of each sample to each shell centre
        d1 = ((theta - c1)**2).sum(axis=1)**0.5
        d2 = ((theta - c2)**2).sum(axis=1)**0.5
        L1 = -0.5 * ((d1 - r1)**2) / w1**2 + N1
        L2 = -0.5 * ((d2 - r2)**2) / w2**2 + N2
        return np.logaddexp(L1, L2)

    def transform(x):
        # flat prior on the unit cube
        return x

    def gradient(theta):
        # NOTE(review): delta2 subtracts c1, not c2 — looks like a
        # copy-paste bug; also logaddexp of gradient vectors is dubious.
        # Confirm before relying on the dychmc path.
        delta1 = theta - c1
        delta2 = theta - c1
        d1 = (delta1**2).sum()**0.5
        d2 = (delta2**2).sum()**0.5
        g1 = -delta1 * (1 - r1 / d1) / w1**2
        g2 = -delta2 * (1 - r2 / d2) / w2**2
        return np.logaddexp(g1, g2)

    # disabled sanity check of the likelihood on shell samples:
    """
    N = 10000
    x = np.random.normal(size=(N, ndim))
    x *= (np.random.uniform(size=N)**(1./ndim) / (x**2).sum(axis=1)**0.5).reshape((-1, 1))
    x = x * r1 + c1
    print(loglike(x) - N1)
    print('%.3f%%' % ((loglike(x) - N1 > -ndim*2).mean() * 100))
    import sys; sys.exit()
    """

    paramnames = ['param%d' % (i+1) for i in range(ndim)]

    if args.pymultinest:
        from pymultinest.solve import solve

        def flat_loglike(theta):
            # adapt the vectorized likelihood to pymultinest's scalar API
            return loglike(theta.reshape((1, -1)))

        result = solve(LogLikelihood=flat_loglike, Prior=transform,
            n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
            verbose=True, resume=True, importance_nested_sampling=False)
        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
    elif args.reactive:
        # encode sampler configuration in the log directory name
        if args.slice:
            log_dir = args.log_dir + 'RNS-%dd-slice%d' % (ndim, args.slice_steps)
        elif args.harm:
            log_dir = args.log_dir + 'RNS-%dd-harm%d' % (ndim, args.slice_steps)
        elif args.dyhmc:
            log_dir = args.log_dir + 'RNS-%dd-dyhmc%d' % (ndim, args.slice_steps)
        elif args.dychmc:
            log_dir = args.log_dir + 'RNS-%dd-dychmc%d' % (ndim, args.slice_steps)
        else:
            log_dir = args.log_dir + 'RNS-%dd' % (ndim)
        if adaptive_nsteps:
            log_dir = log_dir + '-adapt%s' % (adaptive_nsteps)

        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
            log_dir=log_dir, resume=True,
            vectorized=True)
        if args.slice:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        if args.harm:
            import ultranest.stepsampler
            sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(nsteps=args.slice_steps, adaptive_nsteps=adaptive_nsteps,
                log=open(log_dir + '/stepsampler.log', 'w'))
        #if args.dyhmc:
        #    import ultranest.dyhmc
        #    from ultranest.utils import verify_gradient
        #    verify_gradient(ndim, transform, loglike, transform_loglike_gradient, combination=True)
        #    sampler.stepsampler = ultranest.dyhmc.DynamicHMCSampler(ndim=ndim, nsteps=args.slice_steps,
        #        transform_loglike_gradient=transform_loglike_gradient)
        if args.dychmc:
            import ultranest.dychmc
            from ultranest.utils import verify_gradient
            verify_gradient(ndim, transform, loglike, gradient, verbose=True)
            sampler.stepsampler = ultranest.dychmc.DynamicCHMCSampler(ndim=ndim, nsteps=args.slice_steps,
                transform=transform, loglike=loglike, gradient=gradient)
        sampler.run(frac_remain=0.5, min_num_live_points=args.num_live_points, max_num_improvement_loops=1)
        sampler.print_results()
        if sampler.stepsampler is not None:
            sampler.stepsampler.plot(filename = log_dir + '/stepsampler_stats_region.pdf')
        if ndim <= 20:
            # corner plots become unwieldy in high dimensions
            sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform,
            num_live_points=args.num_live_points, vectorized=True,
            log_dir=args.log_dir + '-%dd' % ndim, resume=True)
        sampler.run()
        sampler.print_results()
        sampler.plot()
    print("expected Z=%.3f (analytic solution)" % Z_analytic)
if __name__ == '__main__':
    # Command-line interface: problem dimensionality, sampler backend and
    # step sampler configuration.
    parser = argparse.ArgumentParser()
    parser.add_argument('--x_dim', type=int, default=2,
        help="Dimensionality")
    parser.add_argument("--num_live_points", type=int, default=400)
    parser.add_argument('--log_dir', type=str, default='logs/multishell')
    parser.add_argument('--pymultinest', action='store_true')
    parser.add_argument('--reactive', action='store_true')
    parser.add_argument('--slice', action='store_true')
    parser.add_argument('--harm', action='store_true')
    parser.add_argument('--dyhmc', action='store_true')
    parser.add_argument('--dychmc', action='store_true')
    parser.add_argument('--slice_steps', type=int, default=100)
    parser.add_argument('--adapt_steps', type=str)

    args = parser.parse_args()
    main(args)
| 7,224 | 38.917127 | 136 | py |
UltraNest | UltraNest-master/examples/testgauss.py | import argparse
import numpy as np
from numpy import log
def main(args):
    """Sample a symmetric Gaussian centred in the unit cube.

    The likelihood is an isotropic normal with width ``args.sigma`` at 0.5
    in every coordinate; the prior transform is the identity (flat unit
    cube). Runs pymultinest or the ultranest pymultinest-compatible solver
    depending on the flags; with neither flag set, nothing is executed.
    """
    ndim = args.x_dim
    sigma = args.sigma
    # NOTE: the original first computed sinusoidal centres
    # ((np.sin(np.arange(ndim)/2.) * max(0, 1 - 5 * sigma) + 1.) / 2.)
    # and immediately overwrote them; that dead code has been removed.
    centers = np.ones(ndim) * 0.5

    def flat_loglike(theta):
        # isotropic Gaussian log-density, including the normalisation term
        like = -0.5 * (((theta - centers)/sigma)**2).sum() - 0.5 * log(2 * np.pi * sigma**2) * ndim
        return like

    def flat_transform(x):
        # flat prior on the unit cube: identity transform
        return x

    import string
    paramnames = list(string.ascii_lowercase)[:ndim]

    if args.pymultinest:
        from pymultinest.solve import solve
        result = solve(LogLikelihood=flat_loglike, Prior=flat_transform,
            n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
            verbose=True, resume=True, importance_nested_sampling=False)
        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
    elif args.reactive:
        # drop-in replacement for pymultinest's solve(), backed by ultranest
        from ultranest.solvecompat import pymultinest_solve_compat as solve
        result = solve(LogLikelihood=flat_loglike, Prior=flat_transform,
            n_dims=ndim, outputfiles_basename=args.log_dir + 'RNS-%dd' % ndim,
            verbose=True, resume=True, importance_nested_sampling=False)
        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
if __name__ == '__main__':
    # Command-line interface: dimensionality, Gaussian width and backend.
    parser = argparse.ArgumentParser()
    parser.add_argument('--x_dim', type=int, default=2,
        help="Dimensionality")
    parser.add_argument("--num_live_points", type=int, default=1000)
    parser.add_argument('--sigma', type=float, default=0.01)
    parser.add_argument('--run_num', type=str, default='')
    parser.add_argument('--log_dir', type=str, default='logs/loggauss')
    parser.add_argument('--reactive', action='store_true')
    parser.add_argument('--pymultinest', action='store_true')

    args = parser.parse_args()
    main(args)
| 2,333 | 35.46875 | 99 | py |
UltraNest | UltraNest-master/examples/evaluate_rosenbrock.py | import matplotlib.pyplot as plt
import numpy as np
from numpy import pi, exp, log
import sys
import json
import scipy.stats, scipy.special
def shell_vol(ndim, r, w):
    """Analytic volume of a Gaussian shell of radius *r* and width *w* in *ndim* dimensions.

    Factorizes into a radial integral — the (ndim-1)-th moment of a normal
    distribution centred at r with scale w — and an angular integral, the
    surface area of the unit hyper-ball (ndim times its volume).
    """
    radial_moment = scipy.stats.norm.moment(ndim - 1, loc=r, scale=w)
    ball_volume = pi**(ndim / 2.) / scipy.special.gamma(ndim / 2. + 1)
    surface_area = ball_volume * ndim
    return radial_moment * surface_area
# Collect (ncall, logz, logzerr) per sampling method from ultranest
# result.json files and plot evidence estimates against cost.
methods = {}
for filename in sys.argv[1:]:
    info = json.load(open(filename))
    ncall = info['ncall']
    logz = info['logz']
    logzerr = info['logzerr']
    ndim = len(info['paramnames'])
    # run directory encodes the method name
    path = filename.replace('/info/results.json', '')
    adaptation = ''
    if '-adapt' in path:
        path, adaptation = path.split('-adapt')
        adaptation = '-' + adaptation
    parts = path.split('-')
    if len(parts) == 3:
        # strip trailing step count digits, keep adaptation suffix
        method = parts[-1].strip('1234567890') + adaptation
    else:
        method = 'MLFriends'
    method_data = methods.get(method, [])
    method_data.append([ncall, logz, logzerr])
    methods[method] = method_data
    print(ncall, logz, path)

for method, data in sorted(methods.items()):
    x, y, yerr = np.transpose(data)
    i = np.argsort(x)
    x, y, yerr = x[i], y[i], yerr[i]
    print(method, x, y, yerr)
    plt.errorbar(x=x, y=y, yerr=yerr, marker='x', label=method)

# literature reference point for the 50d Rosenbrock problem
if 'rosen' in filename and ndim == 50:
    plt.plot(1.3e9, -288.6, 'o', label='DNest4')

# per-problem reference evidence band (lo/hi), based on the last filename
if 'rosen' in filename:
    # empirical fits of log Z vs dimensionality; second pair overrides the first
    true_logz_lo = 3.7 + ndim*-5.7 + ndim**2 * -0.01
    true_logz_hi = 3.1 + ndim*-5.7 + ndim**2 * 0.01
    true_logz_lo = 3.85689+0.3732 + (-5.82502 - 0.139) * ndim + (0.00417525-0.01149) * ndim**2
    true_logz_hi = 3.85689-0.3732 + (-5.82502 + 0.139) * ndim + (0.00417525+0.01149) * ndim**2
    #true_logz_lo = 3.7 + ndim*-5.7 + ndim**2 * -0.01
    #true_logz_hi = 3.7 + ndim*-5.7
    problemname = 'rosenbrock'
elif 'multishell' in filename:
    # analytic evidence of the two-shell problem (same setup as testmultishell.py)
    r = 0.2
    w = 0.001 / ndim
    r1, r2 = r, r
    w1, w2 = w, w
    Z_analytic = log(shell_vol(ndim, r1, w1) + shell_vol(ndim, r2, w2))
    true_logz_lo, true_logz_hi = Z_analytic, Z_analytic
    problemname = 'multishell'
elif 'asymgauss' in filename:
    true_logz_lo, true_logz_hi = 0, 0
    problemname = 'asymgauss'
elif 'loggamma' in filename:
    true_logz_lo, true_logz_hi = 0, 0
    problemname = 'loggamma'
else:
    assert False, filename

plt.xscale('log')
plt.ylim(true_logz_lo-2*ndim, true_logz_hi+2*ndim)
xlo, xhi = plt.xlim()
# reference band plus two tolerance shades (ndim/3 and ndim/9)
plt.hlines([true_logz_lo, true_logz_hi], xlo, xhi, linestyles=':', color='k')
plt.fill_between([xlo, xhi], [true_logz_lo-ndim/3]*2, [true_logz_hi+ndim/3]*2, color='k', alpha=0.1)
plt.fill_between([xlo, xhi], [true_logz_lo-ndim/9]*2, [true_logz_hi+ndim/9]*2, color='k', alpha=0.1)
plt.xlim(xlo, xhi)
plt.legend(loc='best')
plt.xlabel('number of function calls')
plt.ylabel('$\Delta \log Z$')
print("writing to '%s_%dd_comparison.pdf'" % (problemname, ndim))
plt.savefig(problemname + '_%dd_comparison.pdf' % ndim, bbox_inches='tight')
plt.close()
| 3,067 | 32.714286 | 100 | py |
UltraNest | UltraNest-master/examples/evaluate_scaling_logz.py | import matplotlib.pyplot as plt
import numpy as np
from numpy import pi, exp, log
import sys
import json
import scipy.stats, scipy.special
# Collect (ndim, logz, logzerr) per sampling method from ultranest
# result.json files and plot evidence vs dimensionality.
methods = {}
for filename in sys.argv[1:]:
    info = json.load(open(filename))
    ncall = info['ncall']
    logz = info['logz']
    logzerr = info['logzerr']
    ndim = len(info['paramnames'])
    # run directory encodes the method name
    path = filename.replace('/info/results.json', '')
    adaptation = ''
    if '-adapt' in path:
        path, adaptation = path.split('-adapt')
        adaptation = '-' + adaptation
    parts = path.split('-')
    if len(parts) == 3:
        # strip trailing step count digits, keep adaptation suffix
        method = parts[-1].strip('1234567890') + adaptation
    else:
        method = 'MLFriends'
    method_data = methods.get(method, [])
    method_data.append([ndim, logz, logzerr])
    methods[method] = method_data
    print(ncall, logz, path)

for method, data in sorted(methods.items()):
    x, y, yerr = np.transpose(data)
    i = np.argsort(x)
    x, y, yerr = x[i], y[i], yerr[i]
    if 'rosen' in filename:
        # subtract the empirical log Z trend so deviations are visible
        y = y - (3.7 + x * -5.7)
    print(method, x, y)
    plt.errorbar(x=x, y=y, yerr=yerr, marker='o', mfc='w', label=method)

# derive the output file prefix from the last processed filename
if 'rosen' in filename:
    problemname = 'rosenbrock'
    # x = np.arange(50)
    # plt.plot(x, 3.7 + x*-5.7, '--', color='k')
    # plt.plot(x, 3.7 + x*-5.9, '--', color='k')
    # plt.plot(x, 3.7 + x*-5.5, '--', color='k')
elif 'multishell' in filename:
    problemname = 'multishell'
elif 'asymgauss' in filename:
    problemname = 'asymgauss'
elif 'loggamma' in filename:
    problemname = 'loggamma'
else:
    assert False, filename

#plt.xscale('log')
plt.legend(loc='best')
# NOTE(review): the y-axis shows log Z (possibly trend-subtracted), but the
# label below says "Number of model evaluations" — looks copied from
# evaluate_scaling.py; confirm intended label.
plt.ylabel('Number of model evaluations')
plt.xlabel('Dimensionality')
print("writing to '%s_scaling_logz.pdf'" % (problemname))
plt.savefig(problemname + '_scaling_logz.pdf', bbox_inches='tight')
plt.close()
| 1,820 | 27.453125 | 72 | py |
UltraNest | UltraNest-master/examples/testcorrpeak.py | # combination of:
# - varying sigmas
# - varying priors (uniform, log-uniform (every third))
# - tight parameter correlation
import argparse
import numpy as np
#from numpy import log
def main(args):
    """Benchmark samplers on a correlated peak with varying widths.

    The likelihood combines an axis-aligned Gaussian with per-dimension
    widths, a non-linear degeneracy between the first two parameters, and
    pairwise correlations between neighbouring parameters from index 3 on.
    """
    ndim = args.x_dim
    # per-dimension widths; oscillate over orders of magnitude.
    # NOTE(review): at index 2 the divisor (np.arange(ndim)-2) is zero, so
    # sigmas[2] becomes inf (only sigmas[:2] are reset below) — dividing by
    # inf zeroes that term in loglike; confirm this is intended.
    sigmas = 10**(-2.0 + 2.0 * np.cos(np.arange(ndim)-2)) / (np.arange(ndim)-2)
    sigmas[:2] = 1.0

    def transform(x):
        # identity; commented-out lines show a disabled log-uniform variant
        y = x #.copy()
        #y[:,1::3] = 10**-y[:,1::3]
        #y[:,::3] *= x[:,2::3]
        return y

    centers = transform(np.ones((1, ndim)) * 0.2).flatten()
    degsigmas = 0.01
    crosssigmas = args.sigma
    # * sigmas[3:-1:] * sigmas[4::]

    def loglike(theta):
        # gaussian
        like = -0.5 * (np.abs((theta[:,1:] - centers[1:])/sigmas[1:])**2).sum(axis=1)
        # non-linear degeneracy correlation
        like2 = -0.5 * (np.abs((theta[:,1] * theta[:,0] - centers[1] * centers[0])/degsigmas)**2) #.sum(axis=1)
        # pair-wise correlation
        a = (theta[:,3:-1:] - centers[3:-1:]) / sigmas[3:-1:]
        b = (theta[:,4::] - centers[4::]) / sigmas[4::]
        like3 = -0.5 * (np.abs((a - b) / crosssigmas)**2).sum(axis=1)
        return like + like2 + like3

    print(centers, crosssigmas, sigmas)
    import string
    paramnames = list(string.ascii_lowercase)[:ndim]

    if args.pymultinest:
        from pymultinest.solve import solve
        import json

        def flat_loglike(theta):
            # adapt vectorized likelihood to pymultinest's scalar API
            return loglike(theta.reshape((1, -1))).flatten()

        def flat_transform(cube):
            return transform(cube.reshape((1, -1))).flatten()

        result = solve(LogLikelihood=flat_loglike, Prior=flat_transform,
            n_dims=ndim, outputfiles_basename=args.log_dir + 'MN-%dd' % ndim,
            verbose=True, resume=True, n_live_points=args.num_live_points,
            importance_nested_sampling=False)
        json.dump(paramnames, open(args.log_dir + 'MN-%ddparams.json' % ndim, 'w'))
        print()
        print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
        print()
        print('parameter values:')
        for name, col in zip(paramnames, result['samples'].transpose()):
            print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
    elif args.reactive:
        from ultranest import ReactiveNestedSampler
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
            log_dir=args.log_dir + 'RNS-%dd' % ndim,
            vectorized=True)
        sampler.run(frac_remain=0.5, min_ess=400, min_num_live_points=args.num_live_points)
        sampler.print_results()
        sampler.plot()
    else:
        from ultranest import NestedSampler
        sampler = NestedSampler(paramnames, loglike, transform=transform,
            num_live_points=args.num_live_points, vectorized=True,
            log_dir=args.log_dir + '-%dd' % ndim)
        sampler.run()
        sampler.print_results()
        sampler.plot()
if __name__ == '__main__':
    # Command-line interface: dimensionality, correlation width and backend.
    parser = argparse.ArgumentParser()
    parser.add_argument('--x_dim', type=int, default=2,
        help="Dimensionality")
    parser.add_argument("--num_live_points", type=int, default=1000)
    parser.add_argument('--sigma', type=float, default=0.1)
    parser.add_argument('--run_num', type=str, default='')
    parser.add_argument('--log_dir', type=str, default='logs/corrpeak')
    parser.add_argument('--reactive', action='store_true')
    parser.add_argument('--pymultinest', action='store_true')

    args = parser.parse_args()
    main(args)
| 3,530 | 36.168421 | 111 | py |
UltraNest | UltraNest-master/ultranest/dyhmc.py | """Experimental constrained Hamiltonian Monte Carlo step sampling
Contrary to CHMC, this uses the likelihood gradients throughout the path.
A helper surface is created using the live points.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.special
import scipy.stats
def stop_criterion(thetaminus, thetaplus, rminus, rplus):
    """Evaluate the NUTS U-turn condition for the main loop.

    Parameters
    ------
    thetaminus: ndarray[float, ndim=1]
        under position
    thetaplus: ndarray[float, ndim=1]
        above position
    rminus: ndarray[float, ndim=1]
        under momentum
    rplus: ndarray[float, ndim=1]
        above momentum

    Returns
    -------
    criterion: bool
        True while both end-point momenta still point along the separation
        vector between the trajectory end points (no U-turn yet).
    """
    separation = thetaplus - thetaminus
    backward_ok = np.dot(separation, rminus.T) >= 0
    forward_ok = np.dot(separation, rplus.T) >= 0
    return backward_ok & forward_ok
def leapfrog(theta, r, grad, epsilon, invmassmatrix, f):
    """Perform one leapfrog integration step of size epsilon from theta.

    Uses the kick-drift-kick scheme: half momentum update with the current
    gradient, full position update, then half momentum update with the
    gradient at the new position, obtained by calling f.

    Returns (theta', r', grad', logp', extra) at the new position.
    """
    # first half-kick using the gradient at the current position
    r_half = r + 0.5 * epsilon * grad
    # full drift in position space, velocity = invmassmatrix . momentum
    theta_new = theta + epsilon * np.dot(invmassmatrix, r_half)
    # evaluate density and gradient at the new position
    (logp_new, grad_new), extra = f(theta_new)
    # second half-kick with the new gradient
    r_new = r_half + 0.5 * epsilon * grad_new
    return theta_new, r_new, grad_new, logp_new, extra
def build_tree(theta, r, grad, v, j, epsilon, invmassmatrix, f, joint0):
    """The main NUTS-style recursion: build a balanced trajectory tree of height j.

    Doubles the trajectory in direction v (+1 forwards, -1 backwards),
    tracking end points (thetaminus/thetaplus with momenta and gradients),
    a proposed sample (thetaprime, ...), a validity flag sprime, and
    acceptance statistics (alphaprime: HMC acceptance; betaprime: the same
    weighted by exp(-logp) to sample uniformly under the auxiliary
    distribution; nalphaprime: leapfrog step count).
    """
    if j == 0:
        # Base case: Take a single leapfrog step in the direction v.
        thetaprime, rprime, gradprime, logpprime, extraprime = leapfrog(theta, r, grad, v * epsilon, invmassmatrix, f)
        # joint log-density: potential plus kinetic term
        joint = logpprime - 0.5 * np.dot(np.dot(rprime, invmassmatrix), rprime.T)
        # Is the simulation wildly inaccurate?
        sprime = joint0 - 1000. < joint  # and logpprime > Lmin
        # Set the return values---minus=plus for all things here, since the
        # "tree" is of depth 0.
        thetaminus = thetaprime[:]
        thetaplus = thetaprime[:]
        rminus = rprime[:]
        rplus = rprime[:]
        r = rprime[:]
        gradminus = gradprime[:]
        gradplus = gradprime[:]
        # Compute the acceptance probability.
        if not sprime:
            # print("stopped trajectory:", joint0, joint, logpprime, gradprime)
            alphaprime = 0.0
        else:
            alphaprime = min(1., np.exp(joint - joint0))
        if logpprime < -300:
            # if alphaprime > 0:
            #     print("stopping at very low probability:", joint0, joint, logpprime, gradprime)
            # exp(-logpprime) would overflow; treat as zero weight
            betaprime = 0.0
        else:
            # de-weight by the auxiliary density to sample the prior uniformly
            betaprime = alphaprime * np.exp(-logpprime)
        if betaprime == 0.0:
            sprime = False
        nalphaprime = 1
    else:
        # Recursion: Implicitly build the height j-1 left and right subtrees.
        thetaminus, rminus, gradminus, thetaplus, rplus, gradplus, \
            thetaprime, gradprime, logpprime, extraprime, rprime, sprime, \
            alphaprime, betaprime, nalphaprime = build_tree(
                theta, r, grad, v, j - 1, epsilon, invmassmatrix, f, joint0)
        # No need to keep going if the stopping criteria were met in the first subtree.
        if sprime:
            if v == -1:
                # extend the backward end of the trajectory
                thetaminus, rminus, gradminus, _, _, _, \
                    thetaprime2, gradprime2, logpprime2, extraprime2, \
                    rprime2, sprime2, alphaprime2, betaprime2, nalphaprime2 = build_tree(
                        thetaminus, rminus, gradminus, v, j - 1, epsilon, invmassmatrix, f, joint0)
            else:
                # extend the forward end of the trajectory
                _, _, _, thetaplus, rplus, gradplus, \
                    thetaprime2, gradprime2, logpprime2, extraprime2, \
                    rprime2, sprime2, alphaprime2, betaprime2, nalphaprime2 = build_tree(
                        thetaplus, rplus, gradplus, v, j - 1, epsilon, invmassmatrix, f, joint0)
            # Choose which subtree to propagate a sample up from,
            # proportional to the beta (uniform-sampling) weights.
            if betaprime + betaprime2 > 0 and np.random.uniform() < betaprime2 / (betaprime + betaprime2):
                thetaprime = thetaprime2[:]
                gradprime = gradprime2[:]
                logpprime = logpprime2
                extraprime = extraprime2
                rprime = rprime2
            # Update the stopping criterion.
            sturn = stop_criterion(thetaminus, thetaplus, rminus, rplus)
            # print(sprime, sprime2, sturn)
            sprime = sprime and sprime2 and sturn
            # Update the acceptance probability statistics.
            alphaprime += alphaprime2
            betaprime += betaprime2
            nalphaprime += nalphaprime2
    return thetaminus, rminus, gradminus, thetaplus, rplus, gradplus, \
        thetaprime, gradprime, logpprime, extraprime, \
        rprime, sprime, alphaprime, betaprime, nalphaprime
def tree_sample(theta, logp, r0, grad, extra, epsilon, invmassmatrix, f, joint, maxheight=np.inf):
    """Build NUTS-like tree of sampling path from theta towards p with stepsize epsilon.

    Repeatedly doubles the trajectory in a random direction via build_tree
    until the U-turn/validity criterion fails or maxheight doublings are
    reached. Returns the accumulated acceptance statistics (alpha, beta,
    nalpha), the selected sample (theta, grad, logp, extra, r0) and the
    final tree height j.
    """
    # initialize the tree end points with the current state
    thetaminus = theta
    thetaplus = theta
    rminus = r0[:]
    rplus = r0[:]
    gradminus = grad[:]
    gradplus = grad[:]

    # acceptance statistics, seeded with the initial point
    alpha = 1
    beta = 1
    nalpha = 1

    j = 0  # initial tree height j = 0
    s = True  # Main loop: will keep going until s == 0.

    while s and j < maxheight:
        # Choose a direction. -1 = backwards, 1 = forwards.
        v = int(2 * (np.random.uniform() < 0.5) - 1)

        # Double the size of the tree in that direction.
        if v == -1:
            thetaminus, rminus, gradminus, _, _, _, thetaprime, gradprime, \
                logpprime, extraprime, rprime, sprime, \
                alphaprime, betaprime, nalphaprime = build_tree(
                    thetaminus, rminus, gradminus, v, j, epsilon, invmassmatrix, f, joint)
        else:
            _, _, _, thetaplus, rplus, gradplus, thetaprime, gradprime, \
                logpprime, extraprime, rprime, sprime, \
                alphaprime, betaprime, nalphaprime = build_tree(
                    thetaplus, rplus, gradplus, v, j, epsilon, invmassmatrix, f, joint)
        assert beta > 0, beta
        assert betaprime >= 0, betaprime

        # Use Metropolis-Hastings to decide whether or not to move to a
        # point from the half-tree we just generated.
        if sprime and np.random.uniform() < betaprime / (beta + betaprime):
            logp = logpprime
            grad = gradprime[:]
            theta = thetaprime
            extra = extraprime
            r0 = rprime
            # print("accepting", theta, logp)
        alpha += alphaprime
        beta += betaprime
        nalpha += nalphaprime

        # Decide if it's time to stop.
        sturn = stop_criterion(thetaminus, thetaplus, rminus, rplus)
        # print(sprime, sturn)
        s = sprime and sturn
        # Increment depth.
        j += 1
    # print("jumping to:", theta)
    # print('Tree height: %d, acceptance fraction: %03.2f%%/%03.2f%%, epsilon=%g' % (j, alpha/nalpha*100, beta/nalpha*100, epsilon))
    return alpha, beta, nalpha, theta, grad, logp, extra, r0, j
def find_beta_params_static(d, u10):
    """Pick Beta(alpha, beta) parameters for the auxiliary distribution.

    Naive intuition: place the 50% quantile at u = u10 and keep the
    distribution very flat at high u. alpha is fixed at 1; beta is found
    by inverting the Beta CDF median over a grid of candidate values.
    """
    del d  # dimensionality is not used by this static recipe
    candidate_betas = np.arange(1, 20)
    medians = scipy.special.betaincinv(1.0, candidate_betas, 0.5)
    alpha = 1
    # medians decrease with beta, so reverse both arrays to give np.interp
    # ascending x-values
    beta = np.interp(u10, medians[::-1], candidate_betas[::-1])
    print("Auxiliary Beta distribution(alpha=%.1f, beta=%.1f)" % (alpha, beta))
    return alpha, beta
def find_beta_params_dynamic(d, u10):
    """ Define auxiliary distribution taking into account
    kinetic energy of a d-dimensional HMC.
    Make exp(-d/2) quantile to be at u=0.1, and 95% quantile at u=0.5.

    Fits Beta(alpha, beta) so that the CDF at u10 equals exp(-d/2) and
    the CDF at u50 = (u10+1)/2 equals 0.98 (the docstring says 95%, but
    the code has always targeted 0.98), via least-squares minimization.
    """
    # bug fix: scipy.optimize is used below but was never imported at
    # module level (only scipy.stats / scipy.special are)
    import scipy.optimize

    u50 = (u10 + 1) / 2.

    def minfunc(params):
        """ minimization function: squared quantile mismatches """
        alpha, beta = params
        q10 = scipy.special.betainc(alpha, beta, u10)
        q50 = scipy.special.betainc(alpha, beta, u50)
        return (q10 - np.exp(-d / 2))**2 + (q50 - 0.98)**2

    r = scipy.optimize.minimize(minfunc, [1.0, 10.0])
    alpha, beta = r.x
    print("Auxiliary Beta distribution(alpha=%.1f, beta=%.1f)" % (alpha, beta), u10)
    return alpha, beta
def generate_momentum_normal(d, massmatrix):
    """ draw direction vector according to mass matrix """
    mean = np.zeros(d)
    covariance = np.dot(massmatrix, np.eye(d))
    return np.random.multivariate_normal(mean, covariance)
def generate_momentum(d, massmatrix, alpha, beta):
    """ draw momentum from a circle, with amplitude following the beta distribution

    Direction comes from a multivariate normal shaped by the mass matrix;
    the amplitude is minus the log of a (renormalised) Beta(alpha, beta)
    CDF value, so small CDF values yield large kicks.
    """
    momentum = np.random.multivariate_normal(np.zeros(d), np.dot(massmatrix, np.eye(d)))
    # generate normalisation from beta distribution
    # add a bit of noise in the step size
    # norm *= np.uniform(0.2, 2)
    betainc = scipy.special.betainc
    # normalisation constant for the amplitude mapping below
    auxnorm = -betainc(alpha + 1, beta, 1) + betainc(alpha + 1, beta, 0) + betainc(alpha, beta, 1)
    u = np.random.uniform()
    if u > 0.9:
        # with 10% probability use the full CDF mass (norm = 1)
        norm = 1.
    else:
        # otherwise rescale u into [0, 1) and evaluate the Beta CDF there
        u /= 0.9
        norm = betainc(alpha, beta, u)
    # amplitude: -log of the (guarded) normalised CDF value
    momnorm = -np.log((norm + 1e-10) / auxnorm)
    assert momnorm >= 0, (momnorm, norm, auxnorm)
    # rescale the direction vector to the chosen amplitude
    momentum *= momnorm / (momentum**2).sum()**0.5
    return momentum
def generate_momentum_circle(d, massmatrix):
    """ draw from a circle, with a little noise in amplitude """
    direction = np.random.multivariate_normal(np.zeros(d), np.dot(massmatrix, np.eye(d)))
    # unit-normalise, then scatter the amplitude by up to half a dex
    scale = 10**np.random.uniform(-0.3, 0.3) / (direction**2).sum()**0.5
    direction *= scale
    return direction
def generate_momentum_flattened(d, massmatrix):
    """ like normal distribution, but make momenta distributed like a single gaussian.
    **this is the one being used** """
    vec = np.random.multivariate_normal(np.zeros(d), np.dot(massmatrix, np.eye(d)))
    length = (vec**2).sum()**0.5
    assert length > 0
    # rescale so the resulting amplitude is length**(1/d)
    vec *= length**(1 / d) / length
    return vec
class FlattenedProblem(object):
    """
    Creates a suitable auxiliary distribution from samples of likelihood values

    The distribution is the CDF of a beta distribution, with

    0 -> logLmin
    1 -> 90% quantile of logLs
    0.5 -> 10% quantile of logLs

    .modify_Lgrad() returns the conversion from logL, grad to the
    equivalents on the auxiliary distribution.

    .__call__(x) returns logL, grad on the auxiliary distribution.
    """

    def __init__(self, d, Ls, function, layer):
        """Set up the auxiliary ("flattened") problem.

        Parameters
        ----------
        d: int
            dimensionality
        Ls: ndarray
            log-likelihood values of the live points
        function: callable
            maps a unit-cube point u to a (p, L, grad) tuple
        layer: object
            must provide either .invT with .cov, or .std (scalar 1 or
            an array of per-dimension widths)
        """
        self.Lmin = Ls.min()
        self.L90 = np.percentile(Ls, 90)
        self.L10 = np.percentile(Ls, 10)
        u10 = (self.L10 - self.Lmin) / (self.L90 - self.Lmin)
        self.function = function
        self.layer = layer
        # alternatives, disabled in favour of fixed parameters:
        # self.alpha, self.beta = find_beta_params_static(d, u10)
        # self.alpha, self.beta = find_beta_params_dynamic(d, u10)
        self.alpha, self.beta = 1.0, 6.0
        # Jacobian of the L -> u mapping
        self.du_dL = 1 / (self.L90 - self.Lmin)
        # print("du/dL = %g " % du_dL)
        # Beta function normalisation constant
        self.C = scipy.special.beta(self.alpha, self.beta)
        self.d = d
        # derive mass matrix / inverse mass matrix from the layer
        if hasattr(self.layer, 'invT'):
            self.invmassmatrix = self.layer.cov
            self.massmatrix = np.linalg.inv(self.invmassmatrix)
            # print("invM:", self.invmassmatrix.shape)
        elif hasattr(self.layer, 'std'):
            if np.shape(self.layer.std) == () and self.layer.std == 1:
                # unit scaling: scalar identity mass matrix
                self.massmatrix = 1
                self.invmassmatrix = 1
            else:
                # invmassmatrix: covariance
                self.invmassmatrix = np.diag(self.layer.std[0]**2)
                self.massmatrix = np.diag(self.layer.std[0]**-2)
                print(self.invmassmatrix.shape, self.layer.std)
        else:
            assert False

    def modify_Lgrad(self, L, grad):
        """Convert likelihood L and its gradient to the auxiliary distribution.

        Returns (logp, tgrad): the log of the Beta-CDF auxiliary density
        and the chain-rule-converted gradient.
        """
        u = (L - self.Lmin) / (self.L90 - self.Lmin)
        if u <= 0:
            # below the current likelihood floor: zero auxiliary density,
            # but pass the (rescaled) gradient through unchanged
            logp = -np.inf
            dlogp_du = 1.0
            # print("L <= Lmin", L, self.Lmin)
        elif u > 1:
            # above the 90% quantile: auxiliary distribution is flat (p=1),
            # so the gradient vanishes
            # print("L > L90", L, L90)
            return 0.0, 0 * grad
        else:
            # p = self.rv.cdf(u)
            p = scipy.special.betainc(self.alpha, self.beta, u)
            logp = np.log(p)
            # d log(CDF) / du = PDF / CDF
            B = p * self.C
            dlogp_du = u**(self.alpha - 1) * (1 - u)**(self.beta - 1) / B
        # convert gradient to flattened space via the chain rule
        tgrad = grad * dlogp_du * self.du_dL
        return logp, tgrad

    def __call__(self, u):
        if not np.logical_and(u > 0, u < 1).all():
            # outside unit cube, invalid.
            # print("outside", u)
            return (-np.inf, 0. * u), (None, -np.inf, 0. * u)

        p, L, grad_orig = self.function(u)
        # print("at ", u, "L:", L)
        return self.modify_Lgrad(L, grad_orig), (p, L, grad_orig)

    def generate_momentum(self):
        # Only the flattened-normal proposal is active; the original had
        # two unreachable return statements (plain normal and
        # beta-amplitude variants) after this one, which were removed.
        return generate_momentum_flattened(self.d, self.massmatrix)
class DynamicHMCSampler(object):
    """Dynamic Hamiltonian/Hybrid Monte Carlo technique.

    Typically, HMC operates on the posterior. It has the benefit
    of producing "orbit" trajectories, that can follow the guidance
    of gradients.

    In nested sampling, we need to sample the prior subject to the
    likelihood constraint. This means a HMC would most of the time
    go in straight lines, until it steps outside the boundary.
    Techniques such as Constrained HMC and Galilean MC use the
    gradient outside to select the reflection direction.

    However, it would be beneficial to be repelled by the likelihood
    boundary, and to take advantage of gradient guidance.
    This implements a new technique that does this.

    The trick is to define a auxiliary distribution from the likelihood,
    generate HMC trajectories from it, and draw points from the
    trajectory with inverse the probability of the auxiliary distribution
    to sample from the prior. Thus, the auxiliary distribution should be
    mostly flat, and go to zero at the boundaries to repell the HMC.

    Given Lmin and Lmax from the live points,
    use a beta approximation of log-likelihood::

        p=1 if L>Lmin
        u = (L - Lmin) / (Lmax - Lmin)
        p = Beta_PDF(u; alpha, beta)

    then define::

        d log(p) / dx = dlog(p_orig)/dlog(p) * dlog(p_orig) / dx
        new gradient = conversion * original gradient

    with conversion::

        dlogp/du = 0 if u>1; otherwise:
        dlogp/du = u**(1-alpha) * (1-u)**(1-beta) / Ic(u; alpha, beta) / Beta_PDF(u, alpha, beta)
        du/dL = 1 / (Lmax - Lmin)

    The beta distribution achieves:

    * a flattening of the loglikelihood to avoid seeing only "walls"
    * using the gradient to identify how to orbit the likelihood contour
    * at higher, unseen likelihoods, the exploration is in straight lines
    * trajectory do not have the energy to go below Lmin.
    * alpha and beta parameters allow flexible choice of "contour avoidance"

    Run HMC trajectory on p.
    This will draw samples proportional to p.
    Modify multinomial acceptance by 1/p to get uniform samples,
    and reject porig < p_1.

    The remaining choices for HMC are how long the trajectories should
    run (number of steps) and the step size. The former is solved
    by No-U-Turn Sampler or dynamic HMC, which randomly build
    forward and backward paths until the trajectory turns around.
    Then, a random point from the trajectory is chosen.

    The step size is chosen by targeting an acceptance rate of
    delta~0.95, and decreasing(increasing) every time the region is
    rebuilt if the acceptance rate is below(above).
    """

    def __init__(self, ndim, nsteps, transform_loglike_gradient, delta=0.90, nudge=1.04):
        """Initialise sampler.

        Parameters
        -----------
        ndim: int
            dimensionality of the problem; sets the initial step size
        nsteps: int
            number of accepted steps until the sample is considered independent.
        transform_loglike_gradient: function
            called with unit cube position vector u, returns
            transformed parameter vector p,
            loglikelihood and gradient (dlogL/du, not just dlogL/dp)
        delta: float
            target acceptance rate for step size adaptation
        nudge: float
            multiplicative factor applied when adjusting the step size
        """
        self.history = []
        self.nsteps = nsteps
        self.nrejects = 0
        # initial step size guess, scaled with dimensionality
        self.scale = 0.1 * ndim**0.5
        self.last = None, None, None, None
        self.transform_loglike_gradient = transform_loglike_gradient
        self.nudge = nudge
        self.delta = delta
        self.problem = None
        self.logstat = []
        # BUGFIX: label order must match the entry order appended in
        # adjust_stepsize(): [mean(alpha), scale, mean(beta), mean(treeheight)].
        # Previously 'acceptance_rate_bias' and 'stepsize' were swapped,
        # which mislabelled those two columns in plot().
        self.logstat_labels = ['acceptance_rate', 'stepsize', 'acceptance_rate_bias', 'treeheight']
        self.logstat_trajectory = []

    def __str__(self):
        """Get string representation."""
        return type(self).__name__ + '(nsteps=%d)' % self.nsteps

    def plot(self, filename):
        """Plot sampler statistics.

        Parameters
        ----------
        filename: str
            output file name for the figure
        """
        if len(self.logstat) == 0:
            return

        parts = np.transpose(self.logstat)
        plt.figure(figsize=(10, 1 + 3 * len(parts)))
        for i, (label, part) in enumerate(zip(self.logstat_labels, parts)):
            plt.subplot(len(parts), 1, 1 + i)
            plt.ylabel(label)
            plt.plot(part)
            # overlay a coarse (20-sample binned) running mean
            x = []
            y = []
            for j in range(0, len(part), 20):
                x.append(j)
                y.append(part[j:j + 20].mean())
            plt.plot(x, y)
            if np.min(part) > 0:
                plt.yscale('log')
        plt.savefig(filename, bbox_inches='tight')
        plt.close()

    def __next__(self, region, Lmin, us, Ls, transform, loglike, ndraw=40, plot=False):
        """Get a new point.

        Parameters
        ----------
        region: MLFriends
            region.
        Lmin: float
            loglikelihood threshold
        us: array of vectors
            current live points
        Ls: array of floats
            current live point likelihoods
        transform: function
            transform function
        loglike: function
            loglikelihood function
        ndraw: int
            number of draws to attempt simultaneously.
        plot: bool
            whether to produce debug plots.

        Returns
        -------
        unew: vector
            new point position in unit cube space
        pnew: vector
            new point in physical parameter space
        Lnew: float
            log-likelihood of the new point
        ncalls_total: int
            total number of likelihood/gradient evaluations performed
        """
        # start from a randomly selected live point above the threshold
        # i = np.argsort(Ls)[0]
        mask = Ls > Lmin
        i = np.random.randint(mask.sum())
        # print("starting from live point %d" % i)
        self.starti = np.where(mask)[0][i]
        ui = us[mask,:][i]
        assert np.logical_and(ui > 0, ui < 1).all(), ui

        # lazily build the auxiliary (flattened) problem
        if self.problem is None:
            self.create_problem(Ls, region)

        ncalls_total = 1
        (Lflat, gradflat), (pi, Li, gradi) = self.problem(ui)
        assert np.shape(Lflat) == (), (Lflat, Li, gradi)
        assert np.shape(gradflat) == (len(ui),), (gradi, gradflat)
        nsteps_remaining = self.nsteps
        while nsteps_remaining > 0:
            unew, pnew, Lnew, gradnew, Lflatnew, gradflatnew, nc, alpha, beta, treeheight = self.move(
                ui, pi, Li, gradi, gradflat=gradflat, Lflat=Lflat, region=region, ndraw=ndraw, plot=plot)

            if treeheight > 1:
                # do not count failed accepts
                nsteps_remaining = nsteps_remaining - 1
            else:
                print("stuck:", Li, "->", Lnew, "Lmin:", Lmin)

            ncalls_total += nc
            # print(" ->", Li, Lnew, unew)
            assert np.logical_and(unew > 0, unew < 1).all(), unew

            if plot:
                plt.plot([ui[0], unew[:,0]], [ui[1], unew[:,1]], '-', color='k', lw=0.5)
                plt.plot(ui[0], ui[1], 'd', color='r', ms=4)
                plt.plot(unew[:,0], unew[:,1], 'x', color='r', ms=4)
            ui, pi, Li, gradi, Lflat, gradflat = unew, pnew, Lnew, gradnew, Lflatnew, gradflatnew
            self.logstat_trajectory.append([alpha, beta, treeheight])

        self.adjust_stepsize()
        # BUGFIX: previously this returned only the call count of the last
        # move (nc); ncalls_total was accumulated but never used. Report
        # the full evaluation count so callers account for the true cost.
        return unew, pnew, Lnew, ncalls_total

    def move(self, ui, pi, Li, gradi, region, ndraw=1, Lflat=None, gradflat=None, plot=False):
        """Move from position ui, Li, gradi with a HMC trajectory.

        Return
        ------
        unew: vector
            new position in cube space
        pnew: vector
            new position in physical parameter space
        Lnew: float
            new likelihood
        gradnew: vector
            new gradient
        Lflat: float
            new likelihood on auxiliary distribution
        gradflat: vector
            new gradient on auxiliary distribution
        nc: int
            number of likelihood evaluations
        alpha: float
            acceptance rate of HMC trajectory
        beta: float
            acceptance rate of inverse-beta-biased HMC trajectory
        treeheight: int
            height of the NUTS-style trajectory tree
        """
        epsilon = self.scale
        # jitter the step size to decorrelate trajectory lengths
        # epsilon_here = 10**np.random.normal(0, 0.3) * epsilon
        epsilon_here = np.random.uniform() * epsilon
        # epsilon_here = epsilon
        problem = self.problem
        d = len(ui)
        assert Li > problem.Lmin

        # get initial likelihood and gradient from auxiliary distribution
        if Lflat is None or gradflat is None:
            Lflat, gradflat = problem.modify_Lgrad(Li, gradi)
        assert np.shape(Lflat) == (), (Lflat, Li, gradi)
        assert np.shape(gradflat) == (d,), (gradi, gradflat)

        # draw from momentum
        momentum = problem.generate_momentum()
        # compute current Hamiltonian
        joint0 = Lflat - 0.5 * np.dot(np.dot(momentum, problem.invmassmatrix), momentum.T)
        assert np.isfinite(joint0), (
            Lflat, momentum, -0.5 * np.dot(np.dot(momentum, problem.invmassmatrix), momentum.T))

        # explore and sample from one trajectory
        alpha, beta, nalpha, theta, gradflat, Lflat, (pnew, Lnew, gradnew), rprime, treeheight = tree_sample(
            ui, Lflat, momentum, gradflat, (pi, Li, gradi), epsilon_here, problem.invmassmatrix, problem, joint0, maxheight=30)

        return theta, pnew, Lnew, gradnew, Lflat, gradflat, nalpha, alpha / nalpha, beta / nalpha, treeheight

    def create_problem(self, Ls, region):
        """Set up auxiliary distribution.

        Parameters
        ----------
        Ls: array of floats
            live point likelihoods
        region: MLFriends region object
            region.transformLayer is used to obtain mass matrices
        """
        # problem dimensionality
        d = len(region.u[0])
        self.problem = FlattenedProblem(d, Ls, self.transform_loglike_gradient, region.transformLayer)

    def adjust_stepsize(self):
        """Adapt the step size based on the last trajectory statistics."""
        if len(self.logstat_trajectory) == 0:
            return

        # log averaged acceptance and trajectory statistics;
        # entry order matches self.logstat_labels:
        # [acceptance_rate, stepsize, acceptance_rate_bias, treeheight]
        self.logstat.append([
            np.mean([alpha for alpha, beta, treeheight in self.logstat_trajectory]),
            float(self.scale),
            np.mean([beta for alpha, beta, treeheight in self.logstat_trajectory]),
            np.mean([treeheight for alpha, beta, treeheight in self.logstat_trajectory])
        ])

        # update step size based on collected acceptance rates
        if any([treeheight <= 1 for alpha, beta, treeheight in self.logstat_trajectory]):
            # stuck, no move. Finer steps needed.
            self.scale /= self.nudge
        elif all([2**treeheight > 10 for alpha, beta, treeheight in self.logstat_trajectory]):
            # trajectories are long; slowly go towards more efficiency
            self.scale *= self.nudge**(1. / 40)
        else:
            alphamean, scale, betamean, treeheightmean = self.logstat[-1]
            if alphamean < self.delta:
                self.scale /= self.nudge
            elif alphamean > self.delta:
                self.scale *= self.nudge
        self.logstat_trajectory = []
        print("updating step size: %.4f %g %.4f %.1f" % tuple(self.logstat[-1]), "-->", self.scale)

    def region_changed(self, Ls, region):
        """React to a region update: adapt step size and rebuild the problem."""
        self.adjust_stepsize()
        self.create_problem(Ls, region)
"""
Vectorized step samplers
------------------------
Likelihood based on GPUs (model emulators based on neural networks,
or JAX implementations) can evaluate hundreds of points as efficiently
as one point. The implementations in this module leverage this power,
by providing random walks of populations of walkers.
"""
import numpy as np
from ultranest.utils import submasks
from ultranest.stepfuncs import evolve, step_back
from ultranest.stepfuncs import generate_cube_oriented_direction, generate_cube_oriented_direction_scaled
from ultranest.stepfuncs import generate_random_direction, generate_region_oriented_direction, generate_region_random_direction
import scipy.stats
def unitcube_line_intersection(ray_origin, ray_direction):
    r"""Compute intersection of a line (ray) and a unit box (0:1 in all axes).

    Based on
    http://www.iquilezles.org/www/articles/intersectors/intersectors.htm

    Parameters
    -----------
    ray_origin: array of vectors
        starting point of line
    ray_direction: vector
        line direction vector

    Returns
    --------
    tleft: array
        negative intersection point distance from ray\_origin in units in ray\_direction
    tright: array
        positive intersection point distance from ray\_origin in units in ray\_direction
    """
    # the ray must start inside the box and have a non-degenerate direction
    assert (ray_origin >= 0).all(), ray_origin
    assert (ray_origin <= 1).all(), ray_origin
    assert ((ray_direction**2).sum()**0.5 > 1e-200).all(), ray_direction
    # work in per-axis slab coordinates; zero direction components give
    # inf/nan entries, which the nan-aware reductions below ignore
    with np.errstate(divide='ignore', invalid='ignore'):
        inv_dir = 1. / ray_direction
        centered = inv_dir * (ray_origin - 0.5)
        half_span = np.abs(inv_dir) * 0.5
        # entry and exit distances per axis
        entry = -centered - half_span
        exit_ = -centered + half_span
    # the box is entered at the latest entry, left at the earliest exit
    return np.nanmax(entry, axis=1), np.nanmin(exit_, axis=1)
class PopulationRandomWalkSampler():
    """Vectorized Gaussian Random Walk sampler.

    Maintains a population of walkers that are perturbed in lock-step;
    finished samples are buffered in ``prepared_samples`` and handed out
    one at a time by :py:meth:`__next__`.
    """

    def __init__(
        self, popsize, nsteps, generate_direction, scale,
        scale_adapt_factor=0.9, scale_min=1e-20, scale_max=20, log=False, logfile=None
    ):
        """Initialise.

        Parameters
        ----------
        popsize: int
            number of walkers to maintain.
            this should be fairly large (~100), if too large you probably get memory issues
            Also, some results have to be discarded as the likelihood threshold increases.
            Observe the nested sampling efficiency.
        nsteps: int
            number of steps to take until the found point is accepted as independent.
            To calibrate, try several runs with increasing nsteps (doubling).
            The ln(Z) should become stable at some value.
        generate_direction: function
            Function that gives proposal kernel shape, one of:
            :py:func:`ultranest.popstepsampler.generate_cube_oriented_direction`
            :py:func:`ultranest.popstepsampler.generate_cube_oriented_direction_scaled`
            :py:func:`ultranest.popstepsampler.generate_random_direction`
            :py:func:`ultranest.popstepsampler.generate_region_oriented_direction`
            :py:func:`ultranest.popstepsampler.generate_region_random_direction`
        scale: float
            initial guess for the proposal scaling factor
        scale_adapt_factor: float
            if 1, no adapting is done.
            if <1, the scale is increased if the acceptance rate is below 23.4%,
            or decreased if it is above, by *scale_adapt_factor*.
        scale_min: float
            lowest value allowed for scale, do not adapt down further
        scale_max: float
            highest value allowed for scale, do not adapt up further
        log: bool
            verbosity flag (currently unused here)
        logfile: file
            where to print the current scaling factor and acceptance rate
        """
        self.nsteps = nsteps
        self.nrejects = 0  # cumulative number of rejected proposals
        self.scale = scale
        self.ncalls = 0
        assert scale_adapt_factor <= 1
        self.scale_adapt_factor = scale_adapt_factor
        self.scale_min = scale_min
        self.scale_max = scale_max
        self.log = log
        self.logfile = logfile
        # buffer of finished (u, p, L) samples handed out one at a time
        self.prepared_samples = []

        self.popsize = popsize
        self.generate_direction = generate_direction

    def __str__(self):
        """Return string representation."""
        return 'PopulationRandomWalkSampler(popsize=%d, nsteps=%d, generate_direction=%s, scale=%.g)' % (
            self.popsize, self.nsteps, self.generate_direction, self.scale)

    def region_changed(self, Ls, region):
        """Act upon region changed. Currently unused."""
        pass

    def __next__(
        self, region, Lmin, us, Ls, transform, loglike, ndraw=10,
        plot=False, tregion=None, log=False
    ):
        """Sample a new live point.

        Parameters
        ----------
        region: MLFriends object
            Region
        Lmin: float
            current log-likelihood threshold
        us: np.array((nlive, ndim))
            live points
        Ls: np.array(nlive)
            loglikelihoods live points
        transform: function
            prior transform function
        loglike: function
            loglikelihood function
        ndraw: int
            not used
        plot: bool
            not used
        tregion: bool
            not used
        log: bool
            not used

        Returns
        -------
        u: np.array(ndim) or None
            new point coordinates (None if not yet available)
        p: np.array(nparams) or None
            new point transformed coordinates (None if not yet available)
        L: float or None
            new point likelihood (None if not yet available)
        nc: int
            number of likelihood evaluations performed in this call
            (popsize * nsteps when the buffer was refilled, else 0)
        """
        nlive, ndim = us.shape
        # fill if empty:
        if len(self.prepared_samples) == 0:
            # choose live points
            ilive = np.random.randint(0, nlive, size=self.popsize)
            allu = us[ilive,:]
            allp = None
            allL = Ls[ilive]
            nc = self.nsteps * self.popsize
            # expected rejects at the 23.4% target acceptance rate
            nrejects_expected = self.nrejects + self.nsteps * self.popsize * (1 - 0.234)

            for i in range(self.nsteps):
                # perturb walker population
                v = self.generate_direction(allu, region, self.scale)
                # compute intersection of u + t * v with unit cube
                tleft, tright = unitcube_line_intersection(allu, v)
                # draw step lengths from a truncated normal so that every
                # proposal stays inside the unit cube
                proposed_t = scipy.stats.truncnorm.rvs(tleft, tright, loc=0, scale=1).reshape((-1, 1))

                proposed_u = allu + v * proposed_t
                mask_outside = ~np.logical_and(proposed_u > 0, proposed_u < 1).all(axis=1)
                assert not mask_outside.any(), proposed_u[mask_outside, :]

                proposed_p = transform(proposed_u)
                # accept if likelihood threshold exceeded
                proposed_L = loglike(proposed_p)
                mask_accept = proposed_L > Lmin
                self.nrejects += (~mask_accept).sum()
                allu[mask_accept,:] = proposed_u[mask_accept,:]
                if allp is None:
                    # allocate lazily now that the parameter dimension is known;
                    # NaN marks walkers that have not moved yet
                    del allp
                    allp = proposed_p * np.nan
                allp[mask_accept,:] = proposed_p[mask_accept,:]
                allL[mask_accept] = proposed_L[mask_accept]

            assert np.isfinite(allp).all(), 'some walkers never moved! Double nsteps of PopulationRandomWalkSampler.'
            self.prepared_samples = list(zip(allu, allp, allL))

            # adapt slightly
            if self.logfile:
                self.logfile.write("rescale\t%.4f\t%.4f\t%g\n" % (
                    mask_accept.mean() * 100,
                    100 - (self.nrejects - (nrejects_expected - self.nsteps * self.popsize * (1 - 0.234))) * 100. / (self.nsteps * self.popsize),
                    self.scale))
            if self.nrejects > nrejects_expected and self.scale > self.scale_min:
                # lots of rejects, decrease scale
                self.scale *= self.scale_adapt_factor
            elif self.nrejects < nrejects_expected and self.scale < self.scale_max:
                # few rejects, increase scale to explore faster
                self.scale /= self.scale_adapt_factor
        else:
            nc = 0

        u, p, L = self.prepared_samples.pop(0)
        return u, p, L, nc
class PopulationSliceSampler():
    """Vectorized slice/HARM sampler.

    Can revert until all previous steps have likelihoods allL above Lmin.
    Updates currentt, generation and allL, in-place.
    """

    def __init__(
        self, popsize, nsteps, generate_direction, scale=1.0,
        scale_adapt_factor=0.9, log=False, logfile=None
    ):
        """Initialise.

        Parameters
        ----------
        popsize: int
            number of walkers to maintain
        nsteps: int
            number of steps to take until the found point is accepted as independent.
        generate_direction: function `(u, region, scale) -> v`
            function such as `generate_unit_directions`, which
            generates a random slice direction.
        scale: float
            initial guess scale for the length of the slice
        scale_adapt_factor: float
            smoothing factor for updating scale.
            if near 1, scale is barely updating, if near 0,
            the last slice length is used as a initial guess for the next.
        log: bool
            whether to print debugging information
        logfile: file
            where to log scale updates
        """
        self.nsteps = nsteps
        self.nrejects = 0
        self.scale = scale
        self.scale_adapt_factor = scale_adapt_factor
        # per-walker state arrays; allocated lazily in _setup() once
        # the problem dimensionality is known
        self.allu = []            # chain positions, (popsize, nsteps+1, ndim)
        self.allL = []            # chain likelihoods, (popsize, nsteps+1)
        self.currentt = []        # current position along the slice
        self.currentv = []        # current slice direction
        self.currentp = []        # transformed parameters of current point
        self.generation = []      # chain step index per walker (-1 = restart)
        self.current_left = []    # left slice bracket endpoint
        self.current_right = []   # right slice bracket endpoint
        self.searching_left = []  # still expanding the bracket leftwards?
        self.searching_right = [] # still expanding the bracket rightwards?
        self.ringindex = 0        # which walker to harvest next (ring buffer)

        self.log = log
        self.logfile = logfile

        self.popsize = popsize
        self.generate_direction = generate_direction

    def __str__(self):
        """Return string representation."""
        return 'PopulationSliceSampler(popsize=%d, nsteps=%d, generate_direction=%s, scale=%.g)' % (
            self.popsize, self.nsteps, self.generate_direction, self.scale)

    def region_changed(self, Ls, region):
        """Act upon region changed. Currently unused."""
        # self.scale = region.us.std(axis=1).mean()
        if self.logfile:
            self.logfile.write("region-update\t%g\t%g\n" % (self.scale, region.us.std(axis=1).mean()))

    def _setup(self, ndim):
        """Allocate arrays."""
        # NaN entries mark not-yet-computed chain slots
        self.allu = np.zeros((self.popsize, self.nsteps + 1, ndim)) + np.nan
        self.allL = np.zeros((self.popsize, self.nsteps + 1)) + np.nan
        self.currentt = np.zeros(self.popsize) + np.nan
        self.currentv = np.zeros((self.popsize, ndim)) + np.nan
        # generation -1 means the walker needs a fresh starting point
        self.generation = np.zeros(self.popsize, dtype=int) - 1
        self.current_left = np.zeros(self.popsize)
        self.current_right = np.zeros(self.popsize)
        self.searching_left = np.zeros(self.popsize, dtype=bool)
        self.searching_right = np.zeros(self.popsize, dtype=bool)

    def setup_start(self, us, Ls, starting):
        """Initialize walker starting points.

        For iteration zero, randomly selects a live point as starting point.

        Parameters
        ----------
        us: np.array((nlive, ndim))
            live points
        Ls: np.array(nlive)
            loglikelihoods live points
        starting: np.array(nwalkers, dtype=bool)
            which walkers to initialize.
        """
        if self.log:
            print("setting up:", starting)
        nlive = len(us)
        i = np.random.randint(nlive, size=starting.sum())

        if not starting.all():
            while starting[self.ringindex]:
                # if the one we are waiting for is being restarted,
                # we may as well pick the next one to wait for
                # because every other one is started from a random point
                # as well
                self.shift()

        self.allu[starting,0] = us[i]
        self.allL[starting,0] = Ls[i]
        self.generation[starting] = 0

    @property
    def status(self):
        """Return compact string representation of the current status."""
        # G line: generation digit per walker ('_' = needs restart);
        # S line: S=needs bracket, L/R=expanding left/right, B=bisecting
        s1 = ('G:' + ''.join(['%d' % g if g >= 0 else '_' for g in self.generation]))
        s2 = ('S:' + ''.join([
            'S' if not np.isfinite(self.currentt[i]) else 'L' if self.searching_left[i] else 'R' if self.searching_right[i] else 'B'
            for i in range(self.popsize)]))
        return s1 + ' ' + s2

    def setup_brackets(self, mask_starting, region):
        """Pick starting direction and range for slice.

        Parameters
        ----------
        region: MLFriends object
            Region
        mask_starting: np.array(nwalkers, dtype=bool)
            which walkers to set up.
        """
        if self.log:
            print("starting brackets:", mask_starting)
        i_starting, = np.where(mask_starting)
        # symmetric initial bracket of width 2*scale around the current point
        self.current_left[i_starting] = -self.scale
        self.current_right[i_starting] = self.scale
        self.searching_left[i_starting] = True
        self.searching_right[i_starting] = True
        self.currentt[i_starting] = 0
        # choose direction for new slice
        self.currentv[i_starting,:] = self.generate_direction(
            self.allu[i_starting, self.generation[i_starting]],
            region)

    def _setup_currentp(self, nparams):
        # lazily allocate the transformed-parameter buffer
        if self.log:
            print("setting currentp")
        self.currentp = np.zeros((self.popsize, nparams)) + np.nan

    def advance(self, transform, loglike, Lmin):
        """Advance the walker population.

        Parameters
        ----------
        transform: function
            prior transform function
        loglike: function
            loglikelihood function
        Lmin: float
            current log-likelihood threshold
        """
        movable = self.generation < self.nsteps
        all_movable = movable.all()
        # print("moving ", movable.sum(), self.popsize)
        if all_movable:
            i = np.arange(self.popsize)
            args = [
                self.allu[i, self.generation],
                self.allL[i, self.generation],
                # pass values directly
                self.currentt,
                self.currentv,
                self.current_left,
                self.current_right,
                self.searching_left,
                self.searching_right
            ]
            del i
        else:
            args = [
                self.allu[movable, self.generation[movable]],
                self.allL[movable, self.generation[movable]],
                # this makes copies
                self.currentt[movable],
                self.currentv[movable],
                self.current_left[movable],
                self.current_right[movable],
                self.searching_left[movable],
                self.searching_right[movable]
            ]
        if self.log:
            print("evolve will advance:", movable)

        (
            (
                currentt, currentv,
                current_left, current_right, searching_left, searching_right
            ),
            (success, unew, pnew, Lnew),
            nc
        ) = evolve(transform, loglike, Lmin, *args)

        if self.log:
            print("movable", movable.shape, movable.sum(), success.shape)
        # walkers that were movable AND whose evolve step succeeded
        moved = submasks(movable, success)
        if self.log:
            print("evolve moved:", moved)
        self.generation[moved] += 1
        if len(pnew) > 0:
            if len(self.currentp) == 0:
                self._setup_currentp(nparams=pnew.shape[1])

            if self.log:
                print("currentp", self.currentp[moved,:].shape, pnew.shape)
            self.currentp[moved,:] = pnew

        # update with what we learned
        # print(currentu.shape, currentL.shape, success.shape, self.generation[movable])
        self.allu[moved, self.generation[moved]] = unew
        self.allL[moved, self.generation[moved]] = Lnew
        if all_movable:
            # in this case, the values were directly overwritten
            pass
        else:
            # write back the copies evolve() worked on
            self.currentt[movable] = currentt
            self.currentv[movable] = currentv
            self.current_left[movable] = current_left
            self.current_right[movable] = current_right
            self.searching_left[movable] = searching_left
            self.searching_right[movable] = searching_right
        return nc

    def shift(self):
        """Update walker from which to pick next."""
        # this is a ring buffer
        # shift index forward, wrapping around
        # this is better than copying memory around when a element is removed
        self.ringindex = (self.ringindex + 1) % self.popsize

    def __next__(
        self, region, Lmin, us, Ls, transform, loglike, ndraw=10,
        plot=False, tregion=None, log=False
    ):
        """Sample a new live point.

        Parameters
        ----------
        region: MLFriends object
            Region
        Lmin: float
            current log-likelihood threshold
        us: np.array((nlive, ndim))
            live points
        Ls: np.array(nlive)
            loglikelihoods live points
        transform: function
            prior transform function
        loglike: function
            loglikelihood function
        ndraw: int
            not used
        plot: bool
            not used
        tregion: bool
            not used
        log: bool
            not used

        Returns
        -------
        u: np.array(ndim) or None
            new point coordinates (None if not yet available)
        p: np.array(nparams) or None
            new point transformed coordinates (None if not yet available)
        L: float or None
            new point likelihood (None if not yet available)
        nc: int
        """
        nlive, ndim = us.shape
        # initialize
        if len(self.allu) == 0:
            self._setup(ndim)

        # rewind chains whose earlier steps fell below the rising threshold
        step_back(Lmin, self.allL, self.generation, self.currentt)

        starting = self.generation < 0
        if starting.any():
            self.setup_start(us[Ls > Lmin], Ls[Ls > Lmin], starting)
        assert (self.generation >= 0).all(), self.generation

        # find those where bracket is undefined:
        mask_starting = ~np.isfinite(self.currentt)
        if mask_starting.any():
            self.setup_brackets(mask_starting, region)

        if self.log:
            print(str(self), "(before)")
        nc = self.advance(transform, loglike, Lmin)
        if self.log:
            print(str(self), "(after)")

        # harvest top individual if possible
        if self.generation[self.ringindex] == self.nsteps:
            if self.log:
                print("have a candidate")
            u, p, L = self.allu[self.ringindex, self.nsteps, :].copy(), self.currentp[self.ringindex, :].copy(), self.allL[self.ringindex, self.nsteps].copy()
            assert np.isfinite(u).all(), u
            assert np.isfinite(p).all(), p
            # reset this walker slot so it restarts from a fresh live point
            self.generation[self.ringindex] = -1
            self.currentt[self.ringindex] = np.nan
            self.allu[self.ringindex,:,:] = np.nan
            self.allL[self.ringindex,:] = np.nan

            # adjust guess length
            newscale = (self.current_right[self.ringindex] - self.current_left[self.ringindex]) / 2
            self.scale = self.scale * 0.9 + 0.1 * newscale

            self.shift()
            return u, p, L, nc
        else:
            return None, None, None, nc
# public API of this module
__all__ = [
    "generate_cube_oriented_direction", "generate_cube_oriented_direction_scaled",
    "generate_random_direction", "generate_region_oriented_direction", "generate_region_random_direction",
    "PopulationRandomWalkSampler", "PopulationSliceSampler"]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = """
Graph-based nested sampling
---------------------------
A formulation of nested sampling exploration as a tree, presented in
section 3.4 of Buchner (2023, https://arxiv.org/abs/2101.09675).
The root represents the prior volume, branches and sub-branches split the volume.
The leaves of the tree are the integration tail.
Nested sampling proceeds as a breadth first graph search,
with active nodes sorted by likelihood value.
The number of live points are the number of parallel edges (active nodes to do).
Most functions receive the argument "roots", which are the
children of the tree root (main branches).
The exploration is bootstrap-capable without requiring additional
computational effort: The roots are indexed, and the bootstrap explorer
can ignore the rootids it does not know about.
"""
import numpy as np
from numpy import log, log1p, exp, logaddexp
import math
import sys
from .utils import resample_equal
from .ordertest import UniformOrderAccumulator
class TreeNode(object):
    """Simple tree node."""

    def __init__(self, value=None, id=None, children=None):
        """Initialise.

        Parameters
        ----------
        value: float
            value used to order nodes (typically log-likelihood)
        id: int
            identifier, refers to the order of discovery and storage (PointPile)
        children: list
            children nodes, should be :py:class:`TreeNode` objects. if None, a empty list is used.
        """
        self.value = value
        self.id = id
        if children:
            self.children = children
        else:
            # default to no children; avoid sharing a mutable default
            self.children = []

    def __str__(self, indent=0):
        """Visual representation of the node and its children (recursive)."""
        header = '%s- Node: %s\n' % (' ' * indent, self.value)
        child_lines = [child.__str__(indent=indent + 2) for child in self.children]
        return header + '\n'.join(child_lines)

    def __lt__(self, other):
        """Define order of node based on value attribute."""
        return self.value < other.value
class BreadthFirstIterator(object):
    """Generator exploring the tree.

    Nodes are ordered by value and expanded in order.
    The number of edges passing the node "in parallel" are "active".
    """

    def __init__(self, roots):
        """Start with initial set of nodes *roots*."""
        self.roots = roots
        self.reset()

    def reset(self):
        """(Re)start exploration from the top."""
        # parallel arrays tracking, for each active edge: the node object,
        # the index of the root it descends from, its value, and its id
        self.active_nodes = list(self.roots)
        self.active_root_ids = np.arange(len(self.active_nodes))
        self.active_node_values = np.array([n.value for n in self.active_nodes])
        self.active_node_ids = np.array([n.id for n in self.active_nodes])
        assert len(self.active_nodes) == len(self.active_root_ids)
        assert len(self.active_nodes) == len(self.active_node_values)
        # print("starting live points from %d roots" % len(self.roots), len(self.active_nodes))

    def next_node(self):
        """Get next node in order.

        Does not remove the node from active set.

        Returns
        --------
        tuple or None:
            None if done.
            `rootid, node, (active_nodes, active_root_ids, active_node_values, active_node_ids)`
            otherwise
        """
        if self.active_nodes == []:
            return None

        # the lowest-valued active node is consumed next
        self.next_index = np.argmin(self.active_node_values)
        i = self.next_index
        node = self.active_nodes[i]
        rootid = self.active_root_ids[i]
        assert not isinstance(rootid, float)
        # print("consuming %.1f" % node.value, len(node.children), 'nlive:', len(self.active_nodes))
        assert len(self.active_nodes) == len(self.active_root_ids)
        assert len(self.active_nodes) == len(self.active_node_values)
        return rootid, node, (self.active_nodes, self.active_root_ids, self.active_node_values, self.active_node_ids)

    def drop_next_node(self):
        """Forget the current node."""
        # remove the entry at next_index from all four parallel arrays
        i = self.next_index
        mask = np.ones(len(self.active_nodes), dtype=bool)
        mask[i] = False
        self.active_nodes.pop(i)
        self.active_node_values = self.active_node_values[mask]
        self.active_root_ids = self.active_root_ids[mask]
        self.active_node_ids = self.active_node_ids[mask]
        assert len(self.active_nodes) == len(self.active_root_ids)
        assert len(self.active_nodes) == len(self.active_node_values)

    def expand_children_of(self, rootid, node):
        """Replace the current node with its children in the iterators list of active nodes.

        Parameters
        ----------
        rootid: int
            index of the root returned by the most recent call to :py:meth:`BreadthFirstIterator.next_node`
        node: :py:class:`TreeNode`
            node returned by the most recent call to :py:meth:`BreadthFirstIterator.next_node`
        """
        # print("replacing %.1f" % node.value, len(node.children))
        i = self.next_index
        newnnodes = len(self.active_nodes) - 1 + len(node.children)
        if len(node.children) == 1:
            # single child: cheapest update, replace entries in place
            self.active_nodes[i] = node.children[0]
            self.active_node_values[i] = node.children[0].value
            self.active_root_ids[i] = rootid
            self.active_node_ids[i] = node.children[0].id
        else:
            mask = np.ones(len(self.active_nodes), dtype=bool)
            mask[i] = False
            self.active_nodes.pop(i)
            if len(node.children) == 0:
                # dead end: just remove the node from all arrays
                self.active_node_values = self.active_node_values[mask]
                self.active_root_ids = self.active_root_ids[mask]
                self.active_node_ids = self.active_node_ids[mask]
            else:
                # branching: append all children, each inheriting rootid
                self.active_nodes += node.children
                self.active_node_values = np.concatenate((self.active_node_values[mask], [c.value for c in node.children]))
                # print(self.active_root_ids, '+', [rootid for c in node.children], '-->')
                self.active_root_ids = np.concatenate((self.active_root_ids[mask], [rootid for c in node.children]))
                self.active_node_ids = np.concatenate((self.active_node_ids[mask], [c.id for c in node.children]))
                # print(self.active_root_ids)
        # consistency checks: all four arrays stay in lock-step
        assert len(self.active_nodes) == len(self.active_root_ids)
        assert len(self.active_nodes) == len(self.active_node_values)
        assert len(self.active_nodes) == len(self.active_node_ids)
        assert newnnodes == len(self.active_nodes), (len(self.active_nodes), newnnodes, len(node.children))
        assert newnnodes == len(self.active_root_ids), (len(self.active_root_ids), newnnodes, len(node.children))
        assert newnnodes == len(self.active_node_values), (len(self.active_node_values), newnnodes, len(node.children))
        assert newnnodes == len(self.active_node_ids), (len(self.active_node_ids), newnnodes, len(node.children))
def _stringify_lanes(lanes, char='║'):
    """unicode-draw lanes, fill with vertical stripes or spaces."""
    chars = []
    for lane in lanes:
        chars.append(' ' if lane is None else char)
    return ''.join(chars)
def print_tree(roots, title='Tree:'):
    """Print a pretty yet compact graphic of the tree.

    Parameters
    ----------
    roots: list
        list of :py:class:`TreeNode` specifying the roots of the tree.
    title: str
        Print this string first.
    """
    print()
    print(title)
    explorer = BreadthFirstIterator(roots)
    # lanes holds, per column, the node whose edge currently occupies it
    lanes = list(roots)
    lastlane = -1

    while True:
        next_node = explorer.next_node()
        if next_node is None:
            break
        rootid, node, (active_nodes, active_rootids, active_values, active_nodeids) = next_node
        laneid = lanes.index(node)
        nchildren = len(node.children)
        # draw the lanes to the left and right of the current node
        leftstr = _stringify_lanes(lanes[:laneid])
        rightstr = _stringify_lanes(lanes[laneid + 1:])
        if lastlane == laneid:
            # same lane as last time: draw a connecting vertical segment
            sys.stdout.write(leftstr + '║' + rightstr + "\n")

        # annotate this row with the node's value
        rightstr = rightstr + " \t" + str(node.value)
        if nchildren == 0:
            # leaf: terminate the lane
            sys.stdout.write(leftstr + 'O' + rightstr + "\n")
            lanes[laneid] = None  # keep lane empty
        elif nchildren == 1:
            # single child continues in the same lane
            sys.stdout.write(leftstr + '+' + rightstr + "\n")
            lanes[laneid] = node.children[0]
        else:
            # expand width:
            for j, child in enumerate(node.children):
                rightstr2 = _stringify_lanes(lanes[laneid + 1:], char='\\')
                if len(rightstr2) != 0:
                    sys.stdout.write(leftstr + '║' + ' ' * j + rightstr2 + "\n")
            sys.stdout.write(leftstr + '╠' + '╦' * (nchildren - 2) + '╗' + rightstr + "\n")
            # replace the lane with one lane per child
            lanes.pop(laneid)
            for j, child in enumerate(node.children):
                lanes.insert(laneid, child)
        explorer.expand_children_of(rootid, node)
        lastlane = laneid
def dump_tree(filename, roots, pointpile):
    """Write a copy of the tree to a HDF5 file.

    Parameters
    ----------
    filename: str
        output filename
    roots: list
        list of :py:class:`TreeNode` specifying the roots of the tree.
    pointpile: :py:class:`PointPile`
        information on the node points
    """
    import h5py

    # flatten the tree into (parent id, child id, child value) triples
    nodes_from_ids = []
    nodes_to_ids = []
    nodes_values = []
    explorer = BreadthFirstIterator(roots)
    while True:
        next_node = explorer.next_node()
        if next_node is None:
            break
        rootid, node, (active_nodes, active_rootids, active_values, active_nodeids) = next_node
        for c in node.children:
            nodes_from_ids.append(node.id)
            nodes_to_ids.append(c.id)
            nodes_values.append(c.value)
        explorer.expand_children_of(rootid, node)

    with h5py.File(filename, 'w') as f:
        # NOTE(review): assumes pointpile exposes us/ps arrays and nrows — confirm against PointPile
        f.create_dataset('unit_points', data=pointpile.us[:pointpile.nrows,:], compression='gzip', shuffle=True)
        f.create_dataset('points', data=pointpile.ps[:pointpile.nrows,:], compression='gzip', shuffle=True)
        f.create_dataset('nodes_parent_id', data=nodes_from_ids, compression='gzip', shuffle=True)
        f.create_dataset('nodes_child_id', data=nodes_to_ids, compression='gzip', shuffle=True)
        f.create_dataset('nodes_child_logl', data=nodes_values, compression='gzip', shuffle=True)
def count_tree(roots):
    """Return the total number of nodes and maximum number of parallel edges.

    Parameters
    ----------
    roots: list
        list of :py:class:`TreeNode` specifying the roots of the tree.

    Returns
    --------
    count: int
        total number of nodes
    maxwidth: int
        maximum number of active/parallel nodes encountered
    """
    explorer = BreadthFirstIterator(roots)
    total = 0
    widest = 0
    while True:
        item = explorer.next_node()
        if item is None:
            break
        rootid, node, (_, active_rootids, _, _) = item
        widest = max(widest, len(active_rootids))
        total += 1
        explorer.expand_children_of(rootid, node)
    return total, widest
def count_tree_between(roots, lo, hi):
    """Compute basic statistics about a tree.

    Return the total number of nodes and maximum number of parallel edges,
    but only considering a interval of the tree.

    Parameters
    ----------
    roots: list
        list of :py:class:`TreeNode` specifying the roots of the tree.
    lo: float
        lower value threshold
    hi: float
        upper value threshold

    Returns
    --------
    nnodes: int
        total number of nodes in the value interval lo .. hi (inclusive).
    maxwidth: int
        maximum number of parallel edges
    """
    explorer = BreadthFirstIterator(roots)
    nnodes = 0
    maxwidth = 0
    while True:
        item = explorer.next_node()
        if item is None:
            return nnodes, maxwidth
        rootid, node, (_, active_rootids, _, _) = item
        if node.value > hi:
            # breadth-first order visits by increasing value, so once past
            # the upper bound no further node can lie inside the interval
            return nnodes, maxwidth
        if lo <= node.value <= hi:
            maxwidth = max(maxwidth, len(active_rootids))
            nnodes += 1
        explorer.expand_children_of(rootid, node)
def find_nodes_before(root, value):
    """Identify all nodes that have children above *value*.

    If a root child is above the value, its parent (*root*) is the leaf returned.

    Parameters
    ----------
    root: :py:class:`TreeNode`
        tree
    value: float
        selection threshold

    Returns
    --------
    list_of_parents: list of nodes
        parents
    list_of_nforks: list of floats
        The list of number of forks experienced is:
        1 if direct descendent of one of the root node's children,
        where no node had more than one child.
        12 if the root child had 4 children, one of which had 3 children.
    """
    roots = root.children
    parents = []
    parent_weights = []
    # accumulated fork multiplicity per still-open node, keyed by node id
    weights = {n.id: 1. for n in roots}
    explorer = BreadthFirstIterator(roots)
    while True:
        next_node = explorer.next_node()
        if next_node is None:
            break
        rootid, node, _ = next_node
        if node.value >= value:
            # already past (root child)
            parents.append(root)
            parent_weights.append(1)
            break
        elif any(n.value >= value for n in node.children):
            # found matching parent
            parents.append(node)
            parent_weights.append(weights[node.id])
            # stop descending into this subtree
            explorer.drop_next_node()
        else:
            # continue exploring; children inherit the parent's weight
            # multiplied by the number of forks taken here
            explorer.expand_children_of(rootid, node)
            weights.update({n.id: weights[node.id] * len(node.children)
                            for n in node.children})
            del weights[node.id]
    return parents, parent_weights
class PointPile(object):
    """A in-memory linearized storage of point coordinates.

    :py:class:`TreeNode` objects only store the logL value and id,
    which is the index in the point pile. The point pile stores
    the point coordinates in u and p-space (transformed and untransformed).
    """

    def __init__(self, udim, pdim, chunksize=1000):
        """Set up point pile.

        Parameters
        -----------
        udim: int
            number of parameters, dimension of unit cube points
        pdim: int
            number of physical (and derived) parameters
        chunksize: int
            the point pile grows as needed, in these intervals.
        """
        self.nrows = 0
        self.chunksize = chunksize
        self.udim = udim
        self.pdim = pdim
        # pre-allocated storage; rows at index >= self.nrows are unused
        self.us = np.zeros((chunksize, udim))
        self.ps = np.zeros((chunksize, pdim))

    def add(self, newpointu, newpointp):
        """Save point.

        Parameters
        -----------
        newpointu: array
            point (in u-space)
        newpointp: array
            point (in p-space)

        Returns
        ---------
        index: int
            index of the new point in the pile
        """
        # grow the pre-allocated arrays by one chunk when full
        if self.nrows >= self.us.shape[0]:
            self.us = np.concatenate((self.us, np.zeros((self.chunksize, self.udim))))
            self.ps = np.concatenate((self.ps, np.zeros((self.chunksize, self.pdim))))
        assert len(newpointu) == self.us.shape[1], (newpointu, self.us.shape)
        assert len(newpointp) == self.ps.shape[1], (newpointp, self.ps.shape)
        index = self.nrows
        self.us[index,:] = newpointu
        self.ps[index,:] = newpointp
        self.nrows = index + 1
        return index

    def getu(self, i):
        """Get cube point(s) with index(indices) `i`."""
        return self.us[i]

    def getp(self, i):
        """Get parameter point(s) with index(indices) `i`."""
        return self.ps[i]

    def make_node(self, value, u, p):
        """Store point in pile, and create a new tree node that points to it.

        Parameters
        -----------
        value: float
            value to store in node (loglikelihood)
        u: array
            point (in u-space)
        p: array
            point (in p-space)

        Returns
        ---------
        node: :py:class:`TreeNode`
            node
        """
        return TreeNode(value=value, id=self.add(u, p))
class SingleCounter(object):
    """Evidence log(Z) and posterior weight summation for a Nested Sampling tree."""

    def __init__(self, random=False):
        """Initialise.

        Parameters
        ----------
        random: bool
            if False, use mean estimator for volume shrinkage
            if True, draw a random sample
        """
        self.reset()
        self.random = random

    def reset(self):
        """Reset counters and integration."""
        # log of the volume slice assigned to each consumed node
        self.logweights = []
        # information H; None until the first node contributes
        self.H = None
        self.logZ = -np.inf
        self.logZerr = np.inf
        # log of the prior volume not yet consumed; starts at log(1) = 0
        self.logVolremaining = 0
        self.i = 0
        self.fraction_remaining = np.inf
        self.Lmax = -np.inf

    @property
    def logZremain(self):
        """Estimate conservatively the logZ of the current tail (un-opened nodes)."""
        return self.Lmax + self.logVolremaining

    def passing_node(self, node, parallel_nodes):
        """Accumulate node to the integration.

        Parameters
        -----------
        node: TreeNode
            breadth-first removed node
        parallel_nodes: list
            nodes active next to node
        """
        # node is being consumed
        # we have parallel arcs to parallel_nodes
        nchildren = len(node.children)
        Li = node.value
        nlive = len(parallel_nodes)
        if nchildren >= 1:
            # one arc terminates, another is spawned

            # weight is the size of the slice off the volume
            logleft = log1p(-exp(-1. / nlive))
            logright = -1. / nlive
            if self.random:
                # draw the shrinkage instead of using its expectation
                randompoint = np.random.beta(1, nlive)
                logleft = log(randompoint)
                logright = log1p(-randompoint)

            logwidth = logleft + self.logVolremaining
            wi = logwidth + Li
            self.logweights.append(logwidth)
            if math.isinf(self.logZ):
                # first contribution: initialise logZ and H directly
                self.logZ = wi
                self.H = Li - self.logZ
            else:
                # standard nested sampling running update of logZ and H
                logZnew = logaddexp(self.logZ, wi)
                self.H = exp(wi - logZnew) * Li + exp(self.logZ - logZnew) * (self.H + self.logZ) - logZnew
                assert np.all(np.isfinite(self.H)), (self.H, wi, logZnew, Li, self.logZ)
                self.logZ = logZnew

            # print(self.H)
            # self.Lmax = max(node.value, self.Lmax)
            # self.Lmax = max((n.value for n in parallel_nodes))
            # logZremain = parallel_nodes.max() + self.logVolremaining
            # print("L=%.1f N=%d V=%.2e logw=%.2e logZ=%.1f logZremain=%.1f" % (Li, nlive, self.logVolremaining, wi, self.logZ, logZremain))

            # volume is reduced by exp(-1/N)
            self.logVolremaining += logright

            # TODO: this needs to change if nlive varies
            if self.H >= 0:
                self.logZerr = (self.H / nlive)**0.5
                assert np.isfinite(self.logZerr), (self.H, nlive)
        else:
            # contracting!

            # weight is simply volume / Nlive
            logwidth = self.logVolremaining - log(nlive)
            wi = logwidth + Li
            self.logweights.append(logwidth)
            self.logZ = logaddexp(self.logZ, wi)

            # print("L=%.1f N=%d V=%.2e logw=%.2e logZ=%.1f" % (Li, nlive, self.logVolremaining, wi, self.logZ))

            # the volume shrinks by (N - 1) / N
            # self.logVolremaining += log(1 - exp(-1. / nlive))
            # if nlive = 1, we are removing the last point, so remaining
            # volume is zero (leads to log of -inf, as expected)
            with np.errstate(divide='ignore'):
                self.logVolremaining += log1p(-1.0 / nlive)
class MultiCounter(object):
    """Like :py:class:`SingleCounter`, but bootstrap capable.

    **Attributes**:

    - ``logZ``, ``logZerr``, ``logVolremaining``: main estimator
      ``logZerr`` is probably not reliable, because it needs ``nlive``
      to convert ``H`` to ``logZerr``.
    - ``Lmax``: highest loglikelihood currently known
    - ``logZ_bs``, ``logZerr_bs``: bootstrapped logZ estimate
    - ``logZremain``, ``remainder_ratio``: weight and fraction of the unexplored remainder

    Each of the following has as many entries as number of iterations:

    - ``all_H``, ``all_logZ``, ``all_logVolremaining``, ``logweights``:
      information for all instances
      first entry is the main estimator, i.e., not bootstrapped
    - ``istail``: whether that node was a leaf.
    - ``nlive``: number of parallel arcs ("live points")
    """

    def __init__(self, nroots, nbootstraps=10, random=False, check_insertion_order=False):
        """Initialise counter.

        Parameters
        ----------
        nroots: int
            number of children the tree root has
        nbootstraps: int
            number of bootstrap rounds
        random: bool
            if False, use mean estimator for volume shrinkage
            if True, draw a random sample
        check_insertion_order: bool
            whether to run insertion order rank U test
        """
        allyes = np.ones(nroots, dtype=bool)
        # the following is a masked array of size (nbootstraps+1, nroots)
        # which rootids are active in each bootstrap instance
        # the first one contains everything
        self.rootids = [allyes]
        self.insertion_order_sample = []
        # np.random.seed(1)
        for i in range(nbootstraps):
            # draw roots with replacement; duplicates collapse to one entry
            mask = ~allyes
            rootids = np.unique(np.random.randint(nroots, size=nroots))
            mask[rootids] = True
            self.rootids.append(mask)
        self.rootids = np.array(self.rootids)

        self.random = random
        self.ncounters = len(self.rootids)
        self.check_insertion_order = check_insertion_order
        # z-score threshold (in sigma) at which an insertion-order run ends
        self.insertion_order_threshold = 4
        self.insertion_order_accumulator = UniformOrderAccumulator()
        self.reset(len(self.rootids))

    def reset(self, nentries):
        """Reset counters/integrator.

        Parameters
        ----------
        nentries: int
            number of iterators
        """
        self.logweights = []
        self.istail = []
        self.logZ = -np.inf
        self.logZerr = np.inf
        # NaN marks "not yet initialised" entries of H
        self.all_H = -np.nan * np.ones(nentries)
        self.all_logZ = -np.inf * np.ones(nentries)
        self.all_logVolremaining = np.zeros(nentries)
        self.logVolremaining = 0.0
        self.Lmax = -np.inf
        self.all_logZremain = np.inf * np.ones(nentries)
        self.logZremainMax = np.inf
        self.logZremain = np.inf
        self.remainder_ratio = 1.0
        self.remainder_fraction = 1.0
        self.insertion_order_accumulator.reset()
        self.insertion_order_runs = []

    @property
    def logZ_bs(self):
        """Estimate logZ from the bootstrap ensemble."""
        return self.all_logZ[1:].mean()

    @property
    def logZerr_bs(self):
        """Estimate logZ error from the bootstrap ensemble."""
        return self.all_logZ[1:].std()

    @property
    def insertion_order_runlength(self):
        """Get shortest insertion order test run.

        Returns
        --------
        shortest_run_length: int
            Shortest insertion order test run length.

        The MWW (U-test) statistic is considered at each iteration.
        When it exceeds a threshold (4 sigma by default, `insertion_order_threshold`),
        the statistic is reset. The run length is recorded.

        This property returns the shortest run length of all recorded
        so far, or infinity otherwise.

        At 4 sigma, run lengths no shorter than 10^5.5 are expected
        in unbiased runs.
        """
        runs = self.insertion_order_runs
        if len(runs) == 0:
            return np.inf
        else:
            return min(runs)

    @property
    def insertion_order_converged(self):
        """Check convergence.

        Returns
        --------
        converged: bool
            Whether the run is unbiased according to a U-test.

        The MWW (U-test) statistic is considered at each iteration.
        When it exceeds a threshold (4 sigma by default, `insertion_order_threshold`),
        the statistic is reset. The run length is recorded.

        This property returns the shortest run length of all recorded
        so far, or infinity otherwise.

        At 4 sigma, run lengths no shorter than 10^5.5 are expected
        in unbiased runs. If the number of runs exceeds the number
        of iterations divided by 10^5.5, the run is likely biased
        and not converged.

        If not converged, the step sampler may need to use more steps,
        or the problem needs to be reparametrized.
        """
        # we expect run lengths not shorter than 300000 for 4sigma
        # if we get many more than expected from the number of iterations
        # the run has not converged
        niter = len(self.logweights)
        expected_number = max(1, int(np.ceil(niter / 10**(5.5))))
        return len(self.insertion_order_runs) <= expected_number

    def passing_node(self, rootid, node, rootids, parallel_values):
        """Accumulate node to the integration.

        Breadth-first removed `node` and nodes active next to node (`parallel_nodes`).
        rootid and rootids are needed to identify which bootstrap instance
        should accumulate.

        Parameters
        ----------
        rootid: :py:class:`TreeNode`
            root node this `node` is from.
        node: :py:class:`TreeNode`
            node being processed.
        rootids: array of ints
            for each parallel node, which root it belongs to.
        parallel_values: float array
            loglikelihood values of nodes passing `node`.
        """
        # node is being consumed
        # we have parallel arcs to parallel_nodes
        assert not isinstance(rootid, float)

        nchildren = len(node.children)
        Li = node.value
        # in which bootstraps is rootid?
        active = self.rootids[:,rootid]
        # how many live points does each bootstrap have?
        nlive = self.rootids[:,rootids].sum(axis=1)
        nlive0 = nlive[0]

        if nchildren >= 1:
            # one arc terminates, another is spawned

            # weight is the size of the slice off the volume
            if self.random:
                randompoint = np.random.beta(1, nlive, size=self.ncounters)
                logleft = log(randompoint)
                logright = log1p(-randompoint)
                # main estimator (entry 0) always uses the mean shrinkage
                logleft[0] = log1p(-exp(-1. / nlive0))
                logright[0] = -1. / nlive0
            else:
                logleft = log1p(-exp(-1. / nlive))
                logright = -1. / nlive

            logwidth = logleft + self.all_logVolremaining
            # bootstraps not containing this root do not accumulate weight
            logwidth[~active] = -np.inf
            wi = logwidth[active] + Li
            self.logweights.append(logwidth)
            self.istail.append(False)

            # print("updating continuation...", Li)
            assert active[0], (active, rootid)
            logZ = self.all_logZ[active]
            logZnew = logaddexp(logZ, wi)
            H = exp(wi - logZnew) * Li + exp(logZ - logZnew) * (self.all_H[active] + logZ) - logZnew
            # NaN H marks counters receiving their first contribution
            first_setting = np.isnan(H)
            # print()
            # print("Hnext:", H[0], first_setting[0])
            assert np.isfinite(H[~first_setting]).all(), (first_setting, self.all_H[active][~first_setting], H, wi, logZnew, Li, logZ)
            self.all_logZ[active] = np.where(first_setting, wi, logZnew)
            # print("logZ:", self.all_logZ[0])
            if first_setting[0]:
                assert np.all(np.isfinite(Li - wi)), (Li, wi)
            else:
                assert np.isfinite(self.all_H[0]), self.all_H[0]
                assert np.isfinite(H[0]), (first_setting[0], H[0], self.all_H[0], wi[0], logZnew[0], Li, logZ[0])
            self.all_H[active] = np.where(first_setting, -logwidth[active], H)
            # print("H:", self.all_H)
            assert np.isfinite(self.all_H[active]).all(), (self.all_H[active], first_setting[0], H[0], self.all_H[0], wi[0], logZnew[0], Li, logZ[0])
            # assert np.all(np.isfinite(self.all_H[active])), (H, self.all_H[active], wi, logZnew, Li, logZ)
            self.logZ = self.all_logZ[0]
            assert np.all(np.isfinite(self.all_logZ[active])), (self.all_logZ[active])

            # self.Lmax = max((n.value for n in parallel_nodes))
            # print("L=%.1f N=%d V=%.2e logw=%.2e logZ=%.1f logZremain=%.1f" % (
            #     Li, nlive[0], self.logVolremaining, wi[0], self.logZ, logZremain))
            # print("L=%.1f N=%d V=%.2e logw=%.2e logZ=%.1f logZremain=%.1f" % (
            #     Li, nlive[0], self.all_logVolremaining[0], (logwidth + Li)[0], self.all_logZ[0], logZremain))
            # print("L=%.1f N=%d V=%.2e logw=%.2e logZ=<%.1f logZremain=%.1f" % (
            #     Li, nlive[1], self.all_logVolremaining[1], (logwidth + Li)[1], self.all_logZ[1], logZremain))

            if self.all_H[0] > 0:
                # TODO: this needs to change if nlive varies
                self.logZerr = (self.all_H[0] / nlive0)**0.5
                # assert np.all(np.isfinite(self.logZerr)), (self.logZerr, self.all_H[0], nlive)

            # volume is reduced by exp(-1/N)
            self.all_logVolremaining[active] += logright[active]
            self.logVolremaining = self.all_logVolremaining[0]

            # rank test only makes sense when loglikelihood values are unique
            if self.check_insertion_order and len(np.unique(parallel_values)) == len(parallel_values):
                acc = self.insertion_order_accumulator
                parallel_values_here = parallel_values[self.rootids[0, rootids]]
                for child in node.children:
                    # rootids is 400 ints pointing to the root id where each parallel_values is from
                    # self.rootids[i] says which rootids belong to this bootstrap
                    # need which of the parallel_values are active here
                    acc.add((parallel_values_here < child.value).sum(), nlive0)
                    if abs(acc.zscore) > self.insertion_order_threshold:
                        self.insertion_order_runs.append(len(acc))
                        acc.reset()
        else:
            # contracting!
            # print("contracting...", Li)

            # weight is simply volume / Nlive
            logwidth = -np.inf * np.ones(self.ncounters)
            logwidth[active] = self.all_logVolremaining[active] - log(nlive[active])
            wi = logwidth + Li
            self.logweights.append(logwidth)
            self.istail.append(True)
            self.all_logZ[active] = logaddexp(self.all_logZ[active], wi[active])
            self.logZ = self.all_logZ[0]

            # print("L=%.1f N=%d V=%.2e logw=%.2e logZ=%.1f" % (Li, nlive, self.logVolremaining, wi, self.logZ))

            # the volume shrinks by (N - 1) / N
            # self.logVolremaining += log(1 - exp(-1. / nlive))
            # if nlive = 1, we are removing the last point, so remaining
            # volume is zero (leads to log of -inf, as expected)
            with np.errstate(divide='ignore'):
                self.all_logVolremaining[active] += log1p(-1.0 / nlive[active])
            self.logVolremaining = self.all_logVolremaining[0]

        # conservative estimate of what the unexplored remainder can contribute
        V = self.all_logVolremaining - log(nlive0)
        Lmax = np.max(parallel_values)
        self.all_logZremain = V + log(np.sum(exp(parallel_values - Lmax))) + Lmax
        self.logZremainMax = self.all_logZremain.max()
        self.logZremain = self.all_logZremain[0]
        with np.errstate(over='ignore', under='ignore'):
            self.remainder_ratio = exp(self.logZremain - self.logZ)
            self.remainder_fraction = 1.0 / (1 + exp(self.logZ - self.logZremain))
def combine_results(saved_logl, saved_nodeids, pointpile, main_iterator, mpi_comm=None):
    """Combine a sequence of likelihoods and nodes into a summary dictionary.

    Parameters
    ----------
    saved_logl: list of floats
        loglikelihoods of dead points
    saved_nodeids: list of ints
        indices of dead points
    pointpile: :py:class:`PointPile`
        Point pile.
    main_iterator: :py:class:`BreadthFirstIterator`
        iterator used
    mpi_comm:
        MPI communicator object, or None if MPI is not used.

    Returns
    --------
    results: dict
        All information of the run. Important keys:
        Number of nested sampling iterations (niter),
        Evidence estimate (logz),
        Effective Sample Size (ess),
        H (information gain),
        weighted samples (weighted_samples),
        equally weighted samples (samples),
        best-fit point information (maximum_likelihood),
        posterior summaries (posterior).
        The rank order test score (insertion_order_MWW_test) is
        included if the iterator has it.
    """
    assert np.shape(main_iterator.logweights) == (len(saved_logl), len(main_iterator.all_logZ)), (
        np.shape(main_iterator.logweights),
        np.shape(saved_logl),
        np.shape(main_iterator.all_logZ))

    saved_logl = np.array(saved_logl)
    saved_u = pointpile.getu(saved_nodeids)
    saved_v = pointpile.getp(saved_nodeids)
    saved_logwt = np.array(main_iterator.logweights)
    # column 0 is the main estimator; remaining columns are bootstraps
    saved_logwt0 = saved_logwt[:,0]
    saved_logwt_bs = saved_logwt[:,1:]
    logZ_bs = main_iterator.all_logZ[1:]
    assert len(saved_logwt_bs) == len(saved_nodeids), (saved_logwt_bs.shape, len(saved_nodeids))

    if mpi_comm is not None:
        # spread logZ_bs, saved_logwt_bs
        recv_saved_logwt_bs = mpi_comm.gather(saved_logwt_bs, root=0)
        recv_saved_logwt_bs = mpi_comm.bcast(recv_saved_logwt_bs, root=0)
        saved_logwt_bs = np.concatenate(recv_saved_logwt_bs, axis=1)

        recv_logZ_bs = mpi_comm.gather(logZ_bs, root=0)
        recv_logZ_bs = mpi_comm.bcast(recv_logZ_bs, root=0)
        logZ_bs = np.concatenate(recv_logZ_bs)

    # normalised posterior weights per bootstrap, and for the main estimator
    saved_wt_bs = exp(saved_logwt_bs + saved_logl.reshape((-1, 1)) - logZ_bs)
    saved_wt0 = exp(saved_logwt0 + saved_logl - main_iterator.all_logZ[0])

    # compute fraction in tail
    w = saved_wt0 / saved_wt0.sum()
    assert np.isclose(w.sum() - 1, 0), w.sum()
    # Kish effective sample size
    ess = len(w) / (1.0 + ((len(w) * w - 1)**2).sum() / len(w))
    tail_fraction = w[np.asarray(main_iterator.istail)].sum()
    if tail_fraction != 0:
        logzerr_tail = logaddexp(log(tail_fraction) + main_iterator.logZ, main_iterator.logZ) - main_iterator.logZ
    else:
        logzerr_tail = 0

    # combine bootstrap scatter and tail truncation error in quadrature
    logzerr_bs = (logZ_bs - main_iterator.logZ).max()
    logzerr_total = (logzerr_tail**2 + logzerr_bs**2)**0.5
    samples = resample_equal(saved_v, w)

    ndim = saved_u.shape[1]
    # per-axis information gain relative to the uniform unit cube, in bits
    information_gain_bits = []
    for i in range(ndim):
        H, _ = np.histogram(saved_u[:,i], weights=saved_wt0, density=True, bins=np.linspace(0, 1, 40))
        information_gain_bits.append(float((np.log2(1 / ((H + 0.001) * 40)) / 40).sum()))

    # index of the maximum-likelihood dead point
    j = saved_logl.argmax()

    results = dict(
        niter=len(saved_logl),
        logz=main_iterator.logZ, logzerr=logzerr_total,
        logz_bs=logZ_bs.mean(),
        logz_single=main_iterator.logZ,
        logzerr_tail=logzerr_tail,
        logzerr_bs=logzerr_bs,
        ess=ess,
        H=main_iterator.all_H[0], Herr=main_iterator.all_H.std(),
        posterior=dict(
            mean=samples.mean(axis=0).tolist(),
            stdev=samples.std(axis=0).tolist(),
            median=np.percentile(samples, 50, axis=0).tolist(),
            errlo=np.percentile(samples, 15.8655, axis=0).tolist(),
            errup=np.percentile(samples, 84.1345, axis=0).tolist(),
            information_gain_bits=information_gain_bits,
        ),
        weighted_samples=dict(
            upoints=saved_u, points=saved_v, weights=saved_wt0, logw=saved_logwt0,
            bootstrapped_weights=saved_wt_bs, logl=saved_logl),
        samples=samples,
        maximum_likelihood=dict(
            logl=saved_logl[j],
            point=saved_v[j,:].tolist(),
            point_untransformed=saved_u[j,:].tolist(),
        ),
    )

    if getattr(main_iterator, 'check_insertion_order', False):
        results['insertion_order_MWW_test'] = dict(
            independent_iterations=main_iterator.insertion_order_runlength,
            converged=main_iterator.insertion_order_converged,
        )
    return results
def logz_sequence(root, pointpile, nbootstraps=12, random=True, onNode=None, verbose=False, check_insertion_order=True):
    """Run MultiCounter through tree `root`.

    Keeps track of, and returns ``(logz, logzerr, logv, nlive)``.

    Parameters
    ----------
    root: :py:class:`TreeNode`
        Tree
    pointpile: :py:class:`PointPile`
        Point pile
    nbootstraps: int
        Number of independent iterators
    random: bool
        Whether to randomly draw volume estimates
    onNode: function
        Function to call for every node.
        receives current node and the iterator
    verbose: bool
        Whether to show a progress indicator on stderr
    check_insertion_order: bool
        Whether to perform a rolling insertion order rank test

    Returns
    --------
    results: dict
        Run information, see :py:func:`combine_results`
    sequence: dict
        Each entry of the dictionary is results['niter'] long,
        and contains the state of information at that iteration.
        Important keys are:
        Iteration number (niter),
        Volume estimate (logvol), loglikelihood (logl), absolute logarithmic weight (logwt),
        Relative weight (weights), point (samples),
        Number of live points (nlive),
        Evidence estimate (logz) and its uncertainty (logzerr),
        Rank test score (insert_order).
    """
    roots = root.children

    Lmax = -np.inf

    explorer = BreadthFirstIterator(roots)
    # Integrating thing
    main_iterator = MultiCounter(
        nroots=len(roots), nbootstraps=max(1, nbootstraps),
        random=random, check_insertion_order=check_insertion_order)
    main_iterator.Lmax = max(Lmax, max(n.value for n in roots))

    logz = []
    logzerr = []
    nlive = []
    logvol = []
    niter = 0

    saved_nodeids = []
    saved_logl = []
    insert_order = []
    # we go through each live point (regardless of root) by likelihood value
    while True:
        next_node = explorer.next_node()
        if next_node is None:
            break
        rootid, node, (active_nodes, active_rootids, active_values, active_node_ids) = next_node
        # this is the likelihood level we have to improve upon
        Lmin = node.value

        if onNode:
            onNode(node, main_iterator)

        logz.append(main_iterator.logZ)
        with np.errstate(invalid='ignore'):
            # first time they are all the same
            logzerr.append(main_iterator.logZerr_bs)

        nactive = len(active_values)
        # rank of the first child among the currently-active values;
        # only meaningful when all active values are distinct
        if len(np.unique(active_values)) == nactive and len(node.children) > 0:
            child_insertion_order = (active_values > node.children[0].value).sum()
            insert_order.append(2 * (child_insertion_order + 1.) / nactive)
        else:
            insert_order.append(np.nan)
        nlive.append(nactive)
        logvol.append(main_iterator.logVolremaining)
        niter += 1
        if verbose:
            sys.stderr.write("%d...\r" % niter)

        saved_logl.append(Lmin)
        saved_nodeids.append(node.id)

        # inform iterators (if it is their business) about the arc
        main_iterator.passing_node(rootid, node, active_rootids, active_values)
        explorer.expand_children_of(rootid, node)

    logwt = np.asarray(saved_logl) + np.asarray(main_iterator.logweights)[:,0]
    # last entry repeats the second-to-last volume
    logvol[-1] = logvol[-2]

    results = combine_results(saved_logl, saved_nodeids, pointpile, main_iterator)
    sequence = dict(
        logz=np.asarray(logz),
        logzerr=np.asarray(logzerr),
        logvol=np.asarray(logvol),
        samples_n=np.asarray(nlive),
        nlive=np.asarray(nlive),
        insert_order=np.asarray(insert_order),
        logwt=logwt,
        niter=niter,
        logl=saved_logl,
        weights=results['weighted_samples']['weights'],
        samples=results['weighted_samples']['points'],
    )

    return sequence, results
"""Sparsely sampled, virtual sampling path.
Supports reflections at unit cube boundaries, and regions.
"""
import numpy as np
from numpy.linalg import norm
import matplotlib.pyplot as plt
def nearest_box_intersection_line(ray_origin, ray_direction, fwd=True):
    r"""Compute intersection of a line (ray) and a unit box (0:1 in all axes).

    Based on
    http://www.iquilezles.org/www/articles/intersectors/intersectors.htm

    To continue forward traversing at the reflection point use::

        while True:
            # update current point x
            x, _, i = box_line_intersection(x, v)
            # change direction
            v[i] *= -1

    Parameters
    -----------
    ray_origin: vector
        starting point of line
    ray_direction: vector
        line direction vector

    Returns
    --------
    p: vector
        intersection point
    t: float
        intersection point distance from ray\_origin in units in ray\_direction
    i: int
        axes which change direction at pN
    """
    # the ray must start inside the box and have a non-degenerate direction
    assert (ray_origin >= 0).all(), ray_origin
    assert (ray_origin <= 1).all(), ray_origin
    assert ((ray_direction**2).sum()**0.5 > 1e-200).all(), ray_direction

    # slab method: per-axis inverse step sizes (infinities/NaNs for zero
    # components are harmless thanks to nanmin/nanmax below)
    with np.errstate(divide='ignore', invalid='ignore'):
        m = 1. / ray_direction
        n = m * (ray_origin - 0.5)
        k = np.abs(m) * 0.5

    # per-axis line coordinates of the wall crossings; pick the first one
    if fwd:
        crossings = -n + k
        tF = np.nanmin(crossings)
    else:
        crossings = -n - k
        tF = np.nanmax(crossings)
    iF = np.where(crossings == tF)[0]

    pF = ray_origin + ray_direction * tF
    eps = 1e-6
    assert (pF >= -eps).all(), (pF, ray_origin, ray_direction)
    assert (pF <= 1 + eps).all(), (pF, ray_origin, ray_direction)
    # snap tiny numerical overshoots back onto the box surface
    pF = np.clip(pF, 0, 1)
    return pF, tF, iF
def box_line_intersection(ray_origin, ray_direction):
    """Find intersections of a line with the unit cube, in both sides.

    Parameters
    -----------
    ray_origin: vector
        starting point of line
    ray_direction: vector
        line direction vector

    Returns
    --------
    left: nearest_box_intersection_line return value
        from negative direction
    right: nearest_box_intersection_line return value
        from positive direction
    """
    pN, tN, iN = nearest_box_intersection_line(ray_origin, ray_direction, fwd=False)
    pF, tF, iF = nearest_box_intersection_line(ray_origin, ray_direction, fwd=True)
    # sanity check: the backward crossing must come before the forward one
    if tN > tF or tF < 0:
        assert False, "no intersection"
    return (pN, tN, iN), (pF, tF, iF)
def linear_steps_with_reflection(ray_origin, ray_direction, t, wrapped_dims=None):
    """Go `t` steps in direction `ray_direction` from `ray_origin`.

    Reflect off the unit cube if encountered, respecting wrapped dimensions.
    In any case, the distance should be ``t * ray_direction``.

    Parameters
    -----------
    ray_origin: vector
        starting point (inside the unit cube)
    ray_direction: vector
        direction vector
    t: float
        number of steps to take; may be negative (go backwards)
    wrapped_dims: None or bool array
        which dimensions are periodic (wrap around) instead of reflecting

    Returns
    --------
    new_point: vector
        end point
    new_direction: vector
        new direction.
    """
    if t == 0:
        return ray_origin, ray_direction
    if t < 0:
        # walk backwards by flipping the direction, then flip back.
        # BUGFIX: wrapped_dims was previously dropped here, so backward
        # steps treated periodic dimensions as reflecting ones.
        new_point, new_direction = linear_steps_with_reflection(
            ray_origin, -ray_direction, -t, wrapped_dims=wrapped_dims)
        return new_point, -new_direction

    if wrapped_dims is not None:
        # remember which axes we already wrapped once
        reflected = np.zeros(len(ray_origin), dtype=bool)

    tleft = 1.0 * t
    while True:
        # find the next unit-cube wall crossing along the current direction
        p, t, i = nearest_box_intersection_line(ray_origin, ray_direction, fwd=True)
        # print(p, t, i, ray_origin, ray_direction)
        assert np.isfinite(p).all()
        assert t >= 0, t
        if tleft <= t:  # stopping before reaching any border
            assert np.all(ray_origin + tleft * ray_direction >= 0), (ray_origin, tleft, ray_direction)
            assert np.all(ray_origin + tleft * ray_direction <= 1), (ray_origin, tleft, ray_direction)
            return ray_origin + tleft * ray_direction, ray_direction

        # go to reflection point
        ray_origin = p
        assert np.isfinite(ray_origin).all(), ray_origin
        # reflect
        ray_direction = ray_direction.copy()
        if wrapped_dims is None:
            ray_direction[i] *= -1
        else:
            # if we already once bumped into that (wrapped) axis,
            # do not continue but return this as end point
            if np.logical_and(reflected[i], wrapped_dims[i]).any():
                return ray_origin, ray_direction

            # note which axes we already flipped
            reflected[i] = True

            # in wrapped axes, we can keep going. Otherwise, reflects
            ray_direction[i] *= np.where(wrapped_dims[i], 1, -1)

            # in the i axes, we should wrap the coordinates
            assert np.logical_or(np.isclose(ray_origin[i], 1), np.isclose(ray_origin[i], 0)).all(), ray_origin[i]
            ray_origin[i] = np.where(wrapped_dims[i], 1 - ray_origin[i], ray_origin[i])

        assert np.isfinite(ray_direction).all(), ray_direction
        # reduce remaining distance
        tleft -= t
def get_sphere_tangent(sphere_center, edge_point):
    """Compute tangent at sphere surface point.

    Assume a sphere centered at sphere_center with radius
    so that edge_point is on the surface. At edge_point, in
    which direction does the normal vector point?

    Parameters
    -----------
    sphere_center: vector
        center of sphere
    edge_point: vector
        point at the surface

    Returns
    --------
    tangent: vector
        vector pointing to the sphere center.
    """
    # unit vector from the surface point towards the center
    to_center = sphere_center - edge_point
    return to_center / norm(to_center)
def get_sphere_tangents(sphere_center, edge_point):
    """Compute tangent at sphere surface point.

    Assume a sphere centered at sphere_center with radius
    so that edge_point is on the surface. At edge_point, in
    which direction does the normal vector point?

    This function is vectorized and handles arrays of arguments.

    Parameters
    -----------
    sphere_center: array
        centers of spheres
    edge_point: array
        points at the surface

    Returns
    --------
    tangent: array
        vectors pointing to the sphere center.
    """
    # row-wise unit vectors from each surface point towards its center
    arrow = sphere_center - edge_point
    lengths = norm(arrow, axis=1).reshape((-1, 1))
    return arrow / lengths
def reflect(v, normal):
    """Reflect vector ``v`` off a ``normal`` vector, return new direction vector."""
    # subtract twice the projection of v onto the normal
    overlap = (normal * v).sum()
    return v - 2 * overlap * normal
def distances(direction, center, r=1):
    """Compute sphere-line intersection.

    Parameters
    -----------
    direction: vector
        direction vector (line starts at 0)
    center: vector
        center of sphere (coordinate vector)
    r: float
        radius of sphere

    Returns
    --------
    tpos, tneg: floats
        the positive and negative coordinate along the `l` vector where `r` is intersected.
        If no intersection, throws AssertError.
    """
    # solve |t * direction - center|^2 = r^2 for t
    loc = (direction * center).sum()
    sqrdist = (center**2).sum()
    rootterm = loc**2 - sqrdist + r**2
    # make sure we are crossing the sphere
    assert (rootterm > 0).all(), rootterm
    halfchord = rootterm**0.5
    return -loc + halfchord, -loc - halfchord
def isunitlength(vec):
    """Verify that `vec` is of unit length."""
    length = norm(vec)
    assert np.isclose(length, 1), length
def angle(a, b):
    """Compute dot product between vectors `a` and `b`.

    The arccos of the return value would give an actual angle.
    """
    # elementwise product summed over all entries (inputs are not normalised)
    products = a * b
    return products.sum()
def extrapolate_ahead(dj, xj, vj, contourpath=None):
    """Make `dj` steps of size `vj` from `xj`.

    Reflect off unit cube if necessary.

    Parameters
    -----------
    dj: int
        number of steps to take (may be negative for backward steps)
    xj: vector
        starting point
    vj: vector
        step vector
    contourpath: ContourPath
        would be used to reflect off the region boundary;
        currently unused (see early return below).

    Returns
    --------
    xk, vk: vectors
        new position and (possibly cube-reflected) direction.
    """
    assert dj == int(dj)
    # optimistically try to go there directly
    xk, vk = linear_steps_with_reflection(xj, vj, dj)
    return xk, vk # deactivate feature below
    # NOTE(review): everything below is dead code, deliberately disabled by
    # the early return above. It reflected the trajectory off the region
    # boundary using the contour gradient. If re-enabled, note that the
    # loop below tests `xk` instead of the freshly computed `xi`, which
    # looks like a bug -- confirm before reactivating.
    if contourpath is None:
        return xk, vk
    # check if we can already tell that the point will be outside
    region = contourpath.region
    if contourpath.region.inside(xk.reshape((1, -1))):
        return xk, vk
    # find first point outside region
    sign = 1 if dj > 0 else -1
    d = np.arange(0, dj, sign)
    first_point_outside = dj, xk, vk
    for di in d:
        xi, vi = linear_steps_with_reflection(xj, vj, di)
        if not region.inside(xk.reshape((1, -1))):
            first_point_outside = di, xi, vi
            break
    # reflect at this point (first outside)
    dout, reflpoint, v = first_point_outside
    if dout == 0:
        # already the starting point is outside.
        # return extrapolation and hope caller handles it
        return xk, vk
    # reversing: ask the contour for a surface normal at the reflection point
    normal = contourpath.gradient(reflpoint) # , v * sign)
    if normal is None:
        # no usable gradient: reverse the direction outright
        vnew = -v
    else:
        vnew = (v - 2 * angle(normal, v) * normal) * sign
    assert vnew.shape == v.shape, (vnew.shape, v.shape)
    # reflection must preserve the step length
    assert np.isclose(norm(vnew), norm(v)), (vnew, v, norm(vnew), norm(v))
    # make one step (xl replaces first_point_outside/reflpoint)
    xl, vl = linear_steps_with_reflection(reflpoint, vnew, sign)
    # how many steps are still to do?
    dleft = dj - dout
    # make that many step in that direction, by recursing.
    # it is possible that this point is also outside. The next iteration
    # will catch that case.
    return extrapolate_ahead(dleft, xl, vl, contourpath=contourpath)
def interpolate(i, points, fwd_possible, rwd_possible, contourpath=None):
    """Interpolate a point on the path indicated by `points`.

    Given a sparsely sampled track (stored in .points),
    potentially encountering reflections,
    extract the coordinates of the point with index `i`.
    That point may not have been evaluated yet.

    Parameters
    -----------
    i: int
        position on track to return.
    points: list of tuples (index, coordinate, direction, loglike)
        points on the path
    fwd_possible: bool
        whether the path could be extended in the positive direction.
    rwd_possible: bool
        whether the path could be extended in the negative direction.
    contourpath: ContourPath
        Use region to reflect. Not used at the moment.

    Returns
    --------
    x, v, L, valid: tuple
        coordinates, direction and loglikelihood of the point, plus a flag.
        The flag is False when `i` lies beyond a dead path end (the
        nearest end point is returned instead). L is None for points
        that were interpolated rather than evaluated.
    """
    points_before = [(j, xj, vj, Lj) for j, xj, vj, Lj in points if j <= i]
    points_after = [(j, xj, vj, Lj) for j, xj, vj, Lj in points if j >= i]
    # check if the point after is really after i
    if len(points_after) == 0 and not fwd_possible:
        # the path cannot continue, and i does not exist.
        # return the last known point instead, flagged invalid
        # print("   interpolate_point %d: the path cannot continue fwd, and i does not exist." % i)
        j, xj, vj, Lj = max(points_before)
        return xj, vj, Lj, False
    # check if the point before is really before i
    if len(points_before) == 0 and not rwd_possible:
        # the path cannot continue, and i does not exist.
        # return the first known point instead, flagged invalid
        k, xk, vk, Lk = min(points_after)
        # print("   interpolate_point %d: the path cannot continue rwd, and i does not exist." % i)
        return xk, vk, Lk, False
    if len(points_before) == 0 or len(points_after) == 0:
        # return None, None, None, False
        raise KeyError("cannot extrapolate outside path")
    # nearest known neighbours j <= i <= k
    j, xj, vj, Lj = max(points_before)
    k, xk, vk, Lk = min(points_after)
    # print("    interpolate_point %d between %d-%d" % (i, j, k))
    if j == i: # we have this exact point in the chain
        return xj, vj, Lj, True
    assert not k == i # otherwise the above would be true too
    # expand_to_step explores each reflection in detail, so
    # any points with change in v should have j == i
    # therefore we can assume:
    # assert (vj == vk).all()
    # this ^ is not true, because reflections on the unit cube can
    # occur, and change v without requiring a intermediate point.
    # j....i....k
    # extrapolate from both sides as a consistency check
    xl1, vj1 = extrapolate_ahead(i - j, xj, vj, contourpath=contourpath)
    xl2, vj2 = extrapolate_ahead(i - k, xk, vk, contourpath=contourpath)
    assert np.allclose(xl1, xl2), (xl1, xl2, i, j, k, xj, vj, xk, vk)
    assert np.allclose(vj1, vj2), (xl1, vj1, xl2, vj2, i, j, k, xj, vj, xk, vk)
    xl = xl1
    # xl = interpolate_between_two_points(i, xj, j, xk, k)
    # the new point is then just a linear interpolation
    # w = (i - k) * 1. / (j - k)
    # xl = xj * w + (1 - w) * xk
    return xl, vj, None, True
class SamplingPath(object):
    """Path described by a (potentially sparse) sequence of points.

    Convention of the stored point tuple ``(i, x, v, L)``:
    `i`: index (0 is starting point)
    `x`: point
    `v`: direction
    `L`: loglikelihood value
    """

    def __init__(self, x0, v0, L0):
        """Set up the path from its starting point.

        The starting point (`x0`), direction (`v0`) and loglikelihood
        value (`L0`) receive index 0.
        """
        self.reset(x0, v0, L0)

    def add(self, i, xi, vi, Li):
        """Append point `xi`, direction `vi` and value `Li` under index `i`."""
        assert Li is not None
        assert xi.ndim == 1, (xi, xi.shape)
        assert vi.ndim == 1, (vi, vi.shape)
        assert np.ndim(Li) == 0, (Li, np.shape(Li))
        self.points.append((i, xi, vi, Li))

    def reset(self, x0, v0, L0):
        """Forget all stored points and restart from ``x0, v0, L0``."""
        self.points = []
        self.add(0, x0, v0, L0)
        self.fwd_possible = True
        self.rwd_possible = True

    def plot(self, **kwargs):
        """Plot the current path (uses only the first two dimensions)."""
        coords = np.array([x for i, x, v, L in sorted(self.points)])
        marker, = plt.plot(coords[:,0], coords[:,1], 'o ', **kwargs)
        ilo, _, _, _ = min(self.points)
        ihi, _, _, _ = max(self.points)
        # densely interpolate between the stored indices
        coords = np.array([self.interpolate(i)[0] for i in range(ilo, ihi + 1)])
        kwargs['color'] = marker.get_color()
        plt.plot(coords[:,0], coords[:,1], 'o-', ms=4, mfc='None', **kwargs)

    def interpolate(self, i):
        """Interpolate point with index `i` on path."""
        return interpolate(
            i, self.points,
            fwd_possible=self.fwd_possible,
            rwd_possible=self.rwd_possible)

    def extrapolate(self, i):
        """Advance beyond the current path, extrapolate from the end point.

        Parameters
        -----------
        i: int
            index on path.

        Returns
        --------
        coords: vector
            coordinates of the new point.
        """
        forward = i >= 0
        j, xj, vj, Lj = max(self.points) if forward else min(self.points)
        deltai = i - j
        if forward:
            assert deltai > 0, ("should be extrapolating", i, j)
        else:
            assert deltai < 0, ("should be extrapolating", i, j)
        return extrapolate_ahead(deltai, xj, vj)
class ContourSamplingPath(object):
    """Region-aware form of the sampling path.

    Uses region points to guess a likelihood contour gradient.
    """

    def __init__(self, samplingpath, region):
        """Initialise with `samplingpath` and `region`.

        Parameters
        -----------
        samplingpath: SamplingPath
            path object storing the visited points
        region: region object
            current live point region, used for gradient guesses
        """
        self.samplingpath = samplingpath
        # expose the underlying point list directly for convenience
        self.points = self.samplingpath.points
        self.region = region

    def add(self, i, x, v, L):
        """Add point `x`, direction `v` and value `L` with index `i` to the path."""
        self.samplingpath.add(i, x, v, L)

    def interpolate(self, i):
        """Interpolate point with index `i` on path."""
        return interpolate(
            i, self.samplingpath.points,
            fwd_possible=self.samplingpath.fwd_possible,
            rwd_possible=self.samplingpath.rwd_possible,
            contourpath=self)

    def extrapolate(self, i):
        """Advance beyond the current path, extrapolate from the end point.

        Parameters
        -----------
        i: int
            index on path.

        Returns
        --------
        coords: vector
            coordinates of the new point.
        """
        if i >= 0:
            j, xj, vj, Lj = max(self.samplingpath.points)
            deltai = i - j
            assert deltai > 0, ("should be extrapolating", i, j)
        else:
            j, xj, vj, Lj = min(self.samplingpath.points)
            deltai = i - j
            assert deltai < 0, ("should be extrapolating", i, j)
        newpoint = extrapolate_ahead(deltai, xj, vj, contourpath=self)
        return newpoint

    def gradient(self, reflpoint, plot=False):
        """Compute gradient approximation.

        Finds spheres enclosing the `reflpoint`, and chooses their mean
        as the direction to go towards. If no spheres enclose the
        reflpoint, use nearest sphere.

        v is not used, because that would break detailed balance.

        Considerations:
          - in low-d, we want to focus on nearby live point spheres
            The border traced out is fairly accurate, at least in the
            normal away from the inside.
          - in high-d, reflpoint is contained by all live points,
            and none of them point to the center well. Because the
            sampling is poor, the "region center" position
            will be very stochastic.

        Parameters
        -----------
        reflpoint: vector
            point outside the likelihood contour, reflect there
        plot: bool
            whether to illustrate the steps on the current figure

        Returns
        ---------
        gradient: vector
            normal of ellipsoid
        """
        if plot:
            plt.plot(reflpoint[0], reflpoint[1], '+ ', color='k', ms=10)
        # check which the reflections the ellipses would make
        region = self.region
        bpts = region.transformLayer.transform(reflpoint.reshape((1,-1)))
        dist = ((bpts - region.unormed)**2).sum(axis=1)
        assert dist.shape == (len(region.unormed),), (dist.shape, len(region.unormed))
        # spheres whose radius contains reflpoint (in transformed space)
        nearby = dist < region.maxradiussq
        assert nearby.shape == (len(region.unormed),), (nearby.shape, len(region.unormed))
        if not nearby.any():
            # no sphere contains the point: fall back to the nearest one
            nearby = dist == dist.min()
        sphere_centers = region.u[nearby,:]
        tsphere_centers = region.unormed[nearby,:]
        nlive, ndim = region.unormed.shape
        assert tsphere_centers.shape[1] == ndim, (tsphere_centers.shape, ndim)
        # choose mean among those points
        tsphere_center = tsphere_centers.mean(axis=0)
        assert tsphere_center.shape == (ndim,), (tsphere_center.shape, ndim)
        tt = get_sphere_tangent(tsphere_center, bpts.flatten())
        assert tt.shape == tsphere_center.shape, (tt.shape, tsphere_center.shape)
        # convert back to u space
        sphere_center = region.transformLayer.untransform(tsphere_center)
        t = region.transformLayer.untransform(tt * 1e-3 + tsphere_center) - sphere_center
        if plot:
            # BUGFIX: use the vectorized get_sphere_tangents for the
            # (nsphere, ndim) arrays; the scalar get_sphere_tangent
            # divided the whole arrow matrix by one Frobenius norm,
            # giving wrongly scaled per-sphere tangents.
            tt_all = get_sphere_tangents(tsphere_centers, bpts)
            t_all = region.transformLayer.untransform(tt_all * 1e-3 + tsphere_centers) - sphere_centers
            """
            # plot in transformed space too:
            origax = plt.gca()
            ax = plt.axes([0.7, 0.7, 0.27, 0.27])
            plt.plot(bpts[:,0], bpts[:,1], '+ ', color='k', ms=10)
            plt.plot(region.unormed[:,0], region.unormed[:,1], 'x ', color='k', ms=4)
            plt.plot(tsphere_centers[:,0], tsphere_centers[:,1], 'o ', mfc='None', mec='b', ms=10, mew=1)
            for si, ti in zip(tsphere_centers, tt_all):
                plt.plot([si[0], ti[0] + si[0]], [si[1], ti[1] + si[1]], '--', lw=2, color='gray', alpha=0.5)
            plt.plot(tsphere_center[0], tsphere_center[1], '^ ', mfc='None', mec='g', ms=10, mew=3)
            plt.plot([tsphere_center[0], tt[0] + tsphere_center[0]],
                [tsphere_center[1], tt[1] + tsphere_center[1]], lw=1, color='gray')
            plt.sca(origax)
            """
            plt.plot(sphere_centers[:,0], sphere_centers[:,1], 'o ', mfc='None', mec='b', ms=10, mew=1)
            if not (dist < region.maxradiussq).any():
                plt.plot(sphere_centers[:,0], sphere_centers[:,1], 's ', mfc='None', mec='b', ms=10, mew=1)
            for si, ti in zip(sphere_centers, t_all):
                plt.plot([si[0], ti[0] * 1000 + si[0]], [si[1], ti[1] * 1000 + si[1]], '--', lw=2, color='gray', alpha=0.5)
            plt.plot(sphere_center[0], sphere_center[1], '^ ', mfc='None', mec='g', ms=10, mew=3)
            plt.plot([sphere_center[0], t[0] * 1000 + sphere_center[0]], [sphere_center[1], t[1] * 1000 + sphere_center[1]], color='gray')
        # compute new vector
        normal = t / norm(t)
        isunitlength(normal)
        assert normal.shape == t.shape, (normal.shape, t.shape)
        return normal
| 20,765 | 32.931373 | 138 | py |
UltraNest | UltraNest-master/ultranest/utils.py | """
Utility functions for logging and statistics
--------------------------------------------
"""
from __future__ import print_function, division
import logging
import sys
import os
import numpy as np
from numpy import pi
import errno
def create_logger(module_name, log_dir=None, level=logging.INFO):
    """
    Set up the logging channel `module_name`.

    Append to ``debug.log`` in `log_dir` (if not ``None``).
    Write to stdout with output level `level`.

    If logging handlers are already registered for this module,
    no new handlers are registered.

    Parameters
    ----------
    module_name: str
        logger module
    log_dir: str
        directory to write debug.log file into
    level: logging level
        which level (and above) to log to stdout.

    Returns
    -------
    logger:
        logger instance
    """
    logger = logging.getLogger(str(module_name))
    # only attach handlers the first time this channel is requested
    needs_setup = logger.handlers == []
    if needs_setup and log_dir is not None:
        # everything, including debug messages, goes into debug.log
        file_handler = logging.FileHandler(os.path.join(log_dir, 'debug.log'))
        file_formatter = logging.Formatter(
            '%(asctime)s [{}] [%(levelname)s] %(message)s'.format(module_name),
            datefmt='%H:%M:%S')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(file_formatter)
        logger.addHandler(file_handler)
    if needs_setup:
        logger.setLevel(logging.DEBUG)
        # mirror messages at `level` and above to stdout
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(level)
        stream_handler.setFormatter(
            logging.Formatter('[{}] %(message)s'.format(module_name)))
        logger.addHandler(stream_handler)
        logger.addHandler(logging.NullHandler())
    return logger
def _makedirs(name):
"""python2-compatible makedir."""
# for Python2 compatibility:
try:
os.makedirs(name)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
# Python 3:
# os.makedirs(name, exist_ok=True)
def make_run_dir(log_dir, run_num=None, append_run_num=True, max_run_num=10000):
    """Generate a new numbered directory for this run to store output.

    Parameters
    ----------
    log_dir: str
        base path
    run_num: int
        folder to add to path, such as prefix/run1/
    append_run_num: bool
        If true, set run_num to next unused number
    max_run_num: int
        Maximum number of automatic run subfolders

    Returns
    -------
    folderpath: dict
        dictionary of folder paths for different purposes.
        Keys are "run_dir" (the path), "info", "results", "chains",
        "extra", "plots".
    """
    def _mkdir(path):
        # like os.makedirs, but tolerates pre-existing directories
        # (kept python2-compatible, hence no exist_ok=)
        try:
            os.makedirs(path)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise exc

    _mkdir(log_dir)
    if run_num is None or run_num == '':
        # pick the smallest unused runN name under log_dir
        for run_num in range(1, max_run_num):
            if not os.path.exists(os.path.join(log_dir, 'run%s' % run_num)):
                break
        else:
            raise ValueError("log directory '%s' already contains maximum number of run subdirectories (%d)" % (log_dir, max_run_num))
    run_dir = os.path.join(log_dir, 'run%s' % run_num) if append_run_num else log_dir
    if not os.path.isdir(run_dir):
        print('Creating directory for new run %s' % run_dir)
        _mkdir(run_dir)
    # the info/ subfolder marks an already-initialised run directory
    if not os.path.isdir(os.path.join(run_dir, 'info')):
        for subfolder in ('info', 'results', 'chains', 'extra', 'plots'):
            _mkdir(os.path.join(run_dir, subfolder))
    return {
        'run_dir': run_dir,
        'info': os.path.join(run_dir, 'info'),
        'results': os.path.join(run_dir, 'results'),
        'chains': os.path.join(run_dir, 'chains'),
        'extra': os.path.join(run_dir, 'extra'),
        'plots': os.path.join(run_dir, 'plots'),
    }
def vectorize(function):
    """Vectorize likelihood or prior_transform function.

    The wrapper applies `function` to each element of its single
    sequence argument and stacks the results into a numpy array.
    """
    def vectorized(args):
        """Vectorized version of function."""
        results = [function(arg) for arg in args]
        return np.asarray(results)
    # give a user-friendly name to the vectorized version;
    # getattr works around callables without a __name__ attribute
    vectorized.__name__ = getattr(function, '__name__', vectorized.__name__)
    return vectorized
# Square root of floating-point machine epsilon; used below as the
# tolerance when checking that weights sum to one (the original code
# documented this with a bare string literal, which is a no-op
# expression rather than attached documentation).
SQRTEPS = (float(np.finfo(float).eps))**0.5
def resample_equal(samples, weights, rstate=None):
    """Resample the samples so that the final samples all have equal weight.

    Each input sample appears in the output array either
    `floor(weights[i] * N)` or `ceil(weights[i] * N)` times, with
    `floor` or `ceil` randomly selected (weighted by proximity).

    Parameters
    ----------
    samples : `~numpy.ndarray`
        Unequally weight samples returned by the nested sampling algorithm.
        Shape is (N, ...), with N the number of samples.
    weights : `~numpy.ndarray`
        Weight of each sample. Shape is (N,).
    rstate : `~numpy.random.RandomState`
        random number generator. If not provided, numpy.random is used.

    Returns
    -------
    equal_weight_samples : `~numpy.ndarray`
        Samples with equal weights, same shape as input samples.

    Raises
    ------
    ValueError
        if the weights do not sum to one (within SQRTEPS).

    Notes
    -----
    Implements the systematic resampling method described in
    `this PDF <http://people.isy.liu.se/rt/schon/Publications/HolSG2006.pdf>`_.
    Another way to sample according to weights would be
    ``samples[np.random.choice(N, size=N, p=weights)]``; the method
    used here is less "noisy".
    """
    total = np.sum(weights)
    if abs(total - 1.) > SQRTEPS:  # same tol as in np.random.choice.
        raise ValueError("weights do not sum to 1 (%g)" % total)
    if rstate is None:
        rstate = np.random
    N = len(weights)
    # make N subdivisions, and choose positions with a consistent random offset
    positions = (rstate.random() + np.arange(N)) / N
    idx = np.zeros(N, dtype=int)
    cumulative_sum = np.cumsum(weights)
    i, j = 0, 0
    while i < N:
        if positions[i] < cumulative_sum[j]:
            # pointer i falls into the weight interval of sample j
            idx[i] = j
            i += 1
        else:
            j += 1
    # shuffle so repeated samples are not adjacent
    rstate.shuffle(idx)
    return samples[idx]
def listify(*args):
    """
    Concatenate args, which are (made to be) lists.

    Parameters
    ----------
    args: iterable
        Lists to concatenate.

    Returns
    -------
    list:
        Concatenation of the lists in args.
    """
    combined = []
    for sequence in args:
        combined.extend(sequence)
    return combined
def quantile(x, q, weights=None):
    """Compute (weighted) quantiles from an input set of samples.

    Parameters
    ----------
    x : `~numpy.ndarray` with shape (nsamps,)
        Input samples.
    q : `~numpy.ndarray` with shape (nquantiles,)
        The list of quantiles to compute from `[0., 1.]`.
    weights : `~numpy.ndarray` with shape (nsamps,), optional
        The associated weight from each sample.

    Returns
    -------
    quantiles : `~numpy.ndarray` with shape (nquantiles,)
        The weighted sample quantiles computed at `q`.
        (With weights, a plain list is returned instead of an array.)

    Raises
    ------
    ValueError
        if a quantile lies outside [0, 1] or the weights length
        does not match the samples length.
    """
    x = np.atleast_1d(x)
    q = np.atleast_1d(q)
    if np.any(q < 0.0) or np.any(q > 1.0):
        raise ValueError("Quantiles must be between 0. and 1.")
    if weights is None:
        # without weights this reduces to a plain percentile computation
        return np.percentile(x, list(100.0 * q))
    weights = np.atleast_1d(weights)
    if len(x) != len(weights):
        raise ValueError("Dimension mismatch: len(weights) != len(x).")
    order = np.argsort(x)           # sort samples
    sorted_weights = weights[order] # sort weights alike
    cdf = np.cumsum(sorted_weights)[:-1]
    cdf /= cdf[-1]                  # normalize CDF
    cdf = np.append(0, cdf)         # ensure proper span
    return np.interp(q, cdf, x[order]).tolist()
def vol_prefactor(n):
    """Volume constant for an `n`-dimensional sphere.

    for `n` even:      $$ (2pi)^(n    /2) / (2 * 4 * ... * n)$$
    for `n` odd :      $$2 * (2pi)^((n-1)/2) / (1 * 3 * ... * n)$$

    Parameters
    ----------
    n: int
        Dimensionality

    Returns
    -------
    Volume: float
    """
    # start the recurrence at 2 (even n) or 3 (odd n)
    if n % 2 == 0:
        factor, k = 1.0, 2
    else:
        factor, k = 2.0, 3
    while k <= n:
        factor *= 2. / k * pi
        k += 2
    return factor
def is_affine_transform(a, b):
    """
    Check if the points *a* and *b* are related by an affine transform.

    The implementation currently returns False for rotations.

    Parameters
    ----------
    a: array
        transformed points
    b: array
        original points

    Returns
    -------
    is_affine: bool
    """
    n, da = a.shape
    nb, db = b.shape
    assert n == nb
    assert db >= da
    # pair up consecutive points; all pairs must agree on a
    # per-dimension slope and offset
    npaired = (n // 2) * 2
    a_first = a[0:npaired:2]
    a_second = a[1:npaired:2]
    b_first = b[0:npaired:2,:da]
    b_second = b[1:npaired:2,:da]
    slopes = (b_second - b_first) / (a_second - a_first)
    if not np.allclose(slopes, slopes[0]):
        return False
    offsets = b_first - slopes * a_first
    return bool(np.allclose(offsets, offsets[0]))
def normalised_kendall_tau_distance(values1, values2, i=None, j=None):
    """
    Normalised Kendall tau distance between two equally sized arrays.

    see https://en.wikipedia.org/wiki/Kendall_tau_distance

    You can optionally pass precomputed indices::

        i, j = np.meshgrid(np.arange(N), np.arange(N))

    Parameters
    ----------
    values1: array of ints
        ranks
    values2: array of ints
        other ranks (same length as values1)
    i: array of ints
        2d indices selecting values1
    j: array of ints
        2d indices selecting values2

    Returns
    -------
    distance: float
    """
    N = len(values1)
    assert len(values2) == N, "Both lists have to be of equal length"
    if i is None or j is None:
        i, j = np.meshgrid(np.arange(N), np.arange(N))
    rank1 = np.argsort(values1)
    rank2 = np.argsort(values2)
    # a pair is discordant when the two rankings order it oppositely
    discordant = np.logical_or(
        np.logical_and(rank1[i] < rank1[j], rank2[i] > rank2[j]),
        np.logical_and(rank1[i] > rank1[j], rank2[i] < rank2[j]))
    ndisordered = discordant.sum()
    return ndisordered / (N * (N - 1))
def _merge_transform_loglike_gradient_function(transform, loglike, gradient):
def transform_loglike_gradient(u):
"""Combine transform, likelihood and gradient function."""
p = transform(u.reshape((1, -1)))
return p[0], loglike(p)[0], gradient(u)
return transform_loglike_gradient
def verify_gradient(ndim, transform, loglike, gradient, verbose=False, combination=False):
    """
    Check with numerical differentiation if gradient function is plausibly correct.

    Raises AssertionError if not fulfilled.
    All functions are vectorized.

    Parameters
    ----------
    ndim : int
        dimensionality
    transform : function
        transform unit cube parameters to physical parameters, vectorized
    loglike : function
        loglikelihood function, vectorized
    gradient : function
        computes gradient of loglike to unit cube parameters.
        Takes a single point and returns a single vector.
    verbose : bool
        whether to show intermediate test results
    combination : bool
        if true, the gradient function should return a tuple of:
        (transformed parameters, loglikelihood, gradient) for a
        given unit cube point.
    """
    if combination:
        combined = gradient
    else:
        combined = _merge_transform_loglike_gradient_function(transform, loglike, gradient)
    eps = 1e-6
    num_checks = 10
    for _ in range(num_checks):
        # random starting point, kept away from the unit cube borders
        u = np.random.uniform(2 * eps, 1 - 2 * eps, size=(1, ndim))
        theta = transform(u)
        if verbose:
            print("---")
            print()
            print("starting at:", u, ", theta=", theta)
        Lref = loglike(theta)[0]
        if verbose:
            print("Lref=", Lref)
        p, L, grad = combined(u[0,:])
        # the combined function must reproduce transform and loglike
        assert np.allclose(p, theta), (p, theta)
        if verbose:
            print("gradient function gave: L=", L, "grad=", grad)
        assert np.allclose(L, Lref), (L, Lref)
        # take a tiny step of length eps along the gradient direction
        step = eps * grad / (grad**2).sum()**0.5
        uprime = u + step
        thetaprime = transform(uprime)
        if verbose:
            print("new position:", uprime, ", theta=", thetaprime)
        Lprime = loglike(thetaprime)[0]
        if verbose:
            print("L=", Lprime)
        # first-order prediction of the likelihood at the new point
        Lexpected = Lref + np.dot(step, grad)
        if verbose:
            print("expectation was L=", Lexpected, ", given", Lref, grad, eps)
        assert np.allclose(Lprime, Lexpected, atol=0.1 / ndim), \
            (u, uprime, theta, thetaprime, grad, eps * grad / L, L, Lprime, Lexpected)
def distributed_work_chunk_size(num_total_tasks, mpi_rank, mpi_size):
    """
    Divide tasks uniformly.

    Computes the number of tasks for process number `mpi_rank`, so that
    `num_total_tasks` tasks are spread uniformly among `mpi_size` processes.

    Parameters
    ----------
    num_total_tasks : int
        total number of tasks to be split
    mpi_rank : int
        process id
    mpi_size : int
        total number of processes
    """
    # ceiling division after skipping `mpi_rank` tasks:
    # lower-numbered ranks absorb the leftover tasks first
    remaining = num_total_tasks - mpi_rank
    return (remaining + mpi_size - 1) // mpi_size
def submasks(mask, *masks):
    """
    Get indices for a submasked array.

    Returns indices, so that ``a[indices]`` is equivalent to
    ``a[mask][mask1][mask2]``.

    Parameters
    ----------
    mask : np.array(dtype=bool)
        selection of some array
    masks : list of np.array(dtype=bool)
        each further mask is a subselection

    Returns
    -------
    indices : np.array(dtype=int)
        indices which select the subselection in the original array
    """
    selected, = np.where(mask)
    # apply each further mask to the already-selected index list
    for submask in masks:
        selected = selected[submask]
    return selected
| 14,604 | 28.505051 | 134 | py |
UltraNest | UltraNest-master/ultranest/ordertest.py | """
Mann-Whitney-Wilcoxon U test for a uniform distribution of integers
-------------------------------------------------------------------
A test for biased nested sampling, presented in
section 4.5.2 of Buchner (2023, https://arxiv.org/abs/2101.09675).
This implements the same idea as https://arxiv.org/abs/2006.03371
except their KS test is problematic because the variable (insertion order)
is not continuous. Instead, this implements a Mann-Whitney-Wilcoxon
U test, which also is in practice more sensitive than the KS test.
A highly efficient implementation is achieved by keeping only
a histogram of the insertion orders and comparing those
to expectations from a uniform distribution.
To quantify the convergence of a run, one route is to apply this test
at the end of the run. Another approach is to reset the counters every
time the test exceeds a z-score of 3 sigma, and report the run lengths,
which quantify how many iterations nested sampling was able to proceed
without detection of a insertion order problem.
"""
from __future__ import print_function, division
__all__ = ['infinite_U_zscore', 'UniformOrderAccumulator']
def infinite_U_zscore(sample, B):
    """
    Compute Mann-Whitney-Wilcoxon U test for a *sample* of integers to be uniformly distributed between 0 and *B*.

    Parameters
    ----------
    B: int
        maximum rank allowed.
    sample: array of integers
        values between 0 and B (inclusive).

    Returns
    -------
    zscore: float
    """
    N = len(sample)
    # midpoint-corrected rank sum, compared to the uniform expectation
    rank_sum = (sample + 0.5).sum()
    expectation = N * B * 0.5
    sigma = (N / 12.0)**0.5 * B
    return (rank_sum - expectation) / sigma
class UniformOrderAccumulator():
    """Mann-Whitney-Wilcoxon U test accumulator.

    Stores rank orders (1 to N), for comparison with a uniform order.
    """

    def __init__(self):
        """Initiate empty accumulator."""
        self.reset()

    def reset(self):
        """Set all counts to zero."""
        self.N = 0
        self.U = 0.0

    def add(self, order, N):
        """
        Accumulate rank *order* (0 to N).

        Parameters
        ----------
        N: int
            maximum rank allowed.
        order: int
            rank between 0 and N (inclusive).
        """
        if order < 0 or order > N:
            raise ValueError("order %d out of %d invalid" % (order, N))
        # midpoint-corrected, normalised rank
        self.U += (order + 0.5) / N
        self.N += 1

    @property
    def zscore(self):
        """z-score of the null hypothesis (uniform distribution) probability."""
        count = self.N
        if count == 0:
            return 0.0
        expectation = count * 0.5
        sigma = (count / 12.0)**0.5
        return (self.U - expectation) / sigma

    def __len__(self):
        """Return number of samples accumulated so far."""
        return self.N
| 2,760 | 28.688172 | 114 | py |
UltraNest | UltraNest-master/ultranest/dychmc.py | """Constrained Hamiltanean Monte Carlo step sampling.
Uses gradient to reflect at nested sampling boundaries.
"""
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
def stop_criterion(thetaminus, thetaplus, rminus, rplus):
    """Compute the (no-U-turn) stop condition of the main loop.

    computes:
    `dot(dtheta, rminus) >= 0 & dot(dtheta, rplus >= 0)`

    Parameters
    ------
    thetaminus: ndarray[float, ndim=1]
        under position
    thetaplus: ndarray[float, ndim=1]
        above position
    rminus: ndarray[float, ndim=1]
        under momentum
    rplus: ndarray[float, ndim=1]
        above momentum

    Returns
    -------
    criterion: bool
        whether the condition is valid
    """
    span = thetaplus - thetaminus
    # both end momenta must still point along the span
    forward_ok = np.dot(span, rplus.T) >= 0
    backward_ok = np.dot(span, rminus.T) >= 0
    return backward_ok & forward_ok
def step_or_reflect(theta, v, epsilon, transform, loglike, gradient, Lmin):
    """Make a step from `theta` towards `v` with stepsize `epsilon`.

    If the step would leave the unit cube or the likelihood constraint
    (`loglike > Lmin`), reflect the direction and try one more step.

    Parameters
    -----------
    theta: vector
        current unit cube position
    v: vector
        direction vector
    epsilon: float
        step size
    transform: function
        maps unit cube points to physical parameters, vectorized
    loglike: function
        loglikelihood function, vectorized
    gradient: function
        returns a surface normal at a unit cube point
    Lmin: float
        current loglikelihood threshold

    Returns
    --------
    theta, v, p, logL, reflected: tuple
        new position, new direction, physical parameters (None if the
        point was never evaluated or is outside the cube),
        loglikelihood (-inf in that case), and whether a reflection
        occurred.
    """
    # step in position:
    thetaprime = theta + epsilon * v
    # check if still inside
    mask = np.logical_and(thetaprime > 0, thetaprime < 1)
    if mask.all():
        p = transform(thetaprime.reshape((1, -1)))
        logL = loglike(p)[0]
        if logL > Lmin:
            # plain accepted step, no reflection needed
            return thetaprime, v, p[0], logL, False
        # outside the likelihood contour, need to reflect
        normal = gradient(thetaprime)
        #print("reflecting with gradient", normal)
    else:
        # outside the unit cube: make a unit vector pointing inwards
        normal = np.where(thetaprime <= 0, 1, np.where(thetaprime >= 1, -1, 0))
        #print("reflecting to inside", mask, normal)
    # project outside gradient onto our current velocity
    # subtract that part
    vnew = v - 2 * np.dot(normal, v) * normal
    # if the reflection is a reverse, it cannot be helpful. Stop.
    if np.dot(v, vnew) <= 0:
        return thetaprime, vnew, None, -np.inf, True
    # get new location, to check if we are back in the constraint
    thetaprime2 = thetaprime + epsilon * vnew
    # check if inside
    mask2 = np.logical_and(thetaprime2 > 0, thetaprime2 < 1)
    if mask2.all():
        p2 = transform(thetaprime2.reshape((1, -1)))
        logL2 = loglike(p2)[0]
        #if logL2 < Lmin:
        #    #print("new point is also outside", (theta, thetaprime, thetaprime2), (v, vnew), (Lmin, logL2))
        #else:
        #    #print("recovered to inside", (theta, thetaprime, thetaprime2), (v, vnew), (Lmin, logL2))
        # caller needs to figure out if this is ok
        return thetaprime2, vnew, p2[0], logL2, True
    else:
        #print("new point is also outside cube", (theta, thetaprime, thetaprime2), (v, vnew))
        return thetaprime2, vnew, None, -np.inf, True
def build_tree(theta, v, direction, j, epsilon, transform, loglike, gradient, Lmin):
    """The main recursion: build a balanced trajectory subtree of height `j`.

    Doubles the trajectory by recursively building two height j-1
    subtrees and merging them, NUTS-style.

    Parameters
    -----------
    theta, v: vectors
        current position and direction
    direction: int
        +1 (forward) or -1 (backward)
    j: int
        tree height; height 0 takes a single step via step_or_reflect
    epsilon: float
        step size
    transform, loglike, gradient: functions
        model callbacks (see step_or_reflect)
    Lmin: float
        loglikelihood threshold

    Returns
    --------
    15-tuple:
        (thetaminus, vminus, pminus, thetaplus, vplus, pplus,
        thetaprime, vprime, pprime, logpprime, sprime, can_continue,
        alphaprime, nalphaprime, nreflectprime) -- the backward and
        forward tree ends, a proposed sample, the validity flag,
        whether the path can be extended, and acceptance/reflection
        counters.
    """
    if j == 0:
        # Base case: Take a single leapfrog step in the direction v.
        thetaprime, vprime, pprime, logpprime, reflected = step_or_reflect(
            theta=theta, v=v * direction, epsilon=epsilon,
            transform=transform, loglike=loglike, gradient=gradient, Lmin=Lmin)
        #if not sprime:
        #    print("stopped trajectory:", direction, logpprime, Lmin, (theta, thetaprime, epsilon))
        # Set the return values---minus=plus for all things here, since the
        # "tree" is of depth 0.
        thetaminus = thetaprime
        thetaplus = thetaprime
        if reflected and np.dot(v, vprime) <= 0:
            # if reversing locally, store that in can_continue, not s
            sprime = True
            #print("  reversed locally")
            can_continue = False
            vminus = v * direction
            vplus = v * direction
            v = v * direction
        else:
            # Is the point acceptable?
            sprime = logpprime > Lmin
            #if sprime:
            #    print("  -->")
            #else:
            #    print("  stuck")
            can_continue = True
            vminus = vprime * direction
            vplus = vprime * direction
            v = vprime * direction
        pminus = pprime
        pplus = pprime
        #print(direction, (theta, thetaprime), (v, vprime))
        # probability is zero if it is an invalid state
        alphaprime = 1.0 * (sprime and can_continue)
        nalphaprime = 1
        nreflectprime = reflected * 1
    else:
        # Recursion: Implicitly build the height j-1 left and right subtrees.
        thetaminus, vminus, pminus, thetaplus, vplus, pplus, thetaprime, vprime, pprime, logpprime, sprime, can_continue, alphaprime, nalphaprime, nreflectprime = \
            build_tree(theta, v, direction, j - 1, epsilon, transform, loglike, gradient, Lmin)
        # No need to keep going if the stopping criteria were met in the first subtree.
        if can_continue and sprime:
            # extend the tree end matching the chosen direction
            if direction == -1:
                thetaminus, vminus, pminus, _, _, _, thetaprime2, vprime2, pprime2, logpprime2, sprime2, can_continue2, alphaprime2, nalphaprime2, nreflectprime2 = \
                    build_tree(thetaminus, vminus, direction, j - 1, epsilon, transform, loglike, gradient, Lmin)
            else:
                _, _, _, thetaplus, vplus, pplus, thetaprime2, vprime2, pprime2, logpprime2, sprime2, can_continue2, alphaprime2, nalphaprime2, nreflectprime2 = \
                    build_tree(thetaplus, vplus, direction, j - 1, epsilon, transform, loglike, gradient, Lmin)
            # Choose which subtree to propagate a sample up from.
            if np.random.uniform() < alphaprime2 / (alphaprime + alphaprime2):
                thetaprime = thetaprime2[:]
                vprime = vprime2[:]
                pprime = pprime2[:]
                logpprime = logpprime2
            # Update the stopping criterion.
            sturn = stop_criterion(thetaminus, thetaplus, vminus, vplus)
            #print(sprime, sprime2, sturn)
            #if not (sprime and sprime2 and sturn):
            #    print("sprime stop:", sprime, sprime2, sturn)
            sprime = sprime and sprime2 and sturn
            can_continue = can_continue and can_continue2
            # Update the acceptance probability statistics.
            alphaprime += alphaprime2
            nalphaprime += nalphaprime2
            nreflectprime += nreflectprime2
    return thetaminus, vminus, pminus, thetaplus, vplus, pplus, thetaprime, vprime, pprime, logpprime, sprime, can_continue, alphaprime, nalphaprime, nreflectprime
def tree_sample(theta, p, logL, v, epsilon, transform, loglike, gradient, Lmin, maxheight=np.inf):
    """Build NUTS-like tree of sampling path from `theta` towards `p` with stepsize `epsilon`.

    Repeatedly doubles the trajectory in a random direction via
    build_tree until a U-turn is detected, both ends are stuck, or
    `maxheight` is reached.

    Parameters
    -----------
    theta, p, logL, v:
        starting unit cube point, physical parameters, loglikelihood
        and direction
    epsilon: float
        step size
    transform, loglike, gradient: functions
        model callbacks (see step_or_reflect)
    Lmin: float
        loglikelihood threshold
    maxheight: int
        maximum tree height (number of doublings)

    Returns
    --------
    alpha, nreflect, nalpha, theta, p, logp, j: tuple
        acceptance count, reflection count, total step count, final
        point (unit cube, physical, loglikelihood) and tree height.
    """
    # initialize the tree
    thetaminus = theta
    thetaplus = theta
    vminus = v[:]
    vplus = v[:]
    alpha = 1
    nalpha = 1
    nreflect = 0
    logp = logL
    fwd_possible = True
    rwd_possible = True
    j = 0  # initial heigth j = 0
    s = True  # Main loop: will keep going until s == 0.
    while s and j < maxheight:
        # Choose a direction. -1 = backwards, 1 = forwards.
        if fwd_possible and rwd_possible:
            direction = int(2 * (np.random.uniform() < 0.5) - 1)
        elif fwd_possible:
            direction = 1
        elif rwd_possible:
            direction = -1
        else:
            # both ends are dead: nothing more to explore
            print("stuck in both ends")
            break
        # Double the size of the tree.
        if direction == -1:
            thetaminus, vminus, pminus, _, _, _, thetaprime, vprime, pprime, logpprime, sprime, can_continue, alphaprime, nalphaprime, nreflectprime = \
                build_tree(thetaminus, vminus, direction, j, epsilon, transform, loglike, gradient, Lmin)
        else:
            _, _, _, thetaplus, vplus, pplus, thetaprime, vprime, pprime, logpprime, sprime, can_continue, alphaprime, nalphaprime, nreflectprime = \
                build_tree(thetaplus, vplus, direction, j, epsilon, transform, loglike, gradient, Lmin)
        # Use Bernoulli trial to decide whether or not to move to a
        # point from the half-tree we just generated.
        if sprime and np.random.uniform() < alphaprime / (alpha + alphaprime):
            theta = thetaprime
            p = pprime
            logp = logpprime
            v = vprime
        alpha += alphaprime
        nalpha += nalphaprime
        nreflect += nreflectprime
        # Decide if it's time to stop.
        sturn = stop_criterion(thetaminus, thetaplus, vminus, vplus)
        #print(sprime, sturn)
        s = sprime and sturn
        if not can_continue:
            # remember which end of the path is exhausted
            if direction == 1:
                fwd_possible = False
            if direction == -1:
                rwd_possible = False
        #if not s and (fwd_possible or rwd_possible):
        #    print("U-turn found a:%d r:%d t:%d" % (alpha, nreflect, nalpha), sturn, sprime, (thetaminus, thetaplus), (vminus, vplus))
        #assert False
        # Increment depth.
        j += 1
    #print("jumping to:", theta)
    #print('Tree height: %d, accepts: %03.2f%%, reflects: %03.2f%%, epsilon=%g' % (j, alpha/nalpha*100, nreflect/nalpha*100, epsilon))
    return alpha, nreflect, nalpha, theta, p, logp, j
def generate_uniform_direction(d, massmatrix):
    """Draw a random unit-length direction vector.

    A sample is drawn from a zero-mean multivariate normal whose
    covariance is given by *massmatrix*, then normalised to unit
    Euclidean length, so only the direction information remains.
    """
    # np.dot with the identity promotes a scalar mass matrix to an
    # isotropic covariance matrix, and leaves a full matrix unchanged.
    cov = np.dot(massmatrix, np.eye(d))
    direction = np.random.multivariate_normal(np.zeros(d), cov)
    return direction / np.linalg.norm(direction)
class DynamicCHMCSampler(object):
    """Dynamic Constrained Hamiltonian/Hybrid Monte Carlo technique

    Run a billiard ball inside the likelihood constraint.
    The ball reflects off the constraint.
    The trajectory is explored in steps of stepsize epsilon.
    A No-U-turn criterion and randomized doubling of forward or backward
    steps is used to avoid repeating circular trajectories.
    Because of this, the number of steps is dynamic.

    Parameters
    -----------
    scale: float
        initial step size of the trajectory integration.
    nsteps: int
        number of accepted steps until the sample is considered independent.
    adaptive_nsteps: False or str
        if not false, adapt the number of steps based on chain statistics.
        Accepted values are the strategies listed in
        ``adaptive_nsteps_options`` in ``__init__``, including
        'proposal-total-distances', 'proposal-summed-distances',
        'move-distance' and their variants.
        The 'proposal-*-distances' strategies compare the distance covered by
        the proposed jumps against the mean distance
        between pairs of live points (Mahalanobis distance).
        The 'move-distance' strategy compares the distance between
        start point and current position against the region radius.
    delta: float
        target acceptance rate for the step size adaptation
        (see ``adjust_stepsize``).
    nudge: float
        change in step size, must be >1.
    """

    def __init__(self, scale, nsteps, adaptive_nsteps=False, delta=0.9, nudge=1.04):
        self.history = []
        self.nsteps = nsteps
        self.scale = scale
        self.nudge = nudge
        # multiplicative factor used when growing/shrinking nsteps
        self.nsteps_nudge = 1.01
        adaptive_nsteps_options = (False, 'proposal-total-distances-NN', 'proposal-summed-distances-NN',
                                   'proposal-total-distances', 'proposal-summed-distances',
                                   'move-distance', 'move-distance-midway', 'proposal-summed-distances-min-NN',
                                   'proposal-variance-min', 'proposal-variance-min-NN')
        if adaptive_nsteps not in adaptive_nsteps_options:
            raise ValueError("adaptive_nsteps must be one of: %s, not '%s'" % (adaptive_nsteps_options, adaptive_nsteps))
        self.adaptive_nsteps = adaptive_nsteps
        # filled in by region_changed()
        self.mean_pair_distance = np.nan
        self.delta = delta
        # scalar 1 acts as an identity mass matrix until create_problem() is called
        self.massmatrix = 1
        self.invmassmatrix = 1
        self.logstat = []
        self.logstat_labels = ['acceptance_rate', 'reflect_fraction', 'stepsize', 'treeheight']
        if adaptive_nsteps:
            self.logstat_labels += ['jump-distance', 'reference-distance']
        # per-region buffer of [acceptance, reflect fraction, tree height] per chain
        self.logstat_trajectory = []

    def set_gradient(self, gradient):
        """Set the gradient function used for reflections off the constraint."""
        self.gradient = gradient

    def __str__(self):
        """Get string representation."""
        if not self.adaptive_nsteps:
            return type(self).__name__ + '(nsteps=%d)' % self.nsteps
        else:
            return type(self).__name__ + '(adaptive_nsteps=%s)' % self.adaptive_nsteps

    def plot(self, filename):
        """Plot sampler statistics and store them as ``filename`` (+ .txt.gz dump)."""
        if len(self.logstat) == 0:
            return
        plt.figure(figsize=(10, 1 + 3 * len(self.logstat_labels)))
        for i, label in enumerate(self.logstat_labels):
            part = [entry[i] for entry in self.logstat]
            plt.subplot(len(self.logstat_labels), 1, 1 + i)
            plt.ylabel(label)
            plt.plot(part)
            # overplot a block average (20 entries per block)
            x = []
            y = []
            for j in range(0, len(part), 20):
                x.append(j)
                y.append(np.mean(part[j:j + 20]))
            plt.plot(x, y)
            if np.min(part) > 0:
                plt.yscale('log')
        plt.savefig(filename, bbox_inches='tight')
        np.savetxt(filename + '.txt.gz', self.logstat,
                   header=','.join(self.logstat_labels), delimiter=',')
        plt.close()

    def __next__(self, region, Lmin, us, Ls, transform, loglike, ndraw=40, plot=False):
        """Get a new point.

        Parameters
        ----------
        region: MLFriends
            region.
        Lmin: float
            loglikelihood threshold
        us: array of vectors
            current live points
        Ls: array of floats
            current live point likelihoods
        transform: function
            transform function
        loglike: function
            loglikelihood function
        ndraw: int
            number of draws to attempt simultaneously.
        plot: bool
            whether to produce debug plots.
        """
        self.transform = transform
        self.loglike = loglike
        # start the chain from a randomly chosen live point
        i = np.random.randint(len(Ls))
        #print("starting from live point %d" % i)
        self.starti = i
        ui = us[i,:]
        Li = Ls[i]
        pi = None
        assert np.logical_and(ui > 0, ui < 1).all(), ui
        ncalls_total = 1
        history = [(ui, Li)]

        nsteps_remaining = self.nsteps
        while nsteps_remaining > 0:
            unew, pnew, Lnew, nc, alpha, fracreflect, treeheight = self.move(ui, pi, Li,
                region=region, ndraw=ndraw, plot=plot, Lmin=Lmin)

            if pnew is not None:
                # do not count failed accepts
                nsteps_remaining = nsteps_remaining - 1
            #else:
            #    print("stuck:", Li, "->", Lnew, "Lmin:", Lmin, nsteps_remaining)
            ncalls_total += nc
            #print(" ->", Li, Lnew, unew, pnew)
            assert np.logical_and(unew > 0, unew < 1).all(), unew

            if plot:
                # NOTE(review): unew appears to be a 1-d vector here, so unew[:,0]
                # would raise IndexError when plot=True -- looks like leftover from
                # a vectorized version; confirm before enabling debug plots.
                plt.plot([ui[0], unew[:,0]], [ui[1], unew[:,1]], '-', color='k', lw=0.5)
                plt.plot(ui[0], ui[1], 'd', color='r', ms=4)
                plt.plot(unew[:,0], unew[:,1], 'x', color='r', ms=4)
            ui, pi, Li = unew, pnew, Lnew
            history.append((ui, Li))
            self.logstat_trajectory.append([alpha, fracreflect, treeheight])

        self.adjust_stepsize()
        self.adjust_nsteps(region, history)

        return ui, pi, Li, ncalls_total

    def move(self, ui, pi, Li, region, Lmin, ndraw=1, plot=False):
        """Move from position ui, Li, gradi with a HMC trajectory.

        Return
        ------
        unew: vector
            new position in cube space
        pnew: vector
            new position in physical parameter space
        Lnew: float
            new likelihood
        nc: int
            number of likelihood evaluations
        alpha: float
            acceptance rate of trajectory
        treeheight: int
            height of NUTS tree
        """
        epsilon = self.scale
        # jitter the step size by roughly an order of magnitude (log-normal)
        epsilon_here = 10**np.random.normal(0, 0.3) * epsilon
        #epsilon_here = np.random.uniform() * epsilon
        #epsilon_here = epsilon
        d = len(ui)
        assert Li >= Lmin

        # draw from momentum
        v = generate_uniform_direction(d, self.massmatrix)

        # explore and sample from one trajectory
        alpha, nreflects, nalpha, theta, pnew, Lnew, treeheight = tree_sample(
            ui, pi, Li, v, epsilon_here,
            self.transform, self.loglike, self.gradient, Lmin, maxheight=15)
        return theta, pnew, Lnew, nalpha, alpha / nalpha, nreflects / nalpha, treeheight

    def create_problem(self, Ls, region):
        """ Set up auxiliary distribution.

        Parameters
        ----------
        Ls: array of floats
            live point likelihoods
        region: MLFriends region object
            region.transformLayer is used to obtain mass matrices
        """
        # problem dimensionality
        layer = region.transformLayer

        if hasattr(layer, 'invT'):
            # affine transform layer: use its full covariance
            self.invmassmatrix = layer.cov
            self.massmatrix = np.linalg.inv(self.invmassmatrix)
        elif hasattr(layer, 'std'):
            if np.shape(layer.std) == () and layer.std == 1:
                # unit scaling: scalar 1 acts as identity mass matrix
                self.massmatrix = 1
                self.invmassmatrix = 1
            else:
                # invmassmatrix: covariance
                self.invmassmatrix = np.diag(layer.std[0]**2)
                self.massmatrix = np.diag(layer.std[0]**-2)
                print(self.invmassmatrix.shape, layer.std)

    def adjust_stepsize(self):
        """Store chain statistics and adapt proposal."""
        if len(self.logstat_trajectory) == 0:
            return

        # log averaged acceptance and trajectory statistics
        self.logstat.append([
            np.mean([alpha for alpha, fracreflect, treeheight in self.logstat_trajectory]),
            np.mean([fracreflect for alpha, fracreflect, treeheight in self.logstat_trajectory]),
            float(self.scale),
            np.mean([2**treeheight for alpha, fracreflect, treeheight in self.logstat_trajectory])
        ])

        # smoothing window: fewer entries needed when chains are long
        N = int(max(200 // self.nsteps, 1))
        alphamean = np.mean([parts[0] for parts in self.logstat[-N:]])
        reflectmean = np.mean([parts[1] for parts in self.logstat[-N:]])
        treeheightmean = np.mean([parts[3] for parts in self.logstat[-N:]])

        # aim towards an acceptance rate of delta
        if alphamean > self.delta:
            self.scale *= self.nudge**(1./N)
        else:
            self.scale /= self.nudge**(1./N)

        self.logstat_trajectory = []

        if len(self.logstat) % N == 0:
            print("updating step size: alpha=%.4f refl=%.4f treeheight=%.1f --> scale=%g " % (
                alphamean, reflectmean, treeheightmean, self.scale))

    def region_changed(self, Ls, region):
        """React to change of region. """
        self.adjust_stepsize()
        self.create_problem(Ls, region)
        # `or True`: pair distance is currently always recomputed, not only
        # when adaptation is enabled
        if self.adaptive_nsteps or True:
            self.mean_pair_distance = region.compute_mean_pair_distance()
            #print("region changed. new mean_pair_distance: %g" % self.mean_pair_distance)

    def adjust_nsteps(self, region, history):
        """Adapt the number of steps per chain from the chain `history` just completed.

        Each strategy computes a jump-distance statistic d2 and compares it
        against a reference scale; nsteps shrinks when the chain moved far
        enough, and grows otherwise.
        """
        if not self.adaptive_nsteps:
            return
        elif len(history) < self.nsteps:
            # incomplete or aborted for some reason
            print("not adapting, incomplete history", len(history), self.nsteps)
            return

        #assert self.nrejects < len(history), (self.nsteps, self.nrejects, len(history))
        #assert self.nrejects <= self.nsteps, (self.nsteps, self.nrejects, len(history))
        assert np.isfinite(self.mean_pair_distance)
        nlive, ndim = region.u.shape
        if self.adaptive_nsteps == 'proposal-total-distances':
            # compute mean vector of each proposed jump
            # compute total distance of all jumps
            tproposed = region.transformLayer.transform(np.asarray([u for u, _ in history]))
            assert len(tproposed.sum(axis=1)) == len(tproposed)
            d2 = ((((tproposed[0] - tproposed)**2).sum(axis=1))**0.5).sum()
            far_enough = d2 > self.mean_pair_distance / ndim

            self.logstat[-1] = self.logstat[-1] + [d2, self.mean_pair_distance]
            #print(self.adaptive_nsteps, self.nsteps, self.nrejects, far_enough, self.mean_pair_distance, d2)
        elif self.adaptive_nsteps == 'proposal-total-distances-NN':
            # compute mean vector of each proposed jump
            # compute total distance of all jumps
            tproposed = region.transformLayer.transform(np.asarray([u for u, _ in history]))
            assert len(tproposed.sum(axis=1)) == len(tproposed)
            d2 = ((((tproposed[0] - tproposed)**2).sum(axis=1))**0.5).sum()
            far_enough = d2 > region.maxradiussq**0.5

            self.logstat[-1] = self.logstat[-1] + [d2, region.maxradiussq**0.5]
            #print(self.adaptive_nsteps, self.nsteps, self.nrejects, far_enough, region.maxradiussq**0.5, d2)
        elif self.adaptive_nsteps == 'proposal-summed-distances':
            # compute sum of distances from each jump
            tproposed = region.transformLayer.transform(np.asarray([u for u, _ in history]))
            d2 = (((tproposed[1:,:] - tproposed[:-1,:])**2).sum(axis=1)**0.5).sum()
            far_enough = d2 > self.mean_pair_distance / ndim

            #print(self.adaptive_nsteps, self.nsteps, self.nrejects, far_enough, self.mean_pair_distance, d2)
            self.logstat[-1] = self.logstat[-1] + [d2, self.mean_pair_distance]
        elif self.adaptive_nsteps == 'proposal-summed-distances-NN':
            # compute sum of distances from each jump
            tproposed = region.transformLayer.transform(np.asarray([u for u, _ in history]))
            d2 = (((tproposed[1:,:] - tproposed[:-1,:])**2).sum(axis=1)**0.5).sum()
            far_enough = d2 > region.maxradiussq**0.5

            self.logstat[-1] = self.logstat[-1] + [d2, region.maxradiussq**0.5]
            #print(self.adaptive_nsteps, self.nsteps, self.nrejects, far_enough, region.maxradiussq**0.5, d2)
        elif self.adaptive_nsteps == 'proposal-summed-distances-min-NN':
            # compute sum of distances from each jump
            # (Manhattan distance per step, smallest step taken)
            tproposed = region.transformLayer.transform(np.asarray([u for u, _ in history]))
            d2 = (np.abs(tproposed[1:,:] - tproposed[:-1,:]).sum(axis=1)).min()
            far_enough = d2 > region.maxradiussq**0.5

            self.logstat[-1] = self.logstat[-1] + [d2, region.maxradiussq**0.5]
            #print(self.adaptive_nsteps, self.nsteps, self.nrejects, far_enough, region.maxradiussq**0.5, d2)
        elif self.adaptive_nsteps == 'proposal-variance-min':
            # compute smallest per-axis standard deviation of the chain
            tproposed = region.transformLayer.transform(np.asarray([u for u, _ in history]))
            d2 = tproposed.std(axis=0).min()
            far_enough = d2 > self.mean_pair_distance / ndim

            self.logstat[-1] = self.logstat[-1] + [d2, self.mean_pair_distance]
            #print(self.adaptive_nsteps, self.nsteps, self.nrejects, far_enough, region.maxradiussq**0.5, d2)
        elif self.adaptive_nsteps == 'proposal-variance-min-NN':
            # compute smallest per-axis standard deviation of the chain
            tproposed = region.transformLayer.transform(np.asarray([u for u, _ in history]))
            d2 = tproposed.std(axis=0).min()
            far_enough = d2 > region.maxradiussq**0.5

            self.logstat[-1] = self.logstat[-1] + [d2, region.maxradiussq**0.5]
            #print(self.adaptive_nsteps, self.nsteps, self.nrejects, far_enough, region.maxradiussq**0.5, d2)
        elif self.adaptive_nsteps == 'move-distance':
            # compute distance from start to end
            ustart, _ = history[0]
            ufinal, _ = history[-1]
            tstart, tfinal = region.transformLayer.transform(np.vstack((ustart, ufinal)))
            # d2 is a squared distance, compared against the squared radius
            d2 = ((tstart - tfinal)**2).sum()
            far_enough = d2 > region.maxradiussq

            # NOTE(review): the log entry stores squared d2 next to the
            # non-squared radius (maxradiussq**0.5) -- units differ; confirm
            # whether this mix is intended for the statistics file.
            self.logstat[-1] = self.logstat[-1] + [d2, region.maxradiussq**0.5]
            #print(self.adaptive_nsteps, self.nsteps, self.nrejects, far_enough, region.maxradiussq**0.5, d2)
        elif self.adaptive_nsteps == 'move-distance-midway':
            # compute distance from start to midpoint of the chain
            ustart, _ = history[0]
            middle = max(1, len(history) // 2)
            ufinal, _ = history[middle]
            tstart, tfinal = region.transformLayer.transform(np.vstack((ustart, ufinal)))
            d2 = ((tstart - tfinal)**2).sum()
            far_enough = d2 > region.maxradiussq

            self.logstat[-1] = self.logstat[-1] + [d2, region.maxradiussq**0.5]
            #print(self.adaptive_nsteps, self.nsteps, self.nrejects, far_enough, region.maxradiussq**0.5, d2)
        else:
            assert False, self.adaptive_nsteps

        # adjust nsteps (always change by at least 1)
        if far_enough:
            self.nsteps = min(self.nsteps - 1, int(self.nsteps / self.nsteps_nudge))
        else:
            self.nsteps = max(self.nsteps + 1, int(self.nsteps * self.nsteps_nudge))
        # clamp to a sane range
        self.nsteps = max(1, min(1000, self.nsteps))

        if len(self.logstat) % 50 == 0:
            print("updating number of steps: %d " % (self.nsteps))
| 25,176 | 40.683775 | 165 | py |
UltraNest | UltraNest-master/ultranest/flatnuts.py | """
FLATNUTS is an implementation of the No-U-turn sampler
for nested sampling assuming a flat prior space (hyper-cube u-space).
This is highly experimental. It is similar to NoGUTS and suffers from
the same stability problems.
Directional sampling within regions.
Work in unit cube space. assume a step size.
1. starting from a live point
2. choose a random direction based on whitened space metric
3. for forward and backward direction:
1. find distance where leaving spheres (surely outside)
2. bisect the step that leads out of the likelihood threshold
3. can we scatter forward?
- if we stepped outside the unit cube, use normal to the parameter(s) we stepped out from
- if gradient available, use it at first outside point
- for each sphere that contains the last inside point:
- resize so that first outside point is on the surface, get tangential vector there
(this vector is just the difference between sphere center and last inside point)
- compute reflection of direction vector with tangential plane
- choose a forward reflection at random (if any)
3.4) test if next point is inside again. If yes, continue NUTS
NUTS:
- alternatingly double the number of steps to the forward or backward side
- build a tree; terminate when start and end directions are not forward any more
- choose a end point at random out of the sequence
If the number of steps on any straight line is <10 steps, make step size smaller
If the number of steps on any straight line is >100 steps, make step size slightly bigger
Parameters:
- Number of NUTS tracks (has to be user-tuned to ensure sufficiently independent samples; starting from 1, look when Z does not change anymore)
- Step size (self-adjusting)
Benefit of this algorithm:
- insensitive to step size
- insensitive to dimensionality (sqrt scaling), better than slice sampling
- takes advantage of region information, can accelerate low-d problems as well
Drawbacks:
- inaccurate reflections degrade dimensionality scaling
- more complex to implement than slice sampling
"""
import numpy as np
from numpy.linalg import norm
import matplotlib.pyplot as plt
from .samplingpath import angle, extrapolate_ahead
class SingleJumper(object):
    """Advance one step at a time; a failed step reverses the walking direction."""

    def __init__(self, stepsampler, nsteps=0):
        """Store the step sampler to drive and the number of jumps to make."""
        assert nsteps > 0
        self.stepsampler = stepsampler
        self.nsteps = nsteps
        # +1 walks forward along the path, -1 backward
        self.direction = +1
        self.isteps = 0
        self.currenti = 0
        self.naccepts = 0
        self.nrejects = 0

    def prepare_jump(self):
        """Ask the step sampler to evaluate the next point along the path."""
        self.stepsampler.set_nsteps(self.currenti + self.direction)

    def check_gaps(self, gaps):
        """No-op: every jump is evaluated explicitly, so gaps cannot occur."""
        pass

    # then user runs stepsampler until it is done

    def make_jump(self, gaps={}):
        """Attempt one step; return (coordinate, loglikelihood) of the resulting position."""
        target = self.currenti + self.direction
        matches = [entry for entry in self.stepsampler.points if entry[0] == target]
        if matches:
            # the target point was evaluated: accept the move
            self.currenti = target
            self.naccepts += 1
        else:
            # target unavailable: stay where we are and reverse direction
            matches = [entry for entry in self.stepsampler.points if entry[0] == self.currenti]
            self.direction *= -1
            self.nrejects += 1
        self.isteps += 1
        _, xcur, _, Lcur = matches[0]
        return xcur, Lcur
class DirectJumper(object):
    """ Jump to n steps immediately. If unsuccessful, takes rest in other direction. """

    def __init__(self, stepsampler, nsteps, log=False):
        """Store the step sampler to drive and the total number of jumps to make."""
        self.stepsampler = stepsampler
        self.direction = +1
        assert nsteps > 0
        self.nsteps = nsteps
        self.isteps = 0
        self.currenti = 0
        self.naccepts = 0
        self.nrejects = 0
        self.log = log

    def prepare_jump(self):
        """Request path expansion all the way to the target index at once."""
        target = self.currenti + self.nsteps
        self.stepsampler.set_nsteps(target)

    # then user runs stepsampler until it is done

    def check_gaps(self, gaps):
        """Simulate the walk without committing state, to find the final point.

        `gaps` maps path indices to True (known to be outside) / False (OK).
        Returns (None, None) when the final point is already verified;
        otherwise returns (coordinate, index) of a point the caller must
        evaluate, after which check_gaps should be called again with the
        updated `gaps`.
        """
        pointi = {j: (xj, Lj) for j, xj, vj, Lj in self.stepsampler.points}
        ilo, ihi = min(pointi.keys()), max(pointi.keys())
        # walk on local copies so that self.currenti/self.direction stay untouched
        currenti = self.currenti
        direction = self.direction
        for isteps in range(self.nsteps):
            target = currenti + direction
            accept = ilo <= target <= ihi and not gaps.get(target, False)
            if accept:
                currenti = target
                if self.log:
                    print("accepted jump %d->%d" % (self.currenti, target), 'fwd' if self.direction == 1 else 'rwd')
            else:
                # reverse
                if self.log:
                    print("rejected jump %d->%d" % (self.currenti, target), 'fwd' if self.direction == 1 else 'rwd')
                direction *= -1
        if self.log: print("--> %d" % currenti)

        # double-check that final point is OK:
        # if we already evaluated it, it is OK
        if currenti in pointi:
            return None, None
        if currenti in gaps:
            assert gaps[currenti] == False, "could not have jumped into a known gap"
            return None, None
        xj, vj, Lj, onpath = self.stepsampler.contourpath.interpolate(currenti)
        if Lj is not None:
            return None, None
        if self.log: print(" checking for gap ...")
        # otherwise ask caller to verify it and call us again with
        # gaps[i] = True if outside, gaps[i] = False if OK
        return xj, currenti

    def make_jump(self, gaps={}):
        """Commit the walk and return (coordinate, loglikelihood) of the final point."""
        pointi = {j: (xj, Lj) for j, xj, vj, Lj in self.stepsampler.points}
        ilo, ihi = min(pointi.keys()), max(pointi.keys())
        # NOTE: the loop variable deliberately writes self.isteps
        for self.isteps in range(self.nsteps):
            target = self.currenti + self.direction
            accept = ilo <= target <= ihi and not gaps.get(target, False)
            if accept:
                if self.log:
                    print("accepted jump %d->%d" % (self.currenti, target), 'fwd' if self.direction == 1 else 'rwd')
                self.currenti = target
                self.naccepts += 1
            else:
                if self.log:
                    print("rejected jump %d->%d" % (self.currenti, target), 'fwd' if self.direction == 1 else 'rwd')
                # reverse
                self.direction *= -1
                self.nrejects += 1
        # bump so isteps ends at nsteps (the loop leaves it at nsteps-1)
        # NOTE(review): placement reconstructed from flattened source -- confirm
        # this increment belongs after the loop, not inside the reject branch.
        self.isteps += 1
        return pointi[self.currenti]
class IntervalJumper(object):
    """ Use interval to choose final point randomly.

    Expands the sampling path ``nsteps`` in both directions, then derives
    accept/reject counts from the no-U-turn-valid interval of the step
    sampler.
    """

    def __init__(self, stepsampler, nsteps):
        """Store the step sampler to drive and the expansion length per side."""
        self.stepsampler = stepsampler
        self.direction = +1
        assert nsteps >= 0
        self.nsteps = nsteps
        self.isteps = 0
        self.currenti = 0
        self.naccepts = 0
        self.nrejects = 0

    def prepare_jump(self):
        """Request path expansion nsteps forward and nsteps backward."""
        target = self.currenti + self.nsteps
        self.stepsampler.set_nsteps(target)
        self.stepsampler.set_nsteps(-target)

    # then user runs stepsampler until it is done

    def make_jump(self):
        """Update accept/reject statistics from the valid interval; returns None."""
        pointi = {j: (xj, Lj) for j, xj, vj, Lj in self.stepsampler.points}
        ilo, ihi = min(pointi.keys()), max(pointi.keys())
        # bug fix: the valid no-U-turn range lives on the step sampler itself
        # (__init__ only ever sets self.stepsampler; the previous
        # self.nutssampler attribute did not exist and raised AttributeError)
        a, b = self.stepsampler.validrange
        nused = b - a
        # these were not used:
        ntotal = ihi - ilo
        # count the number of accepts and rejects
        self.naccepts = nused
        self.nrejects = ntotal - nused
        return None
class ClockedSimpleStepSampler(object):
    """
    Find a new point with a series of small steps
    """

    def __init__(self, contourpath, plot=False, log=False):
        """
        Starts a sampling track from x in direction v.
        is_inside is a function that returns true when a given point is inside the volume

        epsilon gives the step size in direction v.
        samples, if given, helps choose the gradient -- To be removed
        plot: if set to true, make some debug plots
        """
        self.contourpath = contourpath
        # shared list of (index, x, v, L) tuples along the path
        self.points = self.contourpath.points
        self.nreflections = 0
        self.nreverses = 0
        self.plot = plot
        self.log = log
        self.reset()

    def reset(self):
        """Clear the queue of pending goals."""
        self.goals = []

    def reverse(self, reflpoint, v, plot=False):
        """
        Reflect off the surface at reflpoint going in direction v

        returns the new direction.
        """
        normal = self.contourpath.gradient(reflpoint, plot=plot)
        if normal is None:
            # no gradient information available: go straight back
            #assert False
            return -v

        # mirror v on the plane perpendicular to normal
        # (angle() is the dot product; presumably normal is unit length -- confirm)
        vnew = v - 2 * angle(normal, v) * normal
        if self.log: print(" new direction:", vnew)
        assert vnew.shape == v.shape, (vnew.shape, v.shape)
        # reflection must preserve the speed
        assert np.isclose(norm(vnew), norm(v)), (vnew, v, norm(vnew), norm(v))
        #isunitlength(vnew)
        if plot:
            plt.plot([reflpoint[0], (-v + reflpoint)[0]], [reflpoint[1], (-v + reflpoint)[1]], '-', color='k', lw=2, alpha=0.5)
            plt.plot([reflpoint[0], (vnew + reflpoint)[0]], [reflpoint[1], (vnew + reflpoint)[1]], '-', color='k', lw=3)
        return vnew

    def set_nsteps(self, i):
        """Request sampling of the point i steps away from the start."""
        self.goals.insert(0, ('sample-at', i))

    def is_done(self):
        """Whether all queued goals have been processed."""
        return self.goals == []

    def expand_onestep(self, fwd, transform, loglike, Lmin):
        """ Helper interface, make one step (forward fwd=True or backward fwd=False) """
        if fwd:
            starti, _, _, _ = max(self.points)
            i = starti + 1
        else:
            starti, _, _, _ = min(self.points)
            i = starti - 1
        return self.expand_to_step(i, transform, loglike, Lmin)

    def expand_to_step(self, nsteps, transform, loglike, Lmin):
        """ Helper interface, go to step nstep """
        self.set_nsteps(nsteps)
        return self.get_independent_sample(transform, loglike, Lmin)

    def get_independent_sample(self, transform, loglike, Lmin):
        """ Helper interface, call next() until a independent sample is returned """
        Llast = None
        while True:
            sample, is_independent = self.next(Llast)
            if sample is None:
                return None, None
            if is_independent:
                unew, Lnew = sample
                return unew, Lnew
            else:
                # next() proposed a point; evaluate it and report back
                unew = sample
                xnew = transform(unew)
                Llast = loglike(xnew)
                if Llast < Lmin:
                    # below the likelihood threshold: signal failure with None
                    Llast = None
class ClockedStepSampler(ClockedSimpleStepSampler):
    """
    Find a new point with a series of small steps
    """

    def continue_sampling(self, i):
        """Queue the goals needed to eventually sample at path index i.

        If the needed direction is blocked, reverses and targets the
        mirrored index instead.
        """
        if i > 0 and self.contourpath.samplingpath.fwd_possible \
                or i < 0 and self.contourpath.samplingpath.rwd_possible:
            # we are not done:
            self.goals.insert(0, ('expand-to', i))
            self.goals.append(('sample-at', i))
        else:
            # we are not done, but cannot reach the goal.
            # reverse. Find position from where to reverse
            if i > 0:
                starti, _, _, _ = max(self.points)
                reversei = starti + 1
            else:
                starti, _, _, _ = min(self.points)
                reversei = starti - 1
            if self.log: print("reversing at %d..." % starti)
            # how many steps are missing?
            self.nreverses += 1
            deltai = i - starti
            # request one less because one step is spent on
            # the outside try
            #if self.log: print(" %d steps to do at %d -> [from %d, delta=%d] targeting %d." % (
            #    i - starti, starti, reversei, deltai, reversei - deltai))
            # make this many steps in the other direction
            self.goals.append(('sample-at', reversei - deltai))

    def expand_to(self, i):
        """Extend the path by one step towards index i.

        Returns (proposed point, False) when an evaluation is needed,
        or None when nothing can/needs to be done.
        """
        if i > 0 and self.contourpath.samplingpath.fwd_possible:
            starti, startx, startv, _ = max(self.points)
            if i > starti:
                if self.log: print("going forward...", i, starti)
                j = starti + 1
                xj, v = self.contourpath.extrapolate(j)
                if j != i:  # ultimate goal not reached yet
                    self.goals.insert(0, ('expand-to', i))
                self.goals.insert(0, ('eval-at', j, xj, v, +1))
                return xj, False
            else:
                if self.log: print("already done...", i, starti)
                # we are already done
                pass
        elif i < 0 and self.contourpath.samplingpath.rwd_possible:
            starti, startx, startv, _ = min(self.points)
            if i < starti:
                if self.log: print("going backwards...", i, starti)
                j = starti - 1
                xj, v = self.contourpath.extrapolate(j)
                if j != i:  # ultimate goal not reached yet
                    self.goals.insert(0, ('expand-to', i))
                self.goals.insert(0, ('eval-at', j, xj, v, -1))
                return xj, False
            else:
                if self.log: print("already done...", i, starti)
                # we are already done
                pass
        else:
            # we are trying to go somewhere we cannot.
            # skip to other goals
            pass

    def eval_at(self, j, xj, v, sign, Llast):
        """Handle the likelihood result Llast for the proposed point xj at index j.

        Llast is None when the point was outside the constraint; then a
        reflection is attempted.
        """
        if Llast is not None:
            # we can go about our merry way.
            self.contourpath.add(j, xj, v, Llast)
        else:
            # We stepped outside, so now we need to reflect
            self.nreflections += 1
            if self.log: print("reflecting:", xj, v)
            if self.plot: plt.plot(xj[0], xj[1], 'xr')
            vk = self.reverse(xj, v * sign, plot=self.plot) * sign
            if self.log: print("new direction:", vk)
            xk, vk = extrapolate_ahead(sign, xj, vk, contourpath=self.contourpath)
            if self.log: print("reflection point:", xk)
            self.goals.insert(0, ('reflect-at', j, xk, vk, sign))
            return xk, False

    def reflect_at(self, j, xk, vk, sign, Llast):
        """Handle the evaluation of a reflected point; block the direction if still outside."""
        self.nreflections += 1
        if Llast is not None:
            # we can go about our merry way.
            self.contourpath.add(j, xk, vk, Llast)
        else:
            # we are stuck and have to give up this direction
            if self.plot: plt.plot(xk[0], xk[1], 's', mfc='None', mec='r', ms=10)
            if sign == 1:
                self.contourpath.samplingpath.fwd_possible = False
            else:
                self.contourpath.samplingpath.rwd_possible = False

    def next(self, Llast=None):
        """
        Run steps forward or backward to step i (can be positive or
        negative, 0 is the starting point)
        """
        if self.log: print("next() call", Llast)
        while self.goals:
            if self.log: print("goals: ", self.goals)
            goal = self.goals.pop(0)

            if goal[0] == 'sample-at':
                i = goal[1]
                assert Llast is None
                if not self.contourpath.samplingpath.fwd_possible \
                        and not self.contourpath.samplingpath.rwd_possible \
                        and len(self.points) == 1:
                    # we are stuck and cannot move.
                    # return the starting point as our best effort
                    starti, startx, startv, startL = self.points[0]
                    if self.log: print("stuck! returning start point", starti)
                    return (startx, startL), True

                # find point
                # here we assume all intermediate points have been sampled
                pointi = [(j, xj, vj, Lj) for j, xj, vj, Lj in self.points if j == i]
                if len(pointi) != 0:
                    # return the previously sampled point
                    _, xj, _, Lj = pointi[0]
                    if self.log: print("returning point", i)
                    return (xj, Lj), True

                self.continue_sampling(i)
            elif goal[0] == 'expand-to':
                i = goal[1]
                ret = self.expand_to(i)
                if ret is not None:
                    return ret
            elif goal[0] == 'eval-at':
                _, j, xj, v, sign = goal
                ret = self.eval_at(j, xj, v, sign, Llast)
                # Llast is consumed by the goal it belongs to
                Llast = None
                if ret is not None:
                    return ret
            elif goal[0] == 'reflect-at':
                _, j, xk, vk, sign = goal
                self.reflect_at(j, xk, vk, sign, Llast)
                Llast = None
            else:
                assert False, goal
        return None, False
class ClockedBisectSampler(ClockedStepSampler):
    """
    Step sampler that does not require each step to be evaluated
    """

    def continue_sampling(self, i):
        """Queue the goals needed to sample at path index i (bisection variant).

        Unlike the parent class, intermediate points may be interpolated
        instead of evaluated individually.
        """
        if i > 0:
            starti, _, _, _ = max(self.points)
            #fwd = True
            inside = i < starti
            more_possible = self.contourpath.samplingpath.fwd_possible
        else:
            starti, _, _, _ = min(self.points)
            #fwd = False
            inside = starti < i
            more_possible = self.contourpath.samplingpath.rwd_possible

        if inside:
            # interpolate point on track
            # NOTE(review): this (xj, Lj) tuple is discarded by next(),
            # which calls continue_sampling(i) without using the result --
            # verify whether it should be propagated to the caller.
            xj, vj, Lj, onpath = self.contourpath.interpolate(i)
            if self.log: print("target is on track, returning interpolation at %d..." % i, xj, Lj)
            return (xj, Lj), True
        elif more_possible:
            # we are not done:
            self.goals.insert(0, ('expand-to', i))
            if self.log: print("not done yet, continue expanding to %d..." % i)
            self.goals.append(('sample-at', i))
        else:
            # we are not done, but cannot reach the goal.
            # reverse. Find position from where to reverse
            if i > 0:
                starti, _, _, _ = max(self.points)
                reversei = starti + 1
            else:
                starti, _, _, _ = min(self.points)
                reversei = starti - 1
            if self.log: print("reversing at %d..." % starti)
            # how many steps are missing?
            self.nreverses += 1
            deltai = i - starti
            # request one less because one step is spent on
            # the outside try
            if self.log: print(" %d steps to do at %d -> [from %d, delta=%d] targeting %d." % (
                i - starti, starti, reversei, deltai, reversei - deltai))
            # make this many steps in the other direction
            self.goals.append(('sample-at', reversei - deltai))

    def expand_to(self, j):
        """Jump the path towards index j in a single leap, verified by bisection."""
        # check if we already tried
        if j > 0 and self.contourpath.samplingpath.fwd_possible:
            #print("going forward...", j)
            starti, startx, startv, _ = max(self.points)
            if j > starti:
                xj, v = self.contourpath.extrapolate(j)
                self.goals.insert(0, ('bisect', starti, startx, startv, None, None, None, j, xj, v, +1))
                #self.goals.append(goal)
                return xj, False
            else:
                # we are already done
                if self.log: print("done going to", j, starti)
                pass
        elif j < 0 and self.contourpath.samplingpath.rwd_possible:
            #print("going backward...", j)
            starti, startx, startv, _ = min(self.points)
            if j < starti:
                xj, v = self.contourpath.extrapolate(j)
                self.goals.insert(0, ('bisect', starti, startx, startv, None, None, None, j, xj, v, -1))
                #self.goals.append(goal)
                return xj, False
            else:
                # we are already done
                if self.log: print("done going to", j)
                pass
        else:
            # we are trying to go somewhere we cannot.
            # skip to other goals
            if self.log: print("cannot go there", j)
            pass

    def bisect_at(self, lefti, leftx, leftv, midi, midx, midv, righti, rightx, rightv, sign, Llast):
        """One bisection iteration to find the first point outside the constraint.

        left is inside (i: index, x: coordinate, v: direction),
        mid is the middle just evaluated (if not None),
        right is outside. Llast is the evaluation result for the most
        recently proposed point (None means outside).
        """
        # Bisect to find first point outside
        if self.log: print("bisecting ...", lefti, midi, righti)
        if midi is None:
            # check if right is actually outside
            if Llast is None:
                # yes it is. continue below
                pass
            else:
                # right is actually inside
                # so we successfully jumped all the way successfully
                if self.log: print("successfully went all the way in one jump!")
                self.contourpath.add(righti, rightx, rightv, Llast)
                Llast = None
                return
        else:
            # shrink interval based on previous evaluation point
            if Llast is not None:
                #print(" inside. updating interval %d-%d" % (midi, righti))
                lefti, leftx, leftv = midi, midx, midv
                self.contourpath.add(midi, midx, midv, Llast)
                Llast = None
            else:
                #print(" outside. updating interval %d-%d" % (lefti, midi))
                righti, rightx, rightv = midi, midx, midv

        # we need to bisect. righti was outside
        midi = (righti + lefti) // 2
        if midi == lefti or midi == righti:
            # we are done bisecting. right is the first point outside
            if self.log: print(" bisecting gave reflection point", righti, rightx, rightv)
            if self.plot: plt.plot(rightx[0], rightx[1], 'xr')
            # compute reflected direction
            vk = self.reverse(rightx, rightv * sign, plot=self.plot) * sign
            if self.log: print(" reversing there", rightv)
            # go from reflection point one step in that direction
            # that is our new point
            xk, vk = extrapolate_ahead(sign, rightx, vk, contourpath=self.contourpath)
            if self.log: print(" making one step from", rightx, rightv, '-->', xk, vk)
            self.nreflections += 1
            if self.log: print(" trying new point,", xk)
            self.goals.insert(0, ('reflect-at', righti, xk, vk, sign))
            return xk, False
        else:
            if self.log: print(" continue bisect at", midi)
            # we should evaluate the middle point
            midx, midv = extrapolate_ahead(midi - lefti, leftx, leftv, contourpath=self.contourpath)
            # continue bisecting
            self.goals.insert(0, ('bisect', lefti, leftx, leftv, midi, midx, midv, righti, rightx, rightv, sign))
            return midx, False

    def next(self, Llast=None):
        """
        Run steps forward or backward to step i (can be positive or
        negative, 0 is the starting point)
        """
        if self.log: print()
        if self.log: print("next() call", Llast)
        while self.goals:
            if self.log: print("goals: ", self.goals)
            goal = self.goals.pop(0)

            if goal[0] == 'sample-at':
                i = goal[1]
                assert Llast is None
                if not self.contourpath.samplingpath.fwd_possible and not self.contourpath.samplingpath.rwd_possible \
                        and len(self.points) == 1:
                    # we are stuck and cannot move.
                    # return the starting point as our best effort
                    if self.log: print("stuck! returning start point.")
                    starti, startx, startv, startL = self.points[0]
                    return (startx, startL), True

                # check if point already sampled
                pointi = [(j, xj, vj, Lj) for j, xj, vj, Lj in self.points if j == i]
                if len(pointi) == 1:
                    # return the previously sampled point
                    _, xj, _, Lj = pointi[0]
                    return (xj, Lj), True

                self.continue_sampling(i)
            elif goal[0] == 'expand-to':
                ret = self.expand_to(goal[1])
                if ret is not None:
                    return ret
            elif goal[0] == 'bisect':
                _, lefti, leftx, leftv, midi, midx, midv, righti, rightx, rightv, sign = goal
                ret = self.bisect_at(lefti, leftx, leftv, midi, midx, midv, righti, rightx, rightv, sign, Llast)
                # Llast is consumed by the goal it belongs to
                Llast = None
                if ret is not None:
                    return ret
            elif goal[0] == 'reflect-at':
                _, j, xk, vk, sign = goal
                self.reflect_at(j, xk, vk, sign, Llast)
                Llast = None
            else:
                assert False, goal
        return None, False
class ClockedNUTSSampler(ClockedBisectSampler):
    """
    No-U-turn sampler (NUTS) on flat surfaces.

    Builds a balanced binary tree over the sampling path by repeatedly
    doubling it forward or backward (chosen at random), stops when the
    path ends curl towards each other (U-turn criterion), and then draws
    a uniform random point from the valid part of the track.
    """

    def reset(self):
        """Reset the sampler to the starting point of the path."""
        self.goals = []
        # left/right ends of the explored track: (index, position, velocity)
        self.left_state = self.points[0][:3]
        self.right_state = self.points[0][:3]
        self.left_warmed_up = False
        self.right_warmed_up = False
        self.tree_built = False
        # index range of track points that are valid to sample from
        self.validrange = (0, 0)
        self.tree_depth = 0
        # randomly choose whether the next doubling goes backward (True) or forward
        self.current_direction = np.random.randint(2) == 1

    def next(self, Llast=None):
        """
        Alternatingly doubles the number of steps to forward and backward
        direction (which may include reflections, see StepSampler and
        BisectSampler).
        When track returns (start and end of tree point toward each other),
        terminates and returns a random point on that track.
        """
        while not self.tree_built:
            if self.log: print("continue building tree")
            rwd = self.current_direction
            # warn about unusually deep trees (was a leftover
            # "if True or ..." debug statement printing on every step)
            if self.tree_depth > 7:
                print("NUTS step: tree depth %d, %s" % (self.tree_depth, "rwd" if rwd else "fwd"))
            # make sure the path is prepared for the desired tree
            if rwd:
                goal = ('expand-to', self.left_state[0] - 2**self.tree_depth)
            else:
                goal = ('expand-to', self.right_state[0] + 2**self.tree_depth)
            if goal not in self.goals:
                self.goals.append(goal)
            # work down any open tasks
            while self.goals:
                sample, is_independent = ClockedBisectSampler.next(self, Llast=Llast)
                Llast = None
                if sample is not None:
                    return sample, is_independent
            # now check if terminating
            if rwd:
                self.left_state, _, newrange, newstop = self.build_tree(self.left_state, self.tree_depth, rwd=rwd)
            else:
                _, self.right_state, newrange, newstop = self.build_tree(self.right_state, self.tree_depth, rwd=rwd)
            if not newstop:
                self.validrange = (min(self.validrange[0], newrange[0]), max(self.validrange[1], newrange[1]))
                if self.log:
                    print("  new NUTS range: %d..%d" % (self.validrange[0], self.validrange[1]))
            ileft, xleft, vleft = self.left_state
            iright, xright, vright = self.right_state
            if self.plot: plt.plot([xleft[0], xright[0]], [xleft[1] + (self.tree_depth+1)*0.02, xright[1] + (self.tree_depth+1)*0.02], '--')
            # avoid U-turns: stop when either end's velocity no longer
            # points along the left-to-right vector
            stop = newstop or angle(xright - xleft, vleft) <= 0 or angle(xright - xleft, vright) <= 0
            # NOTE(review): comment said "stop when we cannot continue in any
            # direction", but `and` here *clears* the stop flag when both
            # directions are exhausted -- looks inverted; confirm intent.
            stop = stop and (self.contourpath.samplingpath.fwd_possible or self.contourpath.samplingpath.rwd_possible)
            if stop:
                self.tree_built = True
            else:
                self.tree_depth = self.tree_depth + 1
                self.current_direction = np.random.randint(2) == 1
        # Tree was built, we only need to sample from it
        if self.log:
            print("sampling between", self.validrange)
        return self.sample_chain_point(self.validrange[0], self.validrange[1])

    def sample_chain_point(self, a, b):
        """
        Get a random point on the track between a and b (inclusive).

        Parameters
        ----------
        a: int
            track index of the first valid point
        b: int
            track index of the last valid point

        Returns
        --------
        newpoint: tuple
            tuple of point_coordinates and loglikelihood
        is_independent: bool
            always True
        """
        if self.plot:
            for i in range(a, b+1):
                xi, vi, Li, onpath = self.contourpath.interpolate(i)
                plt.plot(xi[0], xi[1], '_ ', color='b', ms=10, mew=2)
        # rejection-sample a uniform index until one lies on the path
        while True:
            i = np.random.randint(a, b+1)
            xi, vi, Li, onpath = self.contourpath.interpolate(i)
            if not onpath:
                continue
            return (xi, Li), True

    def build_tree(self, startstate, j, rwd):
        """
        Build sub-trees of depth j in direction rwd

        startstate: (i, x, v) state information of first node
        j: int height of the tree
        rwd: bool whether we go backward
        """
        if j == 0:
            # base case: go forward one step
            i = startstate[0] + (-1 if rwd else +1)
            xi, vi, _, _ = self.contourpath.interpolate(i)
            if self.plot: plt.plot(xi[0], xi[1], 'x', color='gray')
            # this is a good state, so return it
            return (i, xi, vi), (i, xi, vi), (i,i), False
        # recursion-build the left and right subtrees
        (ileft, xleft, vleft), (iright, xright, vright), rangea, stopa = self.build_tree(startstate, j-1, rwd)
        if stopa:
            # one subtree already terminated; propagate without extending
            return (ileft, xleft, vleft), (iright, xright, vright), (ileft,iright), stopa
        if rwd:
            # go back
            (ileft, xleft, vleft), _, rangeb, stopb = self.build_tree((ileft, xleft, vleft), j-1, rwd)
        else:
            _, (iright, xright, vright), rangeb, stopb = self.build_tree((iright, xright, vright), j-1, rwd)
        # NUTS criterion: start to end vector must point in the same direction as velocity at end-point
        # additional criterion: start and end velocities must point in opposite directions
        stop = stopa or stopb or angle(xright-xleft, vleft) <= 0 or angle(xright-xleft, vright) <= 0 or angle(vleft, vright) <= 0
        return (ileft, xleft, vleft), (iright, xright, vright), (ileft,iright), stop
| 31,708 | 39.548593 | 144 | py |
UltraNest | UltraNest-master/ultranest/solvecompat.py | """Drop-in replacement for pymultinest.solve.
Example::
from ultranest.solvecompat import pymultinest_solve_compat as solve
# is a drop-in replacement for
from pymultinest.solve import solve
"""
import numpy as np
import string
from .integrator import ReactiveNestedSampler
from .stepsampler import SliceSampler, generate_mixture_random_direction
def pymultinest_solve_compat(
        LogLikelihood, Prior, n_dims, paramnames=None,
        outputfiles_basename=None, resume=False,
        n_live_points=400, evidence_tolerance=0.5,
        seed=-1, max_iter=0, wrapped_params=None, verbose=True,
        speed="safe",
        **kwargs
):
    """Run nested sampling analysis.

    Disadvantages compared to using ReactiveNestedSampler directly:
    cannot resume easily, cannot plot interactively.
    Limited results.

    It is recommended that you directly use::

        sampler = ReactiveNestedSampler(paramnames, LogLikelihood, transform=Prior)
        sampler.run()

    following the UltraNest documentation and manuals,
    as this gives you more control on resuming and sampler options.

    Parameters
    ----------
    LogLikelihood: function
        loglikelihood function (pymultinest convention).
    Prior: function
        prior transform function (pymultinest convention).
    n_dims: int
        number of parameters.
    paramnames: list of str or None
        parameter names; defaults to 'a', 'b', 'c', ... if None.
    outputfiles_basename: str or None
        directory for output files (passed as ``log_dir``).
    resume: bool
        whether to resume from a previous run in the same directory.
    n_live_points: int
        minimum number of live points.
    evidence_tolerance: float
        evidence uncertainty target (``dlogz``).
    seed: int
        random seed; negative values leave numpy's RNG untouched.
    max_iter: int
        maximum number of iterations; 0 means unlimited.
    wrapped_params: list of bool or None
        which parameters are periodic.
    verbose: bool
        whether to show progress output and print results.
    speed: "safe", "auto" or int
        "safe" uses plain rejection sampling; "auto" first runs with a
        call budget and then switches to an adaptive slice sampler;
        an integer selects a slice sampler with that many fixed steps.
    kwargs: dict
        recognised keys: min_ess, frac_remain, Lepsilon, region_filter.
        NOTE(review): any other leftover entries are silently ignored,
        not forwarded to the sampler.

    Returns
    -------
    dict
        with keys logZ, logZerr, samples and weighted_samples.
    """
    if paramnames is None:
        # fall back to single-letter names a, b, c, ...
        paramnames = list(string.ascii_lowercase)[:n_dims]
    if seed >= 0:
        np.random.seed(seed)
    assert len(paramnames) == n_dims

    # pop UltraNest-specific run options out of kwargs
    min_ess = kwargs.pop('min_ess', 0)
    frac_remain = kwargs.pop('frac_remain', 0.01)
    Lepsilon = kwargs.pop('Lepsilon', 0.001)

    outputkwargs = {}
    if not verbose:
        # suppress live-point visualisation and status line
        outputkwargs = dict(viz_callback=False, show_status=False)

    sampler = ReactiveNestedSampler(
        paramnames, LogLikelihood, transform=Prior,
        log_dir=outputfiles_basename, resume='resume' if resume else 'overwrite',
        wrapped_params=wrapped_params, draw_multiple=False, vectorized=False,
        **outputkwargs)

    if speed == "safe":
        pass
    elif speed == "auto":
        # warm-up run with a limited call budget, then switch to an
        # adaptive slice sampler for the final run below
        sampler.run(
            dlogz=evidence_tolerance,
            max_iters=max_iter if max_iter > 0 else None,
            min_num_live_points=n_live_points,
            min_ess=min_ess, frac_remain=frac_remain,
            Lepsilon=Lepsilon, max_ncalls=40000)
        sampler.stepsampler = SliceSampler(
            nsteps=1000,
            generate_direction=generate_mixture_random_direction,
            adaptive_nsteps='move-distance',
            region_filter=kwargs.get('region_filter', True)
        )
    else:
        # speed is interpreted as a fixed number of slice-sampling steps
        sampler.stepsampler = SliceSampler(
            generate_direction=generate_mixture_random_direction,
            nsteps=speed,
            adaptive_nsteps=False,
            region_filter=False)

    # main (or only) run
    sampler.run(dlogz=evidence_tolerance,
                max_iters=max_iter if max_iter > 0 else None,
                min_num_live_points=n_live_points,
                min_ess=min_ess, frac_remain=frac_remain,
                Lepsilon=Lepsilon)
    if verbose:
        sampler.print_results()
    results = sampler.results
    # writes diagnostic plots to log_dir (runs regardless of `verbose`)
    sampler.plot()
    return dict(logZ=results['logz'],
                logZerr=results['logzerr'],
                samples=results['samples'],
                weighted_samples=results['weighted_samples'])
| 3,121 | 30.535354 | 83 | py |
UltraNest | UltraNest-master/ultranest/viz.py | """
Live point visualisations
-------------------------
Gives a live impression of current exploration.
This is powerful because the user can abort partial runs if the fit
converges to unreasonable values.
"""
from __future__ import print_function, division
import sys
import shutil
from numpy import log10
import numpy as np
import string
from xml.sax.saxutils import escape as html_escape
clusteridstrings = ['%d' % i for i in range(10)] + list(string.ascii_uppercase) + list(string.ascii_lowercase)
spearman = None
try:
import scipy.stats
spearman = scipy.stats.spearmanr
except ImportError:
pass
def round_parameterlimits(plo, phi, paramlimitguess=None):
    """Guess rounded plotting limits for the current parameter ranges.

    Rounds each range outward to the nearest power of ten and picks a
    printf-style format string appropriate for the magnitude.

    Parameters
    -----------
    plo: array of floats
        for each parameter, current minimum value
    phi: array of floats
        for each parameter, current maximum value
    paramlimitguess: array of float tuples
        for each parameter, guess of parameter range if available

    Returns
    -------
    plo_rounded: array of floats
        for each parameter, rounded minimum value
    phi_rounded: array of floats
        for each parameter, rounded maximum value
    formats: array of float tuples
        for each parameter, string format for representing it.
    """
    # decimal exponents of the range end points (log10(0) -> -inf is fine here)
    with np.errstate(divide='ignore'):
        exponents = log10(np.abs([plo, phi]))
    expo_min = np.floor(np.min(exponents, axis=0))
    expo_max = np.ceil(np.max(exponents, axis=0))

    # ranges touching negative values get a symmetric bound, others start at 0
    negative = plo < 0
    lower = np.where(negative, -10**expo_max, 0)
    upper = 10.0**expo_max

    if paramlimitguess is not None:
        # tighten the rounded bounds with the user's guesses where they
        # still contain the observed range
        for i, (lo_guess, hi_guess) in enumerate(paramlimitguess):
            if lower[i] <= lo_guess <= plo[i]:
                lower[i] = lo_guess
            if phi[i] <= hi_guess <= upper[i]:
                upper[i] = hi_guess

    formats = []
    for lo_e, hi_e, lo_v, hi_v in zip(expo_min, expo_max, plo, phi):
        # default: exponential notation for extreme magnitudes
        fmt = '%+.1e'
        if -1 <= lo_e <= 2 and -1 <= hi_e <= 2:
            fmt = '%+.1f'
        if -4 <= lo_e <= 0 and -4 <= hi_e <= 0:
            # small values: show enough decimals to resolve them
            fmt = '%%+.%df' % (max(0, -min(lo_e, hi_e)))
        if hi_v == lo_v:
            fmt = '%+.1f'
        elif fmt % lo_v == fmt % hi_v:
            # both ends would print identically: add decimals until they differ
            fmt = '%%+.%df' % (max(0, -int(np.floor(log10(hi_v - lo_v)))))
        formats.append(fmt)

    return lower, upper, formats
def nicelogger(points, info, region, transformLayer, region_fresh=False):
    """Log current live points and integration progress to stdout.

    Prints a summary line (modes, volumes, quality), degeneracy warnings
    from Spearman rank correlations, and one ASCII bar per parameter
    showing where the live points lie within rounded parameter limits.

    Parameters
    -----------
    points: dict with keys "u", "p", "logl"
        live points (u: cube coordinates, p: transformed coordinates,
        logl: loglikelihood values)
    info: dict
        integration information. Keys are:

        - paramlims (optional): parameter ranges
        - logvol: expected volume at this iteration

    region: MLFriends
        Current region.
    transformLayer: ScaleLayer or AffineLayer
        Current transformLayer (for clustering information).
    region_fresh: bool
        Whether the region was just updated.

    """
    p = points['p']
    paramnames = info['paramnames']
    # print()
    # print('lnZ = %.1f, remainder = %.1f, lnLike = %.1f | Efficiency: %d/%d = %.4f%%\r' % (
    #       logz, logz_remain, np.max(logl), ncall, it, it * 100 / ncall))

    # observed parameter ranges of the live points
    plo = p.min(axis=0)
    phi = p.max(axis=0)
    plo_rounded, phi_rounded, paramformats = round_parameterlimits(plo, phi, paramlimitguess=info.get('paramlims'))

    # fit the ASCII bars to the terminal width when attached to a tty
    if sys.stderr.isatty() and hasattr(shutil, 'get_terminal_size'):
        columns, _ = shutil.get_terminal_size(fallback=(80, 25))
    else:
        columns, _ = 80, 25

    paramwidth = max([len(pname) for pname in paramnames])
    width = columns - 23 - paramwidth
    width = max(width, 10)

    # map each live point value to a character column in [0, width)
    indices = ((p - plo_rounded) * width / (phi_rounded - plo_rounded).reshape((1, -1))).astype(int)
    indices[indices >= width] = width - 1
    indices[indices < 0] = 0
    ndim = len(plo)

    print()
    print()
    clusterids = transformLayer.clusterids % len(clusteridstrings)
    nmodes = transformLayer.nclusters
    print(
        "Mono-modal" if nmodes == 1 else "Have %d modes" % nmodes,
        "Volume: ~exp(%.2f)" % region.estimate_volume(), '*' if region_fresh else ' ',
        "Expected Volume: exp(%.2f)" % info['logvol'],
        '' if 'order_test_correlation' not in info else
        ("Quality: correlation length: %d (%s)" % (info['order_test_correlation'], '+' if info['order_test_direction'] >= 0 else '-'))
        if np.isfinite(info['order_test_correlation']) else "Quality: ok",
    )
    print()

    # warn about strongly correlated parameter pairs (rank correlation)
    if ndim == 1:
        pass
    elif ndim == 2 and spearman is not None:
        rho, pval = spearman(p)
        if pval < 0.01 and abs(rho) > 0.75:
            print("    %s between %s and %s: rho=%.2f" % (
                'positive degeneracy' if rho > 0 else 'negative degeneracy',
                paramnames[0], paramnames[1], rho))
    elif spearman is not None:
        rho, pval = spearman(p)
        # guard: for >2 dims spearman returns matrices, which may contain NaN
        if np.isfinite(pval).all() and pval.ndim == 2:
            for i, param in enumerate(paramnames):
                for j, param2 in enumerate(paramnames[:i]):
                    if pval[i,j] < 0.01 and abs(rho[i,j]) > 0.99:
                        s = 'positive relation' if rho[i,j] > 0 else 'negative relation'
                        print("    perfect %s between %s and %s" % (s, param, param2))
                    elif pval[i,j] < 0.01 and abs(rho[i,j]) > 0.75:
                        s = 'positive degeneracy' if rho[i,j] > 0 else 'negative degeneracy'
                        print("    %s between %s and %s: rho=%.2f" % (s, param, param2, rho[i,j]))

    for i, (param, fmt) in enumerate(zip(paramnames, paramformats)):
        if nmodes == 1:
            # single mode: mark occupied columns with '*'
            line = [' ' for _ in range(width)]
            for j in np.unique(indices[:,i]):
                line[j] = '*'
            linestr = ''.join(line)
        else:
            # multiple modes: mark columns with the cluster id character
            line = [' ' for _ in range(width)]
            for clusterid, j in zip(clusterids, indices[:,i]):
                if clusterid > 0 and line[j] in (' ', '0'):
                    # set it to correct cluster id
                    line[j] = clusteridstrings[clusterid]
                elif clusterid == 0 and line[j] == ' ':
                    # empty, so set it although we don't know the cluster id
                    line[j] = '0'
                # else:
                #     line[j] = '*'
            linestr = ''.join(line)

        line = linestr
        # annotate the observed min/max inside the bar if there is room
        ilo, ihi = indices[:,i].min(), indices[:,i].max()
        if ilo > 10:
            assert line[:10] == ' ' * 10
            leftstr = fmt % plo[i]
            j = ilo - 2 - len(leftstr)  # left-bound
            if j < width and j > 0:
                line = line[:j] + leftstr + line[j + len(leftstr):]
        if ihi < width - 10:
            rightstr = fmt % phi[i]
            j = ihi + 3  # right-bound
            if j < width and j > 0:
                line = line[:j] + rightstr + line[j + len(rightstr):]

        parampadded = ('%%-%ds' % paramwidth) % param
        print('%s: %09s|%s|%9s' % (parampadded, fmt % plo_rounded[i], line, fmt % phi_rounded[i]))

    print()
def isnotebook():
    """Check if running in a Jupyter notebook."""
    try:
        shell = get_ipython().__class__.__name__
    except NameError:
        # get_ipython is not defined: probably a standard Python interpreter
        return False
    # only the ZMQ kernel shell (Jupyter notebook or qtconsole) counts;
    # TerminalInteractiveShell and anything else does not
    return shell == 'ZMQInteractiveShell'
class LivePointsWidget(object):
    """
    Widget for ipython and jupyter notebooks.

    Shows where the live points are currently in parameter space.
    """

    def __init__(self):
        """Initialise. To draw, call .initialize()."""
        # ipywidgets GridspecLayout: one row per parameter, one cell per column
        self.grid = None
        # HTML widget above the grid showing the progress summary
        self.label = None
        # last rendered bar string per parameter; used to update only changed cells
        self.laststatus = None

    def initialize(self, paramnames, width):
        """Set up and display widget.

        Parameters
        ----------
        paramnames: list of str
            Parameter names
        width: int
            number of html table columns.

        """
        # imported lazily so the module works without ipywidgets installed
        from ipywidgets import HTML, VBox, Layout, GridspecLayout
        from IPython.display import display
        # columns: [name, lower bound, width bar cells..., upper bound]
        grid = GridspecLayout(len(paramnames), width + 3)
        self.laststatus = []
        for a, paramname in enumerate(paramnames):
            self.laststatus.append('*' * width)
            htmlcode = "<div style='background-color:#6E6BF4;'>&nbsp;</div>"
            for b in range(width):
                grid[a, b + 2] = HTML(htmlcode, layout=Layout(margin="0"))
            htmlcode = "<div style='background-color:#FFB858; font-weight:bold; padding-right: 2em;'>%s</div>"
            grid[a, 0] = HTML(htmlcode % html_escape(paramname), layout=Layout(margin="0"))
            grid[a, 1] = HTML("...", layout=Layout(margin="0"))
            grid[a,-1] = HTML("...", layout=Layout(margin="0"))
        self.grid = grid
        self.label = HTML()
        box = VBox(children=[self.label, grid])
        display(box)

    def __call__(self, points, info, region, transformLayer, region_fresh=False):
        """Update widget to show current live points and integration progress to stdout.

        Parameters
        -----------
        points: dict with keys u, p, logl
            live points (u: cube coordinates, p: transformed coordinates,
            logl: loglikelihood values)
        info: dict
            integration information. Keys are:

            - paramlims (optional): parameter ranges
            - logvol: expected volume at this iteration

        region: MLFriends
            Current region.
        transformLayer: ScaleLayer or AffineLayer
            Current transformLayer (for clustering information).
        region_fresh: bool
            Whether the region was just updated.

        """
        # t = time.time()
        # if self.lastupdate is not None and self.lastupdate < t - 5:
        #     return
        # self.lastupdate = t
        # u, p, logl = points['u'], points['p'], points['logl']
        p = points['p']
        paramnames = info['paramnames']
        # print()
        # print('lnZ = %.1f, remainder = %.1f, lnLike = %.1f | Efficiency: %d/%d = %.4f%%\r' % (
        #       logz, logz_remain, np.max(logl), ncall, it, it * 100 / ncall))

        # observed parameter ranges of the live points
        plo = p.min(axis=0)
        phi = p.max(axis=0)
        plo_rounded, phi_rounded, paramformats = round_parameterlimits(plo, phi, paramlimitguess=info.get('paramlims'))

        width = 50
        if self.grid is None:
            self.initialize(paramnames, width)

        # map each live point value to a grid column in [0, width)
        with np.errstate(invalid="ignore"):
            indices = ((p - plo_rounded) * width / (phi_rounded - plo_rounded).reshape((1, -1))).astype(int)
        indices[indices >= width] = width - 1
        indices[indices < 0] = 0
        ndim = len(plo)

        clusterids = transformLayer.clusterids % len(clusteridstrings)
        nmodes = transformLayer.nclusters
        labeltext = ("Mono-modal" if nmodes == 1 else "Have %d modes" % nmodes) + \
            (" | Volume: ~exp(%.2f) " % region.estimate_volume()) + ('*' if region_fresh else ' ') + \
            " | Expected Volume: exp(%.2f)" % info['logvol'] + \
            ('' if 'order_test_correlation' not in info else
                (" | Quality: correlation length: %d (%s)" % (info['order_test_correlation'], '+' if info['order_test_direction'] >= 0 else '-'))
                if np.isfinite(info['order_test_correlation']) else " | Quality: ok")

        # warn about strongly correlated parameter pairs (rank correlation)
        if ndim == 1:
            pass
        elif ndim == 2 and spearman is not None:
            rho, pval = spearman(p)
            if pval < 0.01 and abs(rho) > 0.75:
                labeltext += ("<br/>%s between %s and %s: rho=%.2f" % (
                    'positive degeneracy' if rho > 0 else 'negative degeneracy',
                    paramnames[0], paramnames[1], rho))
        elif spearman is not None:
            rho, pval = spearman(p)
            # NOTE(review): unlike nicelogger, this branch does not guard with
            # `np.isfinite(pval).all() and pval.ndim == 2` before indexing --
            # confirm whether the guard is needed here too.
            for i, param in enumerate(paramnames):
                for j, param2 in enumerate(paramnames[:i]):
                    if pval[i,j] < 0.01 and abs(rho[i,j]) > 0.99:
                        labeltext += ("<br/>perfect %s between %s and %s" % (
                            'positive relation' if rho[i,j] > 0 else 'negative relation',
                            param2, param))
                    elif pval[i,j] < 0.01 and abs(rho[i,j]) > 0.75:
                        labeltext += ("<br/>%s between %s and %s: rho=%.2f" % (
                            'positive degeneracy' if rho[i,j] > 0 else 'negative degeneracy',
                            param2, param, rho[i,j]))

        for i, (param, fmt) in enumerate(zip(paramnames, paramformats)):
            if nmodes == 1:
                # single mode: mark occupied columns with '*'
                line = [' ' for _ in range(width)]
                for j in np.unique(indices[:,i]):
                    line[j] = '*'
                linestr = ''.join(line)
            else:
                # multiple modes: mark columns with the cluster id character
                line = [' ' for _ in range(width)]
                for clusterid, j in zip(clusterids, indices[:,i]):
                    if clusterid > 0 and line[j] in (' ', '0'):
                        # set it to correct cluster id
                        line[j] = clusteridstrings[clusterid]
                    elif clusterid == 0 and line[j] == ' ':
                        # empty, so set it although we don't know the cluster id
                        line[j] = '0'
                    # else:
                    #     line[j] = '*'
                linestr = ''.join(line)

            # only re-render grid cells whose character actually changed
            oldlinestr = self.laststatus[i]
            for j, (c, d) in enumerate(zip(linestr, oldlinestr)):
                if c != d:
                    if c == ' ':
                        self.grid[i, j + 2].value = "<div style='background-color:white;'>&nbsp;</div>"
                    else:
                        self.grid[i, j + 2].value = "<div style='background-color:#6E6BF4; font-family:monospace'>%s</div>" % c.replace('*', '&nbsp;')
            self.laststatus[i] = linestr

            # self.grid[i,0].value = param
            self.grid[i, 1].value = fmt % plo_rounded[i]
            self.grid[i,-1].value = fmt % phi_rounded[i]

        self.label.value = labeltext
def get_default_viz_callback():
    """Get default callback.

    A :class:`LivePointsWidget` instance when running inside a Jupyter
    notebook, the plain-text :func:`nicelogger` otherwise.
    """
    return LivePointsWidget() if isnotebook() else nicelogger
| 14,799 | 37.642298 | 150 | py |
UltraNest | UltraNest-master/ultranest/plot.py | """
Plotting utilities
------------------
"""
from __future__ import (print_function, division)
from six.moves import range
import logging
import types
import warnings
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator, NullLocator
# from matplotlib.colors import LinearSegmentedColormap, colorConverter
from matplotlib.ticker import ScalarFormatter
import scipy.stats
import matplotlib.pyplot as plt
import numpy
from .utils import resample_equal
from .utils import quantile as _quantile
try:
str_type = types.StringTypes
float_type = types.FloatType
int_type = types.IntType
except Exception:
str_type = str
float_type = float
int_type = int
import corner
__all__ = ["runplot", "cornerplot", "traceplot", "PredictionBand"]
def cornerplot(results, logger=None):
    """Make a corner plot with corner.

    Parameters
    ----------
    results: dict
        sampler results; needs keys 'paramnames' and 'weighted_samples'
        (the latter with 'points' and 'weights').
    logger: logging.Logger or None
        if given, used to warn when the posterior is still concentrated
        in a single point (in that case no plot is made).
    """
    paramnames = results['paramnames']
    data = np.array(results['weighted_samples']['points'])
    weights = np.array(results['weighted_samples']['weights'])
    cumsumweights = np.cumsum(weights)

    # drop points carrying negligible cumulative posterior probability
    mask = cumsumweights > 1e-4

    if mask.sum() == 1:
        # corner cannot plot a delta function; warn the user instead
        if logger is not None:
            warn = 'Posterior is still concentrated in a single point:'
            for i, p in enumerate(paramnames):
                # use the weighted samples here: `results['samples']` is a
                # differently-sized equal-weight resampling, so indexing it
                # with this mask was incorrect
                v = data[mask,i]
                warn += "\n" + '    %-20s: %s' % (p, v)

            logger.warning(warn)
            logger.info('Try running longer.')
        return

    # monkey patch to disable a useless warning; restore it even if
    # corner.corner raises
    oldfunc = logging.warning
    logging.warning = lambda *args, **kwargs: None
    try:
        corner.corner(data[mask,:], weights=weights[mask],
                      labels=paramnames, show_titles=True, quiet=True)
    finally:
        logging.warning = oldfunc
class PredictionBand(object):
    """Plot bands of model predictions as calculated from a chain.

    call add(y) to add predictions from each chain point

    Example::

        x = numpy.linspace(0, 1, 100)
        band = PredictionBand(x)
        for c in chain:
            band.add(c[0] * x + c[1])
        # add median line
        band.line(color='k')
        # add 1 sigma quantile
        band.shade(color='k', alpha=0.3)
        # add wider quantile
        band.shade(q=0.01, color='gray', alpha=0.1)
        plt.show()

    Parameters
    ----------
    x: array
        The independent variable
    shadeargs: dict or None
        default matplotlib arguments for shade()
    lineargs: dict or None
        default matplotlib arguments for line()
    """

    def __init__(self, x, shadeargs=None, lineargs=None):
        """Initialise with independent variable *x*."""
        self.x = x
        # one entry per chain point, each an array over x
        self.ys = []
        # use None sentinels instead of mutable `{}` defaults: a shared
        # default dict would leak style changes between instances
        self.shadeargs = {} if shadeargs is None else shadeargs
        self.lineargs = {} if lineargs is None else lineargs

    def add(self, y):
        """Add a possible prediction *y*."""
        self.ys.append(y)

    def set_shadeargs(self, **kwargs):
        """Set matplotlib style for shading."""
        self.shadeargs = kwargs

    def set_lineargs(self, **kwargs):
        """Set matplotlib style for line."""
        self.lineargs = kwargs

    def get_line(self, q=0.5):
        """Over prediction space x, get quantile *q*. Default is median."""
        if not 0 <= q <= 1:
            raise ValueError("quantile q must be between 0 and 1, not %s" % q)
        assert len(self.ys) > 0, self.ys
        return scipy.stats.mstats.mquantiles(self.ys, q, axis=0)[0]

    def shade(self, q=0.341, **kwargs):
        """Plot a shaded region between 0.5-q and 0.5+q. Default is 1 sigma."""
        if not 0 <= q <= 0.5:
            raise ValueError("quantile distance from the median, q, must be between 0 and 0.5, not %s. For a 99%% quantile range, use q=0.48." % q)
        shadeargs = dict(self.shadeargs)
        shadeargs.update(kwargs)
        lo = self.get_line(0.5 - q)
        hi = self.get_line(0.5 + q)
        return plt.fill_between(self.x, lo, hi, **shadeargs)

    def line(self, **kwargs):
        """Plot the median curve."""
        lineargs = dict(self.lineargs)
        lineargs.update(kwargs)
        mid = self.get_line(0.5)
        return plt.plot(self.x, mid, **lineargs)
# the following function is taken from https://github.com/joshspeagle/dynesty/blob/master/dynesty/plotting.py
# Copyright (c) 2017 - Present: Josh Speagle and contributors.
# Copyright (c) 2014 - 2017: Kyle Barbary and contributors.
# https://github.com/joshspeagle/dynesty/blob/master/LICENSE
def runplot(results, span=None, logplot=False, kde=True, nkde=1000,
            color='blue', plot_kwargs=None, label_kwargs=None, lnz_error=True,
            lnz_truth=None, truth_color='red', truth_kwargs=None,
            max_x_ticks=8, max_y_ticks=3, use_math_text=True,
            mark_final_live=True, fig=None
            ):
    """Plot live points, ln(likelihood), ln(weight), and ln(evidence) vs. ln(prior volume).

    Parameters
    ----------
    results : dynesty.results.Results instance
        dynesty.results.Results instance from a nested
        sampling run.

    span : iterable with shape (4,), optional
        A list where each element is either a length-2 tuple containing
        lower and upper bounds *or* a float from `(0., 1.]` giving the
        fraction below the maximum. If a fraction is provided,
        the bounds are chosen to be equal-tailed. An example would be::

            span = [(0., 10.), 0.001, 0.2, (5., 6.)]

        Default is `(0., 1.05 * max(data))` for each element.

    logplot : bool, optional
        Whether to plot the evidence on a log scale. Default is `False`.

    kde : bool, optional
        Whether to use kernel density estimation to estimate and plot
        the PDF of the importance weights as a function of log-volume
        (as opposed to the importance weights themselves). Default is
        `True`.

    nkde : int, optional
        The number of grid points used when plotting the kernel density
        estimate. Default is `1000`.

    color : str or iterable with shape (4,), optional
        A `~matplotlib`-style color (either a single color or a different
        value for each subplot) used when plotting the lines in each subplot.
        Default is `'blue'`.

    plot_kwargs : dict, optional
        Extra keyword arguments that will be passed to `plot`.

    label_kwargs : dict, optional
        Extra keyword arguments that will be sent to the
        `~matplotlib.axes.Axes.set_xlabel` and
        `~matplotlib.axes.Axes.set_ylabel` methods.

    lnz_error : bool, optional
        Whether to plot the 1, 2, and 3-sigma approximate error bars
        derived from the ln(evidence) error approximation over the course
        of the run. Default is True.

    lnz_truth : float, optional
        A reference value for the evidence that will be overplotted on the
        evidence subplot if provided.

    truth_color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color used when plotting `lnz_truth`.
        Default is `'red'`.

    truth_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting
        `lnz_truth`.

    max_x_ticks : int, optional
        Maximum number of ticks allowed for the x axis. Default is `8`.

    max_y_ticks : int, optional
        Maximum number of ticks allowed for the y axis. Default is `3`.

    use_math_text : bool, optional
        Whether the axis tick labels for very large/small exponents should be
        displayed as powers of 10 rather than using `e`. Default is `True`.

    mark_final_live : bool, optional
        Whether to indicate the final addition of recycled live points
        (if they were added to the resulting samples) using
        a dashed vertical line. Default is `True`.

    fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
        If provided, overplot the run onto the provided figure.
        Otherwise, by default an internal figure is generated.

    Returns
    -------
    runplot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
        Output summary plot.

    """
    # Initialize values.
    if label_kwargs is None:
        label_kwargs = dict()
    if plot_kwargs is None:
        plot_kwargs = dict()
    if truth_kwargs is None:
        truth_kwargs = dict()

    # Set defaults.
    plot_kwargs['linewidth'] = plot_kwargs.get('linewidth', 5)
    plot_kwargs['alpha'] = plot_kwargs.get('alpha', 0.7)
    truth_kwargs['linestyle'] = truth_kwargs.get('linestyle', 'solid')
    truth_kwargs['linewidth'] = truth_kwargs.get('linewidth', 3)

    # Extract results.
    niter = results['niter']  # number of iterations
    logvol = results['logvol']  # ln(prior volume)
    logl = results['logl'] - max(results['logl'])  # ln(normalized likelihood)
    logwt = results['logwt'] - results['logz'][-1]  # ln(importance weight)
    logz = results['logz']  # ln(evidence)
    logzerr = results['logzerr']  # error in ln(evidence)
    weights = results['weights']
    # non-finite error estimates would break the shaded error bands below
    logzerr[~np.isfinite(logzerr)] = 0.
    nsamps = len(logwt)  # number of samples

    # Check whether the run was "static" or "dynamic".
    try:
        nlive = results['samples_n']
        mark_final_live = False
    except Exception:
        # static run: constant live point count, possibly followed by the
        # final live points counting down from nlive to 1
        nlive = np.ones(niter) * results['nlive']
        if nsamps - niter == results['nlive']:
            nlive_final = np.arange(1, results['nlive'] + 1)[::-1]
            nlive = np.append(nlive, nlive_final)

    # Check if the final set of live points were added to the results.
    if mark_final_live:
        if nsamps - niter == results['nlive']:
            live_idx = niter
        else:
            warnings.warn("The number of iterations and samples differ "
                          "by an amount that isn't the number of final "
                          "live points. `mark_final_live` has been disabled.")
            mark_final_live = False

    # Determine plotting bounds for each subplot.
    data = [nlive, np.exp(logl), weights, logz if logplot else np.exp(logz)]
    # only attempt a KDE when enough samples carry non-negligible weight
    kde = kde and (weights * len(logvol) > 0.1).sum() > 10
    if kde:
        try:
            # from scipy.ndimage import gaussian_filter as norm_kde
            from scipy.stats import gaussian_kde
            # Derive kernel density estimate.
            wt_kde = gaussian_kde(resample_equal(-logvol, weights))  # KDE
            logvol_new = np.linspace(logvol[0], logvol[-1], nkde)  # resample
            data[2] = wt_kde.pdf(-logvol_new)  # evaluate KDE PDF
        except ImportError:
            kde = False

    if span is None:
        span = [(0., 1.05 * max(d)) for d in data]
        no_span = True
    else:
        no_span = False
    span = list(span)
    if len(span) != 4:
        raise ValueError("More bounds provided in `span` than subplots!")
    for i, _ in enumerate(span):
        try:
            ymin, ymax = span[i]
        except Exception:
            # a bare float is interpreted as a fraction of the maximum
            span[i] = (max(data[i]) * span[i], max(data[i]))
    if lnz_error and no_span:
        # widen the evidence panel so the 3-sigma band fits
        if logplot:
            zspan = (logz[-1] - 10.3 * 3. * logzerr[-1],
                     logz[-1] + 1.3 * 3. * logzerr[-1])
        else:
            zspan = (0., 1.05 * np.exp(logz[-1] + 3. * logzerr[-1]))
        span[3] = zspan

    # Setting up default plot layout.
    if fig is None:
        fig, axes = pl.subplots(4, 1, figsize=(16, 16))
        xspan = [(0., -min(logvol)) for _ax in axes]
        yspan = span
    else:
        fig, axes = fig
        try:
            axes.reshape(4, 1)
        except Exception:
            raise ValueError("Provided axes do not match the required shape "
                             "for plotting samples.")
        # If figure is provided, keep previous bounds if they were larger.
        xspan = [ax.get_xlim() for ax in axes]
        yspan = [ax.get_ylim() for ax in axes]
        # One exception: if the bounds are the plotting default `(0., 1.)`,
        # overwrite them.
        xspan = [t if t != (0., 1.) else (None, None) for t in xspan]
        yspan = [t if t != (0., 1.) else (None, None) for t in yspan]

    # Set up bounds for plotting.
    for i in range(4):
        if xspan[i][0] is None:
            xmin = None
        else:
            xmin = min(0., xspan[i][0])
        if xspan[i][1] is None:
            xmax = -min(logvol)
        else:
            xmax = max(-min(logvol), xspan[i][1])
        if yspan[i][0] is None:
            ymin = None
        else:
            ymin = min(span[i][0], yspan[i][0])
        if yspan[i][1] is None:
            ymax = span[i][1]
        else:
            ymax = max(span[i][1], yspan[i][1])
        axes[i].set_xlim([xmin, xmax])
        axes[i].set_ylim([ymin, ymax])

    # Plotting.
    labels = ['Live Points', 'Likelihood\n(normalized)',
              'Importance\nWeight', 'Evidence']
    if kde:
        labels[2] += ' PDF'

    for i, d in enumerate(data):

        # Establish axes.
        ax = axes[i]
        # Set color(s)/colormap(s).
        if isinstance(color, str_type):
            c = color
        else:
            c = color[i]
        # Setup axes.
        if max_x_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_x_ticks))
        if max_y_ticks == 0:
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.yaxis.set_major_locator(MaxNLocator(max_y_ticks))
        # Label axes.
        sf = ScalarFormatter(useMathText=use_math_text)
        ax.yaxis.set_major_formatter(sf)
        ax.set_xlabel(r"$-\ln X$", **label_kwargs)
        ax.set_ylabel(labels[i], **label_kwargs)
        # Plot run.
        if logplot and i == 3:
            ax.plot(-logvol, d, color=c, **plot_kwargs)
            # NOTE(review): this reassignment of `yspan` is never read again
            # in this function -- looks like a dead store; confirm.
            yspan = [ax.get_ylim() for _ax in axes]
        elif kde and i == 2:
            # KDE branch plots on the resampled log-volume grid
            ax.plot(-logvol_new, d, color=c, **plot_kwargs)
        else:
            ax.plot(-logvol, d, color=c, **plot_kwargs)
        if i == 3 and lnz_error:
            # shade 1/2/3-sigma evidence uncertainty bands
            if logplot:
                mask = logz >= ax.get_ylim()[0] - 10
                [ax.fill_between(-logvol[mask], (logz + s * logzerr)[mask],
                                 (logz - s * logzerr)[mask],
                                 color=c, alpha=0.2)
                 for s in range(1, 4)]
            else:
                [ax.fill_between(-logvol, np.exp(logz + s * logzerr),
                                 np.exp(logz - s * logzerr), color=c, alpha=0.2)
                 for s in range(1, 4)]
        # Mark addition of final live points.
        if mark_final_live:
            ax.axvline(-logvol[live_idx], color=c, ls="dashed", lw=2,
                       **plot_kwargs)
            if i == 0:
                ax.axhline(live_idx, color=c, ls="dashed", lw=2,
                           **plot_kwargs)
        # Add truth value(s).
        if i == 3 and lnz_truth is not None:
            if logplot:
                ax.axhline(lnz_truth, color=truth_color, **truth_kwargs)
            else:
                ax.axhline(np.exp(lnz_truth), color=truth_color, **truth_kwargs)

    return fig, axes
def traceplot(results, span=None, quantiles=[0.025, 0.5, 0.975], smooth=0.02,
              post_color='blue', post_kwargs=None, kde=True, nkde=1000,
              trace_cmap='plasma', trace_color=None, trace_kwargs=None,
              connect=False, connect_highlight=10, connect_color='red',
              connect_kwargs=None, max_n_ticks=5, use_math_text=False,
              labels=None, label_kwargs=None,
              show_titles=False, title_fmt=".2f", title_kwargs=None,
              truths=None, truth_color='red', truth_kwargs=None,
              verbose=False, fig=None):
    """Plot traces and marginalized posteriors for each parameter.
    Parameters
    ----------
    results : `~dynesty.results.Results` instance
        A `~dynesty.results.Results` instance from a nested
        sampling run. **Compatible with results derived from**
        `nestle <http://kylebarbary.com/nestle/>`_.
    span : iterable with shape (ndim,), optional
        A list where each element is either a length-2 tuple containing
        lower and upper bounds or a float from `(0., 1.]` giving the
        fraction of (weighted) samples to include. If a fraction is provided,
        the bounds are chosen to be equal-tailed. An example would be::
            span = [(0., 10.), 0.95, (5., 6.)]
        Default is `0.999999426697` (5-sigma credible interval) for each
        parameter.
    quantiles : iterable, optional
        A list of fractional quantiles to overplot on the 1-D marginalized
        posteriors as vertical dashed lines. Default is `[0.025, 0.5, 0.975]`
        (the 95%/2-sigma credible interval).
    smooth : float or iterable with shape (ndim,), optional
        The standard deviation (either a single value or a different value for
        each subplot) for the Gaussian kernel used to smooth the 1-D
        marginalized posteriors, expressed as a fraction of the span.
        Default is `0.02` (2% smoothing). If an integer is provided instead,
        this will instead default to a simple (weighted) histogram with
        `bins=smooth`.
    post_color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color (either a single color or a different
        value for each subplot) used when plotting the histograms.
        Default is `'blue'`.
    post_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting the
        marginalized 1-D posteriors.
    kde : bool, optional
        Whether to use kernel density estimation to estimate and plot
        the PDF of the importance weights as a function of log-volume
        (as opposed to the importance weights themselves). Default is
        `True`.
    nkde : int, optional
        The number of grid points used when plotting the kernel density
        estimate. Default is `1000`.
    trace_cmap : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style colormap (either a single colormap or a
        different colormap for each subplot) used when plotting the traces,
        where each point is colored according to its weight. Default is
        `'plasma'`.
    trace_color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color (either a single color or a
        different color for each subplot) used when plotting the traces.
        This overrides the `trace_cmap` option by giving all points
        the same color. Default is `None` (not used).
    trace_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting the traces.
    connect : bool, optional
        Whether to draw lines connecting the paths of unique particles.
        Default is `False`.
    connect_highlight : int or iterable, optional
        If `connect=True`, highlights the paths of a specific set of
        particles. If an integer is passed, :data:`connect_highlight`
        random particle paths will be highlighted. If an iterable is passed,
        then the particle paths corresponding to the provided indices
        will be highlighted.
    connect_color : str, optional
        The color of the highlighted particle paths. Default is `'red'`.
    connect_kwargs : dict, optional
        Extra keyword arguments used for plotting particle paths.
    max_n_ticks : int, optional
        Maximum number of ticks allowed. Default is `5`.
    use_math_text : bool, optional
        Whether the axis tick labels for very large/small exponents should be
        displayed as powers of 10 rather than using `e`. Default is `False`.
    labels : iterable with shape (ndim,), optional
        A list of names for each parameter. If not provided, the default name
        used when plotting will follow :math:`x_i` style.
    label_kwargs : dict, optional
        Extra keyword arguments that will be sent to the
        `~matplotlib.axes.Axes.set_xlabel` and
        `~matplotlib.axes.Axes.set_ylabel` methods.
    show_titles : bool, optional
        Whether to display a title above each 1-D marginalized posterior
        showing the 0.5 quantile along with the upper/lower bounds associated
        with the 0.025 and 0.975 (95%/2-sigma credible interval) quantiles.
        Default is `False`.
    title_fmt : str, optional
        The format string for the quantiles provided in the title. Default is
        `'.2f'`.
    title_kwargs : dict, optional
        Extra keyword arguments that will be sent to the
        `~matplotlib.axes.Axes.set_title` command.
    truths : iterable with shape (ndim,), optional
        A list of reference values that will be overplotted on the traces and
        marginalized 1-D posteriors as solid horizontal/vertical lines.
        Individual values can be exempt using `None`. Default is `None`.
    truth_color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color (either a single color or a different
        value for each subplot) used when plotting `truths`.
        Default is `'red'`.
    truth_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting the vertical
        and horizontal lines with `truths`.
    verbose : bool, optional
        Whether to print the values of the computed quantiles associated with
        each parameter. Default is `False`.
    fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
        If provided, overplot the traces and marginalized 1-D posteriors
        onto the provided figure. Otherwise, by default an
        internal figure is generated.
    Returns
    -------
    traceplot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
        Output trace plot.
    """
    # Initialize values.
    if title_kwargs is None:
        title_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()
    if trace_kwargs is None:
        trace_kwargs = dict()
    if connect_kwargs is None:
        connect_kwargs = dict()
    if post_kwargs is None:
        post_kwargs = dict()
    if truth_kwargs is None:
        truth_kwargs = dict()
    # Set defaults.
    connect_kwargs['alpha'] = connect_kwargs.get('alpha', 0.7)
    post_kwargs['alpha'] = post_kwargs.get('alpha', 0.6)
    trace_kwargs['s'] = trace_kwargs.get('s', 3)
    truth_kwargs['linestyle'] = truth_kwargs.get('linestyle', 'solid')
    truth_kwargs['linewidth'] = truth_kwargs.get('linewidth', 2)
    # Extract weighted samples.
    samples = results['samples']
    logvol = results['logvol']
    weights = results['weights']
    wts = weights  # default trace coloring: the raw importance weights
    # only attempt KDE smoothing when more than 10 samples carry a weight
    # above 0.1/nsamples; otherwise the KDE would be meaningless
    kde = kde and (weights * len(logvol) > 0.1).sum() > 10
    if kde:
        try:
            from scipy.ndimage import gaussian_filter as norm_kde
            from scipy.stats import gaussian_kde
            # Derive kernel density estimate.
            wt_kde = gaussian_kde(resample_equal(-logvol, weights))  # KDE
            logvol_grid = np.linspace(logvol[0], logvol[-1], nkde)  # resample
            wt_grid = wt_kde.pdf(-logvol_grid)  # evaluate KDE PDF
            wts = np.interp(-logvol, -logvol_grid, wt_grid)  # interpolate
        except ImportError:
            # scipy unavailable: silently fall back to raw weights
            kde = False
    # Deal with 1D results. A number of extra catches are also here
    # in case users are trying to plot other results besides the `Results`
    # instance generated by `dynesty`.
    samples = np.atleast_1d(samples)
    if len(samples.shape) == 1:
        samples = np.atleast_2d(samples)
    else:
        assert len(samples.shape) == 2, "Samples must be 1- or 2-D."
        samples = samples.T
    assert samples.shape[0] <= samples.shape[1], "There are more dimensions than samples!"
    ndim, nsamps = samples.shape
    # Check weights.
    if weights.ndim != 1:
        raise ValueError("Weights must be 1-D.")
    if nsamps != weights.shape[0]:
        raise ValueError("The number of weights and samples disagree!")
    # Check ln(volume).
    if logvol.ndim != 1:
        raise ValueError("Ln(volume)'s must be 1-D.")
    if nsamps != logvol.shape[0]:
        raise ValueError("The number of ln(volume)'s and samples disagree!")
    # Check sample IDs.
    if connect:
        try:
            samples_id = results['samples_id']
            uid = np.unique(samples_id)
        except Exception:
            raise ValueError("Sample IDs are not defined!")
        try:
            # EAFP: if connect_highlight is indexable, treat it as a list of
            # explicit particle indices ...
            ids = connect_highlight[0]
            ids = connect_highlight
        except Exception:
            # ... otherwise it is a count of random particles to highlight
            ids = np.random.choice(uid, size=connect_highlight, replace=False)
    # Determine plotting bounds for marginalized 1-D posteriors.
    if span is None:
        span = [0.999999426697 for i in range(ndim)]
    span = list(span)
    if len(span) != ndim:
        raise ValueError("Dimension mismatch between samples and span.")
    for i, _ in enumerate(span):
        try:
            # entry already a (lower, upper) pair?
            xmin, xmax = span[i]
        except Exception:
            # entry is a fraction: convert to equal-tailed quantile bounds
            q = [0.5 - 0.5 * span[i], 0.5 + 0.5 * span[i]]
            span[i] = _quantile(samples[i], q, weights=weights)
    # Setting up labels.
    if labels is None:
        labels = [r"$x_{%d}$" % (i + 1) for i in range(ndim)]
    # Setting up smoothing.
    if (isinstance(smooth, int_type) or isinstance(smooth, float_type)):
        smooth = [smooth for i in range(ndim)]
    # Setting up default plot layout.
    if fig is None:
        fig, axes = pl.subplots(ndim, 2, figsize=(12, 3 * ndim))
    else:
        fig, axes = fig
        try:
            axes.reshape(ndim, 2)
        except Exception:
            raise ValueError("Provided axes do not match the required shape "
                             "for plotting samples.")
    # Plotting.
    for i, x in enumerate(samples):
        # Plot trace.
        # Establish axes.
        if np.shape(samples)[0] == 1:
            # NOTE(review): the 1-D case places the trace in axes[1] and the
            # posterior in axes[0] -- opposite of the 2-D layout; confirm.
            ax = axes[1]
        else:
            ax = axes[i, 0]
        # Set color(s)/colormap(s).
        if trace_color is not None:
            if isinstance(trace_color, str_type):
                color = trace_color
            else:
                color = trace_color[i]
        else:
            color = wts
        if isinstance(trace_cmap, str_type):
            cmap = trace_cmap
        else:
            cmap = trace_cmap[i]
        # Setup axes.
        ax.set_xlim([0., -min(logvol)])
        ax.set_ylim([min(x), max(x)])
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks))
            ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks))
        # Label axes.
        sf = ScalarFormatter(useMathText=use_math_text)
        ax.yaxis.set_major_formatter(sf)
        ax.set_xlabel(r"$-\ln X$", **label_kwargs)
        ax.set_ylabel(labels[i], **label_kwargs)
        # Generate scatter plot.
        ax.scatter(-logvol, x, c=color, cmap=cmap, **trace_kwargs)
        if connect:
            # Add lines highlighting specific particle paths.
            for j in ids:
                sel = (samples_id == j)
                ax.plot(-logvol[sel], x[sel], color=connect_color,
                        **connect_kwargs)
        # Add truth value(s).
        if truths is not None and truths[i] is not None:
            try:
                # truths[i] may itself be an iterable of reference values
                [ax.axhline(t, color=truth_color, **truth_kwargs)
                 for t in truths[i]]
            except Exception:
                ax.axhline(truths[i], color=truth_color, **truth_kwargs)
        # Plot marginalized 1-D posterior.
        # Establish axes.
        if np.shape(samples)[0] == 1:
            ax = axes[0]
        else:
            ax = axes[i, 1]
        # Set color(s).
        if isinstance(post_color, str_type):
            color = post_color
        else:
            color = post_color[i]
        # Setup axes
        ax.set_xlim(span[i])
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks))
            ax.yaxis.set_major_locator(NullLocator())
        # Label axes.
        sf = ScalarFormatter(useMathText=use_math_text)
        ax.xaxis.set_major_formatter(sf)
        ax.set_xlabel(labels[i], **label_kwargs)
        # Generate distribution.
        s = smooth[i]
        if isinstance(s, int_type):
            # If `s` is an integer, plot a weighted histogram with
            # `s` bins within the provided bounds.
            n, b, _ = ax.hist(x, bins=s, weights=weights, color=color,
                              range=np.sort(span[i]), **post_kwargs)
            # step-function outline of the histogram, for the ylim below
            x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
            y0 = np.array(list(zip(n, n))).flatten()
        else:
            # If `s` is a float, oversample the data relative to the
            # smoothing filter by a factor of 10, then use a Gaussian
            # filter to smooth the results.
            if kde:
                bins = int(round(10. / s))
                n, b = np.histogram(x, bins=bins, weights=weights,
                                    range=np.sort(span[i]))
                x0 = 0.5 * (b[1:] + b[:-1])
                n = norm_kde(n, 10.)
                y0 = n
                ax.fill_between(x0, y0, color=color, **post_kwargs)
            else:
                # without scipy: plain 40-bin weighted histogram
                bins = 40
                n, b = np.histogram(x, bins=bins, weights=weights,
                                    range=np.sort(span[i]))
                x0 = 0.5 * (b[1:] + b[:-1])
                y0 = n
                ax.fill_between(x0, y0, color=color, **post_kwargs)
        ax.set_ylim([0., max(y0) * 1.05])
        # Plot quantiles.
        if quantiles is not None and len(quantiles) > 0:
            qs = _quantile(x, quantiles, weights=weights)
            for q in qs:
                ax.axvline(q, lw=2, ls="dashed", color=color)
            if verbose:
                print("Quantiles:")
                print(labels[i], [blob for blob in zip(quantiles, qs)])
        # Add truth value(s).
        if truths is not None and truths[i] is not None:
            try:
                [ax.axvline(t, color=truth_color, **truth_kwargs)
                 for t in truths[i]]
            except Exception:
                ax.axvline(truths[i], color=truth_color, **truth_kwargs)
        # Set titles.
        if show_titles:
            title = None
            if title_fmt is not None:
                # NOTE(review): titles always use the 2-sigma quantiles,
                # independent of the `quantiles` argument -- confirm intended
                ql, qm, qh = _quantile(x, [0.025, 0.5, 0.975], weights=weights)
                q_minus, q_plus = qm - ql, qh - qm
                fmt = "{{0:{0}}}".format(title_fmt).format
                title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                title = title.format(fmt(qm), fmt(q_minus), fmt(q_plus))
                title = "{0} = {1}".format(labels[i], title)
                ax.set_title(title, **title_kwargs)
    return fig, axes
| 30,736 | 38.711886 | 147 | py |
UltraNest | UltraNest-master/ultranest/__init__.py | """
Performs nested sampling to calculate the Bayesian evidence and posterior samples
Some parts are from the Nestle library by Kyle Barbary (https://github.com/kbarbary/nestle)
Some parts are from the nnest library by Adam Moss (https://github.com/adammoss/nnest)
"""
from .integrator import NestedSampler, ReactiveNestedSampler, read_file
from .utils import vectorize
__author__ = """Johannes Buchner"""
__email__ = 'johannes.buchner.acad@gmx.com'
__version__ = '3.6.1'
| 475 | 33 | 91 | py |
UltraNest | UltraNest-master/ultranest/store.py | """
Storage for nested sampling points
-----------------------------------
The information stored is a table with
- the likelihood threshold drawn from
- the likelihood, prior volume coordinates and physical coordinates of the point
"""
from __future__ import print_function, division
import numpy as np
import warnings
import os
class NullPointStore(object):
    """Do-nothing point store.

    Keeps the bookkeeping counters (`nrows`, `ncalls`) without
    persisting anything, and never returns stored points.
    """

    def __init__(self, ncols):
        """Set up counters; `ncols` is coerced to int but otherwise unused."""
        self.ncols = int(ncols)
        self.nrows = 0
        self.ncalls = 0
        self.stack_empty = True

    def reset(self):
        """No-op."""

    def close(self):
        """No-op."""

    def flush(self):
        """No-op."""

    def add(self, row, ncalls):
        """Count the point without storing it; return its would-be index."""
        self.ncalls = ncalls
        self.nrows += 1
        return self.nrows - 1

    def pop(self, Lmin):
        """Report that no stored point is available: ``(None, None)``."""
        return None, None
class FilePointStore(object):
    """Shared behaviour for point stores backed by a file.

    Subclasses fill ``self.stack`` (a list of ``(index, row)`` pairs,
    where ``row[0]`` is the likelihood threshold drawn from and ``row[1]``
    the likelihood of the point) and set ``self.fileobj``.
    """

    def reset(self):
        """Synchronise the empty-stack flag with the loaded data.

        Useful when Lmin is not reset to a lower value.
        """
        # self.stack = sorted(self.stack + self.data, key=lambda e: (e[1][0], e[0]))
        self.stack_empty = len(self.stack) == 0
        # print("PointStore: have %d items" % len(self.stack))

    def close(self):
        """Close the backing file."""
        self.fileobj.close()

    def flush(self):
        """Write any buffered output to disk."""
        self.fileobj.flush()

    def pop(self, Lmin):
        """Request a point that was sampled from <= Lmin and has L > Lmin.

        Returns
        -------
        index: int
            index of the point, None if no point exists
        point: array
            point values, None if no point exists
        """
        if self.stack_empty:
            return None, None
        # scan forward for an exact match; skipping exact matches
        # would shift the recorded loglikelihood sequence
        for pos, (idx, candidate) in enumerate(self.stack):
            threshold = candidate[0]
            logl = candidate[1]
            if threshold <= Lmin < logl:
                entry_idx, entry = self.stack.pop(pos)
                self.stack_empty = len(self.stack) == 0
                return entry_idx, entry
        self.stack_empty = len(self.stack) == 0
        return None, None
class TextPointStore(FilePointStore):
    """Storage in a text file.
    Stores previously drawn points above some likelihood contour,
    so that they can be reused in another run.
    The format is a tab separated text file.
    Through the fmt and delimiter attributes the output can be altered.
    """
    def __init__(self, filepath, ncols):
        """Load and append to storage at *filepath*.

        The file should contain *ncols* columns (Lmin, L, and others).
        """
        self.ncols = int(ncols)
        self.nrows = 0
        self.stack_empty = True
        self._load(filepath)
        # keep an append-mode handle open for add()
        self.fileobj = open(filepath, 'ab')
        self.fmt = '%.18e'
        self.delimiter = '\t'
    def _load(self, filepath):
        """Load previously stored points from data file *filepath*.

        Unparsable lines and lines with the wrong number of columns are
        skipped with a warning.
        """
        stack = []
        if os.path.exists(filepath):
            try:
                # use a context manager so the read handle is closed
                # deterministically (the original left it to the GC)
                with open(filepath) as fileobj:
                    for line in fileobj:
                        try:
                            parts = [float(p) for p in line.split()]
                            if len(parts) != self.ncols:
                                warnings.warn("skipping lines in '%s' with different number of columns" % (filepath))
                                continue
                            stack.append(parts)
                        except ValueError:
                            warnings.warn("skipping unparsable line in '%s'" % (filepath))
            except IOError:
                # unreadable file: behave as if it were empty
                pass
        self.stack = list(enumerate(stack))
        self.ncalls = len(self.stack)
        self.reset()
    def add(self, row, ncalls):
        r"""Add data point *row* = [Lmin, L, \*otherinfo] to storage.

        Returns the index of the added point.
        """
        if len(row) != self.ncols:
            raise ValueError("expected %d values, got %d in %s" % (self.ncols, len(row), row))
        np.savetxt(self.fileobj, [row], fmt=self.fmt, delimiter=self.delimiter)
        self.nrows += 1
        self.ncalls = ncalls
        return self.nrows - 1
class HDF5PointStore(FilePointStore):
    """Storage in a HDF5 file.
    Stores previously drawn points above some likelihood contour,
    so that they can be reused in another run.
    The format is a HDF5 file, which grows as needed.
    """
    # process-wide registry of (filepath, h5py.File) pairs opened by this
    # class; consulted in __init__ to close stale handles to the same path
    FILES_OPENED = []
    def __init__(self, filepath, ncols, **h5_file_args):
        """Load and append to storage at filepath.
        File contains *ncols* columns in 'points' dataset (Lmin, L, and others).
        h5_file_args are passed on to hdf5.File.
        """
        import h5py
        self.ncols = int(ncols)
        self.stack_empty = True
        # default to append mode unless the caller specified one
        h5_file_args['mode'] = h5_file_args.get('mode', 'a')
        # An annoying part of jupyter notebooks is that they keep all the variables
        # This means a old pointstore can survive, as we don't usually close them
        # Opening a new one with the same path will then fail with
        # Unable to create file (unable to truncate a file which is already open)
        # even when overwriting/truncating (mode='w')
        # To avoid this problem, we keep track of all the files opened in this process
        # and when another HDF5PointStore instance is created with the same path,
        # we close the old one. Further operations on it will then likely fail.
        for i, (filepath2, fileobj2) in enumerate(HDF5PointStore.FILES_OPENED):
            if filepath == filepath2:
                fileobj2.close()
                HDF5PointStore.FILES_OPENED.pop(i)
        self.fileobj = h5py.File(filepath, **h5_file_args)
        HDF5PointStore.FILES_OPENED.append((filepath, self.fileobj))
        self._load()
    def _load(self):
        """Load from data file."""
        if 'points' not in self.fileobj:
            # create an empty but growable dataset (maxshape=None rows)
            self.fileobj.create_dataset(
                'points', dtype=float,
                shape=(0, self.ncols), maxshape=(None, self.ncols))
        self.nrows, ncols = self.fileobj['points'].shape
        if ncols != self.ncols:
            raise IOError("Tried to resume from file '%s', which has a different number of columns!" % (self.fileobj))
        points = self.fileobj['points'][:]
        self.stack = list(enumerate(points))
        # total number of likelihood calls recorded in the file attributes;
        # falls back to the number of stored rows when missing
        self.ncalls = self.fileobj.attrs.get('ncalls', len(self.stack))
        self.reset()
    def add(self, row, ncalls):
        """Add data point row = [Lmin, L, *otherinfo* to storage."""
        if len(row) != self.ncols:
            raise ValueError("expected %d values, got %d in %s" % (self.ncols, len(row), row))
        # make space:
        self.fileobj['points'].resize(self.nrows + 1, axis=0)
        # insert:
        self.fileobj['points'][self.nrows,:] = row
        if self.ncalls != ncalls:
            # persist the updated call counter as a file attribute
            self.ncalls = self.fileobj.attrs['ncalls'] = ncalls
        self.nrows += 1
        return self.nrows - 1
| 7,205 | 31.606335 | 118 | py |
UltraNest | UltraNest-master/ultranest/stepsampler.py | """
MCMC-like step sampling
-----------------------
The classes implemented here are generators that, in each iteration,
only make one likelihood call. This allows running in parallel a
population of samplers that have the same execution time per call,
even if they do not terminate at the same number of iterations.
"""
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from .utils import listify as _listify
def generate_random_direction(ui, region, scale=1):
    """Sample an isotropically random direction of Euclidean length `scale`.

    The vector lives in unit-cube coordinates; only the dimensionality
    of `ui` is used.

    Parameters
    -----------
    ui: array
        starting point (sets the dimensionality)
    region: MLFriends object
        current region (ignored)
    scale: float
        length of the returned direction vector

    Returns
    --------
    v: array
        new direction vector
    """
    del region  # isotropic proposal: region geometry is irrelevant
    direction = np.random.normal(0, 1, size=len(ui))
    direction *= scale / np.linalg.norm(direction)
    return direction
def generate_cube_oriented_direction(ui, region, scale=1):
    """Propose a direction along one randomly chosen unit-cube axis.

    Parameters
    -----------
    ui: array
        starting point (sets the dimensionality)
    region: MLFriends object
        current region (ignored)
    scale: float
        magnitude of the single non-zero component

    Returns
    --------
    v: array
        new direction vector
    """
    del region  # axis proposals do not use the region geometry
    ndim = len(ui)
    direction = np.zeros(ndim)
    # pick one coordinate axis uniformly at random and scale it
    direction[np.random.randint(ndim)] = scale
    return direction
def generate_cube_oriented_differential_direction(ui, region, scale=1):
    """Propose along one random unit-cube axis, with a data-driven length.

    The step length along the chosen axis is the coordinate difference of
    two distinct random live points, multiplied by `scale`.

    Parameters
    -----------
    ui: array
        starting point (unused; dimensionality comes from region.u)
    region: MLFriends object
        current region (its live points `region.u` are used)
    scale: float
        factor applied to the live-point difference

    Returns
    --------
    v: array
        new direction vector
    """
    nlive, ndim = region.u.shape
    direction = np.zeros(ndim)
    # pick the axis once, then resample live-point pairs until the
    # chosen coordinate actually differs between the two points
    axis = np.random.randint(ndim)
    while direction[axis] == 0:
        k1 = np.random.randint(nlive)
        k2 = np.random.randint(nlive - 1)
        if k2 >= k1:
            # shift so that k2 never equals k1
            k2 += 1
        direction[axis] = scale * (region.u[k1, axis] - region.u[k2, axis])
    return direction
def generate_differential_direction(ui, region, scale=1):
    """Propose along the difference vector of two random live points.

    Parameters
    -----------
    ui: array
        starting point (unused; the proposal uses region.u only)
    region: MLFriends object
        current region (its live points `region.u` are used)
    scale: float
        factor applied to the live-point difference

    Returns
    --------
    v: array
        new direction vector
    """
    nlive = region.u.shape[0]
    # draw two distinct live-point indices
    k1 = np.random.randint(nlive)
    k2 = np.random.randint(nlive - 1)
    if k2 >= k1:
        # shift so that k2 never equals k1
        k2 += 1
    return (region.u[k1, :] - region.u[k2, :]) * scale
def generate_partial_differential_direction(ui, region, scale=1):
    """Draw a differential-evolution direction restricted to ~10% of the parameters.

    Takes the difference of two distinct random live points, keeps each
    component with probability 0.1 (zeroing the rest), guarantees at least
    one component is kept, and multiplies the result by `scale`.

    Fixes two defects of the original implementation: `scale` was
    documented but never applied, and the "at least one must be on" step
    set the mask entry to True, which *zeroed* that component instead of
    keeping it.

    Parameters
    -----------
    ui: array
        starting point (unused; the proposal uses region.u only)
    region: MLFriends object
        current region (its live points `region.u` are used)
    scale: float
        factor to multiply the vector

    Returns
    --------
    v: array
        new direction vector
    """
    nlive, ndim = region.u.shape
    # choose one live point as the anchor of the pair
    i = np.random.randint(nlive)
    while True:
        i2 = np.random.randint(nlive - 1)
        if i2 >= i:
            # shift so that i2 never equals i
            i2 += 1
        v = region.u[i] - region.u[i2]
        # mask marks components to zero; each survives with ~10% probability
        mask = np.random.uniform(size=ndim) > 0.1
        # guarantee at least one component is kept
        mask[np.random.randint(ndim)] = False
        v[mask] = 0
        if (v != 0).any():
            # repeat if the live points agree in all kept components
            break
    return v * scale
def generate_region_oriented_direction(ui, region, scale=1):
    """Propose along one randomly chosen axis of the whitened region frame.

    Parameters
    -----------
    ui: array
        starting point (sets the dimensionality)
    region: MLFriends object
        current region (its `transformLayer.axes` are used)
    scale: float
        factor applied to the chosen axis vector

    Returns
    --------
    v: array
        new direction vector (in u-space)
    """
    # pick one axis of the whitened (transformed) coordinate system
    axis_index = np.random.randint(len(ui))
    return region.transformLayer.axes[axis_index] * scale
def generate_region_random_direction(ui, region, scale=1):
    """Propose an isotropic direction in the whitened region frame.

    A unit vector of length `scale` is drawn isotropically in t-space,
    then mapped into u-space through the region axes.

    Parameters
    -----------
    ui: array
        starting point (sets the dimensionality)
    region: MLFriends object
        current region (its `transformLayer.axes` are used)
    scale: float
        length of the direction vector in t-space

    Returns
    --------
    v: array
        new direction vector
    """
    raw = np.random.normal(0, 1, size=len(ui))
    raw *= scale / np.linalg.norm(raw)
    # map the isotropic t-space vector through the region axes into u-space
    return np.dot(region.transformLayer.axes, raw)
def generate_mixture_random_direction(ui, region, scale=1):
    """Propose with a 50/50 mixture of two direction generators.

    With probability 0.5 a differential-evolution direction is drawn
    (:py:func:`generate_differential_direction`); otherwise a random
    whitened region axis (:py:func:`generate_region_oriented_direction`).

    Parameters
    -----------
    ui: array
        starting point
    region: MLFriends object
        current region
    scale: float
        length/factor forwarded to the chosen generator

    Returns
    --------
    v: array
        new direction vector
    """
    use_de_proposal = np.random.uniform() < 0.5
    if use_de_proposal:
        # differential-evolution style proposal
        return generate_differential_direction(ui, region, scale=scale)
    # region-oriented random axis proposal
    return generate_region_oriented_direction(ui, region, scale=scale)
def _inside_region(region, unew, uold):
"""Check if `unew` is inside region.
This is a bit looser than the region, because it adds a
MLFriends ellipsoid around the old point as well.
"""
tnew = region.transformLayer.transform(unew)
told = region.transformLayer.transform(uold)
mask2 = ((told.reshape((1, -1)) - tnew)**2).sum(axis=1) < region.maxradiussq
if mask2.all():
return mask2
mask = region.inside(unew)
return np.logical_or(mask, mask2)
def inside_region(region, unew, uold):
    """Check which points of `unew` lie inside `region`.

    Parameters
    -----------
    region: MLFriends object
        current region
    unew: array
        points to check
    uold: array
        unused; kept for signature compatibility with `_inside_region`

    Returns
    --------
    v: array
        boolean mask, True where the point is inside the region
    """
    del uold  # deliberately unused
    return region.inside(unew)
def adapt_proposal_total_distances(region, history, mean_pair_distance, ndim):
    """Chain-convergence criterion: total whitened distance from the chain start.

    `history` is a list of ``(point, logl)`` tuples. The whitened distances
    of all chain points from the first chain point are summed and compared
    against ``mean_pair_distance / ndim``.

    Returns ``(far_enough, [d2, mean_pair_distance])`` -- a bool plus the
    measured and reference values for logging.
    """
    # compute mean vector of each proposed jump
    # compute total distance of all jumps
    tproposed = region.transformLayer.transform(np.asarray([u for u, _ in history]))
    assert len(tproposed.sum(axis=1)) == len(tproposed)
    d2 = ((((tproposed[0] - tproposed)**2).sum(axis=1))**0.5).sum()
    far_enough = d2 > mean_pair_distance / ndim
    return far_enough, [d2, mean_pair_distance]
def adapt_proposal_total_distances_NN(region, history, mean_pair_distance, ndim):
    """Chain-convergence criterion: total distance from start vs. MLFriends radius.

    Like :py:func:`adapt_proposal_total_distances`, but the reference scale
    is the MLFriends radius ``region.maxradiussq**0.5`` instead of the mean
    pair distance (mean_pair_distance and ndim are unused here; kept for the
    common adapt-function signature).

    Returns ``(far_enough, [d2, region.maxradiussq**0.5])``.
    """
    # compute mean vector of each proposed jump
    # compute total distance of all jumps
    tproposed = region.transformLayer.transform(np.asarray([u for u, _ in history]))
    assert len(tproposed.sum(axis=1)) == len(tproposed)
    d2 = ((((tproposed[0] - tproposed)**2).sum(axis=1))**0.5).sum()
    far_enough = d2 > region.maxradiussq**0.5
    return far_enough, [d2, region.maxradiussq**0.5]
def adapt_proposal_summed_distances(region, history, mean_pair_distance, ndim):
    """Chain-convergence criterion: summed whitened step lengths of the chain.

    Sums the whitened distances between consecutive chain points of
    `history` (list of ``(point, logl)`` tuples) and compares against
    ``mean_pair_distance / ndim``.

    Returns ``(far_enough, [d2, mean_pair_distance])``.
    """
    # compute sum of distances from each jump
    tproposed = region.transformLayer.transform(np.asarray([u for u, _ in history]))
    d2 = (((tproposed[1:,:] - tproposed[:-1,:])**2).sum(axis=1)**0.5).sum()
    far_enough = d2 > mean_pair_distance / ndim
    return far_enough, [d2, mean_pair_distance]
def adapt_proposal_summed_distances_NN(region, history, mean_pair_distance, ndim):
    """Chain-convergence criterion: summed step lengths vs. MLFriends radius.

    Like :py:func:`adapt_proposal_summed_distances`, but compares against
    the MLFriends radius ``region.maxradiussq**0.5`` (mean_pair_distance and
    ndim are unused here; kept for the common adapt-function signature).

    Returns ``(far_enough, [d2, region.maxradiussq**0.5])``.
    """
    # compute sum of distances from each jump
    tproposed = region.transformLayer.transform(np.asarray([u for u, _ in history]))
    d2 = (((tproposed[1:,:] - tproposed[:-1,:])**2).sum(axis=1)**0.5).sum()
    far_enough = d2 > region.maxradiussq**0.5
    return far_enough, [d2, region.maxradiussq**0.5]
def adapt_proposal_move_distances(region, history, mean_pair_distance, ndim):
    """Chain-convergence criterion: squared start-to-end displacement.

    Compares the squared whitened distance between the first and last chain
    point of `history` against ``region.maxradiussq`` (mean_pair_distance and
    ndim are unused here; kept for the common adapt-function signature).

    Returns ``(far_enough, [d2, region.maxradiussq**0.5])``.
    NOTE(review): `d2` is a *squared* distance, while the logged reference
    is the radius (not squared) -- the diagnostic pair mixes units; confirm.
    """
    # compute distance from start to end
    ustart, _ = history[0]
    ufinal, _ = history[-1]
    tstart, tfinal = region.transformLayer.transform(np.vstack((ustart, ufinal)))
    d2 = ((tstart - tfinal)**2).sum()
    far_enough = d2 > region.maxradiussq
    return far_enough, [d2, region.maxradiussq**0.5]
def adapt_proposal_move_distances_midway(region, history, mean_pair_distance, ndim):
    """Chain-convergence criterion: squared start-to-midpoint displacement.

    Like :py:func:`adapt_proposal_move_distances`, but measures to the
    chain point halfway through `history` instead of the final one
    (mean_pair_distance and ndim are unused; kept for signature parity).

    Returns ``(far_enough, [d2, region.maxradiussq**0.5])``.
    NOTE(review): `d2` is squared while the logged reference is not; confirm.
    """
    # compute distance from start to end
    ustart, _ = history[0]
    middle = max(1, len(history) // 2)
    ufinal, _ = history[middle]
    tstart, tfinal = region.transformLayer.transform(np.vstack((ustart, ufinal)))
    d2 = ((tstart - tfinal)**2).sum()
    far_enough = d2 > region.maxradiussq
    return far_enough, [d2, region.maxradiussq**0.5]
class StepSampler(object):
"""Base class for a simple step sampler, staggering around.
Scales proposal towards a 50% acceptance rate.
"""
def __init__(
self, nsteps, generate_direction,
scale=1.0, adaptive_nsteps=False, max_nsteps=1000,
region_filter=False, log=False,
):
"""Initialise sampler.
Parameters
-----------
scale: float
initial proposal size
nsteps: int
number of accepted steps until the sample is considered independent.
To find the right value, run nested sampling several time,
always doubling nsteps, until Z is stable.
generate_direction: function
direction proposal function.
Available are:
* :py:func:`generate_cube_oriented_direction` (slice sampling)
* :py:func:`generate_region_oriented_direction` (slice sampling on the whitened parameter space)
* :py:class:`SequentialDirectionGenerator` (sequential slice sampling on the whitened parameter space)
* :py:func:`generate_random_direction` (hit-and-run sampling)
* :py:func:`generate_region_random_direction` (hit-and-run sampling on the whitened parameter space)
* :py:func:`generate_cube_oriented_differential_direction` (slice sampling with better proposal scale)
* :py:func:`generate_differential_direction` (differential evolution slice proposal)
* :py:func:`generate_partial_differential_direction` (differential evolution slice proposal on only 10% of the parameters)
* :py:func:`generate_mixture_random_direction` (generate_differential_direction and generate_cube_oriented_differential_direction)
Additionally, :py:class:`OrthogonalDirectionGenerator`
can be applied to a generate_direction.
When in doubt, try :py:func:`generate_mixture_random_direction`.
It combines efficient moves along the live point distribution,
with robustness against collapse to a subspace.
:py:func:`generate_cube_oriented_direction` works well too.
adaptive_nsteps: False, 'proposal-distance', 'move-distance'
Strategy to adapt the number of steps. The strategies
make sure that:
* 'move-distance' (recommended): distance between
start point and final position exceeds the mean distance
between pairs of live points.
* 'move-distance-midway': distance between
start point and position in the middle of the chain
exceeds the mean distance between pairs of live points.
* 'proposal-total-distances': mean square distance of
proposed vectors exceeds the mean distance
between pairs of live points.
* 'proposal-total-distances-NN': mean distance
of chain points from starting point exceeds mean distance
between pairs of live points.
* 'proposal-summed-distances-NN': summed distances
between chain points exceeds mean distance
between pairs of live points.
* 'proposal-summed-distances-min-NN': smallest distance
between chain points exceeds mean distance
between pairs of live points.
Adapting can give usable results. However, strictly speaking,
detailed balance is not maintained, so the results can be biased.
You can use the logstat property to find out the `nsteps` learned
from one run (third column), and use the largest value for `nsteps`
of a fresh run.
max_nsteps: int
Maximum number of steps the adaptive_nsteps can reach.
region_filter: bool
if True, use region to check if a proposed point can be inside
before calling likelihood.
log: file
log file for sampler statistics, such as acceptance rate,
proposal scale, number of steps, jump distance and distance
between live points
"""
self.history = []
self.nsteps = nsteps
self.nrejects = 0
self.scale = 1.0
self.max_nsteps = max_nsteps
self.next_scale = self.scale
self.nudge = 1.1**(1. / self.nsteps)
self.nsteps_nudge = 1.01
self.generate_direction = generate_direction
adaptive_nsteps_options = {
False: None,
'move-distance': adapt_proposal_move_distances,
'move-distance-midway': adapt_proposal_move_distances_midway,
'proposal-total-distances': adapt_proposal_total_distances,
'proposal-total-distances-NN': adapt_proposal_total_distances_NN,
'proposal-summed-distances': adapt_proposal_summed_distances,
'proposal-summed-distances-NN': adapt_proposal_summed_distances_NN,
}
if adaptive_nsteps not in adaptive_nsteps_options.keys():
raise ValueError("adaptive_nsteps must be one of: %s, not '%s'" % (adaptive_nsteps_options, adaptive_nsteps))
self.adaptive_nsteps = adaptive_nsteps
self.adaptive_nsteps_function = adaptive_nsteps_options[adaptive_nsteps]
self.adaptive_nsteps_needs_mean_pair_distance = self.adaptive_nsteps in (
'proposal-total-distances', 'proposal-summed-distances',
)
self.mean_pair_distance = np.nan
self.region_filter = region_filter
self.log = log
self.logstat = []
self.logstat_labels = ['rejection_rate', 'scale', 'steps']
if adaptive_nsteps:
self.logstat_labels += ['jump-distance', 'reference-distance']
def __str__(self):
"""Return string representation."""
if not self.adaptive_nsteps:
return type(self).__name__ + '(nsteps=%d, generate_direction=%s)' % (self.nsteps, self.generate_direction)
else:
return type(self).__name__ + '(adaptive_nsteps=%s, generate_direction=%s)' % (self.adaptive_nsteps, self.generate_direction)
def plot(self, filename):
    """Plot sampler statistics.

    Parameters
    -----------
    filename: str
        Stores plot into ``filename`` and data into
        ``filename + ".txt.gz"``.
    """
    if not self.logstat:
        return
    npanels = len(self.logstat_labels)
    plt.figure(figsize=(10, 1 + 3 * npanels))
    for i, label in enumerate(self.logstat_labels):
        series = [entry[i] for entry in self.logstat]
        plt.subplot(npanels, 1, 1 + i)
        plt.ylabel(label)
        plt.plot(series)
        # overplot block means over chunks of 20 iterations
        xmarks = list(range(0, len(series), 20))
        ymeans = [np.mean(series[j:j + 20]) for j in xmarks]
        plt.plot(xmarks, ymeans)
        if np.min(series) > 0:
            plt.yscale('log')
    plt.savefig(filename, bbox_inches='tight')
    np.savetxt(filename + '.txt.gz', self.logstat,
               header=','.join(self.logstat_labels), delimiter=',')
    plt.close()
def move(self, ui, region, ndraw=1, plot=False):
    """Move around point ``ui``. Stub to be implemented by child classes.

    ``ui`` is the current point in unit-cube space, ``region`` the
    current MLFriends region, ``ndraw`` the number of proposals to
    generate, and ``plot`` a debugging flag.

    Raises
    -------
    NotImplementedError
        always; subclasses must override this method.
    """
    raise NotImplementedError()
def adjust_outside_region(self):
    """Shrink proposal scale after proposing outside the region."""
    print("ineffective proposal scale (%g). shrinking..." % self.scale)
    # The region is usually generous. Landing outside it means the
    # scale is badly wrong and the chain is probably stuck: shrink
    # strongly, and restart the chain.
    shrink = self.nudge**10
    self.scale /= shrink
    self.next_scale /= shrink
    assert self.scale > 0
    assert self.next_scale > 0
    # log this event as a rejected step (-1 rejection rate marker)
    entry = [-1.0, self.scale, self.nsteps]
    if self.adaptive_nsteps:
        entry += [np.nan, np.nan]
    self.logstat.append(entry)
def adjust_accept(self, accepted, unew, pnew, Lnew, nc):
    """Adjust proposal, given that a new point was found after `nc` calls.

    Parameters
    -----------
    accepted: bool
        Whether the most recent proposal was accepted
    unew: array
        new point (in u-space)
    pnew: array
        new point (in p-space)
    Lnew: float
        loglikelihood of new point
    nc: int
        number of likelihood function calls used.
    """
    if not accepted:
        # shrink the scale strongly, count the rejection,
        # and stay at the previous chain point
        self.next_scale /= self.nudge**10
        self.nrejects += 1
        self.history.append(self.history[-1])
    else:
        # grow the scale gently and record the accepted point
        self.next_scale *= self.nudge
        self.history.append((unew.copy(), Lnew.copy()))
    assert self.next_scale > 0, self.next_scale
def adapt_nsteps(self, region):
    """Adapt the number of steps.

    Parameters
    -----------
    region: MLFriends object
        current region
    """
    if not self.adaptive_nsteps:
        return
    if len(self.history) < self.nsteps:
        # incomplete or aborted for some reason
        print("not adapting, incomplete history", len(self.history), self.nsteps)
        return
    if self.adaptive_nsteps_needs_mean_pair_distance:
        assert np.isfinite(self.mean_pair_distance)
    ndim = region.u.shape[1]
    far_enough, extra_info = self.adaptive_nsteps_function(
        region, self.history, self.mean_pair_distance, ndim)
    self.logstat[-1] += extra_info
    # decrease nsteps when the chain moved far enough, otherwise
    # increase; change by at least one step in either direction
    if far_enough:
        proposed_nsteps = min(self.nsteps - 1, int(self.nsteps / self.nsteps_nudge))
    else:
        proposed_nsteps = max(self.nsteps + 1, int(self.nsteps * self.nsteps_nudge))
    self.nsteps = max(1, min(self.max_nsteps, proposed_nsteps))
def finalize_chain(self, region=None, Lmin=None, Ls=None):
    """Store chain statistics and adapt proposal.

    Parameters
    -----------
    region: MLFriends object
        current region
    Lmin: float
        current loglikelihood threshold
    Ls: array
        loglikelihood values of the live points
    """
    # record acceptance statistics of the completed chain
    self.logstat.append([self.nrejects / self.nsteps, self.scale, self.nsteps])
    if self.log:
        ustart, Lstart = self.history[0]
        ufinal, Lfinal = self.history[-1]
        mean_pair_distance = self.mean_pair_distance
        # transform chain start and end points into whitened t-space
        tstart, tfinal = region.transformLayer.transform(np.vstack((ustart, ufinal)))
        # likelihood rank of start and end among the live points
        iLstart = np.sum(Ls > Lstart)
        iLfinal = np.sum(Ls > Lfinal)
        # nearest neighbor index of start and end
        # NOTE(review): argmin is taken over the flattened squared
        # differences; a per-point nearest neighbour would need
        # .sum(axis=1) before argmin -- confirm intended behaviour
        itstart = np.argmin((region.unormed - tstart.reshape((1, -1)))**2)
        itfinal = np.argmin((region.unormed - tfinal.reshape((1, -1)))**2)
        np.savetxt(self.log, [_listify(
            [Lmin], ustart, ufinal, tstart, tfinal,
            [self.nsteps, region.maxradiussq**0.5, mean_pair_distance,
             iLstart, iLfinal, itstart, itfinal])])
    if self.adaptive_nsteps:
        self.adapt_nsteps(region=region)
    # clamp the per-chain scale change to a factor of nudge**10
    if self.next_scale > self.scale * self.nudge**10:
        self.next_scale = self.scale * self.nudge**10
    elif self.next_scale < self.scale / self.nudge**10:
        self.next_scale = self.scale / self.nudge**10
    self.scale = self.next_scale
    # reset per-chain state
    self.history = []
    self.nrejects = 0
def new_chain(self, region=None):
    """Reset chain statistics to start a fresh path."""
    self.nrejects = 0
    self.history = []
def region_changed(self, Ls, region):
    """React to change of region.

    Parameters
    -----------
    region: MLFriends object
        current region
    Ls: array
        loglikelihood values of the live points
    """
    # the mean pair distance is only needed by some adaptation
    # schemes and is costly, so compute it only when required
    if self.adaptive_nsteps_needs_mean_pair_distance:
        self.mean_pair_distance = region.compute_mean_pair_distance()
def __next__(self, region, Lmin, us, Ls, transform, loglike, ndraw=10, plot=False, tregion=None):
    """Get next point.

    Parameters
    ----------
    region: MLFriends
        region.
    Lmin: float
        loglikelihood threshold
    us: array of vectors
        current live points
    Ls: array of floats
        current live point likelihoods
    transform: function
        transform function
    loglike: function
        loglikelihood function
    ndraw: int
        number of draws to attempt simultaneously.
    plot: bool
        whether to produce debug plots.
    tregion: :py:class:`WrappingEllipsoid`
        optional ellipsoid in transformed space for rejecting proposals

    Returns
    --------
    u, p, L, nc: tuple
        new point (unit cube coordinates, physical parameters,
        loglikelihood) and the number of likelihood calls used;
        ``(None, None, None, nc)`` while the chain is incomplete.
    """
    # find most recent point in history conforming to current Lmin
    for j, (uj, Lj) in enumerate(self.history):
        if not Lj > Lmin:
            # wandered out of the L constraint; truncate and revert
            self.history = self.history[:j]
            break
    if len(self.history) > 0:
        ui, Li = self.history[-1]
    else:
        # select starting point
        self.new_chain(region)
        # choose a new random starting point among the live points
        i = np.random.randint(len(us))
        self.starti = i
        ui = us[i,:]
        Li = Ls[i]
        self.history.append((ui.copy(), Li.copy()))
        del i
    while True:
        unew = self.move(ui, region, ndraw=ndraw, plot=plot)
        if plot:
            plt.plot([ui[0], unew[:,0]], [ui[1], unew[:,1]], '-', color='k', lw=0.5)
            plt.plot(ui[0], ui[1], 'd', color='r', ms=4)
            plt.plot(unew[:,0], unew[:,1], 'x', color='r', ms=4)
        # drop proposals outside the unit cube
        mask = np.logical_and(unew > 0, unew < 1).all(axis=1)
        if not mask.any():
            self.adjust_outside_region()
            continue
        unew = unew[mask,:]
        nc = 0
        if self.region_filter:
            # drop proposals the region rejects, before paying for a
            # likelihood call
            mask = inside_region(region, unew, ui)
            if not mask.any():
                print("rejected by region")
                self.adjust_outside_region()
                continue
            unew = unew[mask,:]
            if tregion is not None:
                # additional filter with the transformed-space ellipsoid
                pnew = transform(unew)
                tmask = tregion.inside(pnew)
                unew = unew[tmask,:]
                pnew = pnew[tmask,:]
                if len(unew) == 0:
                    self.adjust_outside_region()
                    continue
        break
    # evaluate the first surviving proposal
    unew = unew[0,:]
    pnew = transform(unew.reshape((1, -1)))
    Lnew = loglike(pnew)[0]
    nc = 1
    if Lnew > Lmin:
        if plot:
            plt.plot(unew[0], unew[1], 'o', color='g', ms=4)
        self.adjust_accept(True, unew, pnew, Lnew, nc)
    else:
        self.adjust_accept(False, unew, pnew, Lnew, nc)
    if len(self.history) > self.nsteps:
        # chain is long enough; deliver the last point as the sample
        u, L = self.history[-1]
        p = transform(u.reshape((1, -1)))[0]
        self.finalize_chain(region=region, Lmin=Lmin, Ls=Ls)
        return u, p, L, nc
    # do not have a independent sample yet
    return None, None, None, nc
class MHSampler(StepSampler):
    """Gaussian Random Walk."""

    def move(self, ui, region, ndraw=1, plot=False):
        """Propose Gaussian jumps in u-space around ``ui``.

        Parameters
        ----------
        ui: array
            current point
        ndraw: int
            number of points to draw.
        region:
            ignored
        plot:
            ignored
        """
        # draw a direction, then scatter normally-distributed step
        # lengths along it (at most 10 proposals at once)
        direction = self.generate_direction(ui, region, scale=self.scale)
        nsamples = min(10, ndraw)
        step_lengths = np.random.normal(0, 1, size=(nsamples, 1))
        return ui.reshape((1, -1)) + direction * step_lengths
def CubeMHSampler(*args, **kwargs):
    """Gaussian Metropolis-Hastings sampler, using unit cube directions."""
    # fix the direction proposal; all other arguments pass through
    return MHSampler(*args, generate_direction=generate_random_direction, **kwargs)
def RegionMHSampler(*args, **kwargs):
    """Gaussian Metropolis-Hastings sampler, using region directions."""
    # fix the direction proposal; all other arguments pass through
    return MHSampler(*args, generate_direction=generate_region_random_direction, **kwargs)
class SliceSampler(StepSampler):
    """Slice sampler, respecting the region.

    The current slice is kept in ``self.interval`` as a tuple
    ``(v, left, right, u)``: the direction vector ``v`` through the
    current point, the bracket ends ``left <= 0 <= right`` (in units
    of ``v``), and ``u``, the last sampled position on the slice.
    """

    def new_chain(self, region=None):
        """Start a new path, reset slice."""
        self.interval = None
        self.found_left = False
        self.found_right = False
        self.axis_index = 0
        self.history = []
        self.nrejects = 0

    def adjust_accept(self, accepted, unew, pnew, Lnew, nc):
        """See :py:meth:`StepSampler.adjust_accept`."""
        v, left, right, u = self.interval
        if not self.found_left:
            # stepping-out phase, left side: double until rejected
            if accepted:
                self.interval = (v, left * 2, right, u)
            else:
                self.found_left = True
        elif not self.found_right:
            # stepping-out phase, right side: double until rejected
            if accepted:
                self.interval = (v, left, right * 2, u)
            else:
                self.found_right = True
                # adjust scale towards the final bracket size
                if -left > self.next_scale or right > self.next_scale:
                    self.next_scale *= 1.1
                else:
                    self.next_scale /= 1.1
        else:
            # shrinking phase
            if accepted:
                # start with a new interval next time
                self.interval = None
                self.history.append((unew.copy(), Lnew.copy()))
            else:
                self.nrejects += 1
                # shrink current interval towards the chain point (u=0)
                if u == 0:
                    pass
                elif u < 0:
                    left = u
                elif u > 0:
                    right = u
                self.interval = (v, left, right, u)

    def adjust_outside_region(self):
        """Adjust proposal given that we landed outside region."""
        # treat like a rejected proposal; no likelihood call was made
        self.adjust_accept(False, unew=None, pnew=None, Lnew=None, nc=0)

    def move(self, ui, region, ndraw=1, plot=False):
        """Advance the slice sampling move. see :py:meth:`StepSampler.move`."""
        if self.interval is None:
            # start a fresh slice with an initial +- scale bracket
            v = self.generate_direction(ui, region)
            # expand direction until it is surely outside
            left = -self.scale
            right = self.scale
            self.found_left = False
            self.found_right = False
            u = 0
            self.interval = (v, left, right, u)
        else:
            v, left, right, u = self.interval
        if plot:
            plt.plot([(ui + v * left)[0], (ui + v * right)[0]],
                     [(ui + v * left)[1], (ui + v * right)[1]],
                     ':o', color='k', lw=2, alpha=0.3)
        # stepping-out phase: probe the current bracket ends,
        # shrink direction if outside
        if not self.found_left:
            xj = ui + v * left
            if not self.region_filter or inside_region(region, xj.reshape((1, -1)), ui):
                return xj.reshape((1, -1))
            else:
                self.found_left = True
        if not self.found_right:
            xj = ui + v * right
            if not self.region_filter or inside_region(region, xj.reshape((1, -1)), ui):
                return xj.reshape((1, -1))
            else:
                self.found_right = True
                # adjust scale
                if -left > self.next_scale or right > self.next_scale:
                    self.next_scale *= 1.1
                else:
                    self.next_scale /= 1.1
        # shrinking phase: sample uniformly within the bracket;
        # points outside the region shrink it without a likelihood call
        while True:
            u = np.random.uniform(left, right)
            xj = ui + v * u
            if not self.region_filter or inside_region(region, xj.reshape((1, -1)), ui):
                self.interval = (v, left, right, u)
                return xj.reshape((1, -1))
            else:
                if u < 0:
                    left = u
                else:
                    right = u
                self.interval = (v, left, right, u)
def CubeSliceSampler(*args, **kwargs):
    """Slice sampler, randomly picking unit cube axes."""
    # fix the direction proposal; all other arguments pass through
    return SliceSampler(*args, generate_direction=generate_cube_oriented_direction, **kwargs)
def RegionSliceSampler(*args, **kwargs):
    """Slice sampler, randomly picking region axes."""
    # fix the direction proposal; all other arguments pass through
    return SliceSampler(*args, generate_direction=generate_region_oriented_direction, **kwargs)
def BallSliceSampler(*args, **kwargs):
    """Hit & run sampler. Choose random directions in space."""
    # fix the direction proposal; all other arguments pass through
    return SliceSampler(*args, generate_direction=generate_random_direction, **kwargs)
def RegionBallSliceSampler(*args, **kwargs):
    """Hit & run sampler. Choose random directions according to region."""
    # fix the direction proposal; all other arguments pass through
    return SliceSampler(*args, generate_direction=generate_region_random_direction, **kwargs)
class SequentialRegionDirectionGenerator(object):
    """Sequentially proposes one region axes after the next."""

    def __init__(self):
        """Initialise at the first axis."""
        self.axis_index = 0

    def __call__(self, ui, region, scale=1):
        """Choose the next axis in t-space.

        Parameters
        -----------
        ui: array
            current point (in u-space)
        region: MLFriends object
            region to use for transformation
        scale: float
            length of direction vector

        Returns
        --------
        v: array
            new direction vector (in u-space)
        """
        ndim = len(ui)
        ti = region.transformLayer.transform(ui)
        # cycle through the axes of the transformed space
        axis = self.axis_index % ndim
        self.axis_index = axis + 1
        tv = np.zeros(ndim)
        tv[axis] = 1.0
        # map a small step along that axis back to unit cube space
        uj = region.transformLayer.untransform(ti + tv * 1e-3)
        v = uj - ui
        # normalise the direction vector to the requested length
        v *= scale / (v**2).sum()**0.5
        return v

    def __str__(self):
        return type(self).__name__ + '()'
def RegionSequentialSliceSampler(*args, **kwargs):
    """Slice sampler, sequentially iterating region axes."""
    # one generator instance is shared across all steps of this sampler
    direction_generator = SequentialRegionDirectionGenerator()
    return SliceSampler(*args, generate_direction=direction_generator, **kwargs)
class OrthogonalDirectionGenerator(object):
    """Orthogonalizes proposal vectors."""

    def __init__(self, generate_direction):
        """Initialise.

        Parameters
        -----------
        generate_direction: function
            direction proposal to orthogonalize
        """
        self.axis_index = 0
        self.generate_direction = generate_direction
        self.directions = None

    def __str__(self):
        """Return string representation."""
        return type(self).__name__ + '(generate_direction=%s)' % self.generate_direction

    def __call__(self, ui, region, scale=1):
        """Return next orthogonalized vector.

        Parameters
        -----------
        ui: array
            current point (in u-space)
        region: MLFriends object
            region to use for transformation
        scale: float
            length of direction vector

        Returns
        --------
        v: array
            new direction vector (in u-space)
        """
        ndim = len(ui)
        # refill the basis on the first call or once all axes were used:
        # draw ndim proposals and orthogonalize them via QR,
        # scaling each basis vector by its diagonal R entry
        if self.directions is None or self.axis_index >= ndim:
            proposals = np.empty((ndim, ndim))
            for k in range(ndim):
                proposals[k] = self.generate_direction(ui, region, scale=scale)
            q, r = np.linalg.qr(proposals)
            self.directions = np.dot(q, np.diag(np.diag(r)))
            self.axis_index = 0
        v = self.directions[self.axis_index]
        self.axis_index += 1
        return v
class SpeedVariableGenerator(object):
    """Propose directions with only some parameters variable.

    Proposes in region direction, but masks the proposal so that only
    some dimensions are updated at a time. Completely configurable.
    """

    def __init__(self, step_matrix, generate_direction=generate_region_random_direction):
        """Initialise sampler.

        Parameters
        -----------
        step_matrix: matrix or list of slices
            **if a bool matrix of shape (n_steps, n_dims):**
            each row indicates which parameters should be updated in
            that step; e.g. ``[[True, True], [False, True], [False, True]]``
            updates the first parameter 1/3 of the time and the second
            in every step; ``np.ones((n_steps, n_dims), dtype=bool)``
            gives a full update every step.

            **if a list of slices:**
            each entry selects the parameters updated in that step,
            e.g. ``[Ellipsis, slice(2,10), slice(5,10)]``.
        generate_direction: function
            direction proposal function.
        """
        self.step_matrix = step_matrix
        self.nsteps = len(self.step_matrix)
        self.axis_index = 0
        self.generate_direction = generate_direction

    def __call__(self, ui, region, scale=1):
        """Generate a slice sampling direction, using only some of the axes.

        Parameters
        -----------
        ui: array
            current point (in u-space)
        region: MLFriends object
            region to use for transformation
        scale: float
            length of direction vector

        Returns
        --------
        v: array
            new direction vector
        """
        ndim = len(ui)
        # full-dimensional proposal, then zero out the inactive dimensions
        v = self.generate_direction(ui=ui, region=region, scale=scale)
        step = self.axis_index % self.nsteps
        self.axis_index = step + 1
        active_dims = self.step_matrix[step]
        vmasked = np.zeros(ndim)
        vmasked[active_dims] = v[active_dims]  # fails if step_matrix is faulty
        return vmasked
def SpeedVariableRegionSliceSampler(step_matrix, *args, **kwargs):
    """Slice sampler, in region axes.

    Updates only some dimensions at a time, completely user-definable.

    Parameters
    -----------
    step_matrix: matrix or list of slices
        passed to :py:class:`SpeedVariableGenerator`; defines which
        parameters are updated in each step. Its length also sets the
        default number of steps per chain.
    """
    # callers may override the inner direction proposal via kwargs
    # NOTE(review): popping 'nsteps' inside the call arguments, after
    # **kwargs has already been unpacked, relies on evaluation order;
    # verify callers passing nsteps=... behave as intended
    generate_direction = kwargs.pop('generate_direction', generate_region_random_direction)
    return SliceSampler(
        *args, **kwargs,
        nsteps=kwargs.pop('nsteps', len(step_matrix)),
        generate_direction=SpeedVariableGenerator(
            step_matrix=step_matrix,
            generate_direction=generate_direction
        )
    )
def ellipsoid_bracket(ui, v, ellipsoid_center, ellipsoid_inv_axes, ellipsoid_radius_square):
    """Find line-ellipsoid intersection points.

    For a line from ui in direction v through an ellipsoid
    centered at ellipsoid_center with axes matrix ellipsoid_inv_axes,
    return the lower and upper intersection parameter.

    Parameters
    -----------
    ui: array
        current point (in u-space)
    v: array
        direction vector
    ellipsoid_center: array
        center of the ellipsoid
    ellipsoid_inv_axes: array
        ellipsoid axes matrix, as computed by :py:class:`WrappingEllipsoid`
    ellipsoid_radius_square: float
        square of the ellipsoid radius

    Returns
    --------
    left: float
        distance to go until ellipsoid is intersected (non-positive)
    right: float
        distance to go until ellipsoid is intersected (non-negative)
    """
    # map direction and offset into the sphere coordinates of the ellipsoid
    direction_sphere = np.dot(v, ellipsoid_inv_axes)
    offset_sphere = np.dot(ui - ellipsoid_center, ellipsoid_inv_axes)
    # solve |offset + t * direction|^2 = radius^2 for t (a quadratic)
    a = np.dot(direction_sphere, direction_sphere)
    b = 2 * np.dot(direction_sphere, offset_sphere)
    c = np.dot(offset_sphere, offset_sphere) - ellipsoid_radius_square
    assert c <= 0, ("outside ellipsoid", c)
    discriminant = b**2 - 4 * a * c
    assert discriminant >= 0, ("no intersection", discriminant, c)
    root = discriminant**0.5
    d1 = (-b + root) / (2 * a)
    d2 = (-b - root) / (2 * a)
    # bracket always includes t=0 (the current point)
    return min(0, d1, d2), max(0, d1, d2)
def crop_bracket_at_unit_cube(ui, v, left, right, epsilon=1e-6):
    """Find line-cube intersection points.

    A line segment from *ui* in direction *v* from t between *left* <= 0 <= *right*
    will be truncated by the unit cube. Returns the bracket and whether cropping was applied.

    Parameters
    -----------
    ui: array
        current point (in u-space)
    v: array
        direction vector
    left: float
        bracket lower end (non-positive)
    right: float
        bracket upper end (non-negative)
    epsilon: float
        small number to allow for numerical effects

    Returns
    --------
    left: float
        new left
    right: float
        new right
    cropped_left: bool
        whether left was changed
    cropped_right: bool
        whether right was changed
    """
    assert (ui > 0).all(), ui
    assert (ui < 1).all(), ui
    # --- left end: pull inside the cube, first above 0, then below 1 ---
    cropped_left = False
    leftu = left * v + ui
    below = leftu <= 0
    if below.any():
        # choose left so the point is > 0 in all axes (solve 0 = t*v + ui)
        left = (-ui[below] / v[below]).max() * (1 - epsilon)
        leftu = left * v + ui
        cropped_left = True
        assert (leftu >= 0).all(), leftu
    above = leftu >= 1
    if above.any():
        left = ((1 - ui[above]) / v[above]).max() * (1 - epsilon)
        leftu = left * v + ui
        cropped_left = True
        assert (leftu <= 1).all(), leftu
    # --- right end: pull inside the cube, first below 1, then above 0 ---
    cropped_right = False
    rightu = right * v + ui
    above = rightu >= 1
    if above.any():
        # choose right so the point is < 1 in all axes (solve 1 = t*v + ui)
        right = ((1 - ui[above]) / v[above]).min() * (1 - epsilon)
        rightu = right * v + ui
        cropped_right = True
        assert (rightu <= 1).all(), rightu
    below = rightu <= 0
    if below.any():
        right = (-ui[below] / v[below]).min() * (1 - epsilon)
        rightu = right * v + ui
        cropped_right = True
        assert (rightu >= 0).all(), rightu
    assert left <= 0 <= right, (left, right)
    return left, right, cropped_left, cropped_right
| 41,046 | 32.290349 | 142 | py |
UltraNest | UltraNest-master/ultranest/integrator.py | """
Nested sampling integrators
---------------------------
This module provides the high-level class :py:class:`ReactiveNestedSampler`,
for calculating the Bayesian evidence and posterior samples of arbitrary models.
"""
# Some parts are from the Nestle library by Kyle Barbary (https://github.com/kbarbary/nestle)
# Some parts are from the nnest library by Adam Moss (https://github.com/adammoss/nnest)
from __future__ import print_function, division
import os
import sys
import csv
import json
import operator
import time
import warnings
from numpy import log, exp, logaddexp
import numpy as np
from .utils import create_logger, make_run_dir, resample_equal, vol_prefactor, vectorize, listify as _listify
from .utils import is_affine_transform, normalised_kendall_tau_distance, distributed_work_chunk_size
from ultranest.mlfriends import MLFriends, AffineLayer, ScalingLayer, find_nearby, WrappingEllipsoid, RobustEllipsoidRegion
from .store import HDF5PointStore, TextPointStore, NullPointStore
from .viz import get_default_viz_callback
from .ordertest import UniformOrderAccumulator
from .netiter import PointPile, SingleCounter, MultiCounter, BreadthFirstIterator, TreeNode, count_tree_between, find_nodes_before, logz_sequence
from .netiter import dump_tree, combine_results
from .hotstart import get_auxiliary_contbox_parameterization
__all__ = ['ReactiveNestedSampler', 'NestedSampler', 'read_file', 'warmstart_from_similar_file']
def _get_cumsum_range(pi, dp):
    """Compute quantile indices from probabilities.

    Parameters
    ------------
    pi: array
        probability of each item.
    dp: float
        Quantile (between 0 and 0.5).

    Returns
    ---------
    index_lo: int
        Index of the item corresponding to quantile ``dp``.
    index_hi: int
        Index of the item corresponding to quantile ``1-dp``.
    """
    cumulative = pi.cumsum()
    # first index where the cumulative probability exceeds dp
    above = np.where(cumulative > dp)[0]
    index_lo = above[0] if len(above) > 0 else 0
    # last index where the cumulative probability is still below 1-dp
    below = np.where(cumulative < 1. - dp)[0]
    index_hi = below[-1] if len(below) > 0 else -1
    return index_lo, index_hi
def _sequentialize_width_sequence(minimal_widths, min_width):
    """Turn a list of required tree width into an ordered sequence.

    Parameters
    ------------
    minimal_widths: list of (Llo, Lhi, width)
        Defines the required width between Llo and Lhi.
    min_width: int
        Minimum width everywhere.

    Returns
    ---------
    Lsequence: list of (L, width)
        A sequence of L points and the expected tree width at and above it.
    """
    # collect all interval edges, padded with -inf/+inf sentinels
    Lpoints = np.unique(_listify(
        [-np.inf], [L for L, _, _ in minimal_widths],
        [L for _, L, _ in minimal_widths], [np.inf]))
    widths = np.ones(len(Lpoints)) * min_width
    for Llo, Lhi, width in minimal_widths:
        # all Lpoints within that range should be maximized to width
        # negated logical_or (rather than logical_and of >=, <=)
        # allows segments to specify -inf..L ranges
        mask = ~np.logical_or(Lpoints < Llo, Lpoints > Lhi)
        widths[mask] = np.where(widths[mask] < width, width, widths[mask])
    # the width has to monotonically increase to the maximum from both sides
    # so we fill up any intermediate dips
    max_width = widths.max()
    mid = np.where(widths == max_width)[0][0]
    # running-maximum sweep from the left up to the peak ...
    widest = 0
    for i in range(mid):
        widest = widths[i] = max(widest, widths[i])
    # ... and from the right down to the peak
    widest = 0
    for i in range(len(widths) - 1, mid, -1):
        widest = widths[i] = max(widest, widths[i])
    return list(zip(Lpoints, widths))
def _explore_iterator_batch(explorer, pop, x_dim, num_params, pointpile, batchsize=1):
    """Walk a stored tree breadth-first, yielding batches of node visits.

    For each node visited by *explorer* (a BreadthFirstIterator),
    asks ``pop(Lmin)`` for a stored point row replacing that dead point;
    matched children are attached to the tree via *pointpile*.

    Yields lists of up to *batchsize* tuples
    ``(Lmin, active_values, children)``, where ``children`` is a list of
    ``(u, v, logl)`` entries (empty if no stored point matched).
    """
    batch = []
    while True:
        next_node = explorer.next_node()
        if next_node is None:
            break
        rootid, node, (_, active_rootids, active_values, active_node_ids) = next_node
        Lmin = node.value
        children = []
        _, row = pop(Lmin)
        if row is not None:
            # decode stored row: columns 0-2 are Lmin, logl and a
            # quality flag (unused here), then u and v vectors
            logl = row[1]
            u = row[3:3 + x_dim]
            v = row[3 + x_dim:3 + x_dim + num_params]
            assert u.shape == (x_dim,)
            assert v.shape == (num_params,)
            assert logl > Lmin
            children.append((u, v, logl))
            child = pointpile.make_node(logl, u, v)
            node.children.append(child)
        batch.append((Lmin, active_values.copy(), children))
        if len(batch) >= batchsize:
            yield batch
            batch = []
        explorer.expand_children_of(rootid, node)
    # flush any remaining partial batch
    if len(batch) > 0:
        yield batch
def resume_from_similar_file(
    log_dir, x_dim, loglikelihood, transform,
    max_tau=0, verbose=False, ndraw=400
):
    """
    Change a stored UltraNest run to a modified loglikelihood/transform.

    Replays the stored run point-by-point, re-evaluating the new
    likelihood, and keeps copying as long as the live point ordering
    under the new likelihood stays similar to the stored one.

    Parameters
    ----------
    log_dir: str
        Folder containing results
    x_dim: int
        number of dimensions
    loglikelihood: function
        new likelihood function
    transform: function
        new transform function
    verbose: bool
        show progress
    ndraw: int
        set to >1 if functions can take advantage of vectorized computations
    max_tau: float
        Allowed dissimilarity in the live point ordering, quantified as
        normalised Kendall tau distance.
        max_tau=0 is the very conservative choice of stopping the warm start
        when the live point order differs.
        Near 1 are completely different live point orderings.
        Values in between permit mild disorder.

    Returns
    ----------
    sequence: dict
        contains arrays storing for each iteration estimates of:

        * logz: log evidence estimate
        * logzerr: log evidence uncertainty estimate
        * logvol: log volume estimate
        * samples_n: number of live points
        * logwt: log weight
        * logl: log likelihood

    final: dict
        same as ReactiveNestedSampler.results and
        ReactiveNestedSampler.run return values
    """
    import h5py
    filepath = os.path.join(log_dir, 'results', 'points.hdf5')
    filepath2 = os.path.join(log_dir, 'results', 'points.hdf5.new')
    fileobj = h5py.File(filepath, 'r')
    _, ncols = fileobj['points'].shape
    # columns: [Lmin, logl, quality] + u vector + v vector
    num_params = ncols - 3 - x_dim
    points = fileobj['points'][:]
    fileobj.close()
    del fileobj

    pointstore2 = HDF5PointStore(filepath2, ncols, mode='w')
    stack = list(enumerate(points))

    pointpile = PointPile(x_dim, num_params)
    pointpile2 = PointPile(x_dim, num_params)

    def pop(Lmin):
        """Find matching sample from points file."""
        # look forward to see if there is an exact match
        # if we do not use the exact matches
        # this causes a shift in the loglikelihoods
        for i, (idx, next_row) in enumerate(stack):
            row_Lmin = next_row[0]
            L = next_row[1]
            if row_Lmin <= Lmin and L > Lmin:
                idx, row = stack.pop(i)
                return idx, row
        return None, None

    roots = []
    roots2 = []
    initial_points_u = []
    initial_points_v = []
    initial_points_logl = []
    # collect the initial live points (those with Lmin = -inf)
    while True:
        _, row = pop(-np.inf)
        if row is None:
            break
        logl = row[1]
        u = row[3:3 + x_dim]
        v = row[3 + x_dim:3 + x_dim + num_params]
        initial_points_u.append(u)
        initial_points_v.append(v)
        initial_points_logl.append(logl)

    # the transform must be unchanged, otherwise the stored u-points
    # do not correspond to the same physical parameters
    v2 = transform(np.array(initial_points_u, ndmin=2, dtype=float))
    assert np.allclose(v2, initial_points_v), 'transform inconsistent, cannot resume'
    logls_new = loglikelihood(v2)
    # build two parallel trees: old likelihoods and new likelihoods
    for u, v, logl, logl_new in zip(initial_points_u, initial_points_v, initial_points_logl, logls_new):
        roots.append(pointpile.make_node(logl, u, v))
        roots2.append(pointpile2.make_node(logl_new, u, v))
        pointstore2.add(_listify([-np.inf, logl_new, 0.0], u, v), 1)

    batchsize = ndraw
    explorer = BreadthFirstIterator(roots)
    explorer2 = BreadthFirstIterator(roots2)
    main_iterator2 = SingleCounter()
    main_iterator2.Lmax = logls_new.max()

    good_state = True
    indices1, indices2 = np.meshgrid(np.arange(len(logls_new)), np.arange(len(logls_new)))
    last_good_like = -1e300
    last_good_state = 0
    epsilon = 1 + 1e-6

    niter = 0
    # replay the stored run batch-wise, mirroring it with new likelihoods
    for batch in _explore_iterator_batch(explorer, pop, x_dim, num_params, pointpile, batchsize=batchsize):
        assert len(batch) > 0
        batch_u = np.array([u for _, _, children in batch for u, _, _ in children], ndmin=2, dtype=float)
        if batch_u.size > 0:
            assert batch_u.shape[1] == x_dim, batch_u.shape
            batch_v = np.array([v for _, _, children in batch for _, v, _ in children], ndmin=2, dtype=float)
            v2 = transform(batch_u)
            assert batch_v.shape[1] == num_params, batch_v.shape
            assert np.allclose(v2, batch_v), 'transform inconsistent, cannot resume'
            logls_new = loglikelihood(batch_v)
        else:
            # no new points
            logls_new = []

        j = 0
        for Lmin, active_values, children in batch:
            next_node2 = explorer2.next_node()
            rootid2, node2, (active_nodes2, _, active_values2, _) = next_node2
            Lmin2 = float(node2.value)
            # in the tails of distributions it can happen that two points are out of order
            # but that may not be very important
            # in the interest of practicality, we allow this and only stop the
            # warmstart copying when some bulk of points differ.
            # in any case, warmstart should not be considered safe, but help iterating
            # and a final clean run is needed to finalise the results.
            if len(active_values) != len(active_values2):
                if verbose == 2:
                    print("stopping, number of live points differ (%d vs %d)" % (len(active_values), len(active_values2)))
                good_state = False
                break
            if len(active_values) != len(indices1):
                indices1, indices2 = np.meshgrid(np.arange(len(active_values)), np.arange(len(active_values2)))

            # compare live point orderings of old and new likelihoods
            tau = normalised_kendall_tau_distance(active_values, active_values2, indices1, indices2)
            order_consistent = tau <= max_tau
            # NOTE(review): the second len() test repeats active_values;
            # presumably len(active_values2) was intended -- confirm
            if order_consistent and len(active_values) > 10 and len(active_values) > 10:
                good_state = True
            elif not order_consistent:
                good_state = False
            else:
                # maintain state
                pass
            if verbose == 2:
                print(niter, tau)
            if good_state:
                last_good_like = Lmin2
                last_good_state = niter
            else:
                # interpolate a increasing likelihood
                # in the hope that the step size is smaller than
                # the likelihood increase
                Lmin2 = last_good_like
                node2.value = Lmin2
                last_good_like = last_good_like * epsilon
                break

            for u, v, logl_old in children:
                logl_new = logls_new[j]
                j += 1
                child2 = pointpile2.make_node(logl_new, u, v)
                node2.children.append(child2)
                if logl_new > Lmin2:
                    pointstore2.add(_listify([Lmin2, logl_new, 0.0], u, v), 1)
                else:
                    if verbose == 2:
                        print("cannot use new point because it would decrease likelihood (%.1f->%.1f)" % (Lmin2, logl_new))

            main_iterator2.passing_node(node2, active_nodes2)
            niter += 1
            if verbose:
                sys.stderr.write("%d...\r" % niter)
            explorer2.expand_children_of(rootid2, node2)
        if not good_state:
            break
        if main_iterator2.logZremain < main_iterator2.logZ and not good_state:
            # stop as the results diverged already
            break

    if verbose:
        sys.stderr.write("%d/%d iterations salvaged (%.2f%%).\n" % (
            last_good_state + 1, len(points), (last_good_state + 1) * 100. / len(points)))

    # delete the entries after the last consistent state and rewrite
    # the point store with only the salvaged prefix
    mask = pointstore2.fileobj['points'][:,0] <= last_good_like
    points2 = pointstore2.fileobj['points'][:][mask,:]
    del pointstore2.fileobj['points']
    pointstore2.fileobj.create_dataset(
        'points', dtype=np.float64,
        shape=(0, pointstore2.ncols), maxshape=(None, pointstore2.ncols))
    pointstore2.fileobj['points'].resize(len(points2), axis=0)
    pointstore2.fileobj['points'][:] = points2
    pointstore2.close()
    del pointstore2
    # atomically replace the old store with the new one
    os.replace(filepath2, filepath)
def _update_region_bootstrap(region, nbootstraps, minvol=0., comm=None, mpi_size=1):
    """Update *region* with *nbootstraps* rounds of excluding points randomly.

    Stiffen ellipsoid size using the minimum volume *minvol*.

    If the mpi communicator *comm* is not None, use MPI to distribute
    the bootstraps over the *mpi_size* processes.

    Parameters
    -----------
    region: MLFriends object
        region to update in place (sets ``maxradiussq`` and ``enlarge``)
    nbootstraps: int
        total number of bootstrap rounds (must be positive)
    minvol: float
        minimum volume to enforce
    comm: MPI communicator or None
        if given, bootstrap results are gathered and broadcast over it
    mpi_size: int
        number of MPI processes sharing the bootstraps

    Returns
    --------
    r: float
        maximum squared radius
    f: float
        enlargement factor

    Raises
    -------
    numpy.linalg.LinAlgError
        if the enlargement could not be computed (on any process).
    """
    assert nbootstraps > 0, nbootstraps
    # catch potential errors so MPI syncing still works
    e = None
    try:
        r, f = region.compute_enlargement(
            minvol=minvol,
            nbootstraps=max(1, nbootstraps // mpi_size))
    except np.linalg.LinAlgError as e1:
        e = e1
        r, f = np.nan, np.nan

    if comm is not None:
        recv_maxradii = comm.gather(r, root=0)
        recv_maxradii = comm.bcast(recv_maxradii, root=0)
        # if there are very many processors, we may have more
        # rounds than requested, leading to slowdown
        # thus we throw away the extra ones
        r = np.max(recv_maxradii[:nbootstraps])

        recv_enlarge = comm.gather(f, root=0)
        recv_enlarge = comm.bcast(recv_enlarge, root=0)
        f = np.max(recv_enlarge[:nbootstraps])

    # bug fix: previously this tested np.isfinite(r) twice, so a
    # non-finite enlargement factor f alone never triggered the re-raise
    if not np.isfinite(r) and not np.isfinite(f):
        # all processes failed; reraise the original error if available
        if e is None:
            raise np.linalg.LinAlgError("compute_enlargement failed")
        else:
            raise e

    region.maxradiussq = r
    region.enlarge = f
    return r, f
class NestedSampler(object):
    """Simple Nested sampler for reference."""

    def __init__(self,
                 param_names,
                 loglike,
                 transform=None,
                 derived_param_names=[],
                 resume='subfolder',
                 run_num=None,
                 log_dir='logs/test',
                 num_live_points=1000,
                 vectorized=False,
                 wrapped_params=[],
                 ):
        """Set up nested sampler.

        Parameters
        -----------
        param_names: list of str, names of the parameters.
            Length gives dimensionality of the sampling problem.
        loglike: function
            log-likelihood function.
            Receives multiple parameter vectors, returns vector of likelihood.
        transform: function
            parameter transform from unit cube to physical parameters.
            Receives multiple cube vectors, returns multiple parameter vectors.
        derived_param_names: list of str
            Additional derived parameters created by transform. (empty by default)
        log_dir: str
            where to store output files
        resume: 'resume', 'overwrite' or 'subfolder'
            if 'overwrite', overwrite previous data.
            if 'subfolder', create a fresh subdirectory in log_dir.
            if 'resume' or True, continue previous run if available.
        wrapped_params: list of bools
            indicating whether this parameter wraps around (circular parameter).
        num_live_points: int
            Number of live points
        vectorized: bool
            If true, loglike and transform function can receive arrays
            of points.
        run_num: int
            unique run number. If None, will be automatically incremented.
        """
        self.paramnames = param_names
        x_dim = len(self.paramnames)
        self.num_live_points = num_live_points
        self.sampler = 'nested'
        self.x_dim = x_dim
        self.derivedparamnames = derived_param_names
        num_derived = len(self.derivedparamnames)
        self.num_params = x_dim + num_derived
        self.volfactor = vol_prefactor(self.x_dim)
        if wrapped_params is None:
            self.wrapped_axes = []
        else:
            self.wrapped_axes = np.where(wrapped_params)[0]

        # fix: the original `assert resume or resume in (...)` passed for any
        # truthy value; check membership explicitly, like ReactiveNestedSampler
        assert resume in (True, 'overwrite', 'subfolder', 'resume'), \
            "resume should be one of True, 'overwrite', 'subfolder' or 'resume'"
        append_run_num = resume == 'subfolder'
        # fix: only 'resume'/True mean resuming. Previously `resume == 'resume' or resume`
        # left 'overwrite'/'subfolder' truthy, so the HDF5 store below was
        # opened in append mode and 'overwrite' never actually overwrote.
        resume = resume in ('resume', True)

        if not vectorized:
            # fix: do not wrap a missing transform; vectorize(None) is not None,
            # so the identity default below would never trigger
            # (consistent with ReactiveNestedSampler)
            if transform is not None:
                transform = vectorize(transform)
            loglike = vectorize(loglike)

        if transform is None:
            self.transform = lambda x: x
        else:
            self.transform = transform

        # sanity-check the user-provided functions with two random points,
        # to fail early with meaningful errors
        u = np.random.uniform(size=(2, self.x_dim))
        p = self.transform(u)
        assert p.shape == (2, self.num_params), ("Error in transform function: returned shape is %s, expected %s" % (p.shape, (2, self.num_params)))
        logl = loglike(p)
        assert np.logical_and(u > 0, u < 1).all(), ("Error in transform function: u was modified!")
        # fix: the message reported the transform shape instead of the
        # loglikelihood shape
        assert np.shape(logl) == (2,), ("Error in loglikelihood function: returned shape is %s, expected %s" % (np.shape(logl), (2,)))
        assert np.isfinite(logl).all(), ("Error in loglikelihood function: returned non-finite number: %s for input u=%s p=%s" % (logl, u, p))

        def safe_loglike(x):
            """Call likelihood function safely wrapped to avoid non-finite values."""
            x = np.asarray(x)
            logl = loglike(x)
            assert np.isfinite(logl).all(), (
                'User-provided loglikelihood returned non-finite value:',
                logl[~np.isfinite(logl)][0],
                "for input value:",
                x[~np.isfinite(logl),:][0,:])
            return logl

        self.loglike = safe_loglike

        # set up MPI if available and more than one process is running
        self.use_mpi = False
        try:
            from mpi4py import MPI
            self.comm = MPI.COMM_WORLD
            self.mpi_size = self.comm.Get_size()
            self.mpi_rank = self.comm.Get_rank()
            if self.mpi_size > 1:
                self.use_mpi = True
        except Exception:
            self.mpi_size = 1
            self.mpi_rank = 0

        # only the root rank logs and writes to disk
        self.log = self.mpi_rank == 0
        self.log_to_disk = self.log and log_dir is not None

        if self.log and log_dir is not None:
            self.logs = make_run_dir(log_dir, run_num, append_run_num=append_run_num)
            log_dir = self.logs['run_dir']
        else:
            log_dir = None

        self.logger = create_logger(__name__ + '.' + type(self).__name__, log_dir=log_dir)

        if self.log:
            self.logger.info('Num live points [%d]', self.num_live_points)

        if self.log_to_disk:
            # self.pointstore = TextPointStore(os.path.join(self.logs['results'], 'points.tsv'), 2 + self.x_dim + self.num_params)
            self.pointstore = HDF5PointStore(
                os.path.join(self.logs['results'], 'points.hdf5'),
                3 + self.x_dim + self.num_params, mode='a' if resume else 'w')
        else:
            self.pointstore = NullPointStore(3 + self.x_dim + self.num_params)

    def run(
            self,
            update_interval_iter=None,
            update_interval_ncall=None,
            log_interval=None,
            dlogz=0.001,
            max_iters=None):
        """Explore parameter space.

        Parameters
        ----------
        update_interval_iter:
            Update region after this many iterations.
        update_interval_ncall:
            Update region after update_interval_ncall likelihood calls.
        log_interval:
            Update stdout status line every log_interval iterations
        dlogz:
            Target evidence uncertainty.
        max_iters:
            maximum number of integration iterations.
        """
        if update_interval_ncall is None:
            update_interval_ncall = max(1, round(self.num_live_points))

        if update_interval_iter is None:
            if update_interval_ncall == 0:
                update_interval_iter = max(1, round(self.num_live_points))
            else:
                update_interval_iter = max(1, round(0.2 * self.num_live_points))

        if log_interval is None:
            log_interval = max(1, round(0.2 * self.num_live_points))
        else:
            log_interval = round(log_interval)
            if log_interval < 1:
                raise ValueError("log_interval must be >= 1")

        viz_callback = get_default_viz_callback()

        prev_u = []
        prev_v = []
        prev_logl = []
        if self.log:
            # try to resume: pop stored prior samples (Lmin == -inf) first
            self.logger.info('Resuming...')
            for i in range(self.num_live_points):
                _, row = self.pointstore.pop(-np.inf)
                if row is not None:
                    prev_logl.append(row[1])
                    prev_u.append(row[3:3 + self.x_dim])
                    prev_v.append(row[3 + self.x_dim:3 + self.x_dim + self.num_params])
                else:
                    break

            prev_u = np.array(prev_u)
            prev_v = np.array(prev_v)
            prev_logl = np.array(prev_logl)
            num_live_points_missing = self.num_live_points - len(prev_logl)
        else:
            num_live_points_missing = -1

        if self.use_mpi:
            num_live_points_missing = self.comm.bcast(num_live_points_missing, root=0)
            prev_u = self.comm.bcast(prev_u, root=0)
            prev_v = self.comm.bcast(prev_v, root=0)
            prev_logl = self.comm.bcast(prev_logl, root=0)

        use_point_stack = True
        assert num_live_points_missing >= 0
        if num_live_points_missing > 0:
            # draw the missing live points uniformly from the unit cube;
            # root draws, everyone receives the same points
            if self.use_mpi:
                if self.mpi_rank == 0:
                    active_u = np.random.uniform(size=(num_live_points_missing, self.x_dim))
                else:
                    active_u = np.empty((num_live_points_missing, self.x_dim), dtype=np.float64)
                active_u = self.comm.bcast(active_u, root=0)
            else:
                active_u = np.random.uniform(size=(num_live_points_missing, self.x_dim))
            active_v = self.transform(active_u)

            if self.use_mpi:
                # distribute the likelihood evaluations round-robin over ranks
                if self.mpi_rank == 0:
                    chunks = [[] for _ in range(self.mpi_size)]
                    for i, chunk in enumerate(active_v):
                        chunks[i % self.mpi_size].append(chunk)
                else:
                    chunks = None
                data = self.comm.scatter(chunks, root=0)
                active_logl = self.loglike(data)
                recv_active_logl = self.comm.gather(active_logl, root=0)
                recv_active_logl = self.comm.bcast(recv_active_logl, root=0)
                active_logl = np.concatenate(recv_active_logl, axis=0)
            else:
                active_logl = self.loglike(active_v)

            if self.log_to_disk:
                for i in range(num_live_points_missing):
                    self.pointstore.add(
                        _listify([-np.inf, active_logl[i], 0.], active_u[i,:], active_v[i,:]),
                        num_live_points_missing)

            if len(prev_u) > 0:
                active_u = np.concatenate((prev_u, active_u))
                active_v = np.concatenate((prev_v, active_v))
                active_logl = np.concatenate((prev_logl, active_logl))
            assert active_u.shape == (self.num_live_points, self.x_dim)
            assert active_v.shape == (self.num_live_points, self.num_params)
            assert active_logl.shape == (self.num_live_points,)
        else:
            active_u = prev_u
            active_v = prev_v
            active_logl = prev_logl

        saved_u = []
        saved_v = []  # Stored points for posterior results
        saved_logl = []
        saved_logwt = []
        h = 0.0  # Information, initially 0.
        logz = -1e300  # ln(Evidence Z), initially Z=0
        logvol = log(1.0 - exp(-1.0 / self.num_live_points))
        logz_remain = np.max(active_logl)
        fraction_remain = 1.0
        ncall = num_live_points_missing  # number of calls we already made
        first_time = True
        if self.x_dim > 1:
            transformLayer = AffineLayer(wrapped_dims=self.wrapped_axes)
        else:
            transformLayer = ScalingLayer(wrapped_dims=self.wrapped_axes)
        transformLayer.optimize(active_u, active_u)
        region = MLFriends(active_u, transformLayer)

        if self.log:
            self.logger.info('Starting sampling ...')
        ib = 0
        samples = []
        ndraw = 100
        it = 0
        next_update_interval_ncall = -1
        next_update_interval_iter = -1

        while max_iters is None or it < max_iters:
            # Worst object in collection and its weight (= volume * likelihood)
            worst = np.argmin(active_logl)
            logwt = logvol + active_logl[worst]

            # Update evidence Z and information h.
            logz_new = np.logaddexp(logz, logwt)
            h = (exp(logwt - logz_new) * active_logl[worst] + exp(logz - logz_new) * (h + logz) - logz_new)
            logz = logz_new

            # Add worst object to samples.
            saved_u.append(np.array(active_u[worst]))
            saved_v.append(np.array(active_v[worst]))
            saved_logwt.append(logwt)
            saved_logl.append(active_logl[worst])

            # expected_vol = np.exp(-it / self.num_live_points)

            # The new likelihood constraint is that of the worst object.
            loglstar = active_logl[worst]

            if ncall > next_update_interval_ncall and it > next_update_interval_iter:
                if first_time:
                    nextregion = region
                else:
                    # rebuild space
                    # print()
                    # print("rebuilding space...", active_u.shape, active_u)
                    nextTransformLayer = transformLayer.create_new(active_u, region.maxradiussq)
                    nextregion = MLFriends(active_u, nextTransformLayer)

                # print("computing maxradius...")
                r, f = _update_region_bootstrap(nextregion, 30, 0., self.comm if self.use_mpi else None, self.mpi_size)
                nextregion.maxradiussq = r
                nextregion.enlarge = f

                # force shrinkage of volume
                # this is to avoid re-connection of dying out nodes
                if nextregion.estimate_volume() < region.estimate_volume():
                    region = nextregion
                    transformLayer = region.transformLayer
                region.create_ellipsoid(minvol=exp(-it / self.num_live_points) * self.volfactor)

                if self.log:
                    viz_callback(
                        points=dict(u=active_u, p=active_v, logl=active_logl),
                        info=dict(
                            it=it, ncall=ncall, logz=logz, logz_remain=logz_remain,
                            paramnames=self.paramnames + self.derivedparamnames,
                            logvol=logvol),
                        region=region, transformLayer=transformLayer)
                    self.pointstore.flush()

                next_update_interval_ncall = ncall + update_interval_ncall
                next_update_interval_iter = it + update_interval_iter
                first_time = False

            while True:
                if ib >= len(samples) and use_point_stack:
                    # root checks the point store
                    next_point = np.zeros((1, 3 + self.x_dim + self.num_params))

                    if self.log_to_disk:
                        _, stored_point = self.pointstore.pop(loglstar)
                        if stored_point is not None:
                            next_point[0,:] = stored_point
                        else:
                            next_point[0,:] = -np.inf
                        use_point_stack = not self.pointstore.stack_empty

                    if self.use_mpi:  # and informs everyone
                        use_point_stack = self.comm.bcast(use_point_stack, root=0)
                        next_point = self.comm.bcast(next_point, root=0)
                    # assert not use_point_stack

                    # unpack
                    likes = next_point[:,1]
                    samples = next_point[:,3:3 + self.x_dim]
                    samplesv = next_point[:,3 + self.x_dim:3 + self.x_dim + self.num_params]
                    # skip if we already know it is not useful
                    ib = 0 if np.isfinite(likes[0]) else 1

                while ib >= len(samples):
                    # get new samples
                    ib = 0

                    nc = 0
                    u = region.sample(nsamples=ndraw)
                    nu = u.shape[0]
                    if nu == 0:
                        # fix: keep the derived-parameter width (num_params, not
                        # x_dim) so the MPI concatenation below cannot fail
                        v = np.empty((0, self.num_params))
                        logl = np.empty((0,))
                    else:
                        v = self.transform(u)
                        logl = self.loglike(v)
                    nc += nu
                    accepted = logl > loglstar
                    u = u[accepted,:]
                    v = v[accepted,:]
                    logl = logl[accepted]
                    # father = father[accepted]

                    # collect results from all MPI members
                    if self.use_mpi:
                        recv_samples = self.comm.gather(u, root=0)
                        recv_samplesv = self.comm.gather(v, root=0)
                        recv_likes = self.comm.gather(logl, root=0)
                        recv_nc = self.comm.gather(nc, root=0)
                        recv_samples = self.comm.bcast(recv_samples, root=0)
                        recv_samplesv = self.comm.bcast(recv_samplesv, root=0)
                        recv_likes = self.comm.bcast(recv_likes, root=0)
                        recv_nc = self.comm.bcast(recv_nc, root=0)
                        samples = np.concatenate(recv_samples, axis=0)
                        samplesv = np.concatenate(recv_samplesv, axis=0)
                        likes = np.concatenate(recv_likes, axis=0)
                        ncall += sum(recv_nc)
                    else:
                        samples = np.array(u)
                        samplesv = np.array(v)
                        likes = np.array(logl)
                        ncall += nc
                    if self.log:
                        for ui, vi, logli in zip(samples, samplesv, likes):
                            self.pointstore.add(
                                _listify([loglstar, logli, 0.0], ui, vi),
                                ncall)

                if likes[ib] > loglstar:
                    active_u[worst] = samples[ib, :]
                    active_v[worst] = samplesv[ib,:]
                    active_logl[worst] = likes[ib]

                    # if we keep the region informed about the new live points
                    # then the region follows the live points even if maxradius is not updated
                    region.u[worst,:] = active_u[worst]
                    region.unormed[worst,:] = region.transformLayer.transform(region.u[worst,:])

                    # if we track the cluster assignment, then in the next round
                    # the ids with the same members are likely to have the same id
                    # this is imperfect
                    # transformLayer.clusterids[worst] = transformLayer.clusterids[father[ib]]
                    # so we just mark the replaced ones as "unassigned"
                    transformLayer.clusterids[worst] = 0
                    ib = ib + 1
                    break
                else:
                    ib = ib + 1

            # Shrink interval
            logvol -= 1.0 / self.num_live_points
            logz_remain = np.max(active_logl) - it / self.num_live_points
            fraction_remain = np.logaddexp(logz, logz_remain) - logz

            if it % log_interval == 0 and self.log:
                # nicelogger(self.paramnames, active_u, active_v, active_logl, it, ncall, logz, logz_remain, region=region)
                sys.stdout.write('Z=%.1g+%.1g | Like=%.1g..%.1g | it/evals=%d/%d eff=%.4f%% \r' % (
                    logz, logz_remain, loglstar, np.max(active_logl), it,
                    ncall, np.inf if ncall == 0 else it * 100 / ncall))
                sys.stdout.flush()

            # if efficiency becomes low, bulk-process larger arrays
            ndraw = max(128, min(16384, round((ncall + 1) / (it + 1) / self.mpi_size)))

            # Stopping criterion
            if fraction_remain < dlogz:
                break
            it = it + 1

        # integrate the remaining live points over the final volume
        logvol = -len(saved_v) / self.num_live_points - log(self.num_live_points)
        for i in range(self.num_live_points):
            logwt = logvol + active_logl[i]
            logz_new = np.logaddexp(logz, logwt)
            h = (exp(logwt - logz_new) * active_logl[i] + exp(logz - logz_new) * (h + logz) - logz_new)
            logz = logz_new
            saved_u.append(np.array(active_u[i]))
            saved_v.append(np.array(active_v[i]))
            saved_logwt.append(logwt)
            saved_logl.append(active_logl[i])

        saved_u = np.array(saved_u)
        saved_v = np.array(saved_v)
        saved_wt = exp(np.array(saved_logwt) - logz)
        saved_logl = np.array(saved_logl)
        logzerr = np.sqrt(h / self.num_live_points)

        if self.log_to_disk:
            with open(os.path.join(self.logs['results'], 'final.csv'), 'w') as f:
                writer = csv.writer(f)
                writer.writerow(['niter', 'ncall', 'logz', 'logzerr', 'h'])
                writer.writerow([it + 1, ncall, logz, logzerr, h])
            self.pointstore.close()

        if not self.use_mpi or self.mpi_rank == 0:
            print()
            print("niter: {:d}\n ncall: {:d}\n nsamples: {:d}\n logz: {:6.3f} +/- {:6.3f}\n h: {:6.3f}"
                  .format(it + 1, ncall, len(saved_v), logz, logzerr, h))

        self.results = dict(
            samples=resample_equal(saved_v, saved_wt / saved_wt.sum()),
            ncall=ncall, niter=it, logz=logz, logzerr=logzerr,
            weighted_samples=dict(
                upoints=saved_u, points=saved_v, weights=saved_wt,
                logweights=saved_logwt, logl=saved_logl),
        )

        return self.results

    def print_results(self):
        """Give summary of marginal likelihood and parameters."""
        print()
        print('logZ = %(logz).3f +- %(logzerr).3f' % self.results)
        print()
        for i, p in enumerate(self.paramnames + self.derivedparamnames):
            v = self.results['samples'][:,i]
            sigma = v.std()
            mean = v.mean()
            # choose the number of digits from the parameter spread
            # (fix: previously the digit count clobbered the loop index i)
            if sigma == 0:
                ndigits = 3
            else:
                ndigits = max(0, int(-np.floor(np.log10(sigma))) + 1)
            fmt = '%%.%df' % ndigits
            fmts = '\t'.join([' %-20s' + fmt + " +- " + fmt])
            print(fmts % (p, mean, sigma))

    def plot(self):
        """Make corner plot."""
        if self.log_to_disk:
            import matplotlib.pyplot as plt
            import corner
            data = np.array(self.results['weighted_samples']['points'])
            weights = np.array(self.results['weighted_samples']['weights'])
            # drop points with negligible cumulative weight
            cumsumweights = np.cumsum(weights)
            mask = cumsumweights > 1e-4
            corner.corner(
                data[mask,:], weights=weights[mask],
                labels=self.paramnames + self.derivedparamnames,
                show_titles=True)
            plt.savefig(os.path.join(self.logs['plots'], 'corner.pdf'), bbox_inches='tight')
            plt.close()
def warmstart_from_similar_file(
    usample_filename,
    param_names,
    loglike,
    transform,
    vectorized=False,
    min_num_samples=50
):
    """Warmstart from a previous, similar run.

    Usage::

        aux_paramnames, aux_log_likelihood, aux_prior_transform, vectorized = warmstart_from_similar_file(
            'model1/chains/weighted_post_untransformed.txt', parameters, log_likelihood_with_background, prior_transform)

        aux_sampler = ReactiveNestedSampler(aux_paramnames, aux_log_likelihood, transform=aux_prior_transform,vectorized=vectorized)
        aux_sampler.run()
        posterior_samples = aux_sampler.results['samples'][:,-1]

    See :py:func:`ultranest.hotstart.get_auxiliary_contbox_parameterization`
    for more information.

    The remaining parameters have the same meaning as in :py:class:`ReactiveNestedSampler`.

    Parameters
    ------------
    usample_filename: str
        'directory/chains/weighted_post_untransformed.txt'
        contains posteriors in u-space (untransformed) of a previous run.
        Columns are weight, logl, param1, param2, ...
    min_num_samples: int
        minimum number of samples in the usample_filename file required.
        Too few samples will give a poor approximation.

    Other Parameters
    -----------------
    param_names: list
    loglike: function
    transform: function
    vectorized: bool

    Returns
    ---------
    aux_param_names: list
        new parameter list
    aux_loglikelihood: function
        new loglikelihood function
    aux_transform: function
        new prior transform function
    vectorized: bool
        whether the new functions are vectorized
    """
    # read the column-name header and the weighted posterior table
    try:
        with open(usample_filename) as fin:
            stored_param_names = fin.readline().lstrip('#').strip().split()
            stored_table = np.loadtxt(fin)
    except IOError:
        # fall back to a cold start: hand back the user functions untouched
        warnings.warn('not hot-resuming, could not load file "%s"' % usample_filename)
        return param_names, loglike, transform, vectorized

    # importance weights: stored weight times likelihood (normalised by its maximum)
    stored_logl = stored_table[:,1]
    raw_weights = stored_table[:,0] * np.exp(stored_logl - stored_logl.max())
    keep = raw_weights > 0
    weights = raw_weights[keep]
    weights /= weights.sum()
    upoints = stored_table[keep,2:]
    del stored_table

    nsamples = len(upoints)
    if nsamples < min_num_samples:
        raise ValueError('file "%s" has too few samples (%d) to hot-resume' % (usample_filename, nsamples))

    # refuse to resume when the parameter columns do not match the new problem
    if stored_param_names != ['weight', 'logl'] + param_names:
        raise ValueError('file "%s" has parameters %s, expected %s, cannot hot-resume.' % (usample_filename, stored_param_names, param_names))

    return get_auxiliary_contbox_parameterization(
        param_names, loglike=loglike, transform=transform,
        vectorized=vectorized,
        upoints=upoints,
        uweights=weights,
    )
class ReactiveNestedSampler(object):
"""Nested sampler with reactive exploration strategy.
Storage & resume capable, optionally MPI parallelised.
"""
    def __init__(self,
                 param_names,
                 loglike,
                 transform=None,
                 derived_param_names=[],
                 wrapped_params=None,
                 resume='subfolder',
                 run_num=None,
                 log_dir=None,
                 num_test_samples=2,
                 draw_multiple=True,
                 num_bootstraps=30,
                 vectorized=False,
                 ndraw_min=128,
                 ndraw_max=65536,
                 storage_backend='hdf5',
                 warmstart_max_tau=-1,
                 ):
        """Initialise nested sampler.

        Parameters
        -----------
        param_names: list of str, names of the parameters.
            Length gives dimensionality of the sampling problem.
        loglike: function
            log-likelihood function.
            Receives multiple parameter vectors, returns vector of likelihood.
        transform: function
            parameter transform from unit cube to physical parameters.
            Receives multiple cube vectors, returns multiple parameter vectors.
        derived_param_names: list of str
            Additional derived parameters created by transform. (empty by default)
        log_dir: str
            where to store output files
        resume: 'resume', 'resume-similar', 'overwrite' or 'subfolder'
            if 'overwrite', overwrite previous data.
            if 'subfolder', create a fresh subdirectory in log_dir.
            if 'resume' or True, continue previous run if available.
            Only works when dimensionality, transform or likelihood are consistent.
            if 'resume-similar', continue previous run if available.
            Only works when dimensionality and transform are consistent.
            If a likelihood difference is detected, the existing likelihoods
            are updated until the live point order differs.
            Otherwise, behaves like resume.
        run_num: int or None
            If resume=='subfolder', this is the subfolder number.
            Automatically increments if set to None.
        wrapped_params: list of bools
            indicating whether this parameter wraps around (circular parameter).
        num_test_samples: int
            test transform and likelihood with this number of
            random points for errors first. Useful to catch bugs.
        vectorized: bool
            If true, loglike and transform function can receive arrays
            of points.
        draw_multiple: bool
            If efficiency goes down, dynamically draw more points
            from the region between `ndraw_min` and `ndraw_max`.
            If set to False, few points are sampled at once.
        ndraw_min: int
            Minimum number of points to simultaneously propose.
            Increase this if your likelihood makes vectorization very cheap.
        ndraw_max: int
            Maximum number of points to simultaneously propose.
            Increase this if your likelihood makes vectorization very cheap.
            Memory allocation may be slow for extremely high values.
        num_bootstraps: int
            number of logZ estimators and MLFriends region
            bootstrap rounds.
        storage_backend: str or class
            Class to use for storing the evaluated points (see ultranest.store)
            'hdf5' is strongly recommended. 'tsv' and 'csv' are also possible.
        warmstart_max_tau: float
            Maximum disorder to accept when resume='resume-similar';
            Live points are reused as long as the live point order
            is below this normalised Kendall tau distance.
            Values from 0 (highly conservative) to 1 (extremely negligent).
        """
        self.paramnames = param_names
        x_dim = len(self.paramnames)
        self.sampler = 'reactive-nested'
        self.x_dim = x_dim
        # 1-d problems cannot use an affine (rotation) layer; fall back to scaling
        self.transform_layer_class = AffineLayer if x_dim > 1 else ScalingLayer
        self.derivedparamnames = derived_param_names
        self.num_bootstraps = int(num_bootstraps)
        num_derived = len(self.derivedparamnames)
        self.num_params = x_dim + num_derived
        if wrapped_params is None:
            self.wrapped_axes = []
        else:
            assert len(wrapped_params) == self.x_dim, ("wrapped_params has the number of entries:", wrapped_params, ", expected", self.x_dim)
            # indices of the circular parameters
            self.wrapped_axes = np.where(wrapped_params)[0]

        # set up MPI if mpi4py is importable and more than one process runs
        self.use_mpi = False
        try:
            from mpi4py import MPI
            self.comm = MPI.COMM_WORLD
            self.mpi_size = self.comm.Get_size()
            self.mpi_rank = self.comm.Get_rank()
            if self.mpi_size > 1:
                self.use_mpi = True
                # give each rank a distinct, deterministic random seed
                self._setup_distributed_seeds()
        except Exception:
            self.mpi_size = 1
            self.mpi_rank = 0

        # only the root rank logs and writes to disk
        self.log = self.mpi_rank == 0
        self.log_to_disk = self.log and log_dir is not None
        self.log_to_pointstore = self.log_to_disk

        assert resume in (True, 'overwrite', 'subfolder', 'resume', 'resume-similar'), \
            "resume should be one of 'overwrite' 'subfolder', 'resume' or 'resume-similar'"
        append_run_num = resume == 'subfolder'
        resume_similar = resume == 'resume-similar'
        # normalise resume to a plain bool: are we continuing a previous run?
        resume = resume in ('resume-similar', 'resume', True)

        if self.log and log_dir is not None:
            self.logs = make_run_dir(log_dir, run_num, append_run_num=append_run_num)
            log_dir = self.logs['run_dir']
        else:
            log_dir = None

        if self.log:
            self.logger = create_logger('ultranest', log_dir=log_dir)
            self.logger.debug('ReactiveNestedSampler: dims=%d+%d, resume=%s, log_dir=%s, backend=%s, vectorized=%s, nbootstraps=%s, ndraw=%s..%s' % (
                x_dim, num_derived, resume, log_dir, storage_backend, vectorized,
                num_bootstraps, ndraw_min, ndraw_max,
            ))
        # root of the exploration tree; live points hang off its children
        self.root = TreeNode(id=-1, value=-np.inf)

        self.pointpile = PointPile(self.x_dim, self.num_params)
        if self.log_to_pointstore:
            # columns: Lmin, L, quality, u-coordinates, physical parameters
            storage_filename = os.path.join(self.logs['results'], 'points.' + storage_backend)
            storage_num_cols = 3 + self.x_dim + self.num_params
            if storage_backend == 'tsv':
                self.pointstore = TextPointStore(storage_filename, storage_num_cols)
                self.pointstore.delimiter = '\n'
            elif storage_backend == 'csv':
                self.pointstore = TextPointStore(storage_filename, storage_num_cols)
                self.pointstore.delimiter = ','
            elif storage_backend == 'hdf5':
                self.pointstore = HDF5PointStore(storage_filename, storage_num_cols, mode='a' if resume else 'w')
            else:
                # use custom backend
                self.pointstore = storage_backend
        else:
            # non-root ranks (or no log_dir): do not persist anything
            self.pointstore = NullPointStore(3 + self.x_dim + self.num_params)
        self.ncall = self.pointstore.ncalls
        self.ncall_region = 0

        if not vectorized:
            # wrap scalar user functions so they accept arrays of points
            if transform is not None:
                transform = vectorize(transform)
            loglike = vectorize(loglike)
            # scalar functions gain nothing from proposing many points at once
            draw_multiple = False

        self.draw_multiple = draw_multiple
        self.ndraw_min = ndraw_min
        self.ndraw_max = ndraw_max
        # a transformed-space region is only useful if a transform exists
        self.build_tregion = transform is not None
        if not self._check_likelihood_function(transform, loglike, num_test_samples):
            # the stored run disagrees with the current likelihood function
            assert self.log_to_disk
            if resume_similar and self.log_to_disk:
                assert storage_backend == 'hdf5', 'resume-similar is only supported for HDF5 files'
                assert 0 <= warmstart_max_tau <= 1, 'warmstart_max_tau parameter needs to be set to a value between 0 and 1'
                # close
                self.pointstore.close()
                del self.pointstore
                # rewrite points file
                if self.log:
                    self.logger.info('trying to salvage points from previous, different run ...')
                    resume_from_similar_file(
                        log_dir, x_dim, loglike, transform,
                        ndraw=ndraw_min if vectorized else 1,
                        max_tau=warmstart_max_tau, verbose=False)
                # reopen the (rewritten) point store
                self.pointstore = HDF5PointStore(
                    os.path.join(self.logs['results'], 'points.hdf5'),
                    3 + self.x_dim + self.num_params, mode='a' if resume else 'w')
            elif resume:
                raise Exception("Cannot resume because loglikelihood function changed, "
                                "unless resume=resume-similar. To start from scratch, delete '%s'." % (log_dir))
        self._set_likelihood_function(transform, loglike, num_test_samples)
        self.stepsampler = None
def _setup_distributed_seeds(self):
if not self.use_mpi:
return
seed = 0
if self.mpi_rank == 0:
seed = np.random.randint(0, 1000000)
seed = self.comm.bcast(seed, root=0)
if self.mpi_rank > 0:
# from http://arxiv.org/abs/1005.4117
seed = int(abs(((seed * 181) * ((self.mpi_rank - 83) * 359)) % 104729))
# print('setting seed:', self.mpi_rank, seed)
np.random.seed(seed)
    def _check_likelihood_function(self, transform, loglike, num_test_samples):
        """Test the `transform` and `loglike`lihood functions.

        `num_test_samples` samples are used to check whether they work and give the correct output.

        returns whether the most recently stored point (if any)
        still returns the same likelihood value.
        """
        # do some checks on the likelihood function
        # this makes debugging easier by failing early with meaningful errors

        # if we are resuming, check that last sample still gives same result
        num_resume_test_samples = 0
        if num_test_samples and not self.pointstore.stack_empty:
            # reserve one of the requested test evaluations for the resume check
            num_resume_test_samples = 1
            num_test_samples -= 1

        if num_test_samples > 0:
            # test with num_test_samples random points
            u = np.random.uniform(size=(num_test_samples, self.x_dim))
            p = transform(u) if transform is not None else u
            assert np.shape(p) == (num_test_samples, self.num_params), (
                "Error in transform function: returned shape is %s, expected %s" % (
                    np.shape(p), (num_test_samples, self.num_params)))
            logl = loglike(p)
            # the transform must not mutate its input in place
            assert np.logical_and(u > 0, u < 1).all(), (
                "Error in transform function: u was modified!")
            assert np.shape(logl) == (num_test_samples,), (
                "Error in loglikelihood function: returned shape is %s, expected %s" % (np.shape(logl), (num_test_samples,)))
            assert np.isfinite(logl).all(), (
                "Error in loglikelihood function: returned non-finite number: %s for input u=%s p=%s" % (logl, u, p))

        if not self.pointstore.stack_empty and num_resume_test_samples > 0:
            # test that last sample gives the same likelihood value
            _, lastrow = self.pointstore.stack[-1]
            assert len(lastrow) == 3 + self.x_dim + self.num_params, (
                "Cannot resume: problem has different dimensionality",
                len(lastrow), (2, self.x_dim, self.num_params))
            lastL = lastrow[1]  # stored loglikelihood value
            lastu = lastrow[3:3 + self.x_dim]  # stored unit-cube coordinates
            u = lastu.reshape((1, -1))
            lastp = lastrow[3 + self.x_dim:3 + self.x_dim + self.num_params]  # stored physical parameters
            if self.log:
                self.logger.debug("Testing resume consistency: %s: u=%s -> p=%s -> L=%s ", lastrow, lastu, lastp, lastL)
            p = transform(u) if transform is not None else u
            if not np.allclose(p.flatten(), lastp) and self.log:
                self.logger.warning(
                    "Trying to resume from previous run, but transform function gives different result: %s gave %s, now %s",
                    lastu, lastp, p.flatten())
            # a changed transform is fatal: stored u-points would be misinterpreted
            assert np.allclose(p.flatten(), lastp), (
                "Cannot resume because transform function changed. "
                "To start from scratch, delete '%s'." % (self.logs['run_dir']))
            logl = loglike(p).flatten()[0]
            if not np.isclose(logl, lastL) and self.log:
                self.logger.warning(
                    "Trying to resume from previous run, but likelihood function gives different result: %s gave %s, now %s",
                    lastu.flatten(), lastL, logl)
            # a changed likelihood is reported to the caller, which decides
            # whether to salvage (resume-similar), abort, or start fresh
            return np.isclose(logl, lastL)
        return True
def _set_likelihood_function(self, transform, loglike, num_test_samples, make_safe=False):
"""Store the transform and log-likelihood functions.
if make_safe is set, make functions safer by accepting misformed
return shapes and non-finite likelihood values.
"""
def safe_loglike(x):
"""Safe wrapper of likelihood function."""
x = np.asarray(x)
if len(x.shape) == 1:
assert x.shape[0] == self.x_dim
x = np.expand_dims(x, 0)
logl = loglike(x)
if len(logl.shape) == 0:
logl = np.expand_dims(logl, 0)
logl[np.logical_not(np.isfinite(logl))] = -1e100
return logl
if make_safe:
self.loglike = safe_loglike
else:
self.loglike = loglike
if transform is None:
self.transform = lambda x: x
elif make_safe:
def safe_transform(x):
"""Safe wrapper of transform function."""
x = np.asarray(x)
if len(x.shape) == 1:
assert x.shape[0] == self.x_dim
x = np.expand_dims(x, 0)
return transform(x)
self.transform = safe_transform
else:
self.transform = transform
lims = np.ones((2, self.x_dim))
lims[0,:] = 1e-6
lims[1,:] = 1 - 1e-6
self.transform_limits = self.transform(lims).transpose()
self.volfactor = vol_prefactor(self.x_dim)
def _widen_nodes(self, weighted_parents, weights, nnodes_needed, update_interval_ncall):
"""Ensure that at parents have `nnodes_needed` live points (parallel arcs).
If not, fill up by sampling.
"""
ndone = len(weighted_parents)
if ndone == 0:
if self.log:
self.logger.info('No parents, so widening roots')
self._widen_roots(nnodes_needed)
return {}
# select parents with weight 1/parent_weights
p = 1. / np.array(weights)
if (p == p[0]).all():
parents = weighted_parents
else:
# preferentially select nodes with few parents, as those
# have most weight
i = np.random.choice(len(weighted_parents), size=nnodes_needed, p=p / p.sum())
if self.use_mpi:
i = self.comm.bcast(i, root=0)
parents = [weighted_parents[ii] for ii in i]
del weighted_parents, weights
# sort from low to high
parents.sort(key=operator.attrgetter('value'))
Lmin = parents[0].value
if np.isinf(Lmin):
# some of the parents were born by sampling from the entire
# prior volume. So we can efficiently apply a solution:
# expand the roots
if self.log:
self.logger.info('parent value is -inf, so widening roots')
self._widen_roots(nnodes_needed)
return {}
# double until we reach the necessary points
# this is probably 1, from (2K - K) / K
nsamples = int(np.ceil((nnodes_needed - ndone) / len(parents)))
if self.log:
self.logger.info('Will add %d live points (x%d) at L=%.1g ...', nnodes_needed - ndone, nsamples, Lmin)
# add points where necessary (parents can have multiple entries)
target_min_num_children = {}
for n in parents:
orign = target_min_num_children.get(n.id, len(n.children))
target_min_num_children[n.id] = orign + nsamples
return target_min_num_children
    def _widen_roots_beyond_initial_plateau(self, nroots, num_warn, num_stop):
        """Widen roots, but populate ahead of initial plateau.

        calls _widen_roots, and if there are several points with the same
        value equal to the lowest loglikelihood, widens some more until
        there are `nroots`-1 that are different to the lowest
        loglikelihood value.

        Parameters
        -----------
        nroots: int
            Number of root live points, after the plateau is traversed.
        num_warn: int
            Warn if the number of root live points reached this.
        num_stop: int
            Do not increasing the number of root live points beyond this limit.
        """
        nroots_needed = nroots
        user_has_been_warned = False
        while True:
            # (re)populate root live points up to the current target
            self._widen_roots(nroots_needed)
            Ls = np.array([node.value for node in self.root.children])
            Lmin = np.min(Ls)
            # warn once if the plateau forced an excessive number of live points
            if self.log and nroots_needed > num_warn and not user_has_been_warned:
                self.logger.warn("""Warning: The log-likelihood has a large plateau with L=%g.
Probably you are returning a low value when the parameters are problematic/unphysical.
ultranest can handle this correctly, by discarding live points with the same loglikelihood.
(arxiv:2005.08602 arxiv:2010.13884). To mitigate running out of live points,
the initial number of live points is increased. But now this has reached over %d points.
You can avoid this making the loglikelihood increase towards where the good region is.
For example, let's say you have two parameters where the sum must be below 1. Replace this:
if params[0] + params[1] > 1:
return -1e300
with:
if params[0] + params[1] > 1:
return -1e300 * (params[0] + params[1])
The current strategy will continue until %d live points are reached.
It is safe to ignore this warning.""", Lmin, num_warn, num_stop)
                user_has_been_warned = True
            # hard cap: never exceed num_stop root live points
            if nroots_needed >= num_stop:
                break
            # P = number of points sitting exactly on the plateau value
            P = (Ls == Lmin).sum()
            if 1 < P < len(Ls) and len(Ls) - P + 1 < nroots:
                # guess the number of points needed: P-1 are useless
                self.logger.debug(
                    'Found plateau of %d/%d initial points at L=%g. '
                    'Avoid this by a continuously increasing loglikelihood towards good regions.',
                    P, nroots_needed, Lmin)
                nroots_needed = min(num_stop, nroots_needed + (P - 1))
            else:
                break
    def _widen_roots(self, nroots):
        """Ensure root has `nroots` children.

        Sample from prior to fill up (if needed). When resuming with a
        point store, previously stored prior samples are popped first,
        and only the shortfall is freshly drawn. Under MPI, the fresh
        draws are split across ranks and broadcast so that every rank
        ends up holding the full, identical set.

        Parameters
        -----------
        nroots: int
            Number of root live points, after the plateau is traversed.
        """
        if self.log and len(self.root.children) > 0:
            self.logger.info('Widening roots to %d live points (have %d already) ...', nroots, len(self.root.children))
        nnewroots = nroots - len(self.root.children)
        if nnewroots <= 0:
            # nothing to do
            return
        # points recovered from the point store (resume case)
        prev_u = []
        prev_v = []
        prev_logl = []
        prev_rowid = []  # row ids of reused points (not used further here)
        if self.log and self.use_point_stack:
            # try to resume:
            # self.logger.info('Resuming...')
            # pop stored prior samples (threshold -inf selects any row)
            for i in range(nnewroots):
                rowid, row = self.pointstore.pop(-np.inf)
                if row is None:
                    break
                prev_logl.append(row[1])
                prev_u.append(row[3:3 + self.x_dim])
                prev_v.append(row[3 + self.x_dim:3 + self.x_dim + self.num_params])
                prev_rowid.append(rowid)
        if self.log:
            # only the root/logging rank knows how many points were recovered
            prev_u = np.array(prev_u)
            prev_v = np.array(prev_v)
            prev_logl = np.array(prev_logl)
            num_live_points_missing = nnewroots - len(prev_logl)
        else:
            # sentinel; overwritten by the broadcast below
            num_live_points_missing = -1
        if self.use_mpi:
            # distribute the recovered points and the shortfall to all ranks
            num_live_points_missing = self.comm.bcast(num_live_points_missing, root=0)
            prev_u = self.comm.bcast(prev_u, root=0)
            prev_v = self.comm.bcast(prev_v, root=0)
            prev_logl = self.comm.bcast(prev_logl, root=0)
        assert num_live_points_missing >= 0
        if self.log and num_live_points_missing > 0:
            self.logger.info('Sampling %d live points from prior ...', num_live_points_missing)
        if num_live_points_missing > 0:
            # each rank draws its share of the missing points
            num_live_points_todo = distributed_work_chunk_size(num_live_points_missing, self.mpi_rank, self.mpi_size)
            # count all fresh likelihood evaluations
            self.ncall += num_live_points_missing
            if num_live_points_todo > 0:
                active_u = np.random.uniform(size=(num_live_points_todo, self.x_dim))
                active_v = self.transform(active_u)
                active_logl = self.loglike(active_v)
            else:
                active_u = np.empty((0, self.x_dim))
                active_v = np.empty((0, self.num_params))
                active_logl = np.empty((0,))
            if self.use_mpi:
                # gather per-rank draws at root, then broadcast the union
                recv_samples = self.comm.gather(active_u, root=0)
                recv_samplesv = self.comm.gather(active_v, root=0)
                recv_likes = self.comm.gather(active_logl, root=0)
                recv_samples = self.comm.bcast(recv_samples, root=0)
                recv_samplesv = self.comm.bcast(recv_samplesv, root=0)
                recv_likes = self.comm.bcast(recv_likes, root=0)
                active_u = np.concatenate(recv_samples, axis=0)
                active_v = np.concatenate(recv_samplesv, axis=0)
                active_logl = np.concatenate(recv_likes, axis=0)
            assert active_logl.shape == (num_live_points_missing,), (active_logl.shape, num_live_points_missing)
            if self.log_to_pointstore:
                # persist fresh prior samples so a later run can resume
                for i in range(num_live_points_missing):
                    rowid = self.pointstore.add(_listify(
                        [-np.inf, active_logl[i], 0.0],
                        active_u[i,:],
                        active_v[i,:]), 1)
            if len(prev_u) > 0:
                # combine recovered and freshly drawn points
                active_u = np.concatenate((prev_u, active_u))
                active_v = np.concatenate((prev_v, active_v))
                active_logl = np.concatenate((prev_logl, active_logl))
            assert active_u.shape == (nnewroots, self.x_dim), (active_u.shape, nnewroots, self.x_dim, num_live_points_missing, len(prev_u))
            assert active_v.shape == (nnewroots, self.num_params), (active_v.shape, nnewroots, self.num_params, num_live_points_missing, len(prev_u))
            assert active_logl.shape == (nnewroots,), (active_logl.shape, nnewroots)
        else:
            # everything was recovered from the point store
            active_u = prev_u
            active_v = prev_v
            active_logl = prev_logl
        roots = [self.pointpile.make_node(logl, u, p) for u, p, logl in zip(active_u, active_v, active_logl)]
        if len(active_u) > 4:
            # only build a transformed-space wrapping ellipsoid later if the
            # prior transform is not a plain affine map
            self.build_tregion = not is_affine_transform(active_u, active_v)
        self.root.children += roots
def _adaptive_strategy_advice(self, Lmin, parallel_values, main_iterator, minimal_widths, frac_remain, Lepsilon):
"""Check if integration is done.
Returns range where more sampling is needed
Returns
--------
Llo: float
lower log-likelihood bound, nan if done
Lhi: float
lower log-likelihood bound, nan if done
Parameters
-----------
Lmin: float
current loglikelihood threshold
parallel_values: array of floats
loglikelihoods of live points
main_iterator: BreadthFirstIterator
current tree exploration iterator
minimal_widths: list
current width required
frac_remain: float
maximum fraction of integral in remainder for termination
Lepsilon: float
loglikelihood accuracy threshold
"""
Ls = parallel_values.copy()
Ls.sort()
# Ls = [node.value] + [n.value for rootid2, n in parallel_nodes]
Lmax = Ls[-1]
Lmin = Ls[0]
# all points the same, stop
if Lmax - Lmin < Lepsilon:
return np.nan, np.nan
# max remainder contribution is Lmax + weight, to be added to main_iterator.logZ
# the likelihood that would add an equal amount as main_iterator.logZ is:
logZmax = main_iterator.logZremain
Lnext = logZmax - (main_iterator.logVolremaining + log(frac_remain)) - log(len(Ls))
L1 = Ls[1] if len(Ls) > 1 else Ls[0]
Lmax1 = np.median(Ls)
Lnext = max(min(Lnext, Lmax1), L1)
# if the remainder dominates, return that range
if main_iterator.logZremain > main_iterator.logZ:
return Lmin, Lnext
if main_iterator.remainder_fraction > frac_remain:
return Lmin, Lnext
return np.nan, np.nan
    def _find_strategy(self, saved_logl, main_iterator, dlogz, dKL, min_ess):
        """Ask each strategy which log-likelihood interval needs more exploration.

        Returns
        -------
        Nlive_min: int
            minimum number of live points the evidence (dlogz) strategy
            estimates is needed; 0 if the dlogz goal is already met.
        (Llo_KL, Lhi_KL): floats
            interval where posterior uncertainty strategy requires more samples.
        (Llo_ess, Lhi_ess): floats
            interval where effective sample strategy requires more samples.

        Parameters
        ----------
        saved_logl: array of float
            loglikelihood values in integration
        main_iterator: BreadthFirstIterator
            current tree exploration iterator
        dlogz: float
            required logZ accuracy (smaller is stricter)
        dKL: float
            required Kullback-Leibler information gain between bootstrapped
            nested sampling incarnations (smaller is stricter).
        min_ess: float
            required number of effective samples (higher is stricter).
        """
        saved_logl = np.asarray(saved_logl)
        # posterior log-weight of each stored point per integrator:
        # column 0 is the reference run, columns 1+ are bootstraps
        logw = np.asarray(main_iterator.logweights) + saved_logl.reshape((-1,1)) - main_iterator.all_logZ
        ref_logw = logw[:,0].reshape((-1,1))
        other_logw = logw[:,1:]
        Llo_ess = np.inf
        Lhi_ess = -np.inf
        w = exp(ref_logw.flatten())
        w /= w.sum()
        # Kish effective sample size of the reference posterior weights
        ess = len(w) / (1.0 + ((len(w) * w - 1)**2).sum() / len(w))
        if ess < min_ess:
            # resample posterior draws; the loglikelihood range they span is
            # where additional samples would raise the ESS
            samples = np.random.choice(len(w), p=w, size=min_ess)
            Llo_ess = saved_logl[samples].min()
            Lhi_ess = saved_logl[samples].max()
        if self.log and Lhi_ess > Llo_ess:
            self.logger.info("Effective samples strategy wants to improve: %.2f..%.2f (ESS = %.1f, need >%d)",
                Llo_ess, Lhi_ess, ess, min_ess)
        elif self.log and min_ess > 0:
            self.logger.info("Effective samples strategy satisfied (ESS = %.1f, need >%d)",
                ess, min_ess)
        # compute KL divergence of each bootstrapped posterior
        # relative to the reference posterior
        with np.errstate(invalid='ignore'):
            KL = np.where(np.isfinite(other_logw), exp(other_logw) * (other_logw - ref_logw), 0)
        KLtot = KL.sum(axis=0)
        dKLtot = np.abs(KLtot - KLtot.mean())
        # normalise positive KL contributions into per-bootstrap distributions
        p = np.where(KL > 0, KL, 0)
        p /= p.sum(axis=0).reshape((1, -1))
        Llo_KL = np.inf
        Lhi_KL = -np.inf
        for i, (pi, dKLi, logwi) in enumerate(zip(p.transpose(), dKLtot, other_logw)):
            if dKLi > dKL:
                # locate where the bulk (all but 1/400 of each tail) of this
                # bootstrap's KL contribution lies
                ilo, ihi = _get_cumsum_range(pi, 1. / 400)
                # ilo and ihi are most likely missing in this iterator
                # --> select the one before/after in this iterator
                ilos = np.where(np.isfinite(logwi[:ilo]))[0]
                ihis = np.where(np.isfinite(logwi[ihi:]))[0]
                ilo2 = ilos[-1] if len(ilos) > 0 else 0
                ihi2 = (ihi + ihis[0]) if len(ihis) > 0 else -1
                # self.logger.info(' - KL[%d] = %.2f: need to improve near %.2f..%.2f --> %.2f..%.2f' % (
                #     i, dKLi, saved_logl[ilo], saved_logl[ihi], saved_logl[ilo2], saved_logl[ihi2]))
                Llo_KL = min(Llo_KL, saved_logl[ilo2])
                Lhi_KL = max(Lhi_KL, saved_logl[ihi2])
        if self.log and Lhi_KL > Llo_KL:
            self.logger.info("Posterior uncertainty strategy wants to improve: %.2f..%.2f (KL: %.2f+-%.2f nat, need <%.2f nat)",
                Llo_KL, Lhi_KL, KLtot.mean(), dKLtot.max(), dKL)
        elif self.log:
            self.logger.info("Posterior uncertainty strategy is satisfied (KL: %.2f+-%.2f nat, need <%.2f nat)",
                KLtot.mean(), dKLtot.max(), dKL)
        Nlive_min = 0
        p = exp(logw)
        p /= p.sum(axis=0).reshape((1, -1))
        # spread of bootstrap logZ estimates around the reference
        deltalogZ = np.abs(main_iterator.all_logZ[1:] - main_iterator.logZ)
        tail_fraction = w[np.asarray(main_iterator.istail)].sum() / w.sum()
        logzerr_tail = logaddexp(log(tail_fraction) + main_iterator.logZ, main_iterator.logZ) - main_iterator.logZ
        maxlogzerr = max(main_iterator.logZerr, deltalogZ.max(), main_iterator.logZerr_bs)
        if maxlogzerr > dlogz:
            if logzerr_tail > maxlogzerr:
                if self.log:
                    self.logger.info("logz error is dominated by tail. Decrease frac_remain to make progress.")
            # very conservative estimation using all iterations
            # this punishes short intervals with many live points
            niter_max = len(saved_logl)
            Nlive_min = int(np.ceil(niter_max**0.5 / dlogz))
            if self.log:
                self.logger.debug(" conservative estimate says at least %d live points are needed to reach dlogz goal", Nlive_min)
            # better estimation:
            # get only until where logz bulk is (random sample here)
            itmax = np.random.choice(len(w), p=w)
            # back out nlive sequence (width changed by (1 - exp(-1/N))*(exp(-1/N)) )
            logweights = np.array(main_iterator.logweights[:itmax])
            with np.errstate(divide='ignore', invalid='ignore'):
                widthratio = 1 - np.exp(logweights[1:,0] - logweights[:-1,0])
                nlive = 1. / np.log((1 - np.sqrt(1 - 4 * widthratio)) / (2 * widthratio))
            nlive[~(nlive > 1)] = 1
            # build iteration groups
            nlive_sets, niter = np.unique(nlive.astype(int), return_counts=True)
            if self.log:
                self.logger.debug(
                    " number of live points vary between %.0f and %.0f, most (%d/%d iterations) have %d",
                    nlive.min(), nlive.max(), niter.max(), itmax, nlive_sets[niter.argmax()])
            # find the smallest live point floor that would meet the dlogz goal
            for nlive_floor in nlive_sets:
                # estimate error if this was the minimum nlive applied
                nlive_adjusted = np.where(nlive_sets < nlive_floor, nlive_floor, nlive_sets)
                deltalogZ_expected = (niter / nlive_adjusted**2.0).sum()**0.5
                if deltalogZ_expected < dlogz:
                    # achievable with Nlive_min
                    Nlive_min = int(nlive_floor)
                    if self.log:
                        self.logger.debug(" at least %d live points are needed to reach dlogz goal", Nlive_min)
                    break
        if self.log and Nlive_min > 0:
            self.logger.info(
                "Evidency uncertainty strategy wants %d minimum live points (dlogz from %.2f to %.2f, need <%s)",
                Nlive_min, deltalogZ.mean(), deltalogZ.max(), dlogz)
        elif self.log:
            self.logger.info(
                "Evidency uncertainty strategy is satisfied (dlogz=%.2f, need <%s)",
                (main_iterator.logZerr_bs**2 + logzerr_tail**2)**0.5, dlogz)
        if self.log:
            self.logger.info(
                ' logZ error budget: single: %.2f bs:%.2f tail:%.2f total:%.2f required:<%.2f',
                main_iterator.logZerr, main_iterator.logZerr_bs, logzerr_tail,
                (main_iterator.logZerr_bs**2 + logzerr_tail**2)**0.5, dlogz)
        return Nlive_min, (Llo_KL, Lhi_KL), (Llo_ess, Lhi_ess)
def _refill_samples(self, Lmin, ndraw, nit):
"""Get new samples from region."""
nc = 0
u = self.region.sample(nsamples=ndraw)
assert np.logical_and(u > 0, u < 1).all(), (u)
nu = u.shape[0]
if nu == 0:
v = np.empty((0, self.num_params))
logl = np.empty((0,))
accepted = np.empty(0, dtype=bool)
else:
if nu > 1 and not self.draw_multiple:
# peel off first if multiple evaluation is not supported
nu = 1
u = u[:1,:]
v = self.transform(u)
logl = np.ones(nu) * -np.inf
if self.tregion is not None:
# check wrapping ellipsoid in transformed space
accepted = self.tregion.inside(v)
nt = accepted.sum()
else:
# if undefined, all pass; rarer branch
accepted = np.ones(nu, dtype=bool)
nt = nu
if nt > 0:
logl[accepted] = self.loglike(v[accepted, :])
nc += nt
accepted = logl > Lmin
# print("it: %4d ndraw: %d -> %d -> %d -> %d " % (nit, ndraw, nu, nt, accepted.sum()))
if not self.sampling_slow_warned and nit * ndraw >= 100000 and nit > 20:
warning_message1 = ("Sampling from region seems inefficient (%d/%d accepted in iteration %d). " % (accepted.sum(), ndraw, nit))
warning_message2 = "To improve efficiency, modify the transformation so that the current live points%s are ellipsoidal, " + \
"or use a stepsampler, or set frac_remain to a lower number (e.g., 0.5) to terminate earlier."
if self.log_to_disk:
debug_filename = os.path.join(self.logs['extra'], 'sampling-stuck-it%d')
np.savez(
debug_filename + '.npz',
u=self.region.u, unormed=self.region.unormed,
maxradiussq=self.region.maxradiussq,
sample_u=u, sample_v=v, sample_logl=logl)
np.savetxt(debug_filename + '.csv', self.region.u, delimiter=',')
warning_message = warning_message1 + (warning_message2 % (' (stored for you in %s.csv)' % debug_filename))
else:
warning_message = warning_message1 + warning_message2 % ''
warnings.warn(warning_message)
logl_region = self.loglike(self.transform(self.region.u))
if (logl_region == Lmin).all():
raise ValueError(
"Region cannot sample a higher point. "
"All remaining live points have the same value.")
if not (logl_region > Lmin).any():
raise ValueError(
"Region cannot sample a higher point. "
"Perhaps you are resuming from a different problem?"
"Delete the output files and start again.")
self.sampling_slow_warned = True
self.ncall_region += ndraw
return u[accepted,:], v[accepted,:], logl[accepted], nc, 0
    def _create_point(self, Lmin, ndraw, active_u, active_values):
        """Draw a new point above likelihood threshold `Lmin`.

        Points come from the point store (when resuming), from the step
        sampler (if configured), or from region rejection sampling.
        Batches are kept in `self.samples`/`self.samplesv`/`self.likes`
        and consumed one point at a time via the `self.ib` cursor.

        Parameters
        -----------
        Lmin: float
            loglikelihood threshold to draw above
        ndraw: float
            number of points to try to sample at once
        active_u: array of floats
            current live points
        active_values: array
            loglikelihoods of current live points

        Returns
        --------
        u: array of floats
            new point in unit cube coordinates
        p: array of floats
            new point in transformed parameter coordinates
        logl: float
            loglikelihood of the new point
        """
        if self.stepsampler is None:
            assert self.region.inside(active_u).any(), \
                ("None of the live points satisfies the current region!",
                self.region.maxradiussq, self.region.u, self.region.unormed, active_u,
                getattr(self.region, 'bbox_lo'),
                getattr(self.region, 'bbox_hi'),
                getattr(self.region, 'ellipsoid_cov'),
                getattr(self.region, 'ellipsoid_center'),
                getattr(self.region, 'ellipsoid_invcov'),
                # NOTE(review): 'ellipsoid_cov' is listed twice in this
                # diagnostic tuple; probably another attribute was intended
                getattr(self.region, 'ellipsoid_cov'),
                )
        nit = 0
        while True:
            ib = self.ib
            if ib >= len(self.samples) and self.use_point_stack:
                # root checks the point store
                # row layout: [Lmin, logl, quality, u..., v...]; NaN = no point
                next_point = np.zeros((1, 3 + self.x_dim + self.num_params)) * np.nan
                if self.log_to_pointstore:
                    _, stored_point = self.pointstore.pop(Lmin)
                    if stored_point is not None:
                        next_point[0,:] = stored_point
                    else:
                        # -inf marks that the store had nothing useful
                        next_point[0,:] = -np.inf
                    self.use_point_stack = not self.pointstore.stack_empty
                if self.use_mpi:  # and informs everyone
                    self.use_point_stack = self.comm.bcast(self.use_point_stack, root=0)
                    next_point = self.comm.bcast(next_point, root=0)
                # unpack
                self.likes = next_point[:,1]
                self.samples = next_point[:,3:3 + self.x_dim]
                self.samplesv = next_point[:,3 + self.x_dim:3 + self.x_dim + self.num_params]
                # skip if we already know it is not useful
                ib = 0 if np.isfinite(self.likes[0]) else 1
            use_stepsampler = self.stepsampler is not None
            # batch exhausted: generate fresh samples until we have some
            while ib >= len(self.samples):
                ib = 0
                if use_stepsampler:
                    u, v, logl, nc = self.stepsampler.__next__(
                        self.region,
                        transform=self.transform, loglike=self.loglike,
                        Lmin=Lmin, us=active_u, Ls=active_values,
                        ndraw=ndraw, tregion=self.tregion)
                    quality = self.stepsampler.nsteps
                else:
                    u, v, logl, nc, quality = self._refill_samples(Lmin, ndraw, nit)
                    nit += 1
                if logl is None:
                    # sampler produced nothing this round
                    u = np.empty((0, self.x_dim))
                    v = np.empty((0, self.num_params))
                    logl = np.empty((0,))
                elif u.ndim == 1:
                    # single point: promote to 2d batch shape
                    assert np.logical_and(u > 0, u < 1).all(), (u)
                    u = u.reshape((1, self.x_dim))
                    v = v.reshape((1, self.num_params))
                    logl = logl.reshape((1,))
                if self.use_mpi:
                    # pool samples from all ranks and share the union
                    recv_samples = self.comm.gather(u, root=0)
                    recv_samplesv = self.comm.gather(v, root=0)
                    recv_likes = self.comm.gather(logl, root=0)
                    recv_nc = self.comm.gather(nc, root=0)
                    recv_samples = self.comm.bcast(recv_samples, root=0)
                    recv_samplesv = self.comm.bcast(recv_samplesv, root=0)
                    recv_likes = self.comm.bcast(recv_likes, root=0)
                    recv_nc = self.comm.bcast(recv_nc, root=0)
                    self.samples = np.concatenate(recv_samples, axis=0)
                    self.samplesv = np.concatenate(recv_samplesv, axis=0)
                    self.likes = np.concatenate(recv_likes, axis=0)
                    self.ncall += sum(recv_nc)
                else:
                    self.samples = u
                    self.samplesv = v
                    self.likes = logl
                    self.ncall += nc
                if self.log:
                    # persist the fresh batch for possible later resume
                    for ui, vi, logli in zip(self.samples, self.samplesv, self.likes):
                        self.pointstore.add(
                            _listify([Lmin, logli, quality], ui, vi),
                            self.ncall)
            if self.likes[ib] > Lmin:
                u = self.samples[ib, :]
                assert np.logical_and(u > 0, u < 1).all(), (u)
                p = self.samplesv[ib, :]
                logl = self.likes[ib]
                self.ib = ib + 1
                return u, p, logl
            else:
                # below threshold: advance the cursor and keep looking
                self.ib = ib + 1
    def _update_region(
        self, active_u, active_node_ids,
        bootstrap_rootids=None, active_rootids=None,
        nbootstraps=30, minvol=0., active_p=None
    ):
        """Build a new MLFriends region from `active_u`, and wrapping ellipsoid.

        Both are safely built using bootstrapping, so that the
        region can be used for sampling and rejecting points.

        If MPI is enabled, this computation is parallelised.

        If active_p is not None, a wrapping ellipsoid is built also
        in the user-transformed parameter space.

        Parameters
        -----------
        active_u: array of floats
            current live points
        active_node_ids: 2d array of ints
            which bootstrap initialisation the points belong to.
        active_rootids: 2d array of ints
            roots active in each bootstrap initialisation
        bootstrap_rootids: array of ints
            bootstrap samples. if None, they are drawn fresh.
        nbootstraps: int
            number of bootstrap rounds
        active_p: array of floats
            current live points, in user-transformed space
        minvol: float
            expected current minimum volume of region.

        Returns
        --------
        updated: bool
            True if update was made, False if previous region remained.
        """
        assert nbootstraps > 0
        updated = False
        if self.region is None:
            # first call: build transform layer and region from scratch
            # if self.log:
            #     self.logger.debug("building first region ...")
            self.transformLayer = self.transform_layer_class(wrapped_dims=self.wrapped_axes)
            self.transformLayer.optimize(active_u, active_u, minvol=minvol)
            self.region = self.region_class(active_u, self.transformLayer)
            self.region_nodes = active_node_ids.copy()
            assert self.region.maxradiussq is None
            _update_region_bootstrap(self.region, nbootstraps, minvol, self.comm if self.use_mpi else None, self.mpi_size)
            self.region.create_ellipsoid(minvol=minvol)
            # if self.log:
            #     self.logger.debug("building first region ... r=%e, f=%e" % (r, f))
            updated = True
        # verify correctness:
        # self.region.create_ellipsoid(minvol=minvol)
        # assert self.region.inside(active_u).all(), self.region.inside(active_u).mean()
        assert self.transformLayer is not None
        need_accept = False
        if self.region.maxradiussq is None:
            # we have been told that radius is currently invalid
            # we need to bootstrap back to a valid state
            # compute radius given current transformLayer
            oldu = self.region.u
            self.region.u = active_u
            self.region_nodes = active_node_ids.copy()
            self.region.set_transformLayer(self.transformLayer)
            _update_region_bootstrap(self.region, nbootstraps, minvol, self.comm if self.use_mpi else None, self.mpi_size)
            # print("made first region, r=%e" % (r))
            # now that we have r, can do clustering
            # but such reclustering would forget the cluster ids
            # instead, track the clusters from before by matching manually
            oldt = self.transformLayer.transform(oldu)
            clusterids = np.zeros(len(active_u), dtype=int)
            nnearby = np.empty(len(self.region.unormed), dtype=int)
            for ci in np.unique(self.transformLayer.clusterids):
                if ci == 0:
                    # 0 means "unassigned"; skip
                    continue
                # find points from that cluster
                oldti = oldt[self.transformLayer.clusterids == ci]
                # identify which new points are near this cluster
                find_nearby(oldti, self.region.unormed, self.region.maxradiussq, nnearby)
                mask = nnearby != 0
                # assign the nearby ones to this cluster
                # if they have not been set yet
                # if they have, set them to -1
                clusterids[mask] = np.where(clusterids[mask] == 0, ci, -1)
            # clusters we are unsure about (double assignments) go unassigned
            clusterids[clusterids == -1] = 0
            # tell scaling layer the correct cluster information
            self.transformLayer.clusterids = clusterids
            # we want the clustering to repeat to remove remaining zeros
            need_accept = (self.transformLayer.clusterids == 0).any()
            updated = True
            assert len(self.region.u) == len(self.transformLayer.clusterids)
            # verify correctness:
            self.region.create_ellipsoid(minvol=minvol)
            # assert self.region.inside(active_u).all(), self.region.inside(active_u).mean()
        assert len(self.region.u) == len(self.transformLayer.clusterids)
        # rebuild space
        # any numerical warning/error below aborts the update and keeps
        # the previous region (see except clauses)
        with warnings.catch_warnings(), np.errstate(all='raise'):
            try:
                nextTransformLayer = self.transformLayer.create_new(active_u, self.region.maxradiussq, minvol=minvol)
                assert not (nextTransformLayer.clusterids == 0).any()
                _, cluster_sizes = np.unique(nextTransformLayer.clusterids, return_counts=True)
                smallest_cluster = cluster_sizes.min()
                if self.log and smallest_cluster == 1:
                    self.logger.debug(
                        "clustering found some stray points [need_accept=%s] %s",
                        need_accept,
                        np.unique(nextTransformLayer.clusterids, return_counts=True)
                    )
                nextregion = self.region_class(active_u, nextTransformLayer)
                assert np.isfinite(nextregion.unormed).all()
                if not nextTransformLayer.nclusters < 20:
                    if self.log:
                        self.logger.info(
                            "Found a lot of clusters: %d (%d with >1 members)",
                            nextTransformLayer.nclusters, (cluster_sizes > 1).sum())
                # if self.log:
                #     self.logger.info("computing maxradius...")
                r, f = _update_region_bootstrap(nextregion, nbootstraps, minvol, self.comm if self.use_mpi else None, self.mpi_size)
                # verify correctness:
                nextregion.create_ellipsoid(minvol=minvol)
                # check if live points are numerically colliding or linearly dependent
                self.live_points_healthy = len(active_u) > self.x_dim and \
                    np.all(np.sum(active_u[1:] != active_u[0], axis=0) > self.x_dim) and \
                    np.linalg.matrix_rank(nextregion.ellipsoid_cov) == self.x_dim
                assert (nextregion.u == active_u).all()
                assert np.allclose(nextregion.unormed, nextregion.transformLayer.transform(active_u))
                # assert nextregion.inside(active_u).all(),
                #     ("live points should live in new region, but only %.3f%% do." % (100 * nextregion.inside(active_u).mean()), active_u)
                good_region = nextregion.inside(active_u).all()
                # assert good_region
                if not good_region and self.log:
                    self.logger.debug("Proposed region is inconsistent (maxr=%g,enlarge=%g) and will be skipped.", r, f)
                # avoid cases where every point is its own cluster,
                # and even the largest cluster has fewer than x_dim points
                sensible_clustering = nextTransformLayer.nclusters < len(nextregion.u) \
                    and cluster_sizes.max() >= nextregion.u.shape[1]
                # force shrinkage of volume. avoids reconnecting dying modes
                if good_region and \
                        (need_accept or nextregion.estimate_volume() <= self.region.estimate_volume()) \
                        and sensible_clustering:
                    self.region = nextregion
                    self.transformLayer = self.region.transformLayer
                    self.region_nodes = active_node_ids.copy()
                    updated = True
                    assert not (self.transformLayer.clusterids == 0).any(), (self.transformLayer.clusterids, need_accept, updated)
            except Warning:
                if self.log:
                    self.logger.debug("not updating region", exc_info=True)
            except FloatingPointError:
                if self.log:
                    self.logger.debug("not updating region", exc_info=True)
            except np.linalg.LinAlgError:
                if self.log:
                    self.logger.debug("not updating region", exc_info=True)
        assert len(self.region.u) == len(self.transformLayer.clusterids)
        if active_p is None or not self.build_tregion:
            # no transformed-space points available or ellipsoid disabled
            self.tregion = None
        else:
            try:
                with np.errstate(invalid='raise'):
                    # wrapping ellipsoid in user-transformed parameter space
                    tregion = WrappingEllipsoid(active_p)
                    f = tregion.compute_enlargement(
                        nbootstraps=max(1, nbootstraps // self.mpi_size))
                    if self.use_mpi:
                        # use the largest enlargement found by any rank
                        recv_enlarge = self.comm.gather(f, root=0)
                        recv_enlarge = self.comm.bcast(recv_enlarge, root=0)
                        f = np.max(recv_enlarge)
                    tregion.enlarge = f
                    tregion.create_ellipsoid()
                    self.tregion = tregion
            except FloatingPointError:
                if self.log:
                    self.logger.debug("not updating t-ellipsoid", exc_info=True)
                self.tregion = None
            except np.linalg.LinAlgError:
                if self.log:
                    self.logger.debug("not updating t-ellipsoid", exc_info=True)
                self.tregion = None
        return updated
def _expand_nodes_before(self, Lmin, nnodes_needed, update_interval_ncall):
"""Expand nodes before `Lmin` to have `nnodes_needed`.
Returns
--------
Llo: float
lowest parent sampled (-np.inf if sampling from root)
Lhi: float
Lmin
target_min_num_children: int
number of children that need to be maintained between Llo, Lhi
"""
self.pointstore.reset()
parents, weights = find_nodes_before(self.root, Lmin)
target_min_num_children = self._widen_nodes(parents, weights, nnodes_needed, update_interval_ncall)
if len(parents) == 0:
Llo = -np.inf
else:
Llo = min(n.value for n in parents)
Lhi = Lmin
return Llo, Lhi, target_min_num_children
def _should_node_be_expanded(
self, it, Llo, Lhi, minimal_widths_sequence, target_min_num_children,
node, parallel_values, max_ncalls, max_iters, live_points_healthy
):
"""Check if node needs new children.
Returns
-------
expand_node: bool
True if should sample a new point
based on this node (above its likelihood value Lmin).
Parameters
----------
it: int
current iteration
node: node
The node to consider
parallel_values: array of floats
loglikelihoods of live points
max_ncalls: int
maximum number of likelihood function calls allowed
max_iters: int
maximum number of nested sampling iteration allowed
Llo: float
lower loglikelihood bound for the strategy
Lhi: float
upper loglikelihood bound for the strategy
minimal_widths_sequence: list
list of likelihood intervals with minimum number of live points
target_min_num_children:
minimum number of live points currently targeted
live_points_healthy: bool
indicates whether the live points have become
linearly dependent (covariance not full rank)
or have attained the same exact value in some parameter.
"""
Lmin = node.value
nlive = len(parallel_values)
if not (Lmin <= Lhi and Llo <= Lhi):
return False
if not live_points_healthy:
if self.log:
self.logger.debug("not expanding, because live points are linearly dependent")
return False
# some reasons to stop:
if it > 0:
if max_ncalls is not None and self.ncall >= max_ncalls:
# print("not expanding, because above max_ncall")
return False
if max_iters is not None and it >= max_iters:
# print("not expanding, because above max_iters")
return False
# in a plateau, only shrink (Fowlie+2020)
if (Lmin == parallel_values).sum() > 1:
if self.log:
self.logger.debug("Plateau detected at L=%e, not replacing live point." % Lmin)
return False
expand_node = False
# we should continue to progress towards Lhi
while Lmin > minimal_widths_sequence[0][0]:
minimal_widths_sequence.pop(0)
# get currently desired width
if self.region is None:
minimal_width_clusters = 0
else:
# compute number of clusters with more than 1 element
_, cluster_sizes = np.unique(self.region.transformLayer.clusterids, return_counts=True)
nclusters = (cluster_sizes > 1).sum()
minimal_width_clusters = self.cluster_num_live_points * nclusters
minimal_width = max(minimal_widths_sequence[0][1], minimal_width_clusters)
# if already has children, no need to expand
# if we are wider than the width required
# we do not need to expand this one
# expand_node = len(node.children) == 0
# prefer 1 child, or the number required, if specified
nmin = target_min_num_children.get(node.id, 1) if target_min_num_children else 1
expand_node = len(node.children) < nmin
# print("not expanding, because we are quite wide", nlive, minimal_width, minimal_widths_sequence)
# but we have to expand the first iteration,
# otherwise the integrator never sets H
too_wide = nlive > minimal_width and it > 0
return expand_node and not too_wide
def run(
self,
update_interval_volume_fraction=0.8,
update_interval_ncall=None,
log_interval=None,
show_status=True,
viz_callback='auto',
dlogz=0.5,
dKL=0.5,
frac_remain=0.01,
Lepsilon=0.001,
min_ess=400,
max_iters=None,
max_ncalls=None,
max_num_improvement_loops=-1,
min_num_live_points=400,
cluster_num_live_points=40,
insertion_test_window=10,
insertion_test_zscore_threshold=4,
region_class=MLFriends,
widen_before_initial_plateau_num_warn=10000,
widen_before_initial_plateau_num_max=50000,
):
"""Run until target convergence criteria are fulfilled.
Parameters
----------
update_interval_volume_fraction: float
Update region when the volume shrunk by this amount.
update_interval_ncall: int
Update region after update_interval_ncall likelihood calls (not used).
log_interval: int
Update stdout status line every log_interval iterations
show_status: bool
show integration progress as a status line.
If no output desired, set to False.
viz_callback: function
callback function when region was rebuilt. Allows to
show current state of the live points.
See :py:func:`nicelogger` or :py:class:`LivePointsWidget`.
If no output desired, set to False.
dlogz: float
Target evidence uncertainty. This is the std
between bootstrapped logz integrators.
dKL: float
Target posterior uncertainty. This is the
Kullback-Leibler divergence in nat between bootstrapped integrators.
frac_remain: float
Integrate until this fraction of the integral is left in the remainder.
Set to a low number (1e-2 ... 1e-5) to make sure peaks are discovered.
Set to a higher number (0.5) if you know the posterior is simple.
Lepsilon: float
Terminate when live point likelihoods are all the same,
within Lepsilon tolerance. Increase this when your likelihood
function is inaccurate, to avoid unnecessary search.
min_ess: int
Target number of effective posterior samples.
max_iters: int
maximum number of integration iterations.
max_ncalls: int
stop after this many likelihood evaluations.
max_num_improvement_loops: int
run() tries to assess iteratively where more samples are needed.
This number limits the number of improvement loops.
min_num_live_points: int
minimum number of live points throughout the run
cluster_num_live_points: int
require at least this many live points per detected cluster
insertion_test_zscore_threshold: float
z-score used as a threshold for the insertion order test.
Set to infinity to disable.
insertion_test_window: int
Number of iterations after which the insertion order test is reset.
region_class: :py:class:`MLFriends` or :py:class:`RobustEllipsoidRegion` or :py:class:`SimpleRegion`
Whether to use MLFriends+ellipsoidal+tellipsoidal region (better for multi-modal problems)
or just ellipsoidal sampling (faster for high-dimensional, gaussian-like problems)
or a axis-aligned ellipsoid (fastest, to be combined with slice sampling).
widen_before_initial_plateau_num_warn: int
If a likelihood plateau is encountered, increase the number
of initial live points so that once the plateau is traversed,
*min_num_live_points* live points remain.
If the number exceeds *widen_before_initial_plateau_num_warn*,
a warning is raised.
widen_before_initial_plateau_num_max: int
If a likelihood plateau is encountered, increase the number
of initial live points so that once the plateau is traversed,
*min_num_live_points* live points remain, but not more than
*widen_before_initial_plateau_num_warn*.
"""
for result in self.run_iter(
update_interval_volume_fraction=update_interval_volume_fraction,
update_interval_ncall=update_interval_ncall,
log_interval=log_interval,
dlogz=dlogz, dKL=dKL,
Lepsilon=Lepsilon, frac_remain=frac_remain,
min_ess=min_ess, max_iters=max_iters,
max_ncalls=max_ncalls, max_num_improvement_loops=max_num_improvement_loops,
min_num_live_points=min_num_live_points,
cluster_num_live_points=cluster_num_live_points,
show_status=show_status,
viz_callback=viz_callback,
insertion_test_window=insertion_test_window,
insertion_test_zscore_threshold=insertion_test_zscore_threshold,
region_class=region_class,
widen_before_initial_plateau_num_warn=widen_before_initial_plateau_num_warn,
widen_before_initial_plateau_num_max=widen_before_initial_plateau_num_max,
):
if self.log:
self.logger.debug("did a run_iter pass!")
pass
if self.log:
self.logger.info("done iterating.")
return self.results
    def run_iter(
            self,
            update_interval_volume_fraction=0.8,
            update_interval_ncall=None,
            log_interval=None,
            dlogz=0.5,
            dKL=0.5,
            frac_remain=0.01,
            Lepsilon=0.001,
            min_ess=400,
            max_iters=None,
            max_ncalls=None,
            max_num_improvement_loops=-1,
            min_num_live_points=400,
            cluster_num_live_points=40,
            show_status=True,
            viz_callback='auto',
            insertion_test_window=10000,
            insertion_test_zscore_threshold=2,
            region_class=MLFriends,
            widen_before_initial_plateau_num_warn=10000,
            widen_before_initial_plateau_num_max=50000,
    ):
        """Iterate towards convergence.

        Use as an iterator like so::

            for result in sampler.run_iter(...):
                print('lnZ = %(logz).2f +- %(logzerr).2f' % result)

        Parameters as described in run() method.

        Yields
        ------
        results: dict
            current best estimate of evidence and posterior,
            updated after each nested sampling pass
            (see :py:meth:`_update_results`).
        """
        # sanity check: the requested evidence accuracy must be achievable
        # given the termination criterion frac_remain.
        # frac_remain=1 means 1:1 -> dlogz=log(0.5)
        # frac_remain=0.1 means 1:10 -> dlogz=log(0.1)
        # dlogz_min = log(1./(1 + frac_remain))
        # dlogz_min = -log1p(frac_remain)
        if -np.log1p(frac_remain) > dlogz:
            raise ValueError("To achieve the desired logz accuracy, set frac_remain to a value much smaller than %s (currently: %s)" % (
                exp(-dlogz) - 1, frac_remain))
        # the error is approximately dlogz = sqrt(iterations) / Nlive
        # so we need a minimum, which depends on the number of iterations
        # fewer than 1000 iterations is quite unlikely
        if min_num_live_points < 1000**0.5 / dlogz:
            min_num_live_points = int(np.ceil(1000**0.5 / dlogz))
            if self.log:
                self.logger.info("To achieve the desired logz accuracy, min_num_live_points was increased to %d" % (
                    min_num_live_points))
        if self.log_to_pointstore:
            if len(self.pointstore.stack) > 0:
                self.logger.info("Resuming from %d stored points", len(self.pointstore.stack))
            self.use_point_stack = not self.pointstore.stack_empty
        else:
            self.use_point_stack = False
        assert min_num_live_points >= cluster_num_live_points, \
            ('min_num_live_points(%d) cannot be less than cluster_num_live_points(%d)' %
                (min_num_live_points, cluster_num_live_points))
        self.min_num_live_points = min_num_live_points
        self.cluster_num_live_points = cluster_num_live_points
        self.sampling_slow_warned = False
        self.build_tregion = True
        self.region_class = region_class
        update_interval_volume_log_fraction = log(update_interval_volume_fraction)
        if viz_callback == 'auto':
            viz_callback = get_default_viz_callback()
        # make sure an initial likelihood plateau does not starve the run of live points
        self._widen_roots_beyond_initial_plateau(
            min_num_live_points,
            widen_before_initial_plateau_num_warn, widen_before_initial_plateau_num_max)
        Llo, Lhi = -np.inf, np.inf
        Lmax = -np.inf
        strategy_stale = True
        minimal_widths = []
        target_min_num_children = {}
        improvement_it = 0
        assert max_iters is None or max_iters > 0, ("Invalid value for max_iters: %s. Set to None or positive number" % max_iters)
        assert max_ncalls is None or max_ncalls > 0, ("Invalid value for max_ncalls: %s. Set to None or positive number" % max_ncalls)
        if self.log:
            self.logger.debug(
                'run_iter dlogz=%.1f, dKL=%.1f, frac_remain=%.2f, Lepsilon=%.4f, min_ess=%d' % (
                    dlogz, dKL, frac_remain, Lepsilon, min_ess)
            )
            self.logger.debug(
                'max_iters=%d, max_ncalls=%d, max_num_improvement_loops=%d, min_num_live_points=%d, cluster_num_live_points=%d' % (
                    max_iters if max_iters else -1, max_ncalls if max_ncalls else -1,
                    max_num_improvement_loops, min_num_live_points, cluster_num_live_points)
            )
        self.results = None
        # outer loop: one full nested sampling pass per iteration ("improvement loop");
        # after each pass the strategy decides where to add more live points.
        while True:
            roots = self.root.children
            nroots = len(roots)
            if update_interval_ncall is None:
                update_interval_ncall = nroots
            if log_interval is None:
                log_interval = max(1, round(0.1 * nroots))
            else:
                log_interval = round(log_interval)
                if log_interval < 1:
                    raise ValueError("log_interval must be >= 1")
            explorer = BreadthFirstIterator(roots)
            # Integrating thing
            main_iterator = MultiCounter(
                nroots=len(roots),
                nbootstraps=max(1, self.num_bootstraps // self.mpi_size),
                random=False, check_insertion_order=False)
            main_iterator.Lmax = max(Lmax, max(n.value for n in roots))
            insertion_test = UniformOrderAccumulator()
            insertion_test_runs = []
            insertion_test_quality = np.inf
            insertion_test_direction = 0
            self.transformLayer = None
            self.region = None
            self.tregion = None
            self.live_points_healthy = True
            it_at_first_region = 0
            self.ib = 0
            self.samples = []
            if self.draw_multiple:
                ndraw = self.ndraw_min
            else:
                ndraw = 40
            self.pointstore.reset()
            if self.log_to_pointstore:
                self.use_point_stack = not self.pointstore.stack_empty
            else:
                self.use_point_stack = False
            if self.use_mpi:
                self.use_point_stack = self.comm.bcast(self.use_point_stack, root=0)
            if self.log and (np.isfinite(Llo) or np.isfinite(Lhi)):
                self.logger.info("Exploring (in particular: L=%.2f..%.2f) ...", Llo, Lhi)
            region_sequence = []
            minimal_widths_sequence = _sequentialize_width_sequence(minimal_widths, self.min_num_live_points)
            if self.log:
                self.logger.debug('minimal_widths_sequence: %s', minimal_widths_sequence)
            saved_nodeids = []
            saved_logl = []
            it = 0
            ncall_at_run_start = self.ncall
            ncall_region_at_run_start = self.ncall_region
            next_update_interval_volume = 1
            last_status = time.time()
            # we go through each live point (regardless of root) by likelihood value
            while True:
                next_node = explorer.next_node()
                if next_node is None:
                    break
                rootid, node, (_, active_rootids, active_values, active_node_ids) = next_node
                assert not isinstance(rootid, float)
                # this is the likelihood level we have to improve upon
                self.Lmin = Lmin = node.value
                # if within suggested range, expand
                if strategy_stale or not (Lmin <= Lhi) or not np.isfinite(Lhi) or (active_values == Lmin).all():
                    # check with advisor if we want to expand this node
                    Llo, Lhi = self._adaptive_strategy_advice(
                        Lmin, active_values, main_iterator,
                        minimal_widths, frac_remain, Lepsilon=Lepsilon)
                    # when we are going to the peak, numerical accuracy
                    # can become an issue. We should try not to get stuck there
                    strategy_stale = Lhi - Llo < max(Lepsilon, 0.01)
                expand_node = self._should_node_be_expanded(
                    it, Llo, Lhi, minimal_widths_sequence,
                    target_min_num_children, node, active_values,
                    max_ncalls, max_iters, self.live_points_healthy)
                region_fresh = False
                if expand_node:
                    # sample a new point above Lmin
                    active_u = self.pointpile.getu(active_node_ids)
                    active_p = self.pointpile.getp(active_node_ids)
                    nlive = len(active_u)
                    # first we check that the region is up-to-date
                    if main_iterator.logVolremaining < next_update_interval_volume:
                        if self.region is None:
                            it_at_first_region = it
                        region_fresh = self._update_region(
                            active_u=active_u, active_p=active_p, active_node_ids=active_node_ids,
                            active_rootids=active_rootids,
                            bootstrap_rootids=main_iterator.rootids[1:,],
                            nbootstraps=self.num_bootstraps,
                            minvol=exp(main_iterator.logVolremaining))
                        if region_fresh and self.stepsampler is not None:
                            self.stepsampler.region_changed(active_values, self.region)
                        # count clusters with more than one member
                        _, cluster_sizes = np.unique(self.region.transformLayer.clusterids, return_counts=True)
                        nclusters = (cluster_sizes > 1).sum()
                        region_sequence.append((Lmin, nlive, nclusters, np.max(active_values)))
                        # next_update_interval_ncall = self.ncall + (update_interval_ncall or nlive)
                        next_update_interval_volume = main_iterator.logVolremaining + update_interval_volume_log_fraction
                    # provide nice output to follow what is going on
                    # but skip if we are resuming
                    #  and (self.ncall != ncall_at_run_start and it_at_first_region == it)
                    if self.log and viz_callback:
                        viz_callback(
                            points=dict(u=active_u, p=active_p, logl=active_values),
                            info=dict(
                                it=it, ncall=self.ncall,
                                logz=main_iterator.logZ,
                                logz_remain=main_iterator.logZremain,
                                logvol=main_iterator.logVolremaining,
                                paramnames=self.paramnames + self.derivedparamnames,
                                paramlims=self.transform_limits,
                                order_test_correlation=insertion_test_quality,
                                order_test_direction=insertion_test_direction,
                            ),
                            region=self.region, transformLayer=self.transformLayer,
                            region_fresh=region_fresh,
                        )
                    if self.log:
                        self.pointstore.flush()
                    # NOTE(review): with the default max_num_improvement_loops=-1 this
                    # condition is never true, so the cluster-driven widening below is
                    # disabled unless the caller sets a non-negative value -- confirm intended.
                    if nlive < cluster_num_live_points * nclusters and improvement_it < max_num_improvement_loops:
                        # make wider here
                        if self.log:
                            self.logger.info(
                                "Found %d clusters, but only have %d live points, want %d.",
                                self.region.transformLayer.nclusters, nlive,
                                cluster_num_live_points * nclusters)
                        break
                    # sample point
                    u, p, L = self._create_point(Lmin=Lmin, ndraw=ndraw, active_u=active_u, active_values=active_values)
                    child = self.pointpile.make_node(L, u, p)
                    main_iterator.Lmax = max(main_iterator.Lmax, L)
                    # accumulate the rank of the new point for the insertion order test
                    if np.isfinite(insertion_test_zscore_threshold) and nlive > 1:
                        insertion_test.add((active_values < L).sum(), nlive)
                        if abs(insertion_test.zscore) > insertion_test_zscore_threshold:
                            insertion_test_runs.append(insertion_test.N)
                            insertion_test_quality = insertion_test.N
                            insertion_test_direction = np.sign(insertion_test.zscore)
                            insertion_test.reset()
                        elif insertion_test.N > insertion_test_window:
                            insertion_test_quality = np.inf
                            insertion_test_direction = 0
                            insertion_test.reset()
                    # identify which point is being replaced (from when we built the region)
                    worst = np.where(self.region_nodes == node.id)[0]
                    self.region_nodes[worst] = child.id
                    # if we keep the region informed about the new live points
                    # then the region follows the live points even if maxradius is not updated
                    self.region.u[worst] = u
                    self.region.unormed[worst] = self.region.transformLayer.transform(u)
                    # move also the ellipsoid
                    self.region.ellipsoid_center = np.mean(self.region.u, axis=0)
                    if self.tregion:
                        self.tregion.update_center(np.mean(active_p, axis=0))
                    # if we track the cluster assignment, then in the next round
                    # the ids with the same members are likely to have the same id
                    # this is imperfect
                    # transformLayer.clusterids[worst] = transformLayer.clusterids[father[ib]]
                    # so we just mark the replaced ones as "unassigned"
                    self.transformLayer.clusterids[worst] = 0
                    node.children.append(child)
                    if self.log and (region_fresh or it % log_interval == 0 or time.time() > last_status + 0.1):
                        last_status = time.time()
                        # the number of proposals asked from region
                        ncall_region_here = (self.ncall_region - ncall_region_at_run_start)
                        # the number of proposals returned by the region
                        ncall_here = self.ncall - ncall_at_run_start
                        # the number of likelihood evaluations above threshold
                        it_here = it - it_at_first_region
                        if show_status:
                            if Lmin < -1e8:
                                txt = 'Z=%.1g(%.2f%%) | Like=%.2g..%.2g [%.4g..%.4g]%s| it/evals=%d/%d eff=%.4f%% N=%d \r'
                            elif Llo < -1e8:
                                txt = 'Z=%.1f(%.2f%%) | Like=%.2f..%.2f [%.4g..%.4g]%s| it/evals=%d/%d eff=%.4f%% N=%d \r'
                            else:
                                txt = 'Z=%.1f(%.2f%%) | Like=%.2f..%.2f [%.4f..%.4f]%s| it/evals=%d/%d eff=%.4f%% N=%d \r'
                            sys.stdout.write(txt % (
                                main_iterator.logZ, 100 * (1 - main_iterator.remainder_fraction),
                                Lmin, main_iterator.Lmax, Llo, Lhi, '*' if strategy_stale else ' ', it, self.ncall,
                                np.inf if ncall_here == 0 else it_here * 100 / ncall_here,
                                nlive))
                            sys.stdout.flush()
                        self.logger.debug('iteration=%d, ncalls=%d, regioncalls=%d, ndraw=%d, logz=%.2f, remainder_fraction=%.4f%%, Lmin=%.2f, Lmax=%.2f' % (
                            it, self.ncall, self.ncall_region, ndraw, main_iterator.logZ,
                            100 * main_iterator.remainder_fraction, Lmin, main_iterator.Lmax))
                        # if efficiency becomes low, bulk-process larger arrays
                        # NOTE(review): this adaptation uses ncall_region_here/it_here,
                        # which are defined in this logging branch only, so ndraw is
                        # adapted only on status iterations -- confirm intended.
                        if self.draw_multiple:
                            # inefficiency is the number of (region) proposals per successful number of iterations
                            # but improves by parallelism (because we need only the per-process inefficiency)
                            # sampling_inefficiency = (self.ncall - ncall_at_run_start + 1) / (it + 1) / self.mpi_size
                            sampling_inefficiency = (ncall_region_here + 1) / (it_here + 1) / self.mpi_size
                            # smooth update:
                            ndraw_next = 0.04 * sampling_inefficiency + ndraw * 0.96
                            ndraw = max(self.ndraw_min, min(self.ndraw_max, round(ndraw_next), ndraw * 100))
                            if sampling_inefficiency > 100000 and it >= it_at_first_region + 10:
                                # if the efficiency is poor, there are enough samples in each iteration
                                # to estimate the inefficiency
                                ncall_at_run_start = self.ncall
                                it_at_first_region = it
                                ncall_region_at_run_start = self.ncall_region
                else:
                    # we do not want to count iterations without work
                    # otherwise efficiency becomes > 1
                    it_at_first_region += 1
                saved_nodeids.append(node.id)
                saved_logl.append(Lmin)
                # inform iterators (if it is their business) about the arc
                main_iterator.passing_node(rootid, node, active_rootids, active_values)
                if len(node.children) == 0 and self.region is not None:
                    # the region radius needs to increase if nlive decreases
                    # radius is not reliable, so set to inf
                    # (heuristics do not work in practice)
                    self.region.maxradiussq = None
                    # ask for the region to be rebuilt
                    next_update_interval_volume = 1
                it += 1
                explorer.expand_children_of(rootid, node)
            if self.log:
                self.logger.info("Explored until L=%.1g ", node.value)
            # print_tree(roots[::10])
            self.pointstore.flush()
            self._update_results(main_iterator, saved_logl, saved_nodeids)
            yield self.results
            # termination checks for the improvement loop
            if max_ncalls is not None and self.ncall >= max_ncalls:
                if self.log:
                    self.logger.info(
                        'Reached maximum number of likelihood calls (%d > %d)...',
                        self.ncall, max_ncalls)
                break
            improvement_it += 1
            if max_num_improvement_loops >= 0 and improvement_it > max_num_improvement_loops:
                if self.log:
                    self.logger.info('Reached maximum number of improvement loops.')
                break
            if ncall_at_run_start == self.ncall and improvement_it > 1:
                if self.log:
                    self.logger.info(
                        'No changes made. '
                        'Probably the strategy was to explore in the remainder, '
                        'but it is irrelevant already; try decreasing frac_remain.')
                break
            Lmax = main_iterator.Lmax
            if len(region_sequence) > 0:
                # the run was aborted early (cluster widening); add live points there
                Lmin, nlive, nclusters, Lhi = region_sequence[-1]
                nnodes_needed = cluster_num_live_points * nclusters
                if nlive < nnodes_needed:
                    Llo, _, target_min_num_children_new = self._expand_nodes_before(Lmin, nnodes_needed, update_interval_ncall or nlive)
                    target_min_num_children.update(target_min_num_children_new)
                    # if self.log:
                    #     print_tree(self.root.children[::10])
                    minimal_widths.append((Llo, Lhi, nnodes_needed))
                    Llo, Lhi = -np.inf, np.inf
                    continue
            if self.log:
                # self.logger.info(' logZ = %.4f +- %.4f (main)' % (main_iterator.logZ, main_iterator.logZerr))
                self.logger.info(' logZ = %.4g +- %.4g', main_iterator.logZ_bs, main_iterator.logZerr_bs)
            saved_logl = np.asarray(saved_logl)
            # reactive nested sampling: see where we have to improve
            dlogz_min_num_live_points, (Llo_KL, Lhi_KL), (Llo_ess, Lhi_ess) = self._find_strategy(
                saved_logl, main_iterator, dlogz=dlogz, dKL=dKL, min_ess=min_ess)
            Llo = min(Llo_ess, Llo_KL)
            Lhi = max(Lhi_ess, Lhi_KL)
            # to avoid numerical issues when all likelihood values are the same
            Lhi = min(Lhi, saved_logl.max() - 0.001)
            if self.use_mpi:
                # synchronise the strategy across all MPI ranks
                recv_Llo = self.comm.gather(Llo, root=0)
                recv_Llo = self.comm.bcast(recv_Llo, root=0)
                recv_Lhi = self.comm.gather(Lhi, root=0)
                recv_Lhi = self.comm.bcast(recv_Lhi, root=0)
                recv_dlogz_min_num_live_points = self.comm.gather(dlogz_min_num_live_points, root=0)
                recv_dlogz_min_num_live_points = self.comm.bcast(recv_dlogz_min_num_live_points, root=0)
                Llo = min(recv_Llo)
                Lhi = max(recv_Lhi)
                dlogz_min_num_live_points = max(recv_dlogz_min_num_live_points)
            if dlogz_min_num_live_points > self.min_num_live_points:
                # more live points needed throughout to reach target
                self.min_num_live_points = dlogz_min_num_live_points
                self._widen_roots_beyond_initial_plateau(
                    self.min_num_live_points,
                    widen_before_initial_plateau_num_warn,
                    widen_before_initial_plateau_num_max)
            elif Llo <= Lhi:
                # if self.log:
                #     print_tree(roots, title="Tree before forking:")
                parents, parent_weights = find_nodes_before(self.root, Llo)
                # double the width / live points:
                _, width = count_tree_between(self.root.children, Llo, Lhi)
                nnodes_needed = width * 2
                if self.log:
                    self.logger.info(
                        'Widening from %d to %d live points before L=%.1g...',
                        len(parents), nnodes_needed, Llo)
                if len(parents) == 0:
                    Llo = -np.inf
                else:
                    Llo = min(n.value for n in parents)
                self.pointstore.reset()
                target_min_num_children.update(self._widen_nodes(parents, parent_weights, nnodes_needed, update_interval_ncall))
                minimal_widths.append((Llo, Lhi, nnodes_needed))
                # if self.log:
                #     print_tree(roots, title="Tree after forking:")
                # print('tree size:', count_tree(roots))
            else:
                break
    def _update_results(self, main_iterator, saved_logl, saved_nodeids):
        """Combine the sampled points into results and write output files.

        Sets ``self.results`` (evidence, posterior, diagnostics) and
        ``self.run_sequence`` (per-iteration sequences, dynesty-compatible).
        If a log directory was configured, also writes the posterior sample
        files, results.json, post_summary.csv and run.txt to disk.

        Parameters
        ----------
        main_iterator: object
            integrator carrying the logZ/H accumulators of the finished pass
        saved_logl: list
            log-likelihood of each stored iteration
        saved_nodeids: list
            point pile id of each stored iteration
        """
        if self.log:
            self.logger.info('Likelihood function evaluations: %d', self.ncall)
        results = combine_results(
            saved_logl, saved_nodeids, self.pointpile,
            main_iterator, mpi_comm=self.comm if self.use_mpi else None)
        results['ncall'] = int(self.ncall)
        results['paramnames'] = self.paramnames + self.derivedparamnames
        # single-run evidence error estimate: sqrt(H / Nlive)
        results['logzerr_single'] = (main_iterator.all_H[0] / self.min_num_live_points)**0.5
        # replay the tree once more to obtain per-iteration sequences and
        # the insertion order convergence test
        sequence, results2 = logz_sequence(self.root, self.pointpile, random=True, check_insertion_order=True)
        results['insertion_order_MWW_test'] = results2['insertion_order_MWW_test']
        # results without the bulky sample arrays, for JSON serialisation
        results_simple = dict(results)
        weighted_samples = results_simple.pop('weighted_samples')
        samples = results_simple.pop('samples')
        saved_wt0 = weighted_samples['weights']
        saved_u = weighted_samples['upoints']
        saved_v = weighted_samples['points']
        if self.log_to_disk:
            if self.log:
                self.logger.info("Writing samples and results to disk ...")
            np.savetxt(os.path.join(self.logs['chains'], 'equal_weighted_post.txt'),
                       samples,
                       header=' '.join(self.paramnames + self.derivedparamnames),
                       comments='')
            np.savetxt(os.path.join(self.logs['chains'], 'weighted_post.txt'),
                       np.hstack((saved_wt0.reshape((-1, 1)), np.reshape(saved_logl, (-1, 1)), saved_v)),
                       header=' '.join(['weight', 'logl'] + self.paramnames + self.derivedparamnames),
                       comments='')
            np.savetxt(os.path.join(self.logs['chains'], 'weighted_post_untransformed.txt'),
                       np.hstack((saved_wt0.reshape((-1, 1)), np.reshape(saved_logl, (-1, 1)), saved_u)),
                       header=' '.join(['weight', 'logl'] + self.paramnames + self.derivedparamnames),
                       comments='')
            with open(os.path.join(self.logs['info'], 'results.json'), 'w') as f:
                json.dump(results_simple, f, indent=4)
            # one CSV row: mean, stdev, median, errlo, errup for every parameter
            np.savetxt(
                os.path.join(self.logs['info'], 'post_summary.csv'),
                [[results['posterior'][k][i] for i in range(self.num_params) for k in ('mean', 'stdev', 'median', 'errlo', 'errup')]],
                header=','.join(['"{0}_mean","{0}_stdev","{0}_median","{0}_errlo","{0}_errup"'.format(k)
                                 for k in self.paramnames + self.derivedparamnames]),
                delimiter=',', comments='',
            )
        if self.log_to_disk:
            keys = 'logz', 'logzerr', 'logvol', 'nlive', 'logl', 'logwt', 'insert_order'
            np.savetxt(os.path.join(self.logs['chains'], 'run.txt'),
                       np.hstack(tuple([np.reshape(sequence[k], (-1, 1)) for k in keys])),
                       header=' '.join(keys),
                       comments='')
        if self.log:
            self.logger.info("Writing samples and results to disk ... done")
        self.results = results
        self.run_sequence = sequence
def store_tree(self):
"""Store tree to disk (results/tree.hdf5)."""
if self.log_to_disk:
dump_tree(os.path.join(self.logs['results'], 'tree.hdf5'),
self.root.children, self.pointpile)
def print_results(self, use_unicode=True):
"""Give summary of marginal likelihood and parameter posteriors.
Parameters
----------
use_unicode: bool
Whether to print a unicode plot of the posterior distributions
"""
if self.log:
print()
print('logZ = %(logz).3f +- %(logzerr).3f' % self.results)
print(' single instance: logZ = %(logz_single).3f +- %(logzerr_single).3f' % self.results)
print(' bootstrapped : logZ = %(logz_bs).3f +- %(logzerr_bs).3f' % self.results)
print(' tail : logZ = +- %(logzerr_tail).3f' % self.results)
print('insert order U test : converged: %(converged)s correlation: %(independent_iterations)s iterations' % (
self.results['insertion_order_MWW_test']))
print()
for i, p in enumerate(self.paramnames + self.derivedparamnames):
v = self.results['samples'][:,i]
sigma = v.std()
med = v.mean()
if sigma == 0:
j = 3
else:
j = max(0, int(-np.floor(np.log10(sigma))) + 1)
fmt = '%%.%df' % j
try:
if not use_unicode:
raise UnicodeEncodeError("")
# make fancy terminal visualisation on a best-effort basis
' ▁▂▃▄▅▆▇██'.encode(sys.stdout.encoding)
H, edges = np.histogram(v, bins=40)
# add a bit of padding, but not outside parameter limits
lo, hi = edges[0], edges[-1]
step = edges[1] - lo
lo = max(self.transform_limits[i,0], lo - 2 * step)
hi = min(self.transform_limits[i,1], hi + 2 * step)
H, edges = np.histogram(v, bins=np.linspace(lo, hi, 40))
lo, hi = edges[0], edges[-1]
dist = ''.join([' ▁▂▃▄▅▆▇██'[i] for i in np.ceil(H * 7 / H.max()).astype(int)])
print(' %-20s: %-6s│%s│%-6s %s +- %s' % (p, fmt % lo, dist, fmt % hi, fmt % med, fmt % sigma))
except Exception:
fmts = ' %-20s' + fmt + " +- " + fmt
print(fmts % (p, med, sigma))
print()
def plot(self):
"""Make corner, run and trace plots.
calls:
* plot_corner()
* plot_run()
* plot_trace()
"""
self.plot_corner()
self.plot_run()
self.plot_trace()
def plot_corner(self):
"""Make corner plot.
Writes corner plot to plots/ directory if log directory was
specified, otherwise show interactively.
This does essentially::
from ultranest.plot import cornerplot
cornerplot(results)
"""
from .plot import cornerplot
import matplotlib.pyplot as plt
if self.log:
self.logger.debug('Making corner plot ...')
cornerplot(self.results, logger=self.logger if self.log else None)
if self.log_to_disk:
plt.savefig(os.path.join(self.logs['plots'], 'corner.pdf'), bbox_inches='tight')
plt.close()
self.logger.debug('Making corner plot ... done')
def plot_trace(self):
"""Make trace plot.
Write parameter trace diagnostic plots to plots/ directory
if log directory specified, otherwise show interactively.
This does essentially::
from ultranest.plot import traceplot
traceplot(results=results, labels=paramnames + derivedparamnames)
"""
from .plot import traceplot
import matplotlib.pyplot as plt
if self.log:
self.logger.debug('Making trace plot ... ')
paramnames = self.paramnames + self.derivedparamnames
# get dynesty-compatible sequences
traceplot(results=self.run_sequence, labels=paramnames)
if self.log_to_disk:
plt.savefig(os.path.join(self.logs['plots'], 'trace.pdf'), bbox_inches='tight')
plt.close()
self.logger.debug('Making trace plot ... done')
def plot_run(self):
"""Make run plot.
Write run diagnostic plots to plots/ directory
if log directory specified, otherwise show interactively.
This does essentially::
from ultranest.plot import runplot
runplot(results=results)
"""
from .plot import runplot
import matplotlib.pyplot as plt
if self.log:
self.logger.debug('Making run plot ... ')
# get dynesty-compatible sequences
runplot(results=self.run_sequence, logplot=True)
if self.log_to_disk:
plt.savefig(os.path.join(self.logs['plots'], 'run.pdf'), bbox_inches='tight')
plt.close()
self.logger.debug('Making run plot ... done')
def read_file(log_dir, x_dim, num_bootstraps=20, random=True, verbose=False, check_insertion_order=True):
    """
    Read the output HDF5 file of UltraNest.

    Parameters
    ----------
    log_dir: str
        Folder containing results
    x_dim: int
        number of dimensions
    num_bootstraps: int
        number of bootstraps to use for estimating logZ.
    random: bool
        use randomization for volume estimation.
    verbose: bool
        show progress
    check_insertion_order: bool
        whether to perform MWW insertion order test for assessing convergence

    Returns
    ----------
    sequence: dict
        contains arrays storing for each iteration estimates of:

            * logz: log evidence estimate
            * logzerr: log evidence uncertainty estimate
            * logvol: log volume estimate
            * samples_n: number of live points
            * logwt: log weight
            * logl: log likelihood

    final: dict
        same as ReactiveNestedSampler.results and
        ReactiveNestedSampler.run return values
    """
    import h5py
    filepath = os.path.join(log_dir, 'results', 'points.hdf5')
    fileobj = h5py.File(filepath, 'r')
    _, ncols = fileobj['points'].shape
    # row layout (inferred from the slicing below): col 0 = Lmin at birth,
    # col 1 = log-likelihood, col 2 unused here, then x_dim u-space
    # coordinates followed by num_params transformed parameters
    # -- TODO confirm against the point store writer.
    num_params = ncols - 3 - x_dim
    # load everything into memory, then release the file handle
    points = fileobj['points'][:]
    fileobj.close()
    del fileobj
    stack = list(enumerate(points))
    pointpile = PointPile(x_dim, num_params)

    def pop(Lmin):
        """Find matching sample from points file."""
        # look forward to see if there is an exact match
        # if we do not use the exact matches
        #   this causes a shift in the loglikelihoods
        for i, (idx, next_row) in enumerate(stack):
            row_Lmin = next_row[0]
            L = next_row[1]
            # a row matches if it was born at or below Lmin and improves on it
            if row_Lmin <= Lmin and L > Lmin:
                idx, row = stack.pop(i)
                return idx, row
        return None, None

    # rebuild the root live points: everything born at L=-inf
    roots = []
    while True:
        _, row = pop(-np.inf)
        if row is None:
            break
        logl = row[1]
        u = row[3:3 + x_dim]
        v = row[3 + x_dim:3 + x_dim + num_params]
        roots.append(pointpile.make_node(logl, u, v))
    root = TreeNode(id=-1, value=-np.inf, children=roots)

    def onNode(node, main_iterator):
        """Insert (single) child of node if available."""
        while True:
            _, row = pop(node.value)
            if row is None:
                break
            # NOTE(review): row is never None here (the break above already
            # handled that case), so this check is redundant -- kept as-is.
            if row is not None:
                logl = row[1]
                u = row[3:3 + x_dim]
                v = row[3 + x_dim:3 + x_dim + num_params]
                child = pointpile.make_node(logl, u, v)
                assert logl > node.value, (logl, node.value)
                main_iterator.Lmax = max(main_iterator.Lmax, logl)
                node.children.append(child)

    # replay the reconstructed tree to compute evidence and diagnostics
    return logz_sequence(root, pointpile, nbootstraps=num_bootstraps,
                         random=random, onNode=onNode, verbose=verbose,
                         check_insertion_order=check_insertion_order)
"""
Warm start
----------
Helper functions for deforming the parameter space to enable
a more efficient sampling.
Based on ideas from Petrosyan & Handley (2022, https://arxiv.org/abs/2212.01760).
"""
import numpy as np
import scipy.stats
from .utils import vectorize, resample_equal
def get_auxiliary_problem(loglike, transform, ctr, invcov, enlargement_factor, df=1):
"""Return a new loglike and transform based on an auxiliary distribution.
Given a likelihood and prior transform, and information about
the (expected) posterior peak, generates a auxiliary
likelihood and prior transform that is identical but
requires fewer nested sampling iterations.
This is achieved by deforming the prior space, and undoing that
transformation by correction weights in the likelihood.
The auxiliary distribution used for transformation/weighting is
a d-dimensional Student-t distribution.
Usage::
aux_loglikelihood, aux_aftertransform = get_auxiliary_problem(loglike, transform, ctr, invcov, enlargement_factor, df=1)
aux_sampler = ReactiveNestedSampler(parameters, aux_loglikelihood)
aux_results = aux_sampler.run()
posterior_samples = [aux_aftertransform(sample) for sample in aux_results['samples']]
Parameters
------------
loglike: function
original likelihood function
transform: function
original prior transform function
ctr: array
Posterior center (in u-space).
invcov: array
Covariance of the posterior (in u-space).
enlargement_factor: float
Factor by which the scale of the auxiliary distribution is enlarged
in all dimensions.
For Gaussian-like posteriors, sqrt(ndim) seems to work,
Heavier tailed or non-elliptical distributions may need larger factors.
df: float
Number of degrees of freedom of the auxiliary student-t distribution.
The default is recommended. For truly gaussian posteriors,
the student-t can be made more gaussian (by df>=30) for accelation.
Returns
---------
aux_loglike: function
auxiliary loglikelihood function.
aux_aftertransform: function
auxiliary transform function.
Takes d u-space coordinates, and returns d + 1 p-space parameters.
The first d return coordinates are identical to what ``transform`` would return.
The final coordinate is the correction weight.
"""
ndim, = ctr.shape
assert invcov.shape == (ndim, ndim)
assert df >= 1, ('Degrees of freedom must be above 1', df)
l, v = np.linalg.eigh(invcov)
rotation_matrix = np.dot(v, enlargement_factor * np.diag(1. / np.sqrt(l)))
rv_auxiliary1d = scipy.stats.t(df)
def aux_rotator(coords):
return ctr + np.dot(coords, rotation_matrix)
def aux_loglikelihood(u):
# get uniform gauss/t distributed values:
coords = rv_auxiliary1d.ppf(u)
# rotate & stretch; transform into physical parameters
x = aux_rotator(coords)
# avoid outside regions
if not (x > 0).all() or not (x < 1).all():
return -1e300
# undo the effect of the auxiliary distribution
loglike_total = rv_auxiliary1d.logpdf(coords).sum()
return loglike(transform(x)) - loglike_total
def aux_aftertransform(u):
return transform(aux_rotator(rv_auxiliary1d.ppf(u)))
return aux_loglikelihood, aux_aftertransform
def get_extended_auxiliary_problem(loglike, transform, ctr, invcov, enlargement_factor, df=1):
"""Return a new loglike and transform based on an auxiliary distribution.
Given a likelihood and prior transform, and information about
the (expected) posterior peak, generates a auxiliary
likelihood and prior transform that is identical but
requires fewer nested sampling iterations.
This is achieved by deforming the prior space, and undoing that
transformation by correction weights in the likelihood.
The auxiliary distribution used for transformation/weighting is
a d-dimensional Student-t distribution.
Parameters
------------
loglike: function
original likelihood function
transform: function
original prior transform function
ctr: array
Posterior center (in u-space).
invcov: array
Covariance of the posterior (in u-space).
enlargement_factor: float
Factor by which the scale of the auxiliary distribution is enlarged
in all dimensions.
For Gaussian-like posteriors, sqrt(ndim) seems to work,
Heavier tailed or non-elliptical distributions may need larger factors.
df: float
Number of degrees of freedom of the auxiliary student-t distribution.
The default is recommended. For truly gaussian posteriors,
the student-t can be made more gaussian (by df>=30) for accelation.
Returns
---------
aux_loglike: function
auxiliary loglikelihood function. Takes d + 1 parameters (see below).
The likelihood is the same as loglike, but adds weights.
aux_transform: function
auxiliary transform function.
Takes d u-space coordinates, and returns d + 1 p-space parameters.
The first d return coordinates are identical to what ``transform`` would return.
The final coordinate is the correction weight.
"""
ndim, = ctr.shape
assert invcov.shape == (ndim, ndim)
assert df >= 1, ('Degrees of freedom must be above 1', df)
l, v = np.linalg.eigh(invcov)
rotation_matrix = np.dot(v, enlargement_factor * np.diag(1. / np.sqrt(l)))
rv_auxiliary1d = scipy.stats.t(df)
weight_ref = rv_auxiliary1d.logpdf(0) * ndim
def aux_transform(u):
# get uniform gauss/t distributed values:
coords = rv_auxiliary1d.ppf(u)
# rotate & stretch; transform into physical parameters
x = ctr + np.dot(rotation_matrix, coords)
# avoid outside regions
if (x > 0).all() and (x < 1).all():
weight = -rv_auxiliary1d.logpdf(coords).sum() + weight_ref
else:
weight = -1e101
x = u * 0 + 0.5
# add weight as a additional parameter
return np.append(transform(x), weight)
def aux_loglikelihood(x):
x_actual = x[:-1]
weight = x[-1]
if -1e100 < weight < 1e100:
return loglike(x_actual) + weight - weight_ref
else:
return -1e300
return aux_loglikelihood, aux_transform
def get_extended_auxiliary_independent_problem(loglike, transform, ctr, err, df=1):
"""Return a new loglike and transform based on an auxiliary distribution.
Given a likelihood and prior transform, and information about
the (expected) posterior peak, generates a auxiliary
likelihood and prior transform that is identical but
requires fewer nested sampling iterations.
This is achieved by deforming the prior space, and undoing that
transformation by correction weights in the likelihood.
The auxiliary distribution used for transformation/weighting is
a independent Student-t distribution for each parameter.
Usage::
aux_loglikelihood, aux_transform = get_auxiliary_problem(loglike, transform, ctr, invcov, enlargement_factor, df=1)
aux_sampler = ReactiveNestedSampler(parameters, aux_loglikelihood, transform=aux_transform, derived_param_names=['logweight'])
aux_results = aux_sampler.run()
posterior_samples = aux_results['samples'][:,-1]
Parameters
------------
loglike: function
original likelihood function
transform: function
original prior transform function
ctr: array
Posterior center (in u-space).
err: array
Standard deviation around the posterior center (in u-space).
df: float
Number of degrees of freedom of the auxiliary student-t distribution.
The default is recommended. For truly gaussian posteriors,
the student-t can be made more gaussian (by df>=30) for accelation.
Returns
---------
aux_loglike: function
auxiliary loglikelihood function.
aux_transform: function
auxiliary transform function.
Takes d u-space coordinates, and returns d + 1 p-space parameters.
The first d return coordinates are identical to what ``transform`` would return.
The final coordinate is the log of the correction weight.
"""
ndim, = np.shape(ctr)
assert np.shape(err) == (ndim,)
assert df >= 1, ('Degrees of freedom must be above 1', df)
rv_aux = scipy.stats.t(df, ctr, err)
# handle the case where the aux distribution extends beyond the unit cube
aux_lo = rv_aux.cdf(0)
aux_hi = rv_aux.cdf(1)
aux_w = aux_hi - aux_lo
weight_ref = rv_aux.logpdf(ctr).sum()
def aux_transform(u):
# get uniform gauss/t distributed values:
x = rv_aux.ppf(u * aux_w + aux_lo)
weight = -rv_aux.logpdf(x).sum() + weight_ref
return np.append(transform(x), weight)
def aux_loglikelihood(x):
x_actual = x[:-1]
weight = x[-1]
if -1e100 < weight < 1e100:
return loglike(x_actual) + weight - weight_ref
else:
return -1e300
return aux_loglikelihood, aux_transform
def compute_quantile_intervals(steps, upoints, uweights):
    """Compute lower and upper axis quantiles.

    Parameters
    ------------
    steps: array
        list of quantiles q to compute.
    upoints: array
        samples, with dimensions (N, d)
    uweights: array
        sample weights (cumulative weight is compared against q directly,
        so weights are expected to sum to 1).

    Returns
    ---------
    ulo: array
        list of lower quantiles (at q), shape (len(steps) + 1, d);
        the final row is 0 (full unit cube).
    uhi: array
        list of upper quantiles (at 1-q), shape (len(steps) + 1, d);
        the final row is 1 (full unit cube).
    """
    ndim = upoints.shape[1]
    nboxes = len(steps)
    ulos = np.empty((nboxes + 1, ndim))
    uhis = np.empty((nboxes + 1, ndim))
    for i, ui in enumerate(upoints.transpose()):
        # sort order and cumulative weights do not depend on the quantile
        # level, so compute them once per dimension instead of once per
        # (level, dimension) pair
        order = np.argsort(ui)
        ui_sorted = ui[order]
        c = np.cumsum(uweights[order])
        for j, pthresh in enumerate(steps):
            # keep the samples between the q and 1-q cumulative weight
            # (assumes q is small enough that the selection is non-empty)
            usel = ui_sorted[np.logical_and(c >= pthresh, c <= 1 - pthresh)]
            ulos[j,i] = usel.min()
            uhis[j,i] = usel.max()
    # last row: the entire unit cube
    ulos[-1] = 0
    uhis[-1] = 1
    return ulos, uhis
def compute_quantile_intervals_refined(steps, upoints, uweights, logsteps_max=20):
    """Compute lower and upper axis quantiles, with refined interpolation.

    Builds on :func:`compute_quantile_intervals`, additionally injecting
    logarithmically spaced boxes between the widest quantile box and the
    full unit cube, so interpolating between rows does not jump abruptly
    from a narrow box to the entire cube.

    Parameters
    ------------
    steps: array
        list of quantiles q to compute.
    upoints: array
        samples, with dimensions (N, d)
    uweights: array
        sample weights. N entries.
    logsteps_max: int
        maximum number of intermediate steps to inject between largest
        quantiles interval and full unit cube

    Returns
    ---------
    ulo: array
        list of lower quantiles (at `q`), of shape (M, d), one entry per quantile and dimension d.
    uhi: array
        list of upper quantiles (at 1-`q`), of shape (M, d), one entry per quantile and dimension d.
    uinterpspace: array
        interpolation positions in [0, 1] for the M rows of ulo/uhi
        (length of `steps` plus the number of injected rows).
    """
    nboxes = len(steps)
    ulos_orig, uhis_orig = compute_quantile_intervals(steps, upoints, uweights)
    assert len(ulos_orig) == nboxes + 1
    assert len(uhis_orig) == nboxes + 1
    # narrowest axis of the widest quantile box; the [-1] row is the
    # full unit cube, so the widest real box is at [-2]
    smallest_axis_width = np.min(uhis_orig[-2,:] - ulos_orig[-2,:])
    # number of logarithmic expansion steps needed to bridge from that
    # width up to the full cube, capped at logsteps_max
    logsteps = min(logsteps_max, int(np.ceil(-np.log10(max(1e-100, smallest_axis_width)))))
    weights = np.logspace(-logsteps, 0, logsteps + 1).reshape((-1, 1))
    assert len(weights) == logsteps + 1, (weights.shape, logsteps)
    # blend the widest quantile box (weight 0) with the unit cube (weight 1)
    ulos_new = ulos_orig[nboxes - 1, :].reshape((1, -1)) * (1 - weights) + 0 * weights
    uhis_new = uhis_orig[nboxes - 1, :].reshape((1, -1)) * (1 - weights) + 1 * weights
    # replace the plain unit-cube row with the refined expansion rows
    ulos = np.vstack((ulos_orig[:-1,:], ulos_new))
    uhis = np.vstack((uhis_orig[:-1,:], uhis_new))
    assert (ulos[-1,:] == 0).all()
    assert (uhis[-1,:] == 1).all()
    # interpolation coordinates: equal spacing across the quantile boxes,
    # then subdivide the last segment across the injected expansion rows
    uinterpspace = np.ones(nboxes + logsteps + 1)
    uinterpspace[:nboxes + 1] = np.linspace(0, 1, nboxes + 1)
    # report the tested value on failure (previously printed
    # uinterpspace[nboxes], which is always 1 and thus uninformative)
    assert 0 < uinterpspace[nboxes - 1] < 1, uinterpspace[nboxes - 1]
    uinterpspace[nboxes:] = np.linspace(uinterpspace[nboxes - 1], 1, logsteps + 2)[1:]
    return ulos, uhis, uinterpspace
def get_auxiliary_contbox_parameterization(
    param_names, loglike, transform, upoints, uweights, vectorized=False,
):
    """Return a new loglike and transform based on an auxiliary distribution.

    Given a likelihood and prior transform, and information about
    the (expected) posterior peak, generates an auxiliary
    likelihood and prior transform that is identical but
    requires fewer nested sampling iterations.

    This is achieved by deforming the prior space, and undoing that
    transformation by correction weights in the likelihood.
    An additional parameter, "aux_logweight", is added at the end,
    which contains the correction weight. You can ignore it.

    The auxiliary distribution used for transformation/weighting is
    factorized. Each axis considers the ECDF of the auxiliary samples,
    and segments it into quantile segments. Within each segment,
    the parameter edges in u-space are linearly interpolated.
    To see the interpolation quantiles for each axis, use::

        steps = 10**-(1.0 * np.arange(1, 8, 2))
        ulos, uhis, uinterpspace = compute_quantile_intervals_refined(steps, upoints, uweights)

    Parameters
    ------------
    param_names: list
        parameter names
    loglike: function
        original likelihood function
    transform: function
        original prior transform function
    upoints: array
        Posterior samples (in u-space).
    uweights: array
        Weights of samples (needs to sum to 1)
    vectorized: bool
        whether the loglike & transform functions are vectorized

    Returns
    ---------
    aux_param_names: list
        new parameter names (`param_names`) plus additional 'aux_logweight'
    aux_loglike: function
        auxiliary loglikelihood function.
    aux_transform: function
        auxiliary transform function.
        Takes d + 1 u-space coordinates, and returns d + 1 p-space parameters.
        The first d return coordinates are identical to what ``transform`` would return.
        The final coordinate is the log of the correction weight.
    vectorized: bool
        whether the returned functions are vectorized

    Usage
    ------
    ::

        aux_param_names, aux_loglikelihood, aux_transform, vectorized = get_auxiliary_contbox_parameterization(
            param_names, loglike, transform, upoints, uweights)
        aux_sampler = ReactiveNestedSampler(aux_param_names, aux_loglikelihood, transform=aux_transform)
        aux_results = aux_sampler.run()
    """
    upoints = np.asarray(upoints)
    assert upoints.ndim == 2, ('expected 2d array for upoints, got shape: %s' % upoints.shape)
    mask = np.logical_and(upoints > 0, upoints < 1).all(axis=1)
    assert np.all(mask), (
        'upoints must be between 0 and 1, have:', upoints[~mask,:])
    # quantile levels 1e-1, 1e-3, 1e-5, 1e-7 for the nested boxes
    steps = 10**-(1.0 * np.arange(1, 8, 2))
    nsamples, ndim = upoints.shape
    assert nsamples > 10
    ulos, uhis, uinterpspace = compute_quantile_intervals_refined(steps, upoints, uweights)
    aux_param_names = param_names + ['aux_logweight']
    def aux_transform(u):
        # the extra (last) coordinate u[-1] selects the box size;
        # the first d coordinates are mapped linearly into that box
        ndim2, = u.shape
        assert ndim2 == ndim + 1
        umod = np.empty(ndim)
        log_aux_volume_factors = 0
        for i in range(ndim):
            ulo_here = np.interp(u[-1], uinterpspace, ulos[:,i])
            uhi_here = np.interp(u[-1], uinterpspace, uhis[:,i])
            umod[i] = ulo_here + (uhi_here - ulo_here) * u[i]
            # accumulate the log-volume of the chosen box as the correction weight
            log_aux_volume_factors += np.log(uhi_here - ulo_here)
        return np.append(transform(umod), log_aux_volume_factors)
    def aux_transform_vectorized(u):
        # vectorized variant of aux_transform; u has shape (nsamples, d + 1)
        nsamples, ndim2 = u.shape
        assert ndim2 == ndim + 1
        umod = np.empty((nsamples, ndim2 - 1))
        log_aux_volume_factors = np.zeros((nsamples, 1))
        for i in range(ndim):
            ulo_here = np.interp(u[:,-1], uinterpspace, ulos[:,i])
            uhi_here = np.interp(u[:,-1], uinterpspace, uhis[:,i])
            umod[:,i] = ulo_here + (uhi_here - ulo_here) * u[:,i]
            log_aux_volume_factors[:,0] += np.log(uhi_here - ulo_here)
        return np.hstack((transform(umod), log_aux_volume_factors))
    def aux_loglikelihood(x):
        x_actual = x[:-1]
        logl = loglike(x_actual)
        aux_logweight = x[-1]
        # downweight if we are in the auxiliary distribution
        return logl + aux_logweight
    def aux_loglikelihood_vectorized(x):
        x_actual = x[:,:-1]
        logl = loglike(x_actual)
        aux_logweight = x[:,-1]
        # downweight if we are in the auxiliary distribution
        return logl + aux_logweight
    if vectorized:
        return aux_param_names, aux_loglikelihood_vectorized, aux_transform_vectorized, vectorized
    else:
        return aux_param_names, aux_loglikelihood, aux_transform, vectorized
def reuse_samples(
    param_names, loglike, points, logl, logw=None,
    logz=0.0, logzerr=0.0, upoints=None,
    batchsize=128, vectorized=False, log_weight_threshold=-10,
    **kwargs
):
    """
    Reweight existing nested sampling run onto a new loglikelihood.

    Parameters
    ------------
    param_names: list of strings
        Names of the parameters
    loglike: function
        New likelihood function
    points: np.array of shape (npoints, ndim)
        Equally weighted (unless logw is passed) posterior points
    logl: np.array(npoints)
        Previous likelihood values of points
    logw: np.array(npoints)
        Log-weights of existing points.
    logz: float
        Previous evidence / marginal likelihood value.
    logzerr: float
        Previous evidence / marginal likelihood uncertainty.
    upoints: np.array of shape (npoints, ndim)
        Posterior points before transformation.
    vectorized: bool
        Whether loglike function is vectorized
    batchsize: int
        Number of points simultaneously passed to vectorized loglike function
    log_weight_threshold: float
        Lowest log-weight to consider; once all new weights in a batch fall
        below the current maximum by more than this, remaining points are skipped.

    Returns
    ---------
    results: dict
        All information of the run. Important keys:
        Number of nested sampling iterations (niter),
        Evidence estimate (logz),
        Effective Sample Size (ess),
        weighted samples (weighted_samples),
        equally weighted samples (samples),
        best-fit point information (maximum_likelihood),
        posterior summaries (posterior).
    """
    if not vectorized:
        loglike = vectorize(loglike)
    Npoints, ndim = points.shape
    if logw is None:
        # assume equally distributed if no weights given
        logw = np.zeros(Npoints) - np.log(Npoints)
    # new likelihoods / weights start at -inf (i.e., zero weight until evaluated)
    logl_new = np.zeros(Npoints) - np.inf
    logw_new = np.zeros(Npoints) - np.inf
    assert logl.shape == (Npoints,)
    assert logw.shape == (Npoints,)
    # process points, highest weight first:
    indices = np.argsort(logl + logw)[::-1]
    ncall = 0
    for i in range(int(np.ceil(Npoints / batchsize))):
        batch = indices[i * batchsize:(i + 1) * batchsize]
        logl_new[batch] = loglike(points[batch,:])
        logw_new[batch] = logw[batch] + logl_new[batch]
        ncall += len(batch)
        # early termination: if the entire batch is negligible compared to
        # the heaviest point seen so far, later (even lighter) batches will be too
        if (logw_new[batch] < np.nanmax(logw_new) - np.log(Npoints) + log_weight_threshold).all():
            print("skipping", i)
            break
    # normalise weights in a numerically safe way (subtract maximum before exp)
    logw_new0 = logw_new.max()
    w = np.exp(logw_new - logw_new0)
    # NOTE(review): leftover debug output; consider removing or routing to a logger
    print("weights:", w)
    logz_new = np.log(w.sum()) + logw_new0
    w /= w.sum()
    # Kish-style effective sample size of the reweighted points
    ess = len(w) / (1.0 + ((len(w) * w - 1)**2).sum() / len(w))
    integral_uncertainty_estimator = (((w - 1 / Npoints)**2).sum() / (Npoints - 1))**0.5
    logzerr_new = np.log(1 + integral_uncertainty_estimator)
    # combine new reweighting uncertainty with the previous run's logzerr in quadrature
    logzerr_new_total = (logzerr_new**2 + logzerr**2)**0.5
    samples = resample_equal(points, w)
    information_gain_bits = []
    for i in range(ndim):
        # NOTE(review): histogram bins assume points lie in [0, 1] per axis — confirm for transformed points
        H, _ = np.histogram(points[:,i], weights=w, density=True, bins=np.linspace(0, 1, 40))
        information_gain_bits.append(float((np.log2(1 / ((H + 0.001) * 40)) / 40).sum()))
    j = logl_new.argmax()
    return dict(
        ncall=ncall,
        niter=Npoints,
        logz=logz_new, logzerr=logzerr_new_total,
        ess=ess,
        posterior=dict(
            mean=samples.mean(axis=0).tolist(),
            stdev=samples.std(axis=0).tolist(),
            median=np.percentile(samples, 50, axis=0).tolist(),
            errlo=np.percentile(samples, 15.8655, axis=0).tolist(),
            errup=np.percentile(samples, 84.1345, axis=0).tolist(),
            information_gain_bits=information_gain_bits,
        ),
        weighted_samples=dict(
            upoints=upoints, points=points, weights=w, logw=logw,
            logl=logl_new),
        samples=samples,
        maximum_likelihood=dict(
            logl=logl_new[j],
            point=points[j,:].tolist(),
            point_untransformed=upoints[j,:].tolist() if upoints is not None else None,
        ),
        param_names=param_names,
    )
| 21,414 | 36.243478 | 134 | py |
UltraNest | UltraNest-master/ultranest/pathsampler.py | """MCMC-like step sampling on a trajectory
These features are experimental.
"""
import numpy as np
import matplotlib.pyplot as plt
from ultranest.samplingpath import SamplingPath, ContourSamplingPath, extrapolate_ahead
from ultranest.stepsampler import StepSampler
from ultranest.stepsampler import generate_region_oriented_direction, generate_region_random_direction, generate_random_direction
from ultranest.flatnuts import ClockedStepSampler, ClockedBisectSampler, ClockedNUTSSampler
from ultranest.flatnuts import SingleJumper, DirectJumper, IntervalJumper
class SamplingPathSliceSampler(StepSampler):
    """Slice sampler, respecting the region, on the sampling path.

    This first builds up a complete trajectory, respecting reflections.
    Then, from the trajectory a new point is drawn with slice sampling.
    The trajectory is built by doubling the length to each side and
    checking if the point is still inside. If not, reflection is
    attempted with the gradient (either provided or region-based estimate).
    """
    def __init__(self, nsteps):
        """Initialise sampler.

        Parameters
        -----------
        nsteps: int
            number of accepted steps until the sample is considered independent.
        """
        StepSampler.__init__(self, nsteps=nsteps)
        # (left, right, mid): integer offsets along the current path;
        # None means a fresh path/interval is built on the next move()
        self.interval = None
        self.path = None
    def generate_direction(self, ui, region, scale=1):
        """Choose new initial direction according to region.transformLayer axes."""
        return generate_region_oriented_direction(ui, region, tscale=1, scale=scale)
    def adjust_accept(self, accepted, unew, pnew, Lnew, nc):
        """Adjust proposal given that we have been *accepted* at a new point after *nc* calls."""
        if accepted:
            # start with a new interval next time
            self.interval = None
            self.last = unew, Lnew
            self.history.append((unew, Lnew))
        else:
            self.nrejects += 1
            # continue on current interval
            pass
        self.logstat.append([accepted, self.scale])
    def adjust_outside_region(self):
        """Adjust proposal given that we have stepped out of region."""
        self.logstat.append([False, self.scale])
    def move(self, ui, region, ndraw=1, plot=False):
        """Advance by slice sampling on the path."""
        if self.interval is None:
            # build a fresh path through ui and bracket it by doubling
            v = self.generate_direction(ui, region, scale=self.scale)
            self.path = ContourSamplingPath(
                SamplingPath(ui, v, 0.0), region)
            if not (ui > 0).all() or not (ui < 1).all() or not region.inside(ui.reshape((1, -1))):
                assert False, ui
            # unit hypercube diagonal gives a reasonable maximum path length
            maxlength = len(ui)**0.5
            # expand direction until it is surely outside
            left = -1
            right = +1
            while abs(left * self.scale) < maxlength:
                xj, vj = self.path.extrapolate(left)
                if not (xj > 0).all() or not (xj < 1).all() or not region.inside(xj.reshape((1, -1))):
                    break
                # self.path.add(left, xj, vj, 0.0)
                left *= 2
            while abs(right * self.scale) < maxlength:
                xj, _ = self.path.extrapolate(right)
                if not (xj > 0).all() or not (xj < 1).all() or not region.inside(xj.reshape((1, -1))):
                    break
                # self.path.add(right, xj, vj, 0.0)
                right *= 2
            scale = max(-left, right)
            # print("scale %f gave %d %d " % (self.scale, left, right))
            # very short bracket: shrink the step scale for next time
            if scale < 5:
                self.scale /= 1.1
            # if scale > 100:
            #     self.scale *= 1.1
            assert self.scale > 1e-10, self.scale
            self.interval = (left, right, None)
        else:
            left, right, mid = self.interval
            # we rejected mid, and shrink corresponding side
            if mid < 0:
                left = mid
            elif mid > 0:
                right = mid
        # shrink direction if outside
        while True:
            # slice sampling: draw an index inside the bracket, shrink on rejection
            mid = np.random.randint(left, right + 1)
            # print("interpolating %d - %d - %d" % (left, mid, right),
            #     self.path.points)
            if mid == 0:
                _, xj, _, _ = self.path.points[0]
            else:
                xj, _ = self.path.extrapolate(mid)
            if region.inside(xj.reshape((1, -1))):
                self.interval = (left, right, mid)
                return xj.reshape((1, -1))
            else:
                if mid < 0:
                    left = mid
                else:
                    right = mid
                self.interval = (left, right, mid)
class SamplingPathStepSampler(StepSampler):
    """Step sampler on a sampling path.

    Walks in integer steps along a straight path through the current
    point, reflecting at the region boundary via the path gradient.
    After ``nsteps`` steps a new direction is drawn; after ``nresets``
    such paths the current point is returned as the next sample.
    """
    def __init__(self, nresets, nsteps, scale=1.0, balance=0.01, nudge=1.1, log=False):
        """Initialise sampler.

        Parameters
        ------------
        nresets: int
            after this many iterations, select a new direction
        nsteps: int
            how many steps to make in total
        scale: float
            initial step size
        balance: float
            acceptance rate to target
            if below, scale is increased, if above, scale is decreased
        nudge: float
            factor for increasing scale (must be >=1)
            nudge=1 implies no step size adaptation.
        """
        StepSampler.__init__(self, nsteps=nsteps)
        # self.lasti = None
        self.path = None
        self.nresets = nresets
        # initial step scale in transformed space
        self.scale = scale
        # fraction of times a reject is expected
        self.balance = balance
        # relative increase in step scale
        self.nudge = nudge
        assert nudge >= 1
        self.log = log
        self.grad_function = None
        self.istep = 0
        self.iresets = 0
        self.start()
        self.terminate_path()
        self.logstat_labels = ['acceptance rate', 'reflection rate', 'scale', 'nstuck']
    def __str__(self):
        """Get string representation."""
        return '%s(nsteps=%d, nresets=%d, AR=%d%%)' % (
            type(self).__name__, self.nsteps, self.nresets, (1 - self.balance) * 100)
    def start(self):
        """Start sampler, reset all counters."""
        # flush accumulated statistics from the previous sample first
        if hasattr(self, 'naccepts') and self.nrejects + self.naccepts > 0:
            self.logstat.append([
                self.naccepts / (self.nrejects + self.naccepts),
                self.nreflects / (self.nreflects + self.nrejects + self.naccepts),
                self.scale, self.nstuck])
        self.nrejects = 0
        self.naccepts = 0
        self.nreflects = 0
        self.nstuck = 0
        self.istep = 0
        self.iresets = 0
        self.noutside_regions = 0
        self.last = None, None
        self.history = []
        self.direction = +1
        self.deadends = set()
        self.path = None
    def start_path(self, ui, region):
        """Start new trajectory path."""
        # print("new direction:", self.scale, self.noutside_regions, self.nrejects, self.naccepts)
        v = self.generate_direction(ui, region, scale=self.scale)
        assert (v**2).sum() > 0, (v, self.scale)
        assert region.inside(ui.reshape((1, -1))).all(), ui
        self.path = ContourSamplingPath(SamplingPath(ui, v, 0.0), region)
        if self.grad_function is not None:
            self.path.gradient = self.grad_function
        if not (ui > 0).all() or not (ui < 1).all() or not region.inside(ui.reshape((1, -1))):
            assert False, ui
        self.direction = +1
        self.lasti = 0
        # cache maps path index -> (accepted, point, loglikelihood)
        self.cache = {0: (True, ui, self.last[1])}
        self.deadends = set()
        # self.iresets += 1
        if self.log:
            print()
            print("starting new direction", v, 'from', ui)
    def terminate_path(self):
        """Terminate current path, and reset path counting variable."""
        # check if we went anywhere:
        if -1 in self.deadends and +1 in self.deadends:
            # both neighbours of the start are dead ends: we are stuck
            # self.scale /= self.nudge
            self.nstuck += 1
        # self.nrejects = 0
        # self.naccepts = 0
        # self.istep = 0
        # self.noutside_regions = 0
        self.direction = +1
        self.deadends = set()
        self.path = None
        self.iresets += 1
        if self.log:
            print("reset %d" % self.iresets)
    def set_gradient(self, grad_function):
        """Set gradient function."""
        print("set gradient function to %s" % grad_function.__name__)
        def plot_gradient_wrapper(x, plot=False):
            """wrapper that makes plots (when desired)"""
            v = grad_function(x)
            if plot:
                plt.plot(x[0], x[1], '+ ', color='k', ms=10)
                plt.plot([x[0], v[0] * 1e-2 + x[0]],
                         [x[1], v[1] * 1e-2 + x[1]], color='gray')
            return v
        self.grad_function = plot_gradient_wrapper
    def generate_direction(self, ui, region, scale):
        """Choose a random axis from region.transformLayer."""
        return generate_region_random_direction(ui, region, scale=scale)
        # return generate_random_direction(ui, region, scale=scale)
    def adjust_accept(self, accepted, unew, pnew, Lnew, nc):
        """Adjust proposal given that we have been *accepted* at a new point after *nc* calls."""
        self.cache[self.nexti] = (accepted, unew, Lnew)
        if accepted:
            # start at new point next time
            self.lasti = self.nexti
            self.last = unew, Lnew
            self.history.append((unew, Lnew))
            self.naccepts += 1
        else:
            # continue on current point, do not update self.last
            self.nrejects += 1
            self.history.append((unew, Lnew))
        assert self.scale > 1e-10, (self.scale, self.istep, self.nrejects)
    def adjust_outside_region(self):
        """Adjust proposal given that we landed outside region."""
        self.noutside_regions += 1
        self.nrejects += 1
    def adjust_scale(self, maxlength):
        """Adjust scale, but not above maxlength."""
        # print("%2d | %2d | %2d | %2d %2d %2d %2d | %f" % (self.iresets, self.istep,
        #     len(self.history), self.naccepts, self.nrejects,
        #     self.noutside_regions, self.nstuck, self.scale))
        assert len(self.history) > 1
        if self.naccepts < (self.nrejects + self.naccepts) * self.balance:
            # acceptance rate below target: take smaller steps
            if self.log:
                print("adjusting scale %f down: istep=%d inside=%d outside=%d region=%d nstuck=%d" % (
                    self.scale, len(self.history), self.naccepts, self.nrejects, self.noutside_regions, self.nstuck))
            self.scale /= self.nudge
        else:
            if self.scale < maxlength or True:
                if self.log:
                    print("adjusting scale %f up: istep=%d inside=%d outside=%d region=%d nstuck=%d" % (
                        self.scale, len(self.history), self.naccepts, self.nrejects, self.noutside_regions, self.nstuck))
                self.scale *= self.nudge
        assert self.scale > 1e-10, self.scale
    def movei(self, ui, region, ndraw=1, plot=False):
        """Make a move and return the proposed index."""
        if self.path is not None:
            if self.lasti - 1 in self.deadends and self.lasti + 1 in self.deadends:
                # stuck, cannot go anywhere. Stay.
                self.nexti = self.lasti
                return self.nexti
        if self.path is None:
            self.start_path(ui, region)
        assert not (self.lasti - 1 in self.deadends and self.lasti + 1 in self.deadends), \
            (self.deadends, self.lasti)
        # turn around if the next index in the current direction is a dead end
        if self.lasti + self.direction in self.deadends:
            self.direction *= -1
        self.nexti = self.lasti + self.direction
        # print("movei", self.nexti)
        # self.nexti = self.lasti + np.random.randint(0, 2) * 2 - 1
        return self.nexti
    def move(self, ui, region, ndraw=1, plot=False):
        """Advance move."""
        u, v = self.get_point(self.movei(ui, region=region, ndraw=ndraw, plot=plot))
        return u.reshape((1, -1))
    def reflect(self, reflpoint, v, region, plot=False):
        """Reflect at *reflpoint* going in direction *v*. Return new direction."""
        normal = self.path.gradient(reflpoint, plot=plot)
        if normal is None:
            # no gradient available: simply reverse
            return -v
        return v - 2 * (normal * v).sum() * normal
    def get_point(self, inew):
        """Get point corresponding to index *inew*."""
        ipoints = [(u, v) for i, u, p, v in self.path.points if i == inew]
        if len(ipoints) == 0:
            # print("getting point %d" % inew, self.path.points) #, "->", self.path.extrapolate(self.nexti))
            return self.path.extrapolate(inew)
        else:
            return ipoints[0]
    def __next__(self, region, Lmin, us, Ls, transform, loglike, ndraw=40, plot=False):
        """Get next point.

        Parameters
        ----------
        region: MLFriends
            region.
        Lmin: float
            loglikelihood threshold
        us: array of vectors
            current live points
        Ls: array of floats
            current live point likelihoods
        transform: function
            transform function
        loglike: function
            loglikelihood function
        ndraw: int
            number of draws to attempt simultaneously.
        plot: bool
            whether to produce debug plots.
        """
        # find most recent point in history conforming to current Lmin
        ui, Li = self.last
        if Li is not None and not Li >= Lmin:
            if self.log:
                print("wandered out of L constraint; resetting", ui[0])
            ui, Li = None, None
        if Li is not None and not region.inside(ui.reshape((1,-1))):
            # region was updated and we are not inside anymore
            # so reset
            if self.log:
                print("region change; resetting")
            ui, Li = None, None
        if Li is None and self.history:
            # try to resume from a previous point above the current contour
            for uj, Lj in self.history[::-1]:
                if Lj >= Lmin and region.inside(uj.reshape((1,-1))):
                    ui, Li = uj, Lj
                    if self.log:
                        print("recovered using history", ui)
                    break
        # select starting point
        if Li is None:
            # choose a new random starting point
            mask = region.inside(us)
            assert mask.any(), (
                "None of the live points satisfies the current region!",
                region.maxradiussq, region.u, region.unormed, us)
            i = np.random.randint(mask.sum())
            self.starti = i
            ui = us[mask,:][i]
            if self.log:
                print("starting at", ui)
            assert np.logical_and(ui > 0, ui < 1).all(), ui
            Li = Ls[mask][i]
            self.start()
            self.history.append((ui, Li))
            self.last = (ui, Li)
        inew = self.movei(ui, region, ndraw=ndraw)
        if self.log:
            print("i: %d->%d (step %d)" % (self.lasti, inew, self.istep))
        # uold, _ = self.get_point(self.lasti)
        _, uold, Lold = self.cache[self.lasti]
        if plot:
            plt.plot(uold[0], uold[1], 'd', color='brown', ms=4)
        # default return: stay at the current point
        uret, pret, Lret = uold, transform(uold), Lold
        nc = 0
        if inew != self.lasti:
            accept = False
            if inew not in self.cache:
                unew, _ = self.get_point(inew)
                if plot:
                    plt.plot(unew[0], unew[1], 'x', color='k', ms=4)
                accept = np.logical_and(unew > 0, unew < 1).all() and region.inside(unew.reshape((1, -1)))
                if accept:
                    if plot:
                        plt.plot(unew[0], unew[1], '+', color='orange', ms=4)
                    pnew = transform(unew)
                    Lnew = loglike(pnew.reshape((1, -1)))
                    nc = 1
                else:
                    Lnew = -np.inf
                    if self.log:
                        print("outside region: ", unew, "from", ui)
                    self.deadends.add(inew)
                    self.adjust_outside_region()
            else:
                _, unew, Lnew = self.cache[self.nexti]
            # if plot:
            #     plt.plot(unew[0], unew[1], 's', color='r', ms=2)
            if self.log:
                print(" suggested point:", unew)
            pnew = transform(unew)
            if Lnew >= Lmin:
                if self.log:
                    print(" -> inside.")
                if plot:
                    plt.plot(unew[0], unew[1], 'o', color='g', ms=4)
                self.adjust_accept(True, unew, pnew, Lnew, nc)
                uret, pret, Lret = unew, pnew, Lnew
            else:
                if plot:
                    plt.plot(unew[0], unew[1], '+', color='k', ms=2, alpha=0.3)
                if self.log:
                    print(" -> outside.")
                jump_successful = False
                if inew not in self.cache and inew not in self.deadends:
                    # first time we try to go beyond
                    # try to reflect:
                    reflpoint, v = self.get_point(inew)
                    if self.log:
                        print(" trying to reflect at", reflpoint)
                    self.nreflects += 1
                    sign = -1 if inew < 0 else +1
                    vnew = self.reflect(reflpoint, v * sign, region=region) * sign
                    xk, vk = extrapolate_ahead(sign, reflpoint, vnew, contourpath=self.path)
                    if plot:
                        plt.plot([reflpoint[0], (-v + reflpoint)[0]], [reflpoint[1], (-v + reflpoint)[1]], '-', color='k', lw=0.5, alpha=0.5)
                        plt.plot([reflpoint[0], (vnew + reflpoint)[0]], [reflpoint[1], (vnew + reflpoint)[1]], '-', color='k', lw=1)
                    if self.log:
                        print(" trying", xk)
                    accept = np.logical_and(xk > 0, xk < 1).all() and region.inside(xk.reshape((1, -1)))
                    if accept:
                        pk = transform(xk)
                        Lk = loglike(pk.reshape((1, -1)))[0]
                        nc += 1
                        if Lk >= Lmin:
                            jump_successful = True
                            uret, pret, Lret = xk, pk, Lk
                            if self.log:
                                print("successful reflect!")
                            self.path.add(inew, xk, vk, Lk)
                            self.adjust_accept(True, xk, pk, Lk, nc)
                        else:
                            if self.log:
                                print("unsuccessful reflect")
                            self.adjust_accept(False, xk, pk, Lk, nc)
                    else:
                        if self.log:
                            print("unsuccessful reflect out of region")
                        self.adjust_outside_region()
                    if plot:
                        plt.plot(xk[0], xk[1], 'x', color='g' if jump_successful else 'r', ms=8)
                    if not jump_successful:
                        # unsuccessful. mark as deadend
                        self.deadends.add(inew)
                        # print("deadends:", self.deadends)
                else:
                    self.adjust_accept(False, uret, pret, Lret, nc)
                # self.adjust_accept(False, unew, pnew, Lnew, nc)
            assert inew in self.cache or inew in self.deadends, (inew in self.cache, inew in self.deadends)
        else:
            # stuck, proposal did not move us
            self.nstuck += 1
            self.adjust_accept(False, uret, pret, Lret, nc)
        # increase step count
        self.istep += 1
        if self.istep == self.nsteps:
            if self.log:
                print("triggering re-orientation")
            # reset path so we go in a new direction
            self.terminate_path()
            self.istep = 0
        # if had enough resets, return final point
        if self.iresets >= self.nresets:
            if self.log:
                print("walked %d paths; returning sample" % self.iresets)
            self.adjust_scale(maxlength=len(uret)**0.5)
            self.start()
            self.last = None, None
            return uret, pret, Lret, nc
        # do not have a independent sample yet
        return None, None, None, nc
class OtherSamplerProxy(object):
    """Proxy for ClockedSamplers.

    Drives a Clocked{Step,Bisect,NUTS}Sampler from flatnuts: the clocked
    sampler suggests points, this proxy evaluates likelihoods for them,
    and after ``nnewdirections`` direction changes returns the sample.
    """
    def __init__(self, nnewdirections, sampler='steps', nsteps=0,
                 balance=0.9, scale=0.1, nudge=1.1, log=False):
        """Initialise sampler.

        Parameters
        -----------
        nnewdirections: int
            number of accepted steps until the sample is considered independent.
        sampler: str
            which sampler to use
        nsteps:
            number of steps in sampler
        balance:
            acceptance rate to target
        scale:
            initial proposal scale
        nudge:
            adjustment factor for scale when acceptance rate is too low or high.
            must be >=1.
        """
        self.nsteps = nsteps
        self.samplername = sampler
        self.sampler = None
        self.scale = scale
        self.nudge = nudge
        self.balance = balance
        self.log = log
        self.last = None, None
        self.ncalls = 0
        self.nnewdirections = nnewdirections
        # cumulative statistics over the current sample
        self.nreflections = 0
        self.nreverses = 0
        self.nsteps_done = 0
        self.naccepts = 0
        self.nrejects = 0
        self.logstat = []
        self.logstat_labels = ['accepted', 'scale']
    def __str__(self):
        """Get string representation."""
        return 'Proxy[%s](%dx%d steps, AR=%d%%)' % (
            self.samplername, self.nnewdirections, self.nsteps, self.balance * 100)
    def accumulate_statistics(self):
        """Accumulate statistics at end of step sequence."""
        self.nreflections += self.sampler.nreflections
        self.nreverses += self.sampler.nreverses
        points = self.sampler.points
        # range
        ilo, _, _, _ = min(points)
        ihi, _, _, _ = max(points)
        self.nsteps_done += ihi - ilo
        self.naccepts += self.stepper.naccepts
        self.nrejects += self.stepper.nrejects
        if self.log:
            print("%2d direction encountered %2d accepts, %2d rejects" % (
                self.nrestarts, self.stepper.naccepts, self.stepper.nrejects))
    def adjust_scale(self, maxlength):
        """Adjust proposal scale, but not above maxlength."""
        log = self.log
        if log:
            print("%2d | %2d %2d %2d | %f" % (self.nrestarts,
                self.naccepts, self.nrejects, self.nreflections, self.scale))
        self.logstat.append([self.naccepts / (self.naccepts + self.nrejects), self.scale])
        if self.naccepts < (self.nrejects + self.naccepts) * self.balance:
            # acceptance rate below target: take smaller steps
            if log:
                print("adjusting scale %f down" % self.scale)
            self.scale /= self.nudge
        else:
            if self.scale < maxlength or True:
                if log:
                    print("adjusting scale %f up" % self.scale)
                self.scale *= self.nudge
        assert self.scale > 1e-10, self.scale
    def startup(self, region, us, Ls):
        """Choose a new random starting point."""
        if self.log:
            print("starting from scratch...")
        mask = region.inside(us)
        assert mask.any(), (
            "Not all of the live points satisfy the current region!",
            region.maxradiussq, region.u[~mask,:], region.unormed[~mask,:], us[~mask,:])
        i = np.random.randint(mask.sum())
        self.starti = i
        ui = us[mask,:][i]
        assert np.logical_and(ui > 0, ui < 1).all(), ui
        Li = Ls[mask][i]
        self.last = ui, Li
        # reset all per-sample counters
        self.ncalls = 0
        self.nrestarts = 0
        self.nreflections = 0
        self.nreverses = 0
        self.nsteps_done = 0
        self.naccepts = 0
        self.nrejects = 0
        self.sampler = None
        self.stepper = None
    def start_direction(self, region):
        """Choose a new random direction."""
        if self.log:
            print("choosing random direction")
        ui, Li = self.last
        v = generate_random_direction(ui, region, scale=self.scale)
        # v = generate_region_random_direction(ui, region, scale=self.scale)
        self.nrestarts += 1
        if self.sampler is None or True:
            samplingpath = SamplingPath(ui, v, Li)
            contourpath = ContourSamplingPath(samplingpath, region)
            # build the clocked sampler and its jumper for the chosen flavour
            if self.samplername == 'steps':
                self.sampler = ClockedStepSampler(contourpath, log=self.log)
                self.stepper = DirectJumper(self.sampler, self.nsteps, log=self.log)
            elif self.samplername == 'bisect':
                self.sampler = ClockedBisectSampler(contourpath, log=self.log)
                self.stepper = DirectJumper(self.sampler, self.nsteps, log=self.log)
            elif self.samplername == 'nuts':
                self.sampler = ClockedNUTSSampler(contourpath, log=self.log)
                self.stepper = IntervalJumper(self.sampler, self.nsteps, log=self.log)
            else:
                assert False
    def __next__(self, region, Lmin, us, Ls, transform, loglike, ndraw=40, plot=False):
        """Get next point.

        Parameters
        ----------
        region: MLFriends
            region.
        Lmin: float
            loglikelihood threshold
        us: array of vectors
            current live points
        Ls: array of floats
            current live point likelihoods
        transform: function
            transform function
        loglike: function
            loglikelihood function
        ndraw: int
            number of draws to attempt simultaneously.
        plot: bool
            whether to produce debug plots.
        """
        # find most recent point in history conforming to current Lmin
        ui, Li = self.last
        if Li is not None and not Li >= Lmin:
            # print("wandered out of L constraint; resetting", ui[0])
            ui, Li = None, None
        if Li is not None and not region.inside(ui.reshape((1,-1))):
            # region was updated and we are not inside anymore
            # so reset
            ui, Li = None, None
        if Li is None:
            self.startup(region, us, Ls)
        if self.sampler is None:
            self.start_direction(region)
            self.stepper.prepare_jump()
        Llast = None
        gaps = {}
        while True:
            if not self.sampler.is_done():
                # clocked sampler proposes; we evaluate on its behalf
                u, is_independent = self.sampler.next(Llast=Llast)
                if not is_independent and u is not None:
                    # should evaluate point
                    Llast = None
                    if region.inside(u.reshape((1,-1))):
                        p = transform(u.reshape((1, -1)))
                        L = loglike(p)[0]
                        self.ncalls += 1
                        if L > Lmin:
                            Llast = L
                else:
                    Llast = None
            else:
                # trajectory finished: fill remaining gaps, then jump
                u, i = self.stepper.check_gaps(gaps)
                if u is None:
                    unew, Lnew = self.stepper.make_jump(gaps)
                    break  # done!
                # check that u is allowed:
                assert i not in gaps
                gaps[i] = True
                if region.inside(u.reshape((1,-1))):
                    p = transform(u.reshape((1, -1)))
                    L = loglike(p)[0]
                    self.ncalls += 1
                    if L > Lmin:
                        # point is OK
                        gaps[i] = False
                        unew, Lnew = u, L
                        break
        # if self.log: print("after %d calls, jumped to" % self.ncalls, unew)
        assert np.isfinite(unew).all(), unew
        assert np.isfinite(Lnew).all(), Lnew
        self.accumulate_statistics()
        # forget sampler
        self.last = unew, Lnew
        self.sampler = None
        self.stepper = None
        # done, reset:
        # print("got a sample:", unew)
        if self.nrestarts >= self.nnewdirections:
            xnew = transform(unew)
            self.adjust_scale(maxlength=len(unew)**0.5)
            # forget as starting point
            self.last = None, None
            self.nrestarts = 0
            return unew, xnew, Lnew, self.ncalls
        else:
            return None, None, None, 0
    def plot(self, filename):
        """Plot sampler statistics."""
        if len(self.logstat) == 0:
            return
        parts = np.transpose(self.logstat)
        plt.figure(figsize=(10, 1 + 3 * len(parts)))
        for i, (label, part) in enumerate(zip(self.logstat_labels, parts)):
            plt.subplot(len(parts), 1, 1 + i)
            plt.ylabel(label)
            plt.plot(part)
            if np.min(part) > 0:
                plt.yscale('log')
        plt.savefig(filename, bbox_inches='tight')
        plt.close()
| 29,581 | 36.732143 | 141 | py |
UltraNest | UltraNest-master/tests/test_netiterintegrate.py | from __future__ import print_function, division
import os
import numpy as np
from ultranest.store import TextPointStore
from ultranest.netiter import PointPile, TreeNode, count_tree, print_tree, dump_tree
from ultranest.netiter import SingleCounter, MultiCounter, BreadthFirstIterator
def integrate_singleblock(num_live_points, pointstore, x_dim, num_params, dlogz=0.5):
    """Classic nested sampling integration, replayed from a point store.

    Pops `num_live_points` initial live points from `pointstore`, then
    repeatedly replaces the worst live point with a stored point above the
    current likelihood threshold, accumulating evidence ``logz`` and
    information ``h``. Stops when the estimated remaining evidence drops
    below the accumulated evidence; remaining live points are then added
    with their residual weight.

    Parameters
    ----------
    num_live_points: int
        number of live points to maintain.
    pointstore: TextPointStore
        store of previously sampled points to replay.
    x_dim: int
        number of unit-cube coordinates per stored row.
    num_params: int
        number of transformed parameters per stored row.
    dlogz: float
        NOTE(review): accepted for interface compatibility but not
        referenced in the body.

    Returns
    -------
    dict
        with keys niter, logz, logzerr and weighted_samples.
    """
    active_u = []
    active_v = []
    active_logl = []
    # draw the initial live points (threshold -inf accepts anything)
    for i in range(num_live_points):
        idx, row = pointstore.pop(-np.inf)
        assert row is not None
        active_u.append(row[2:2+x_dim])
        active_v.append(row[2+x_dim:2+x_dim+num_params])
        active_logl.append(row[1])
    saved_v = []  # Stored points for posterior results
    saved_logl = []
    saved_logwt = []
    h = 0.0  # Information, initially 0.
    logz = -1e300  # ln(Evidence Z), initially Z=0
    logvol = 0
    # log of the volume fraction removed per iteration, log(1 - exp(-1/N))
    logvolf = np.log1p(- np.exp(-1.0 / num_live_points))
    #fraction_remain = 1.0
    max_iters = 10000000
    for it in range(0, max_iters):
        # Worst object in collection and its weight (= volume * likelihood)
        worst = np.argmin(active_logl)
        logwt = logvol + logvolf + active_logl[worst]
        # Update evidence Z and information h.
        logz_new = np.logaddexp(logz, logwt)
        h = (np.exp(logwt - logz_new) * active_logl[worst] + np.exp(logz - logz_new) * (h + logz) - logz_new)
        logz = logz_new
        # upper bound on evidence still outside: best L times current volume
        logz_remain = np.max(active_logl) - it / num_live_points
        #print("L=%.1f N=%d V=%.2e logw=%.2e logZ=%.1f logZremain=%.1f" % (active_logl[worst], num_live_points, logvol, logwt, logz, logz_remain))
        # Shrink interval
        logvol -= 1.0 / num_live_points
        # Add worst object to samples.
        saved_v.append(np.array(active_v[worst]))
        saved_logwt.append(logwt)
        saved_logl.append(active_logl[worst])
        # The new likelihood constraint is that of the worst object.
        loglstar = active_logl[worst]
        idx, row = pointstore.pop(loglstar)
        assert row is not None
        u = row[2:2+x_dim]
        v = row[2+x_dim:2+x_dim+num_params]
        logl = row[1]
        active_u[worst] = u
        active_v[worst] = v
        active_logl[worst] = logl
        #fraction_remain = np.logaddexp(logz, logz_remain) - logz
        # Stopping criterion
        if logz_remain < logz:
            break
    # add the surviving live points, each carrying an equal share of the
    # remaining volume
    logvol = -len(saved_v) / num_live_points
    for i in np.argsort(active_logl):
        logwt = logvol - np.log(num_live_points) + active_logl[i]
        logz_new = np.logaddexp(logz, logwt)
        h = (np.exp(logwt - logz_new) * active_logl[i] + np.exp(logz - logz_new) * (h + logz) - logz_new)
        logz = logz_new
        #print("L=%.1f N=%d V=%.2e logw=%.2e logZ=%.1f" % (active_logl[i], num_live_points, logvol, logwt, logz))
        saved_v.append(np.array(active_v[i]))
        saved_logwt.append(logwt)
        saved_logl.append(active_logl[i])
    saved_v = np.array(saved_v)
    saved_wt = np.exp(np.array(saved_logwt) - logz)
    saved_logl = np.array(saved_logl)
    logzerr = np.sqrt(h / num_live_points)
    results = dict(niter=it, logz=logz, logzerr=logzerr,
        weighted_samples=dict(v=saved_v, w = saved_wt, logw = saved_logwt, L=saved_logl),
    )
    return results
def strategy_advice(node, parallel_values, main_iterator, counting_iterators, rootid):
    """Advise the likelihood range within which *node* should be expanded.

    Parameters
    ----------
    node: TreeNode
        node under consideration; must not have children yet.
    parallel_values: numpy.ndarray
        likelihood values of the currently active live points.
    main_iterator: SingleCounter or MultiCounter
        integrator providing the current ``logZ`` and ``logZremain``.
    counting_iterators: list
        accepted for interface compatibility; not used here.
    rootid: int
        accepted for interface compatibility; not used here.

    Returns
    -------
    (Llo, Lhi): tuple of floats
        likelihood range within which to expand; ``(nan, nan)`` when no
        expansion is advised.

    Raises
    ------
    AssertionError
        if the node already has children.
    """
    # we don't expand if node already has children; previously this was a
    # print + bare `assert False` followed by an unreachable return
    assert not node.children, "not expanding, already has children"
    Lmin = parallel_values.min()
    Lmax = parallel_values.max()
    logZremain = main_iterator.logZremain
    # if the remainder dominates, return that range
    if logZremain > main_iterator.logZ:
        return Lmin, Lmax
    # remainder is not dominant: advise no further expansion
    return np.nan, np.nan
class __Point(object):
    """Minimal container pairing a unit-cube coordinate with its transformed point.

    NOTE(review): not referenced anywhere in this module.
    """

    def __init__(self, u, p):
        # references are stored verbatim; no copies are made
        self.p = p
        self.u = u
def integrate_graph_singleblock(num_live_points, pointstore, x_dim, num_params, dlogz=0.5):
    """Nested sampling integration over an explicit exploration tree.

    Replays stored points into a tree with one root per initial live
    point, walked breadth-first by likelihood. Besides the main
    integrator, 10 bootstrapped ``SingleCounter`` integrators (each
    assigned a random subset of the roots) estimate the evidence scatter.

    Returns a dict with niter, logz, logzerr, weighted_samples and the
    exploration tree.
    """
    pp = PointPile(x_dim, num_params)

    def create_node(pointstore, Lmin):
        # pop the next stored point above likelihood threshold Lmin
        idx, row = pointstore.pop(Lmin)
        assert row is not None
        L = row[1]
        u = row[2:2+x_dim]
        p = row[2+x_dim:2+x_dim+num_params]
        assert np.isfinite(L)
        return pp.make_node(L, u, p)

    # we create a bunch of live points from the prior volume
    # each of which is the start of a chord (in the simplest case)
    roots = [create_node(pointstore, -np.inf) for i in range(num_live_points)]
    iterator_roots = []
    np.random.seed(1)
    for i in range(10):
        # boot-strap which roots are assigned to this iterator
        rootids = np.unique(np.random.randint(len(roots), size=len(roots)))
        #print(rootids)
        iterator_roots.append((SingleCounter(random=True), rootids))
    # and we have one that operators on the entire tree
    main_iterator = SingleCounter()
    main_iterator.Lmax = max(n.value for n in roots)
    assert np.isfinite(main_iterator.Lmax)
    explorer = BreadthFirstIterator(roots)
    Llo, Lhi = -np.inf, np.inf
    strategy_stale = True
    saved_nodeids = []
    saved_logl = []
    # we go through each live point (regardless of root) by likelihood value
    while True:
        #print()
        next = explorer.next_node()
        if next is None:
            break
        rootid, node, (active_nodes, active_rootids, active_values, active_node_ids) = next
        # this is the likelihood level we have to improve upon
        Lmin = node.value
        saved_nodeids.append(node.id)
        saved_logl.append(Lmin)
        expand_node = Lmin <= Lhi and Llo <= Lhi
        # if within suggested range, expand
        if strategy_stale or not (Lmin <= Lhi):
            # check with advisor if we want to expand this node
            Llo, Lhi = strategy_advice(node, active_values, main_iterator, [], rootid)
            #print("L range to expand:", Llo, Lhi, "have:", Lmin, "=>", Lmin <= Lhi, Llo <= Lhi)
            strategy_stale = False
        # NOTE(review): immediately re-marked stale, so the advisor is
        # consulted on every iteration regardless of the flag above.
        strategy_stale = True
        if expand_node:
            # sample a new point above Lmin
            #print("replacing node", Lmin, "from", rootid, "with", L)
            node.children.append(create_node(pointstore, Lmin))
            main_iterator.Lmax = max(main_iterator.Lmax, node.children[0].value)
        else:
            #print("ending node", Lmin)
            pass
        # inform iterators (if it is their business) about the arc
        main_iterator.passing_node(node, active_values)
        for it, rootids in iterator_roots:
            if rootid in rootids:
                mask = np.in1d(active_rootids, rootids, assume_unique=True)
                #mask1 = np.array([rootid2 in rootids for rootid2 in active_rootids])
                #assert (mask1 == mask).all(), (mask1, mask)
                it.passing_node(node, active_values[mask])
        #print([it.H for it,_ in iterator_roots])
        explorer.expand_children_of(rootid, node)
    # points with weights
    #saved_u = np.array([pp[nodeid].u for nodeid in saved_nodeids])
    saved_v = pp.getp(saved_nodeids)
    saved_logwt = np.array(main_iterator.logweights)
    saved_wt = np.exp(saved_logwt - main_iterator.logZ)
    saved_logl = np.array(saved_logl)
    print('%.4f +- %.4f (main)' % (main_iterator.logZ, main_iterator.logZerr))
    Zest = np.array([it.logZ for it, _ in iterator_roots])
    print('%.4f +- %.4f (bs)' % (Zest.mean(), Zest.std()))
    results = dict(niter=len(saved_logwt),
        logz=main_iterator.logZ, logzerr=main_iterator.logZerr,
        weighted_samples=dict(v=saved_v, w = saved_wt, logw = saved_logwt, L=saved_logl),
        tree=TreeNode(-np.inf, children=roots),
    )
    # return entire tree
    return results
def multi_integrate_graph_singleblock(num_live_points, pointstore, x_dim, num_params, dlogz=0.5, withtests=False):
    """Nested sampling integration using the vectorized ``MultiCounter``.

    Like ``integrate_graph_singleblock``, but the main estimate and all
    bootstrap evidence estimates are tracked by a single ``MultiCounter``.
    With ``withtests=True`` the insertion-order statistic is also
    accumulated and printed.

    Returns a dict with niter, logz, logzerr, weighted_samples and the
    exploration tree.
    """
    pp = PointPile(x_dim, num_params)

    def create_node(pointstore, Lmin):
        # pop the next stored point above likelihood threshold Lmin
        idx, row = pointstore.pop(Lmin)
        assert row is not None
        L = row[1]
        u = row[2:2+x_dim]
        p = row[2+x_dim:2+x_dim+num_params]
        return pp.make_node(L, u, p)

    # we create a bunch of live points from the prior volume
    # each of which is the start of a chord (in the simplest case)
    roots = [create_node(pointstore, -np.inf) for i in range(num_live_points)]
    # and we have one that operators on the entire tree
    main_iterator = MultiCounter(nroots=len(roots), nbootstraps=10, random=True, check_insertion_order=withtests)
    main_iterator.Lmax = max(n.value for n in roots)
    explorer = BreadthFirstIterator(roots)
    Llo, Lhi = -np.inf, np.inf
    strategy_stale = True
    saved_nodeids = []
    saved_logl = []
    # we go through each live point (regardless of root) by likelihood value
    while True:
        #print()
        next = explorer.next_node()
        if next is None:
            break
        rootid, node, (active_nodes, active_rootids, active_values, active_nodeids) = next
        assert not isinstance(rootid, float)
        # this is the likelihood level we have to improve upon
        Lmin = node.value
        saved_nodeids.append(node.id)
        saved_logl.append(Lmin)
        expand_node = Lmin <= Lhi and Llo <= Lhi
        # if within suggested range, expand
        if strategy_stale or not (Lmin <= Lhi):
            # check with advisor if we want to expand this node
            Llo, Lhi = strategy_advice(node, active_values, main_iterator, [], rootid)
            #print("L range to expand:", Llo, Lhi, "have:", Lmin, "=>", Lmin <= Lhi, Llo <= Lhi)
            strategy_stale = False
        # NOTE(review): immediately re-marked stale, so the advisor runs
        # on every iteration.
        strategy_stale = True
        if expand_node:
            # sample a new point above Lmin
            node.children.append(create_node(pointstore, Lmin))
            main_iterator.Lmax = max(main_iterator.Lmax, node.children[0].value)
        else:
            #print("ending node", Lmin)
            pass
        # inform iterators (if it is their business) about the arc
        assert not isinstance(rootid, float)
        main_iterator.passing_node(rootid, node, active_rootids, active_values)
        explorer.expand_children_of(rootid, node)
    print('tree size:', count_tree(roots))
    # points with weights
    #saved_u = pp.getu(saved_nodeids)
    saved_v = pp.getp(saved_nodeids)
    saved_logwt = np.array(main_iterator.logweights)
    saved_wt = np.exp(saved_logwt - main_iterator.logZ)
    saved_logl = np.array(saved_logl)
    print('%.4f +- %.4f (main)' % (main_iterator.logZ, main_iterator.logZerr))
    print('%.4f +- %.4f (bs)' % (main_iterator.all_logZ[1:].mean(), main_iterator.all_logZ[1:].std()))
    if withtests:
        print("insertion order:", float(main_iterator.insertion_order_runlength))
    results = dict(niter=len(saved_logwt),
        logz=main_iterator.logZ, logzerr=main_iterator.logZerr,
        weighted_samples=dict(v=saved_v, w = saved_wt, logw = saved_logwt, L=saved_logl),
        tree=TreeNode(-np.inf, children=roots),
    )
    # return entire tree
    return results
# Replay data: previously sampled nested sampling points for the eggbox
# test problem, shipped next to this test file.
testfile = os.path.join(os.path.dirname(__file__), 'eggboxpoints.tsv')
import time
import pytest
@pytest.mark.parametrize("nlive", [100])
def test_singleblock(nlive):
    """Run all three integrator implementations on the stored eggbox points.

    The classic, graph-based and vectorized-graph integrators must agree
    on logZ (np.isclose); timings are printed for comparison.
    """
    assert os.path.exists(testfile), ("%s does not exist" % testfile)
    print("="*80)
    print("NLIVE=%d " % nlive)
    print("Standard integrator")
    # 2 bookkeeping columns + 2 unit-cube dims + 2 parameters per row
    pointstore = TextPointStore(testfile, 2 + 2 + 2)
    t = time.time()
    result = integrate_singleblock(num_live_points=nlive, pointstore=pointstore, num_params=2, x_dim=2)
    print(' %(logz).1f +- %(logzerr).1f in %(niter)d iter' % result, '%.2fs' % (time.time() - t))
    pointstore.close()
    print("Graph integrator")
    pointstore = TextPointStore(testfile, 2 + 2 + 2)
    t = time.time()
    result2 = integrate_graph_singleblock(num_live_points=nlive, pointstore=pointstore, num_params=2, x_dim=2)
    print(' %(logz).1f +- %(logzerr).1f in %(niter)d iter' % result2, '%.2fs' % (time.time() - t))
    pointstore.close()
    assert np.isclose(result2['logz'], result['logz'])
    print("Vectorized graph integrator")
    pointstore = TextPointStore(testfile, 2 + 2 + 2)
    t = time.time()
    result3 = multi_integrate_graph_singleblock(num_live_points=nlive, pointstore=pointstore, num_params=2, x_dim=2)
    print(' %(logz).1f +- %(logzerr).1f in %(niter)d iter' % result3, '%.2fs' % (time.time() - t))
    pointstore.close()
    assert np.isclose(result3['logz'], result['logz'])
    print("Vectorized graph integrator with insertion order test")
    pointstore = TextPointStore(testfile, 2 + 2 + 2)
    t = time.time()
    result3 = multi_integrate_graph_singleblock(num_live_points=nlive, pointstore=pointstore, num_params=2, x_dim=2, withtests=True)
    print(' %(logz).1f +- %(logzerr).1f in %(niter)d iter' % result3, '%.2fs' % (time.time() - t))
    pointstore.close()
    assert np.isclose(result3['logz'], result['logz'])
def test_visualisation():
    """Smoke-test printing of a small randomly built tree."""
    print("testing tree visualisation...")
    pile = PointPile(1, 1)
    root = TreeNode()
    # build 5 branches; branch k carries k random children
    for nchildren in range(5):
        value = np.random.randint(1000)
        branch = pile.make_node(value, np.array([value]), np.array([value]))
        for _ in range(nchildren):
            value = np.random.randint(1000)
            branch.children.append(pile.make_node(value, [value], [value]))
        root.children.append(branch)
    print(root)
    print_tree(root.children, title='Empty Tree')
def test_treedump():
    """Smoke-test dumping a small random tree to HDF5, positionally and by keyword."""
    print("testing tree dumping...")
    pile = PointPile(1, 1)
    root = TreeNode()
    # build 5 branches; branch k carries k random children
    for nchildren in range(5):
        value = np.random.randint(1000)
        branch = pile.make_node(value, np.array([value]), np.array([value]))
        for _ in range(nchildren):
            value = np.random.randint(1000)
            branch.children.append(pile.make_node(value, [value], [value]))
        root.children.append(branch)
    # positional-argument call, then clean up
    dump_tree("test_tree.hdf5", root.children, pile)
    os.remove("test_tree.hdf5")
    # keyword-argument call, then dump again onto the existing file
    dump_tree("test_tree.hdf5", roots=root.children, pointpile=pile)
    dump_tree("test_tree.hdf5", root.children, pile)
    os.remove("test_tree.hdf5")
if __name__ == '__main__':
    # manual run: compare the integrators at several live point counts
    for nlive in [100, 400, 2000]:
        test_singleblock(nlive)
    #pointstore = TextPointStore(testfile, 2 + 2 + 2)
    #nlive = 400
    #multi_integrate_graph_singleblock(num_live_points=nlive, pointstore=pointstore, num_params=2, x_dim=2)
    #pointstore.close()
| 13,039 | 32.782383 | 140 | py |
UltraNest | UltraNest-master/tests/test_popstepsampling.py | import numpy as np
from ultranest import ReactiveNestedSampler
from ultranest.popstepsampler import PopulationSliceSampler, PopulationRandomWalkSampler
from ultranest.popstepsampler import generate_cube_oriented_direction, generate_random_direction, generate_cube_oriented_direction_scaled
from ultranest.popstepsampler import generate_region_oriented_direction, generate_region_random_direction
def loglike_vectorized(z):
    """Vectorized bimodal Gaussian test log-likelihood.

    Two Gaussian modes of width 0.1, centred near 0.7 and 0.3, with the
    centres shifted by 0.001 per dimension in opposite directions.

    Parameters
    ----------
    z: array of shape (nsamples, ndim)
        batch of parameter vectors.

    Returns
    -------
    array of shape (nsamples,)
        log-likelihood of each row.
    """
    z = np.asarray(z)
    # per-dimension centre offset, growing by 0.001 per dimension
    shift = np.arange(z.shape[1]) * 0.001
    # evaluate both modes in one broadcasted pass instead of the previous
    # per-row Python double loop
    a = -0.5 * (((z - 0.7 + shift) / 0.1)**2).sum(axis=1)
    b = -0.5 * (((z - 0.3 - shift) / 0.1)**2).sum(axis=1)
    return np.logaddexp(a, b)
def loglike(x):
    """Bimodal Gaussian test log-likelihood for a single parameter vector.

    Two modes of width 0.1 near 0.7 and 0.3, with centres shifted by
    0.001 per dimension in opposite directions.
    """
    mode_a = 0.0
    mode_b = 0.0
    for i, xi in enumerate(x):
        mode_a += ((xi - 0.7 + i * 0.001) / 0.1)**2
        mode_b += ((xi - 0.3 - i * 0.001) / 0.1)**2
    return np.logaddexp(-0.5 * mode_a, -0.5 * mode_b)
def transform(x):
    """Identity prior transform: pass unit-cube samples through unchanged."""
    return x


# names for the three fitted parameters
paramnames = ['param%d' % i for i in range(3)]
def test_stepsampler_cubeslice(plot=False):
    """Run ReactiveNestedSampler with a PopulationSliceSampler.

    Both posterior modes (near 0.7 and near 0.3) must appear in the
    posterior samples. `plot` is accepted for interface symmetry with the
    other tests but not used here.
    """
    np.random.seed(3)
    # randomized sampler configuration (deterministic via the seed above)
    nsteps = np.random.randint(10, 50)
    popsize = np.random.randint(1, 20)
    sampler = ReactiveNestedSampler(paramnames, loglike_vectorized, transform=transform, vectorized=True)
    sampler.stepsampler = PopulationSliceSampler(
        popsize=popsize, nsteps=nsteps,
        generate_direction=generate_cube_oriented_direction,
    )
    r = sampler.run(viz_callback=None, log_interval=50)
    sampler.print_results()
    # membership of each posterior sample in either mode
    a = (np.abs(r['samples'] - 0.7) < 0.1).all(axis=1)
    b = (np.abs(r['samples'] - 0.3) < 0.1).all(axis=1)
    assert a.sum() > 1
    assert b.sum() > 1
def test_stepsampler_cubegausswalk(plot=False):
    """Run ReactiveNestedSampler with a PopulationRandomWalkSampler.

    Short run (max_iters=200, no improvement loops); both posterior modes
    (near 0.7 and near 0.3) must still appear. `plot` is accepted for
    interface symmetry but not used here.
    """
    np.random.seed(2)
    # randomized sampler configuration (deterministic via the seed above)
    nsteps = np.random.randint(10, 50)
    popsize = np.random.randint(1, 20)
    sampler = ReactiveNestedSampler(paramnames, loglike_vectorized, transform=transform, vectorized=True)
    sampler.stepsampler = PopulationRandomWalkSampler(
        popsize=popsize, nsteps=nsteps,
        generate_direction=generate_cube_oriented_direction,
        scale=0.1,
    )
    r = sampler.run(viz_callback=None, log_interval=50, max_iters=200, max_num_improvement_loops=0)
    sampler.print_results()
    # membership of each posterior sample in either mode
    a = (np.abs(r['samples'] - 0.7) < 0.1).all(axis=1)
    b = (np.abs(r['samples'] - 0.3) < 0.1).all(axis=1)
    assert a.sum() > 1
    assert b.sum() > 1
from ultranest.mlfriends import AffineLayer, ScalingLayer, MLFriends, RobustEllipsoidRegion, SimpleRegion
def test_direction_proposals():
    """Check direction proposals across all layer/region combinations.

    Every proposal function must return an array with the same shape as
    the point set, for each transform layer and region class.
    """
    proposals = [generate_cube_oriented_direction, generate_random_direction,
        generate_region_oriented_direction, generate_region_random_direction]
    points = np.random.uniform(size=(100, 10))
    minvol = 1.0
    scale = 1.  # np.random.uniform()
    for layer in AffineLayer, ScalingLayer:
        transformLayer = layer()
        transformLayer.optimize(points, points)
        for region_class in MLFriends, RobustEllipsoidRegion, SimpleRegion:
            region = region_class(points, transformLayer)
            r, f = region.compute_enlargement(minvol=minvol, nbootstraps=30)
            region.maxradiussq = r
            region.enlarge = f
            region.create_ellipsoid(minvol=minvol)
            for prop in proposals:
                print("test of proposal:", prop, "with region:", region_class, "layer:", layer)
                directions = prop(points, region, scale=scale)
                assert directions.shape == points.shape, (directions.shape, points.shape)
                #assert np.allclose(norms, scale), (norms, scale)
if __name__ == '__main__':
    # manual run of a subset of the tests
    test_stepsampler_cubegausswalk()
    test_direction_proposals()
| 3,624 | 41.151163 | 137 | py |
UltraNest | UltraNest-master/tests/test_store.py | from __future__ import print_function, division
import numpy as np
import tempfile
import os
from ultranest.store import TextPointStore, HDF5PointStore, NullPointStore
import pytest
def test_text_store():
    """Exercise TextPointStore end-to-end on a temporary file.

    Covers: empty-store pops, rejection of wrong-length rows, persistence
    of added rows across reopen, popping by likelihood threshold, and the
    UserWarning emitted when reopening with a different column count.
    """
    PointStore = TextPointStore
    try:
        fobj, filepath = tempfile.mkstemp()
        os.close(fobj)
        ptst = PointStore(filepath, 4)
        assert ptst.stack_empty
        assert ptst.pop(-np.inf)[1] is None, "new store should not return anything"
        assert ptst.pop(100)[1] is None, "new store should not return anything"
        ptst.close()
        ptst = PointStore(filepath, 4)
        assert ptst.pop(-np.inf)[1] is None, "empty store should not return anything"
        assert ptst.pop(100)[1] is None, "empty store should not return anything"
        ptst.close()
        ptst = PointStore(filepath, 4)
        with pytest.raises(ValueError):
            ptst.add([-np.inf, 123, 4], 1)
            # unreachable when add() raises, as expected
            assert False, "should not allow adding wrong length"
        ptst = PointStore(filepath, 4)
        assert ptst.stack_empty
        ptst.add([-np.inf, 123, 413, 213], 2)
        assert ptst.stack_empty
        ptst.close()
        ptst = PointStore(filepath, 4)
        assert not ptst.stack_empty
        entry = ptst.pop(-np.inf)[1]
        assert entry is not None, ("retrieving entry should succeed", entry)
        assert entry[1] == 123, ("retrieving entry should succeed", entry)
        assert ptst.pop(100)[1] is None, "other queries should return None"
        assert ptst.stack_empty
        ptst.add([101, 155, 413, 213], 3)
        assert ptst.stack_empty
        ptst.close()
        ptst = PointStore(filepath, 4)
        assert ptst.pop(-np.inf)[1] is not None, "retrieving entry should succeed"
        assert ptst.pop(-np.inf)[1] is None, "retrieving unknown entry should fail"
        assert ptst.pop(100)[1] is None, "retrieving unknown entry should fail"
        ptst.add([99, 156, 413, 213], 4)
        ptst.close()
        ptst = PointStore(filepath, 4)
        assert ptst.pop(-np.inf)[1] is not None, "retrieving entry should succeed"
        assert ptst.pop(-np.inf)[1] is None, "retrieving unknown entry should fail"
        print(ptst.stack)
        entry = ptst.pop(100)[1]
        assert entry is not None, ("retrieving entry should succeed", entry)
        assert entry[1] == 156, ("retrieving entry should return correct value", entry)
        ptst.close()
        # reopening with a mismatched column count warns but still works
        with pytest.warns(UserWarning):
            ptst = PointStore(filepath, 3)
        assert ptst.stack_empty
        ptst.close()
        with pytest.warns(UserWarning):
            ptst = PointStore(filepath, 5)
        assert ptst.stack_empty
        ptst.close()
    finally:
        os.remove(filepath)
def test_hdf5_store():
    """Exercise HDF5PointStore end-to-end on a temporary file.

    Same scenario as ``test_text_store``, plus: ``ncalls`` persistence,
    IOError on column-count mismatch (instead of a warning), and
    ``mode='w'`` truncating the store.
    """
    PointStore = HDF5PointStore
    try:
        fobj, filepath = tempfile.mkstemp()
        os.close(fobj)
        ptst = PointStore(filepath, 4)
        assert ptst.stack_empty
        assert ptst.pop(-np.inf)[1] is None, "new store should not return anything"
        assert ptst.pop(100)[1] is None, "new store should not return anything"
        ptst.close()
        ptst = PointStore(filepath, 4)
        assert ptst.pop(-np.inf)[1] is None, "empty store should not return anything"
        assert ptst.pop(100)[1] is None, "empty store should not return anything"
        ptst.close()
        ptst = PointStore(filepath, 4)
        with pytest.raises(ValueError):
            ptst.add([-np.inf, 123, 4], 1)
            # unreachable when add() raises, as expected
            assert False, "should not allow adding wrong length"
        ptst = PointStore(filepath, 4)
        assert ptst.stack_empty
        ptst.add([-np.inf, 123, 413, 213], 2)
        assert ptst.stack_empty
        ptst.close()
        ptst = PointStore(filepath, 4)
        assert not ptst.stack_empty
        entry = ptst.pop(-np.inf)[1]
        assert entry is not None, ("retrieving entry should succeed", entry)
        assert entry[1] == 123, ("retrieving entry should succeed", entry)
        assert ptst.pop(100)[1] is None, "other queries should return None"
        assert ptst.stack_empty
        ptst.add([101, 155, 413, 213], 3)
        assert ptst.ncalls == 3, (ptst.ncalls)
        assert ptst.stack_empty
        ptst.close()
        ptst = PointStore(filepath, 4)
        assert ptst.ncalls == 3, (ptst.ncalls)
        assert ptst.pop(-np.inf)[1] is not None, "retrieving entry should succeed"
        assert ptst.pop(-np.inf)[1] is None, "retrieving unknown entry should fail"
        assert ptst.pop(100)[1] is None, "retrieving unknown entry should fail"
        ptst.add([99, 156, 413, 213], 4)
        ptst.close()
        ptst = PointStore(filepath, 4)
        assert ptst.ncalls == 4, (ptst.ncalls)
        assert ptst.pop(-np.inf)[1] is not None, "retrieving entry should succeed"
        assert ptst.pop(-np.inf)[1] is None, "retrieving unknown entry should fail"
        print(ptst.stack)
        entry = ptst.pop(100)[1]
        assert entry is not None, ("retrieving entry should succeed", entry)
        assert entry[1] == 156, ("retrieving entry should return correct value", entry)
        ptst.close()
        # unlike the text store, a column-count mismatch is a hard error
        with pytest.raises(IOError):
            ptst = PointStore(filepath, 3)
        with pytest.raises(IOError):
            ptst = PointStore(filepath, 5)
        # mode='w' truncates: all previous content is discarded
        ptst = PointStore(filepath, 4, mode='w')
        assert ptst.ncalls == 0, (ptst.ncalls)
        assert ptst.pop(-np.inf)[1] is None, "overwritten store should be empty"
        assert ptst.pop(100)[1] is None, "overwritten store should not return anything"
        ptst.close()
    finally:
        os.remove(filepath)
def test_nullstore():
    """NullPointStore must accept any input and never return stored points."""
    store = NullPointStore(4)
    assert store.stack_empty
    assert store.pop(-np.inf)[1] is None, "new store should not return anything"
    assert store.pop(100)[1] is None, "new store should not return anything"
    store.close()

    store = NullPointStore(4)
    assert store.pop(-np.inf)[1] is None, "empty store should not return anything"
    assert store.pop(100)[1] is None, "empty store should not return anything"
    store.close()

    store = NullPointStore(4)
    assert store.stack_empty
    # no errors even if we give rubbish input
    store.add([-np.inf, 123, 413, 213], 1)
    store.add([10, 123, 413, 213], 2)
    store.add([10, 123, 413, 213, 123], 3)
    store.add([99, 123, 413], 4)
    assert store.stack_empty
    store.close()

    store = NullPointStore(4)
    assert store.stack_empty
    assert store.pop(-np.inf)[1] is None
    store.close()
def test_storemany():
    """Stress both stores with interleaved initial and constrained rows.

    For each store type and several sizes N, writes 2 blocks of N
    "initial" rows (Lmin=-inf) interleaved with 2 blocks of N constrained
    rows, then reopens and verifies pop order, thresholds and reset()
    behaviour.
    """
    for PointStore in TextPointStore, HDF5PointStore:
        for N in 1, 2, 10, 100:
            print()
            print("======== %s N=%d ========" % (PointStore, N))
            print()
            try:
                fobj, filepath = tempfile.mkstemp()
                os.close(fobj)
                print("writing...")
                ptst = PointStore(filepath, 3)
                # block 1: N initial rows (threshold -inf)
                for i in range(N):
                    ptst.add([-np.inf, i-0.1, i-0.1], i)
                # block 2: N constrained rows with increasing thresholds
                for i in range(N):
                    ptst.add([i, i+1, i+1], i+N)
                    print(i, i+1, "storing:", [i, i+0.1, i+.1])
                # block 3: another N initial rows
                for i in range(N):
                    ptst.add([-np.inf, i-0.1, i-0.1], i+2*N)
                # block 4: N constrained rows written in reverse index order
                for i in range(N-1,-1,-1):
                    ptst.add([N-i, N-i+.5, N-i+.5], (N-i)+3*N)
                    print(N-i, N-i+1, "storing:", [N-i, N-i+.5, N-i+.5])
                ptst.close()
                print("reading...")
                ptst = PointStore(filepath, 3)
                assert ptst.ncalls == 4*N, (ptst.ncalls, N, 4*N)
                print('stack[0]:', ptst.stack)
                assert len(ptst.stack) == 4 * N
                # pop block 1
                for i in range(N):
                    idx, row = ptst.pop(-np.inf)
                    assert row is not None
                assert len(ptst.stack) == 3 * N
                print('stack[1]:', ptst.stack)
                # pop block 2, checking thresholds and values
                for i in range(N):
                    idx, row = ptst.pop(i)
                    print(i, i+.1, "reading:", row)
                    assert row is not None
                    assert row[0] == i
                    assert row[1] >= i+.1
                    #assert row == i+1
                ptst.reset()
                print('stack[2]:', ptst.stack)
                assert len(ptst.stack) == 2 * N
                # pop block 3
                for i in range(N):
                    idx, row = ptst.pop(-np.inf)
                    assert row is not None
                print('stack[3]:', ptst.stack)
                assert len(ptst.stack) == N
                # pop block 4, resetting before each pop
                for i in range(N-1,-1,-1):
                    ptst.reset()
                    idx, row = ptst.pop(N-i)
                    print(N-i, N-i+.1, "reading:", row)
                    assert row is not None
                    assert row[0] == N-i
                    assert row[1] >= N-i+.1
                    #assert row == i+1
                assert len(ptst.stack) == 0
                assert ptst.stack_empty
                ptst.close()
            finally:
                os.remove(filepath)
| 7,601 | 30.028571 | 81 | py |
UltraNest | UltraNest-master/tests/benchmark_maxradius.py | from ultranest.mlfriends import MLFriends, ScalingLayer, AffineLayer
import numpy as np
import time
import matplotlib.pyplot as plt
def benchmark_maxradius():
    """Benchmark MLFriends.compute_maxradiussq over dimensions and point counts.

    Prints per-call durations and saves a log-log plot of duration per
    nlive^2 to 'testmaxradius.pdf'.
    """
    print(" ndim | npts | duration")
    for ndim in 2, 4, 8, 16, 32, 64:
        plotpoints = []
        np.random.seed(ndim)
        for npts in 100, 400, 1000, 4000:
            points = np.random.uniform(size=(npts,ndim))
            transformLayer = ScalingLayer()
            region = MLFriends(points, transformLayer)
            niter = 0
            total_duration = 0
            # repeat until ~1s of wall time accumulated for a stable average
            while total_duration < 1:
                start = time.time()
                maxr = region.compute_maxradiussq(nbootstraps=20)
                total_duration += time.time() - start
                niter += 1
            print('%5d | %5d | %.2fms val=%f' % (ndim, npts, total_duration * 1000 / niter, maxr))
            # normalise by npts^2, the expected scaling of the algorithm
            plotpoints.append((npts, total_duration * 1000 / niter / npts**2))
        plt.plot(*zip(*plotpoints), label='ndim=%d' % ndim)
    plt.xlabel('Number of live points')
    plt.ylabel('Duration [ms] / nlive$^2$')
    plt.yscale('log')
    plt.xscale('log')
    plt.legend(loc='best', prop=dict(size=10))
    plt.savefig('testmaxradius.pdf', bbox_inches='tight')
    plt.close()
def benchmark_transform():
    """Benchmark layer transforms and region membership tests vs. dimensionality.

    For both layer types, times (a) an untransform+transform round trip,
    (b) region.inside on a small batch, and (c) plain numpy array
    allocation as a baseline. Saves a log-log plot to 'testtransform.pdf'.
    """
    npts = 400
    for layer in 'scale', 'affine':
        print(" ndim | duration [%s]" % layer)
        tplotpoints = []
        rplotpoints = []
        nplotpoints = []
        for ndim in 2, 4, 8, 16, 32, 64, 128, 256,:
            np.random.seed(ndim)
            points = np.random.uniform(0.4, 0.6, size=(npts,ndim))
            transformLayer = ScalingLayer() if layer == 'scale' else AffineLayer()
            region = MLFriends(points, transformLayer)
            region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
            region.create_ellipsoid()
            # (a) transform round trip
            # NOTE(review): the RNG draw is inside the timed section, so it
            # is included in the measured duration.
            niter = 0
            total_duration = 0
            while total_duration < .1:
                start = time.time()
                u = region.transformLayer.untransform(np.random.normal(size=(ndim)))
                region.transformLayer.transform(u)
                total_duration += time.time() - start
                niter += 1
            print('%5d | %.2fms ' % (ndim, total_duration * 1000 / niter))
            tplotpoints.append((ndim, total_duration * 1000 / niter))
            # (b) region membership test on a batch of 10 points
            niter = 0
            total_duration = 0
            while total_duration < .1:
                u = np.random.normal(0.5, 0.1, size=(10, ndim))
                start = time.time()
                region.inside(u)
                total_duration += time.time() - start
                niter += 1
            print('%5d | %.2fms ' % (ndim, total_duration * 1000 / niter))
            rplotpoints.append((ndim, total_duration * 1000 / niter))
            # (c) baseline: cost of three small array allocations + fills
            niter = 0
            total_duration = 0
            while total_duration < .1:
                u = np.random.normal(0.5, 0.1, size=(10, ndim))
                start = time.time()
                array = np.empty((10), dtype=int)
                array[:] = -1
                array = np.empty((10), dtype=int)
                array[:] = -1
                array = np.empty((10), dtype=int)
                array[:] = -1
                total_duration += time.time() - start
                niter += 1
            print('%5d | %.2fms ' % (ndim, total_duration * 1000 / niter))
            nplotpoints.append((ndim, total_duration * 1000 / niter))
        plt.plot(*zip(*tplotpoints), label=layer + ' transform')
        plt.plot(*zip(*rplotpoints), label=layer + ' region.inside')
        plt.plot(*zip(*nplotpoints), label=layer + ' array')
    plt.xlabel('Number of dimensions')
    plt.ylabel('Duration [ms]')
    plt.yscale('log')
    plt.xscale('log')
    plt.legend(loc='best', prop=dict(size=10))
    plt.savefig('testtransform.pdf', bbox_inches='tight')
    plt.close()
if __name__ == '__main__':
    # manual run; the maxradius benchmark is disabled by default
    #benchmark_maxradius()
    benchmark_transform()
| 3,347 | 31.504854 | 90 | py |
UltraNest | UltraNest-master/tests/test_regionsampling.py | import numpy as np
import os
import matplotlib.pyplot as plt
from ultranest.mlfriends import ScalingLayer, AffineLayer, MLFriends
from ultranest.mlfriends import RobustEllipsoidRegion, SimpleRegion, WrappingEllipsoid
from numpy.testing import assert_allclose
here = os.path.dirname(__file__)
def test_region_sampling_scaling(plot=False):
    """Check MLFriends sampling methods with a ScalingLayer.

    Builds a region around a flat anisotropic point cloud and verifies
    that each sampling method draws points covering the expected bounding
    box, with nearly all draws inside the region.
    """
    np.random.seed(1)
    upoints = np.random.uniform(0.2, 0.5, size=(1000, 2))
    # squash the second axis to make the cloud anisotropic
    upoints[:,1] *= 0.1
    transformLayer = ScalingLayer(wrapped_dims=[])
    transformLayer.optimize(upoints, upoints)
    region = MLFriends(upoints, transformLayer)
    region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
    print("enlargement factor:", region.enlarge, 1 / region.enlarge)
    region.create_ellipsoid()
    nclusters = transformLayer.nclusters
    assert nclusters == 1
    assert np.allclose(region.unormed, region.transformLayer.transform(upoints)), "transform should be reproducible"
    assert region.inside(upoints).all(), "live points should lie near live points"
    if plot:
        plt.plot(upoints[:,0], upoints[:,1], 'x ')
        for method in region.sampling_methods:
            points, nc = method(nsamples=400)
            plt.plot(points[:,0], points[:,1], 'o ', label=str(method.__name__))
        plt.legend(loc='best')
        plt.savefig('test_regionsampling_scaling.pdf', bbox_inches='tight')
        plt.close()
    for method in region.sampling_methods:
        print("sampling_method:", method)
        newpoints = method(nsamples=4000)
        lo1, lo2 = newpoints.min(axis=0)
        hi1, hi2 = newpoints.max(axis=0)
        # sampled extent should bracket the generating box [0.2,0.5]x[0.02,0.05]
        assert 0.15 < lo1 < 0.25, (method.__name__, newpoints, lo1, hi1, lo2, hi2)
        assert 0.015 < lo2 < 0.025, (method.__name__, newpoints, lo1, hi1, lo2, hi2)
        assert 0.45 < hi1 < 0.55, (method.__name__, newpoints, lo1, hi1, lo2, hi2)
        assert 0.045 < hi2 < 0.055, (method.__name__, newpoints, lo1, hi1, lo2, hi2)
        assert region.inside(newpoints).mean() > 0.99, region.inside(newpoints).mean()
    # with a vanishing radius, only the live points themselves remain inside
    region.maxradiussq = 1e-90
    assert np.allclose(region.unormed, region.transformLayer.transform(upoints)), "transform should be reproducible"
    assert region.inside(upoints).all(), "live points should lie very near themselves"
def test_region_sampling_affine(plot=False):
    """Check MLFriends sampling methods with an AffineLayer.

    Same structure as the scaling test, but on a wider point cloud
    ([0,1] x [0,0.5]) and requiring all sampled points inside the region.
    """
    np.random.seed(1)
    upoints = np.random.uniform(size=(1000, 2))
    # squash the second axis to make the cloud anisotropic
    upoints[:,1] *= 0.5
    transformLayer = AffineLayer(wrapped_dims=[])
    transformLayer.optimize(upoints, upoints)
    region = MLFriends(upoints, transformLayer)
    region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
    print("enlargement factor:", region.enlarge, 1 / region.enlarge)
    region.create_ellipsoid()
    nclusters = transformLayer.nclusters
    assert nclusters == 1
    assert np.allclose(region.unormed, region.transformLayer.transform(upoints)), "transform should be reproducible"
    assert region.inside(upoints).all(), "live points should lie near live points"
    if plot:
        plt.plot(upoints[:,0], upoints[:,1], 'x ')
        for method in region.sampling_methods:
            points, nc = method(nsamples=400)
            plt.plot(points[:,0], points[:,1], 'o ', label=str(method.__name__))
        plt.legend(loc='best')
        plt.savefig('test_regionsampling_affine.pdf', bbox_inches='tight')
        plt.close()
    for method in region.sampling_methods:
        print("sampling_method:", method)
        newpoints = method(nsamples=4000)
        lo1, lo2 = newpoints.min(axis=0)
        hi1, hi2 = newpoints.max(axis=0)
        # sampled extent should bracket the generating box [0,1]x[0,0.5]
        assert 0 <= lo1 < 0.1, (method.__name__, newpoints, lo1, hi1, lo2, hi2)
        assert 0 <= lo2 < 0.1, (method.__name__, newpoints, lo1, hi1, lo2, hi2)
        assert 0.95 < hi1 <= 1, (method.__name__, newpoints, lo1, hi1, lo2, hi2)
        assert 0.45 <= hi2 < 0.55, (method.__name__, newpoints, lo1, hi1, lo2, hi2)
        assert region.inside(newpoints).all()
    # with a vanishing radius, only the live points themselves remain inside
    region.maxradiussq = 1e-90
    assert np.allclose(region.unormed, region.transformLayer.transform(upoints)), "transform should be reproducible"
    assert region.inside(upoints).all(), "live points should lie very near themselves"
def test_region_ellipsoid(plot=False):
    """Cross-check MLFriends.inside_ellipsoid against an explicit Mahalanobis test.

    `plot` is accepted for interface symmetry with the other tests but not
    used here.
    """
    np.random.seed(1)
    points = np.random.uniform(0.4, 0.6, size=(1000, 2))
    points[:,1] *= 0.5
    transformLayer = AffineLayer(wrapped_dims=[])
    transformLayer.optimize(points, points)
    region = MLFriends(points, transformLayer)
    region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
    print("enlargement factor:", region.enlarge, 1 / region.enlarge)
    region.create_ellipsoid()
    nclusters = transformLayer.nclusters
    assert nclusters == 1
    # random probe points; compare the region's answer against the
    # explicit quadratic-form computation d^T C^-1 d <= enlarge
    bpts = np.random.uniform(size=(100, 2))
    mask = region.inside_ellipsoid(bpts)
    d = (bpts - region.ellipsoid_center)
    mask2 = np.einsum('ij,jk,ik->i', d, region.ellipsoid_invcov, d) <= region.enlarge
    assert_allclose(mask, mask2)
def test_region_mean_distances():
    """Verify compute_mean_pair_distance against a brute-force O(n^2) reference.

    Uses a half-annulus point set so the geometry is non-trivial.
    """
    np.random.seed(1)
    points = np.random.uniform(0.4, 0.6, size=(10000, 2))
    #points[:,1] *= 0.5
    # keep points in a thin annulus of radius 0.08 around (0.5, 0.5)
    mask = np.abs((points[:,0]-0.5)**2 + (points[:,1]-0.5)**2 - 0.08**2) < 0.02**2
    print('circle:', mask.sum())
    points = points[mask]
    # keep only the left half of the annulus
    mask = points[:,0] < 0.5
    print('half-circle:', mask.sum())
    points = points[mask]
    transformLayer = AffineLayer(wrapped_dims=[])
    transformLayer.optimize(points, points)
    region = MLFriends(points, transformLayer)
    region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
    print("enlargement factor:", region.enlarge, 1 / region.enlarge)
    region.create_ellipsoid()
    meandist = region.compute_mean_pair_distance()
    t = transformLayer.transform(region.u)
    # brute-force mean pairwise Euclidean distance in transformed space
    d = 0
    N = 0
    for i in range(len(t)):
        for j in range(i):
            d += ((t[i,:] - t[j,:])**2).sum()**0.5
            #print(i, j, t[i,:], t[j,:], ((t[i,:] - t[j,:])**2).sum())
            N += 1
    print((meandist, d, N, t))
    assert np.isclose(meandist, d / N), (meandist, d, N)
def test_ellipsoids():
    """Smoke-test the ellipsoidal region classes on uniform and truncated point sets.

    Exercises WrappingEllipsoid (1d and integer-valued transformed points),
    MLFriends, RobustEllipsoidRegion and SimpleRegion; each must report all of
    its construction points as inside.
    """
    tpoints = np.random.uniform(0.4, 0.6, size=(1000, 1))
    tregion = WrappingEllipsoid(tpoints)
    print(tregion.variable_dims)
    tregion.enlarge = tregion.compute_enlargement(nbootstraps=30)
    tregion.create_ellipsoid()
    # umax=0.5 truncates the first axis, making the point cloud asymmetric
    for umax in 0.6, 0.5:
        print()
        print(umax)
        points = np.random.uniform(0.4, 0.6, size=(1000, 3))
        points = points[points[:,0] < umax]
        # integer-rounded first coordinate creates a degenerate (constant) dim
        tpoints = points * 10
        tpoints[:,0] = np.floor(tpoints[:,0])
        print(points, tpoints)
        transformLayer = AffineLayer(wrapped_dims=[])
        transformLayer.optimize(points, points)
        region = MLFriends(points, transformLayer)
        region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
        region.create_ellipsoid()
        inside = region.inside(points)
        assert inside.shape == (len(points),), (inside.shape, points.shape)
        assert inside.all()
        region = RobustEllipsoidRegion(points, transformLayer)
        region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
        region.create_ellipsoid()
        inside = region.inside(points)
        assert inside.shape == (len(points),), (inside.shape, points.shape)
        assert inside.all()
        region = SimpleRegion(points, transformLayer)
        region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
        region.create_ellipsoid()
        inside = region.inside(points)
        assert inside.shape == (len(points),), (inside.shape, points.shape)
        assert inside.all()
        tregion = WrappingEllipsoid(tpoints)
        print(tregion.variable_dims)
        tregion.enlarge = tregion.compute_enlargement(nbootstraps=30)
        tregion.create_ellipsoid()
        inside = tregion.inside(tpoints)
        assert inside.shape == (len(tpoints),), (inside.shape, tpoints.shape)
        assert inside.all()
# Manual entry point: run a single test directly (others left commented out).
if __name__ == '__main__':
    #test_region_sampling_scaling(plot=True)
    #test_region_sampling_affine(plot=True)
    test_ellipsoids()
| 8,215 | 40.494949 | 116 | py |
UltraNest | UltraNest-master/tests/test_ordertest.py | from __future__ import print_function, division
import numpy as np
from ultranest.ordertest import UniformOrderAccumulator, infinite_U_zscore
def test_invalid_order():
    """An order rank larger than the ensemble size must raise ValueError."""
    acc = UniformOrderAccumulator()
    acc.add(2, 3)
    raised = False
    try:
        acc.add(4, 3)
    except ValueError:
        raised = True
    assert raised
def test_diff_expand():
    """The accumulator accepts a growing ensemble size without complaint."""
    acc = UniformOrderAccumulator()
    for order, ensemble_size in ((1, 3), (4, 5), (5, 6)):
        acc.add(order, ensemble_size)
def test_order_correctness():
    """Statistical check of the U-test accumulator.

    Feeds uniformly distributed order ranks (frac=1: well-calibrated;
    frac=0.9: biased low) and counts how often the running zscore exceeds 3,
    splitting the run each time. A calibrated stream should never split;
    the biased stream should split at least once. Also cross-checks the
    incremental ``zscore`` against the batch ``infinite_U_zscore`` at every step.
    """
    np.random.seed(1)
    Nlive = 400
    N = 1000
    nruns = []
    for frac in 1, 0.9:
        print("frac:", frac)
        sample_acc = UniformOrderAccumulator()
        runlength = []
        samples = []
        for i in range(N):
            order = np.random.randint(0, Nlive * frac)
            sample_acc.add(order, Nlive)
            samples.append(order)
            zscore = sample_acc.zscore
            # incremental and batch computations must agree exactly
            assert np.isclose(zscore, infinite_U_zscore(np.asarray(samples), Nlive)), (zscore, infinite_U_zscore(np.asarray(samples), Nlive), samples)
            if abs(zscore) > 3:
                runlength.append(len(sample_acc))
                print("split after %d" % (runlength[-1]))
                sample_acc.reset()
                samples = []
        print('runlength:', runlength)
        nruns.append(len(runlength))
    nruns1, nruns2 = nruns
    print("number of runs:", nruns1, nruns2)
    # calibrated stream: no splits; biased stream: at least one split
    assert nruns1 == 0, (nruns1)
    assert nruns2 > 0, (nruns1, nruns2)
# Manual entry point for running this test module as a script.
if __name__ == '__main__':
    test_order_correctness()
| 1,331 | 25.117647 | 141 | py |
UltraNest | UltraNest-master/tests/test_hotstart.py | from __future__ import print_function, division
import numpy as np
import scipy.stats
from numpy import log10
from ultranest import ReactiveNestedSampler
from ultranest.utils import vectorize
from ultranest.integrator import warmstart_from_similar_file
from ultranest.hotstart import reuse_samples, get_extended_auxiliary_problem
from ultranest.hotstart import compute_quantile_intervals, get_auxiliary_contbox_parameterization, compute_quantile_intervals_refined
import os
import tempfile
# Synthetic data set shared by all tests in this module:
# Ndata Gaussian draws with known mean/scatter, generated with a fixed seed
# so every test sees identical data.
rng_data = np.random.RandomState(42)
Ndata = 100
mean_true = 42.0
sigma_true = 0.1
y = rng_data.normal(mean_true, sigma_true, size=Ndata)
# fitted parameter names (mean and scatter of the Gaussian)
parameters = ['mean', 'scatter']
def prior_transform(x):
    """Map unit-cube coordinates to (mean, scatter).

    The mean is uniform on [-1000, 1000]; the scatter is log-uniform on
    [0.01, 100].
    """
    params = np.empty_like(x)
    params[0] = 2000 * x[0] - 1000
    params[1] = 10**(4 * x[1] - 2)
    return params
def log_likelihood(params):
    """Gaussian log-likelihood of the module-level data set ``y``."""
    mu, sigma = params
    per_point = scipy.stats.norm.logpdf(y, mu, sigma)
    return per_point.sum()
def extended_prior_transform(x):
    """Like prior_transform, but with the Gaussian FWHM appended as a derived parameter."""
    params = np.empty(3)
    params[0] = 2000 * x[0] - 1000
    params[1] = 10**(4 * x[1] - 2)
    # FWHM = 2 * sqrt(2 ln 2) * sigma
    params[2] = 2 * np.sqrt(2 * np.log(2)) * params[1]
    return params
def extended_log_likelihood(params):
    """Same Gaussian likelihood as log_likelihood; the trailing FWHM entry is unused."""
    mu, sigma, _fwhm = params
    return scipy.stats.norm.logpdf(y, mu, sigma).sum()
def test_contbox_hotstart():
    """Exercise the quantile-interval helpers and the contbox warmstart parameterizations.

    Uses a posterior sample that is extremely narrow in dim 0 and flat in dim 1,
    checking (a) the quantile interval shapes/monotonicity, (b) quantile values
    against np.quantile, (c) the auxiliary parameterization (scalar and
    vectorized), and (d) warmstart_from_similar_file round-tripping via a
    temporary text file.
    """
    rng_samples = np.random.RandomState(43)
    N = 100000
    # dim 0: tight Gaussian at 0.1; dim 1: uniform (uninformative)
    samples = rng_samples.normal(0.1, 1e-6, size=(N,2))
    samples[:,1] = rng_samples.uniform(size=N)
    weights = (np.ones(N) / N).reshape((-1,1))
    logl = weights * 0
    steps = [0.1, 0.01]
    ulos, uhis = compute_quantile_intervals(steps, samples, weights)
    print("quantiles:", ulos)
    print("quantiles:", uhis)
    ulos2, uhis2, uinterpspace = compute_quantile_intervals_refined(steps, samples, weights)
    print("refined quantiles:", ulos2)
    print("refined quantiles:", uhis2)
    print("interpolation steps:", uinterpspace)
    # intervals must nest: lower bounds decrease, upper bounds increase
    assert np.diff(ulos, axis=0).shape == (2,2), ulos
    assert (np.diff(uinterpspace) > 0).all(), uinterpspace
    assert (np.diff(ulos, axis=0) < 0).all(), (ulos, uhis)
    assert (np.diff(uhis, axis=0) > 0).all(), (ulos, uhis)
    assert (np.diff(ulos2, axis=0) < 0).all(), (ulos2, uhis2)
    assert (np.diff(uhis2, axis=0) > 0).all(), (ulos2, uhis2)
    assert ulos.shape == (2+1, len(steps)), (uhis.shape, ulos.shape)
    assert uhis.shape == ulos.shape, (uhis.shape, ulos.shape)
    # NOTE(review): the next assertion is duplicated verbatim; the second one
    # possibly was meant to compare against ulos2 — confirm with the author.
    assert len(uinterpspace) == len(uhis2)
    assert len(uinterpspace) == len(uhis2)
    tol = dict(atol=1e-3, rtol=0.01)
    # quantile interval edges should match np.quantile at q and 1-q
    for i in 1, 0:
        for j, q in enumerate(steps):
            expectation = np.quantile(samples[:,i], q)
            actual = ulos[j,i]
            print(i, j, q, expectation, actual)
            assert np.isclose(expectation, actual, **tol), (i, j, q, expectation, actual)
            expectation = np.quantile(samples[:,i], 1-q)
            actual = uhis[j,i]
            print(i, j, 1-q, expectation, actual)
            assert np.isclose(expectation, actual, **tol), (i, j, 1-q, expectation, actual)
    # scalar (non-vectorized) auxiliary parameterization
    aux_param_names, aux_loglike, aux_transform, vectorized = get_auxiliary_contbox_parameterization(
        parameters, loglike=log_likelihood, transform=prior_transform,
        vectorized=False, upoints=samples, uweights=weights,
    )
    assert aux_param_names == parameters + ['aux_logweight'], (aux_param_names, parameters)
    p = aux_transform(np.random.uniform(size=3))
    assert p.shape == (len(aux_param_names),)
    L = float(aux_loglike(p))
    print(L)
    del aux_param_names, aux_loglike, aux_transform
    # vectorized auxiliary parameterization
    aux_param_names, aux_vloglike, aux_vtransform, vectorized = get_auxiliary_contbox_parameterization(
        parameters, loglike=vectorize(log_likelihood), transform=vectorize(prior_transform),
        vectorized=True, upoints=samples, uweights=weights,
    )
    print(aux_param_names, parameters)
    assert aux_param_names == parameters + ['aux_logweight'], (aux_param_names, parameters)
    p = aux_vtransform(np.random.uniform(size=(11, 3)))
    assert p.shape == (11, len(aux_param_names)), p.shape
    L = aux_vloglike(p)
    assert L.shape == (11,), L.shape
    print(L)
    del aux_param_names, aux_vloglike, aux_vtransform
    # round-trip through a weighted-posterior text file, as a user would supply
    with tempfile.TemporaryDirectory() as tmpdirname:
        tmpfilename = os.path.join(tmpdirname, 'weighted_posterior_samples.txt')
        print(tmpfilename)
        np.savetxt(
            tmpfilename,
            np.hstack((weights, logl, samples)),
            header='weight logl mean scatter',
            fmt='%f'
        )
        aux_param_names, aux_loglike, aux_transform, vectorized = warmstart_from_similar_file(
            tmpfilename,
            parameters,
            extended_log_likelihood,
            extended_prior_transform,
            vectorized=False,
        )
        assert aux_param_names == parameters + ['aux_logweight'], (aux_param_names, parameters)
        p = aux_transform(np.random.uniform(size=3))
        # +1 here: extended_prior_transform adds a derived FWHM parameter
        assert p.shape == (len(aux_param_names)+1,)
        L = float(aux_loglike(p))
        print(L)
        aux_param_names, aux_vloglike, aux_vtransform, vectorized = warmstart_from_similar_file(
            tmpfilename,
            parameters,
            vectorize(extended_log_likelihood),
            vectorize(extended_prior_transform),
            vectorized=True,
        )
        assert aux_param_names == parameters + ['aux_logweight'], (aux_param_names, parameters)
        p = aux_vtransform(np.random.uniform(size=(11, 3)))
        assert p.shape == (11, len(aux_param_names)+1)
        L = aux_vloglike(p)
        assert L.shape == (11,)
        print(L)
def test_hotstart_SLOW():
    """End-to-end warm-start test: auxiliary problem, accelerated run, and sample reuse.

    Builds a Gaussian guess around the known posterior, checks that the
    auxiliary transform proposes points near the mode, then compares an
    accelerated nested sampling run against a reference run (same posterior,
    >=4x fewer calls), and finally validates reuse_samples in both
    weighted-samples and raw-points modes.
    """
    np.random.seed(2)
    # posterior guess in unit-cube coordinates (inverse of prior_transform)
    ctr = np.array([(42.0 + 1000) / 2000, (log10(0.1) + 2) / 4])
    cov = np.diag([0.01 / 2000, (log10(0.1) + 2) / 4 - (log10(0.09) + 2) / 4])**2
    invcov = np.linalg.inv(cov)
    Lguess = log_likelihood(prior_transform(np.random.uniform(size=len(parameters))))
    Lctr = log_likelihood(prior_transform(ctr))
    print(Lguess, Lctr)
    # a random point must be far worse than the guessed centre
    assert Lguess < Lctr - 100, (Lguess, Lctr)
    aux_log_likelihood, aux_transform = get_extended_auxiliary_problem(
        log_likelihood, prior_transform, ctr, invcov,
        enlargement_factor=len(parameters)**0.5, df=20)
    # proposals from the auxiliary transform should cluster near the truth
    proposals = np.array([aux_transform(np.random.uniform(size=len(parameters))) for i in range(40)])
    valid = proposals[:,2] > -1e100
    assert valid.sum() > 0.9, valid.sum()
    proposals = proposals[valid,:]
    print("proposals:", proposals, valid.sum())
    assert (np.abs(proposals[:,0] - 42) < 2).mean() > 0.9, proposals
    assert (np.abs(log10(proposals[:,1] / 0.1)) < 0.5).mean() > 0.9, proposals
    Lproposed = np.array([log_likelihood(p[:-1]) for p in proposals])
    assert np.mean(Lproposed > Lctr - 10) > 0.5, (Lproposed, Lctr)
    # accelerated run on the auxiliary problem
    aux_sampler = ReactiveNestedSampler(
        parameters, aux_log_likelihood, transform=aux_transform,
        derived_param_names=['aux_logweight'],
    )
    aux_results = aux_sampler.run(frac_remain=0.5, viz_callback=None)
    aux_sampler.print_results()
    # reference run on the plain problem
    ref_sampler = ReactiveNestedSampler(
        parameters, log_likelihood, transform=prior_transform,
    )
    ref_results = ref_sampler.run(frac_remain=0.5, viz_callback=None)
    ref_sampler.print_results()
    # warm start must save at least a factor of 4 in likelihood calls,
    # while reproducing posterior moments and evidence
    assert aux_results['ncall'] < ref_results['ncall'] / 4, (ref_results['ncall'], aux_results['ncall'])
    assert np.abs(ref_results['posterior']['mean'][0] - aux_results['posterior']['mean'][0]) < 0.5, (ref_results['posterior'], aux_results['posterior'])
    assert np.abs(ref_results['posterior']['mean'][1] - aux_results['posterior']['mean'][1]) < 0.05, (ref_results['posterior'], aux_results['posterior'])
    assert 0.8 < (ref_results['posterior']['stdev'][0] / aux_results['posterior']['stdev'][0]) < 1.2, (ref_results['posterior'], aux_results['posterior'])
    assert 0.8 < (ref_results['posterior']['stdev'][1] / aux_results['posterior']['stdev'][1]) < 1.2, (ref_results['posterior'], aux_results['posterior'])
    assert np.abs(ref_results['logzerr'] - aux_results['logzerr']) < 0.5, (ref_results['logzerr'], aux_results['logzerr'])
    print("RECYCLING:")
    print("ref:", ref_results)
    # reuse weighted samples from the reference run
    rec_results = reuse_samples(parameters, log_likelihood, **ref_results['weighted_samples'], **ref_results)
    #assert rec_results['ncall'] < ref_results['ncall'] / 4, (ref_results['ncall'], rec_results['ncall'])
    assert np.abs(ref_results['posterior']['mean'][0] - rec_results['posterior']['mean'][0]) < 0.5, (ref_results['posterior'], rec_results['posterior'])
    assert np.abs(ref_results['posterior']['mean'][1] - rec_results['posterior']['mean'][1]) < 0.05, (ref_results['posterior'], rec_results['posterior'])
    assert 0.8 < (ref_results['posterior']['stdev'][0] / rec_results['posterior']['stdev'][0]) < 1.2, (ref_results['posterior'], rec_results['posterior'])
    assert 0.8 < (ref_results['posterior']['stdev'][1] / rec_results['posterior']['stdev'][1]) < 1.2, (ref_results['posterior'], rec_results['posterior'])
    assert np.abs(ref_results['logzerr'] - rec_results['logzerr']) < 0.5, (ref_results['logzerr'], rec_results['logzerr'])
    print("rec:", rec_results)
    del rec_results
    # reuse raw (equally weighted) points with freshly evaluated likelihoods
    logls = np.array([log_likelihood(s) for s in ref_results['samples']])
    rec_results2 = reuse_samples(parameters, log_likelihood, points=ref_results['samples'], logl=logls)
    print("rec2:", rec_results2)
    assert rec_results2['ncall'] == len(logls), (ref_results['ncall'], rec_results2['ncall'])
    assert np.abs(ref_results['posterior']['mean'][0] - rec_results2['posterior']['mean'][0]) < 0.5, (ref_results['posterior'], rec_results2['posterior'])
    assert np.abs(ref_results['posterior']['mean'][1] - rec_results2['posterior']['mean'][1]) < 0.05, (ref_results['posterior'], rec_results2['posterior'])
    assert 0.5 < (ref_results['posterior']['stdev'][0] / rec_results2['posterior']['stdev'][0]) < 1.5, (ref_results['posterior'], rec_results2['posterior'])
    assert 0.5 < (ref_results['posterior']['stdev'][1] / rec_results2['posterior']['stdev'][1]) < 1.5, (ref_results['posterior'], rec_results2['posterior'])
# Manual entry point for running this test module as a script.
if __name__ == '__main__':
    test_hotstart_SLOW()
    test_contbox_hotstart()
| 10,117 | 47.879227 | 156 | py |
UltraNest | UltraNest-master/tests/test_stepsampling.py | import numpy as np
from ultranest.mlfriends import ScalingLayer, AffineLayer, MLFriends
from ultranest import ReactiveNestedSampler
from ultranest.stepsampler import RegionMHSampler, CubeMHSampler, CubeSliceSampler, RegionSliceSampler, SpeedVariableRegionSliceSampler, RegionBallSliceSampler
from ultranest.stepsampler import generate_region_random_direction, ellipsoid_bracket, crop_bracket_at_unit_cube
from ultranest.pathsampler import SamplingPathStepSampler
from numpy.testing import assert_allclose
#here = os.path.dirname(__file__)
def loglike_vectorized(z):
    """Vectorized bimodal Gaussian-mixture log-likelihood.

    Two equal-weight Gaussian modes (width 0.1) near 0.7 and 0.3, with each
    dimension's centre shifted by -/+ 0.001*i so the modes are tilted.

    The original implementation looped over rows and dimensions in Python;
    this version does the same computation with NumPy broadcasting, which is
    both clearer and O(1) interpreter overhead per call.

    Parameters
    ----------
    z: array-like
        2d array of shape (npoints, ndim) of unit-cube coordinates.

    Returns
    -------
    np.ndarray
        log-likelihood for each row, shape (npoints,).
    """
    z = np.asarray(z)
    # per-dimension mode centres: 0.7 - 0.001*i (mode a), 0.3 + 0.001*i (mode b)
    shift = 0.001 * np.arange(z.shape[1])
    a = -0.5 * (((z - (0.7 - shift)) / 0.1)**2).sum(axis=1)
    b = -0.5 * (((z - (0.3 + shift)) / 0.1)**2).sum(axis=1)
    return np.logaddexp(a, b)
def loglike(x):
    """Bimodal log-likelihood for a single point (tilted modes near 0.7 and 0.3)."""
    chi2_a = 0.0
    chi2_b = 0.0
    for i, xi in enumerate(x):
        chi2_a += ((xi - 0.7 + 0.001 * i) / 0.1)**2
        chi2_b += ((xi - 0.3 - 0.001 * i) / 0.1)**2
    return np.logaddexp(-0.5 * chi2_a, -0.5 * chi2_b)
def transform(x):
    """Identity prior transform: parameters live directly on the unit cube."""
    return x
def transform1(x):
    """Squaring prior transform (a simple non-uniform unit-cube mapping)."""
    return x * x
# Parameter names shared by all tests below; switch to the 40d variant for
# higher-dimensional stress testing.
paramnames = ['param%d' % i for i in range(3)]
#paramnames = ['param%d' % i for i in range(40)]
def test_stepsampler_cubemh(plot=False):
    """Nested sampling with a cube Metropolis step sampler must find both modes."""
    np.random.seed(1)
    sampler = ReactiveNestedSampler(paramnames, loglike_vectorized, transform=transform1, vectorized=True)
    sampler.stepsampler = CubeMHSampler(nsteps=4 * len(paramnames))
    result = sampler.run(log_interval=50, min_num_live_points=400)
    sampler.print_results()
    samples = result['samples']
    # posterior samples falling into the mode near 0.7 / near 0.3
    in_high_mode = (np.abs(samples - 0.7) < 0.1).all(axis=1)
    in_low_mode = (np.abs(samples - 0.3) < 0.1).all(axis=1)
    assert in_high_mode.sum() > 1, in_high_mode.sum()
    assert in_low_mode.sum() > 1, in_low_mode.sum()
def test_stepsampler_regionmh(plot=False):
    """Nested sampling with a region Metropolis step sampler must find both modes."""
    np.random.seed(2)
    sampler = ReactiveNestedSampler(paramnames, loglike_vectorized, transform=transform, vectorized=True)
    sampler.stepsampler = RegionMHSampler(nsteps=4 * len(paramnames))
    r = sampler.run(log_interval=50, min_num_live_points=400)
    sampler.print_results()
    # samples falling into either posterior mode (near 0.7 / near 0.3)
    a = (np.abs(r['samples'] - 0.7) < 0.1).all(axis=1)
    b = (np.abs(r['samples'] - 0.3) < 0.1).all(axis=1)
    assert a.sum() > 1, a
    assert b.sum() > 1, b
def test_stepsampler_cubeslice(plot=False):
    """Nested sampling with a cube slice step sampler must find both modes."""
    np.random.seed(3)
    sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform1)
    sampler.stepsampler = CubeSliceSampler(nsteps=len(paramnames))
    r = sampler.run(log_interval=50, min_num_live_points=400)
    sampler.print_results()
    # samples falling into either posterior mode (near 0.7 / near 0.3)
    a = (np.abs(r['samples'] - 0.7) < 0.1).all(axis=1)
    b = (np.abs(r['samples'] - 0.3) < 0.1).all(axis=1)
    assert a.sum() > 1
    assert b.sum() > 1
def test_stepsampler_regionslice(plot=False):
    """Nested sampling with a region slice step sampler must find both modes."""
    np.random.seed(4)
    sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform)
    sampler.stepsampler = RegionSliceSampler(nsteps=len(paramnames))
    r = sampler.run(log_interval=50, min_num_live_points=400)
    sampler.print_results()
    # samples falling into either posterior mode (near 0.7 / near 0.3)
    a = (np.abs(r['samples'] - 0.7) < 0.1).all(axis=1)
    b = (np.abs(r['samples'] - 0.3) < 0.1).all(axis=1)
    assert a.sum() > 1
    assert b.sum() > 1
def test_stepsampler_variable_speed_SLOW(plot=False):
    """SpeedVariableRegionSliceSampler with both matrix specification styles.

    The step matrix is given once as a boolean mask and once as a list of
    slices; both variants must recover both posterior modes.
    """
    matrices = [
        np.array([[True, True, True], [False, True, True], [False, False, True]]),
        [Ellipsis, slice(1,None), slice(2,4)]
    ]
    for i, matrix in enumerate(matrices):
        np.random.seed(42 + i)
        sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform)
        sampler.stepsampler = SpeedVariableRegionSliceSampler(matrix)
        r = sampler.run(log_interval=50, min_num_live_points=400)
        sampler.print_results()
        # samples falling into either posterior mode (near 0.7 / near 0.3)
        a = (np.abs(r['samples'] - 0.7) < 0.1).all(axis=1)
        b = (np.abs(r['samples'] - 0.3) < 0.1).all(axis=1)
        assert a.sum() > 1
        assert b.sum() > 1
def make_region(ndim, us=None):
    """Build a ready-to-use MLFriends region for testing.

    Parameters
    ----------
    ndim: int
        dimensionality of the unit cube.
    us: np.ndarray or None
        points to build the region from; if None, 1000 uniform points are drawn.
    """
    if us is None:
        us = np.random.uniform(size=(1000, ndim))
    # ScalingLayer is used in 1d, where an affine transform is not meaningful
    if ndim > 1:
        transformLayer = AffineLayer()
    else:
        transformLayer = ScalingLayer()
    transformLayer.optimize(us, us)
    region = MLFriends(us, transformLayer)
    region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
    region.create_ellipsoid(minvol=1.0)
    return region
def test_stepsampler(plot=False):
    """Drawing twice from a CubeMHSampler yields two distinct valid points."""
    np.random.seed(6)
    region = make_region(len(paramnames))
    Ls = loglike_vectorized(region.u)
    stepsampler = CubeMHSampler(nsteps=len(paramnames))
    # __next__ may return None until a full step chain completes; retry until
    # it produces a point
    while True:
        u1, p1, L1, nc = stepsampler.__next__(region, -1e100, region.u, Ls, transform, loglike)
        if u1 is not None:
            break
    assert L1 > -1e100
    print(u1, L1)
    while True:
        u2, p2, L2, nc = stepsampler.__next__(region, -1e100, region.u, Ls, transform, loglike)
        if u2 is not None:
            break
    assert L2 > -1e100
    print(u2, L2)
    # two successive draws must not coincide
    assert np.all(u1 != u2)
    assert np.all(L1 != L2)
def test_stepsampler_adapt_when_stuck(plot=False):
    # check that a stuck sampler can free itself
    """Step samplers started in a tiny blob must shrink their proposal scale.

    Live points are packed into a ball of width 1e-4; the samplers should
    adapt ``scale`` downward (below 0.01) within ~100 draws instead of
    staying stuck.
    """
    np.random.seed(7)
    us = np.random.normal(0.7, 0.0001, size=(1000, len(paramnames)))
    region = make_region(len(paramnames), us=us)
    Ls = loglike_vectorized(us)
    Lmin = Ls.min()
    print('CubeMHSampler')
    stepsampler = CubeMHSampler(nsteps=1, region_filter=True)
    np.random.seed(23)
    old_scale = stepsampler.scale
    for i in range(1000):
        # fail loudly if no point was produced after 100 attempts
        if i > 100:
            assert False, i
        unew, pnew, Lnew, nc = stepsampler.__next__(region, Lmin, us, Ls, transform, loglike, ndraw=10)
        if unew is not None:
            break
    new_scale = stepsampler.scale
    assert new_scale != old_scale
    assert new_scale < 0.01, (new_scale, unew)
    print('CubeSliceSampler')
    stepsampler = CubeSliceSampler(nsteps=1, region_filter=True)
    np.random.seed(23)
    old_scale = stepsampler.scale
    for j in range(100):
        for i in range(1000):
            if i > 100:
                assert False, i
            unew, pnew, Lnew, nc = stepsampler.__next__(region, Lmin, us, Ls, transform, loglike, ndraw=10)
            if unew is not None:
                break
        new_scale = stepsampler.scale
        assert new_scale != old_scale
        assert new_scale < 0.01, (new_scale, unew)
def test_stepsampler_regionmh_adapt(plot=False):
    """Adaptive-nsteps modes: invalid names raise; valid modes alter nsteps and scale.

    An unknown ``adaptive_nsteps`` string must raise ValueError at
    construction. For each sampler class and each valid adaptation mode,
    drawing a few points must change ``scale``, and ``nsteps`` must move
    away from its initial value only when adaptation is enabled.
    """
    np.random.seed(8)
    region = make_region(len(paramnames))
    Ls = loglike_vectorized(region.u)
    try:
        RegionMHSampler(nsteps=len(paramnames), adaptive_nsteps='Hello')
        assert False, 'expected error'
    except ValueError:
        pass
    for sampler_class in RegionMHSampler, CubeMHSampler, CubeSliceSampler, RegionSliceSampler:
        for adaptation in False, 'move-distance', 'proposal-total-distances', 'proposal-summed-distances':
            print()
            stepsampler = sampler_class(nsteps=len(paramnames), adaptive_nsteps=adaptation)
            print(stepsampler)
            stepsampler.region_changed(Ls, region)
            np.random.seed(23)
            old_scale = stepsampler.scale
            # draw 5 accepted points
            for i in range(5):
                while True:
                    unew, pnew, Lnew, nc = stepsampler.__next__(region, -1e100, region.u, Ls, transform, loglike)
                    if unew is not None:
                        break
            new_scale = stepsampler.scale
            assert new_scale != old_scale
            if adaptation:
                assert stepsampler.nsteps != len(paramnames)
            else:
                assert stepsampler.nsteps == len(paramnames)
def assert_point_touches_ellipsoid(ucurrent, v, t, ellipsoid_center, ellipsoid_invcov, enlarge):
    """Assert that the point ``ucurrent + t * v`` lies on the ellipsoid surface.

    The surface is defined by d' C^-1 d == enlarge, where d is the offset
    from the ellipsoid centre and C^-1 is ``ellipsoid_invcov``.
    """
    offset = (ucurrent + v * t) - ellipsoid_center
    mahalanobis_sq = offset @ ellipsoid_invcov @ offset
    assert np.isclose(mahalanobis_sq, enlarge), (ucurrent, t, mahalanobis_sq, enlarge)
def test_ellipsoid_bracket(plot=False):
    """ellipsoid_bracket must return a bracket containing 0 whose ends touch the surface.

    Runs over 20 seeds, alternating ring-shaped and uniform point clouds; for
    each valid region, brackets a horizontal line through a fixed start point.
    """
    for seed in range(20):
        print("seed:", seed)
        np.random.seed(seed)
        if seed % 2 == 0:
            # points on a circle of radius 0.1 around (0.5, 0.5)
            us = np.random.normal(size=(2**np.random.randint(3, 10), 2))
            us /= ((us**2).sum(axis=1)**0.5).reshape((-1, 1))
            us = us * 0.1 + 0.5
        else:
            us = np.random.uniform(size=(2**np.random.randint(3, 10), 2))
        if plot:
            import matplotlib.pyplot as plt
            plt.plot(us[:,0], us[:,1], 'o ', ms=2)
        transformLayer = ScalingLayer()
        region = MLFriends(us, transformLayer)
        try:
            region.maxradiussq, region.enlarge = region.compute_enlargement()
            region.create_ellipsoid()
        except ValueError:
            # degenerate point sets cannot form an ellipsoid; skip those seeds
            continue
        print(region.ellipsoid_center)
        print(region.enlarge)
        print(region.ellipsoid_cov)
        print(region.ellipsoid_invcov)
        print(region.ellipsoid_axes)
        print(region.ellipsoid_inv_axes)
        # NOTE(review): the next assignment is immediately overwritten —
        # dead code, presumably left over from an earlier variant.
        ucurrent = np.array([2**0.5*0.1/2+0.5, 2**0.5*0.1/2+0.5])
        ucurrent = np.array([0.4, 0.525])
        v = np.array([1., 0])
        if plot: plt.plot(ucurrent[0], ucurrent[1], 'o')
        print("from", ucurrent, "in direction", v)
        left, right = ellipsoid_bracket(ucurrent, v, region.ellipsoid_center, region.ellipsoid_inv_axes, region.enlarge)
        uleft = ucurrent + v * left
        uright = ucurrent + v * right
        if plot:
            plt.plot([uleft[0], uright[0]], [uleft[1], uright[1]], 'x-')
            plt.savefig('test_ellipsoid_bracket.pdf', bbox_inches='tight')
            plt.close()
        print("ellipsoid bracket:", left, right)
        # the bracket must contain t=0 and both ends must lie on the surface
        assert left <= 0, left
        assert right >= 0, right
        assert_point_touches_ellipsoid(ucurrent, v, left, region.ellipsoid_center, region.ellipsoid_invcov, region.enlarge)
        assert_point_touches_ellipsoid(ucurrent, v, right, region.ellipsoid_center, region.ellipsoid_invcov, region.enlarge)
def test_crop_bracket(plot=False):
    """crop_bracket_at_unit_cube must shrink an ellipsoid bracket into [0, 1]^2.

    Uses a fixed, previously problematic ellipsoid configuration. First
    verifies the raw ellipsoid bracket, then checks that the cropped bracket
    ends lie within the unit cube and that cropping was reported on both sides.
    """
    # fixed regression inputs (recorded from an earlier failing run)
    ucurrent = np.array([0.39676747, 0.53881673])
    v = np.array([-0.79619985, -0.60503372])
    ellipsoid_center = np.array([0.23556461, 0.49899689])
    ellipsoid_inv_axes = np.array([[-3.28755896, 0.70136518], [ 1.33333377, 1.72933397]])
    enlarge = 26.66439694551674
    ellipsoid_invcov = np.array([[11.29995701, -3.17051875], [-3.17051875, 4.76837493]])
    #enlarge = 1.0
    #ellipsoid_inv_axes = np.array([[1.0, 0.], [0., 1]])
    eleft, eright = ellipsoid_bracket(ucurrent, v, ellipsoid_center, ellipsoid_inv_axes, enlarge)
    if plot:
        us = np.random.uniform(-2, +2, size=(10000, 2))
        d = us - ellipsoid_center
        r = np.einsum('ij,jk,ik->i', d, ellipsoid_invcov, d)
        mask_inside = r <= enlarge
        import matplotlib.pyplot as plt
        plt.plot(us[mask_inside,0], us[mask_inside,1], '+', ms=2)
        plt.plot(ucurrent[0], ucurrent[1], 'o ', ms=2)
        plt.plot([ucurrent[0] + eleft * v[0], ucurrent[0] + eright * v[0]],
            [ucurrent[1] + eleft * v[1], ucurrent[1] + eright * v[1]],
            '-s', ms=8)
        plt.savefig('test_crop_bracket.pdf', bbox_inches='tight')
    print("left:", eleft, ucurrent + v * eleft)
    assert eleft <= 0, eleft
    assert_point_touches_ellipsoid(ucurrent, v, eleft, ellipsoid_center, ellipsoid_invcov, enlarge)
    print("right:", eright, ucurrent + v * eright)
    assert eright >= 0, eright
    assert_point_touches_ellipsoid(ucurrent, v, eright, ellipsoid_center, ellipsoid_invcov, enlarge)
    # crop the bracket to the unit cube
    left, right, cropleft, cropright = crop_bracket_at_unit_cube(ucurrent, v, eleft, eright)
    if plot:
        plt.plot([ucurrent[0] + left * v[0], ucurrent[0] + right * v[0]],
            [ucurrent[1] + left * v[1], ucurrent[1] + right * v[1]],
            's--', ms=8)
        plt.savefig('test_crop_bracket.pdf', bbox_inches='tight')
        plt.close()
    # both ends were outside the cube, so both must report cropping
    assert cropleft
    assert cropright
    # cropped bracket endpoints must lie inside the unit cube
    assert (ucurrent + v * left <= 1).all(), (ucurrent, v, ellipsoid_center, ellipsoid_inv_axes, enlarge)
    assert (ucurrent + v * right <= 1).all(), (ucurrent, v, ellipsoid_center, ellipsoid_inv_axes, enlarge)
    assert (ucurrent + v * left >= 0).all(), (ucurrent, v, ellipsoid_center, ellipsoid_inv_axes, enlarge)
    assert (ucurrent + v * right >= 0).all(), (ucurrent, v, ellipsoid_center, ellipsoid_inv_axes, enlarge)
# Manual entry point: run a single test directly (others left commented out).
if __name__ == '__main__':
    #test_stepsampler_cubemh(plot=True)
    #test_stepsampler_regionmh(plot=False)
    #test_stepsampler_de(plot=False)
    #test_stepsampler_cubeslice(plot=True)
    #test_stepsampler_regionslice(plot=True)
    test_ellipsoid_bracket()
    #test_crop_bracket(plot=True)
| 12,508 | 39.482201 | 159 | py |
UltraNest | UltraNest-master/tests/test_flatnuts.py | import numpy as np
from ultranest.mlfriends import AffineLayer, ScalingLayer, MLFriends
from ultranest.flatnuts import ClockedStepSampler, ClockedBisectSampler, ClockedNUTSSampler
from ultranest.flatnuts import SingleJumper, DirectJumper
from ultranest.samplingpath import SamplingPath, ContourSamplingPath
from numpy.testing import assert_allclose
def gap_free_path(sampler, ilo, ihi, transform, loglike, Lmin):
    """Return True if every path point with index in [ilo, ihi) lies above Lmin.

    Points whose likelihood was never cached along the path are evaluated
    on the fly.
    """
    path = sampler.contourpath.samplingpath
    for index in range(ilo, ihi):
        xi, vi, Li, onpath = path.interpolate(index)
        assert onpath
        if Li is None:
            # no cached likelihood for this interpolated point; evaluate it
            Li = loglike(transform(xi))
        if not Li > Lmin:
            return False
    return True
def check_starting_point(sampler, startx, startL, transform, loglike, Lmin):
    """ Verify that if going 0 steps, should return start point. """
    # no goals may be pending before this check
    assert sampler.goals == [], sampler.goals
    sampler.set_nsteps(0)
    Llast = None
    # a zero-step chain must immediately yield the start point as independent
    sample, is_independent = sampler.next(Llast)
    assert is_independent, (sample, is_independent)
    unew, Lnew = sample
    assert_allclose(startx, unew)
    assert_allclose(startL, Lnew)
    assert sampler.goals == [], sampler.goals
    # a second call must signal exhaustion (no sample, not independent)
    sample, is_independent = sampler.next(Llast)
    assert sample is None and not is_independent, (sample, is_independent)
def test_detailed_balance():
    """Detailed-balance test for the clocked FLATNUTS samplers.

    For many seeds, starts a sampling path from a live point, expands it
    forward, then restarts a fresh sampler from the path end with reversed
    velocity and checks it walks back exactly to the start (position equal,
    velocity negated). Checked for ClockedStepSampler, ClockedBisectSampler
    (only where the path has no likelihood gaps), and finally runs
    ClockedNUTSSampler end-to-end as a smoke test.
    """
    def loglike(x):
        # anisotropic Gaussian: tight in y (width 0.2 around 0.5), unit in x
        x, y = x.transpose()
        return -0.5 * (x**2 + ((y - 0.5)/0.2)**2)
    def transform(u):
        return u
    Lmin = -0.5
    for i in range(1, 100):
        print()
        print("---- seed=%d ----" % i)
        print()
        np.random.seed(i)
        # draw live points above the likelihood contour Lmin
        points = np.random.uniform(size=(10000, 2))
        L = loglike(points)
        mask = L > Lmin
        points = points[mask,:][:400,:]
        active_u = points
        active_values = L[mask][:400]
        transformLayer = AffineLayer(wrapped_dims=[])
        transformLayer.optimize(points, points)
        region = MLFriends(points, transformLayer)
        region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
        region.create_ellipsoid()
        nclusters = transformLayer.nclusters
        assert nclusters == 1
        assert np.allclose(region.unormed, region.transformLayer.transform(points)), "transform should be reproducible"
        assert region.inside(points).all(), "live points should lie near live points"
        # random unit direction, scaled to a small step size
        v = np.random.normal(size=2)
        v /= (v**2).sum()**0.5
        v *= 0.04
        print("StepSampler ----")
        print("FORWARD SAMPLING FROM", 0, active_u[0], v, active_values[0])
        samplingpath = SamplingPath(active_u[0], v, active_values[0])
        problem = dict(loglike=loglike, transform=transform, Lmin=Lmin)
        sampler = ClockedStepSampler(ContourSamplingPath(samplingpath, region))
        check_starting_point(sampler, active_u[0], active_values[0], **problem)
        sampler.expand_onestep(fwd=True, **problem)
        sampler.expand_onestep(fwd=True, **problem)
        sampler.expand_onestep(fwd=True, **problem)
        sampler.expand_onestep(fwd=True, **problem)
        sampler.expand_onestep(fwd=False, **problem)
        sampler.expand_to_step(4, **problem)
        sampler.expand_to_step(-4, **problem)
        check_starting_point(sampler, active_u[0], active_values[0], **problem)
        # reverse from the forward end: must return exactly to the start
        starti, startx, startv, startL = max(sampler.points)
        print()
        print("BACKWARD SAMPLING FROM", starti, startx, startv, startL)
        samplingpath2 = SamplingPath(startx, -startv, startL)
        sampler2 = ClockedStepSampler(ContourSamplingPath(samplingpath2, region))
        check_starting_point(sampler2, startx, startL, **problem)
        sampler2.expand_to_step(starti, **problem)
        check_starting_point(sampler2, startx, startL, **problem)
        starti2, startx2, startv2, startL2 = max(sampler2.points)
        assert_allclose(active_u[0], startx2)
        assert_allclose(v, -startv2)
        # same from the backward end, travelling forward
        starti, startx, startv, startL = min(sampler.points)
        print()
        print("BACKWARD SAMPLING FROM", starti, startx, startv, startL)
        samplingpath3 = SamplingPath(startx, startv, startL)
        sampler3 = ClockedStepSampler(ContourSamplingPath(samplingpath3, region))
        check_starting_point(sampler3, startx, startL, **problem)
        sampler3.expand_to_step(-starti, **problem)
        check_starting_point(sampler3, startx, startL, **problem)
        starti3, startx3, startv3, startL3 = max(sampler3.points)
        assert_allclose(active_u[0], startx3)
        assert_allclose(v, startv3)
        print()
        print("BisectSampler ----")
        log = dict(log=True)
        print("FORWARD SAMPLING FROM", 0, active_u[0], v, active_values[0])
        samplingpath = SamplingPath(active_u[0], v, active_values[0])
        sampler = ClockedBisectSampler(ContourSamplingPath(samplingpath, region), **log)
        check_starting_point(sampler, active_u[0], active_values[0], **problem)
        sampler.expand_to_step(10, **problem)
        check_starting_point(sampler, active_u[0], active_values[0], **problem)
        starti, startx, startv, startL = max(sampler.points)
        print()
        print("BACKWARD SAMPLING FROM", starti, startx, startv, startL)
        samplingpath2 = SamplingPath(startx, -startv, startL)
        sampler2 = ClockedBisectSampler(ContourSamplingPath(samplingpath2, region), **log)
        check_starting_point(sampler2, startx, startL, **problem)
        sampler2.expand_to_step(starti, **problem)
        check_starting_point(sampler2, startx, startL, **problem)
        starti2, startx2, startv2, startL2 = max(sampler2.points)
        # bisection may skip over likelihood gaps; reversibility is only
        # guaranteed when both paths are gap-free
        if gap_free_path(sampler, 0, starti, **problem) and gap_free_path(sampler2, 0, starti2, **problem):
            assert_allclose(active_u[0], startx2)
            assert_allclose(v, -startv2)
        starti, startx, startv, startL = min(sampler.points)
        print()
        print("BACKWARD SAMPLING FROM", starti, startx, startv, startL)
        samplingpath3 = SamplingPath(startx, -startv, startL)
        sampler3 = ClockedBisectSampler(ContourSamplingPath(samplingpath3, region), **log)
        check_starting_point(sampler3, startx, startL, **problem)
        sampler3.expand_to_step(starti, **problem)
        check_starting_point(sampler3, startx, startL, **problem)
        starti3, startx3, startv3, startL3 = min(sampler3.points)
        if gap_free_path(sampler, 0, starti, **problem) and gap_free_path(sampler3, 0, starti3, **problem):
            assert_allclose(active_u[0], startx3)
            assert_allclose(v, -startv3)
        print()
        print("NUTSSampler ----")
        print("FORWARD SAMPLING FROM", 0, active_u[0], v, active_values[0])
        samplingpath = SamplingPath(active_u[0], v, active_values[0])
        np.random.seed(i)
        sampler = ClockedNUTSSampler(ContourSamplingPath(samplingpath, region))
        sampler.get_independent_sample(**problem)
def makejump(stepper, sampler, transform, loglike, Lmin):
    """Drive the clocked sampler to completion, then perform the stepper's jump.

    Whenever the sampler hands back a point not known to be independent, its
    likelihood is evaluated and reported back — or None if it fell below the
    contour Lmin.
    """
    stepper.prepare_jump()
    last_likelihood = None
    while not sampler.is_done():
        u, is_independent = sampler.next(Llast=last_likelihood)
        if is_independent:
            continue
        # this point needs an explicit likelihood evaluation
        L = loglike(transform(u))
        last_likelihood = L if L > Lmin else None
    return stepper.make_jump()
def make_region(ndim):
    """Build an MLFriends region from 1000 fresh uniform points.

    Near-duplicate of the helper in the step-sampler tests, but without the
    option to pass points in.
    """
    us = np.random.uniform(size=(1000, ndim))
    # ScalingLayer is used in 1d, where an affine transform is not meaningful
    if ndim > 1:
        transformLayer = AffineLayer()
    else:
        transformLayer = ScalingLayer()
    transformLayer.optimize(us, us)
    region = MLFriends(us, transformLayer)
    region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
    region.create_ellipsoid(minvol=1.0)
    return region
def test_singlejumper():
    """Step a SingleJumper through 4 steps in three regimes:
    unobstructed motion, reflection off a likelihood wall, and a fully
    rejected (stuck) chain, checking position and accept/reject counters."""
    Lmin = -1.0
    us = 0.5 + np.zeros((100, 2))
    #Ls = np.zeros(100)
    region = make_region(2)
    def transform(x): return x
    def loglike(x): return 0.0
    def gradient(x, plot=False):
        # Surface normal pointing back towards the cube centre along the
        # coordinate that deviates most from 0.5.
        j = np.argmax(np.abs(x - 0.5))
        v = np.zeros(len(x))
        v[j] = -1 if x[j] > 0.5 else 1
        return v
    def nocall(x):
        # Sentinel: the gradient must never be needed while everything is accepted.
        assert False
    ui = us[np.random.randint(len(us)),:]
    v = np.array([0.01, 0.01])
    path = ContourSamplingPath(SamplingPath(ui, v, 0.0), region)
    path.gradient = nocall
    sampler = ClockedStepSampler(path)
    stepper = SingleJumper(sampler, 4)
    # fresh stepper: no steps taken yet
    assert (stepper.naccepts, stepper.nrejects) == (0, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 0, stepper.isteps
    # each jump advances by exactly one velocity increment (0.01, 0.01)
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.51, 0.51])
    assert (stepper.naccepts, stepper.nrejects) == (1, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 1, stepper.isteps
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.52, 0.52])
    assert (stepper.naccepts, stepper.nrejects) == (2, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 2, stepper.isteps
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.53, 0.53])
    assert (stepper.naccepts, stepper.nrejects) == (3, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 3, stepper.isteps
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.54, 0.54])
    assert (stepper.naccepts, stepper.nrejects) == (4, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 4, stepper.isteps
    print()
    print("make reflect")
    print()
    # Wall at x[0] = 0.505: the x-component reflects while y keeps advancing.
    def loglike(x): return 0.0 if x[0] < 0.505 else -100
    path = ContourSamplingPath(SamplingPath(ui, v, 0.0), region)
    path.gradient = gradient
    sampler = ClockedStepSampler(path)
    stepper = SingleJumper(sampler, 4)
    assert (stepper.naccepts, stepper.nrejects) == (0, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 0, stepper.isteps
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.50, 0.52])
    assert (stepper.naccepts, stepper.nrejects) == (1, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 1, stepper.isteps
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.49, 0.53])
    assert (stepper.naccepts, stepper.nrejects) == (2, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 2, stepper.isteps
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.48, 0.54])
    assert (stepper.naccepts, stepper.nrejects) == (3, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 3, stepper.isteps
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.47, 0.55])
    assert (stepper.naccepts, stepper.nrejects) == (4, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 4, stepper.isteps
    print()
    print("make stuck")
    print()
    # make stuck
    # Everything is rejected: the stepper stays at the start and only the
    # reject counter advances with each attempted step.
    def loglike(x): return -100
    path = ContourSamplingPath(SamplingPath(ui, v, 0.0), region)
    path.gradient = gradient
    sampler = ClockedStepSampler(path)
    stepper = SingleJumper(sampler, 4)
    assert (stepper.naccepts, stepper.nrejects) == (0, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 0, stepper.isteps
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.50, 0.50])
    assert (stepper.naccepts, stepper.nrejects) == (0, 1), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 1, stepper.isteps
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.50, 0.50])
    assert (stepper.naccepts, stepper.nrejects) == (0, 2), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 2, stepper.isteps
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.50, 0.50])
    assert (stepper.naccepts, stepper.nrejects) == (0, 3), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 3, stepper.isteps
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.50, 0.50])
    assert (stepper.naccepts, stepper.nrejects) == (0, 4), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 4, stepper.isteps
def test_directjumper():
    """Same three regimes as test_singlejumper, but DirectJumper with a
    ClockedBisectSampler takes all 4 steps in a single jump, landing on the
    same final points as the step-by-step SingleJumper."""
    Lmin = -1.0
    us = 0.5 + np.zeros((100, 2))
    #Ls = np.zeros(100)
    region = make_region(2)
    def transform(x): return x
    def loglike(x): return 0.0
    def gradient(x, plot=False):
        # Surface normal pointing back towards the cube centre along the
        # coordinate that deviates most from 0.5.
        j = np.argmax(np.abs(x - 0.5))
        v = np.zeros(len(x))
        v[j] = -1 if x[j] > 0.5 else 1
        return v
    def nocall(x):
        # Sentinel: the gradient must never be needed while everything is accepted.
        assert False
    ui = us[np.random.randint(len(us)),:]
    v = np.array([0.01, 0.01])
    path = ContourSamplingPath(SamplingPath(ui, v, 0.0), region)
    path.gradient = nocall
    sampler = ClockedBisectSampler(path)
    stepper = DirectJumper(sampler, 4)
    assert (stepper.naccepts, stepper.nrejects) == (0, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 0, stepper.isteps
    # one jump covers all 4 accepted steps at once
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.54, 0.54])
    assert (stepper.naccepts, stepper.nrejects) == (4, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 4, stepper.isteps
    print()
    print("make reflect")
    print()
    # wall at x[0] = 0.505: the direct jump reflects and ends where the
    # equivalent SingleJumper sequence ended
    def loglike(x): return 0.0 if x[0] < 0.505 else -100
    path = ContourSamplingPath(SamplingPath(ui, v, 0.0), region)
    path.gradient = gradient
    sampler = ClockedBisectSampler(path)
    stepper = DirectJumper(sampler, 4)
    assert (stepper.naccepts, stepper.nrejects) == (0, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 0, stepper.isteps
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.47, 0.55])
    assert (stepper.naccepts, stepper.nrejects) == (4, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 4, stepper.isteps
    print()
    print("make stuck")
    print()
    # make stuck
    # everything rejected: stays at the start and records 4 rejects at once
    def loglike(x): return -100
    path = ContourSamplingPath(SamplingPath(ui, v, 0.0), region)
    path.gradient = gradient
    sampler = ClockedBisectSampler(path)
    stepper = DirectJumper(sampler, 4)
    assert (stepper.naccepts, stepper.nrejects) == (0, 0), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 0, stepper.isteps
    x, L = makejump(stepper, sampler, transform, loglike, Lmin)
    assert_allclose(x, [0.50, 0.50])
    assert (stepper.naccepts, stepper.nrejects) == (0, 4), (stepper.naccepts, stepper.nrejects)
    assert stepper.isteps == 4, stepper.isteps
if __name__ == '__main__':
    # Run only the detailed-balance check interactively, with plots enabled.
    test_detailed_balance(plot=True)
| 14,966 | 41.885387 | 119 | py |
UltraNest | UltraNest-master/tests/test_clustering.py | from __future__ import print_function, division
import numpy as np
import os
import matplotlib.pyplot as plt
from ultranest.utils import create_logger
from ultranest import ReactiveNestedSampler
from ultranest.mlfriends import MLFriends, AffineLayer
here = os.path.dirname(__file__)
def test_clustering():
    """update_clusters on random point sets: a small radius (0.1^2) must split
    them into several clusters, a large radius (0.2^2) into exactly one."""
    from ultranest.mlfriends import update_clusters
    for i in range(5):
        np.random.seed(i * 100)
        points = np.random.uniform(size=(100,2))
        nclusters, clusteridxs, overlapped_points = update_clusters(points, points, 0.1**2)
        # NOTE(review): the inner loop reuses the outer loop variable `i`;
        # harmless here because `i` is only read at the top of the outer loop.
        for i in np.unique(clusteridxs):
            x, y = points[clusteridxs == i].transpose()
            plt.scatter(x, y)
        plt.savefig('testclustering_0p1.pdf', bbox_inches='tight')
        plt.close()
        assert 1 < nclusters < 30
        nclusters, clusteridxs, overlapped_points = update_clusters(points, points, 0.2**2)
        for i in np.unique(clusteridxs):
            x, y = points[clusteridxs == i].transpose()
            plt.scatter(x, y)
        plt.savefig('testclustering_0p2.pdf', bbox_inches='tight')
        plt.close()
        # with the large radius everything must merge into a single cluster
        assert 1 <= nclusters < 2
def test_clusteringcase():
    """Cluster a stored problematic point set with its recorded radius and plot the result."""
    from ultranest.mlfriends import update_clusters
    testdir = os.path.dirname(__file__)
    pts = np.loadtxt(os.path.join(testdir, "clusters2.txt"))
    radiussq = np.loadtxt(os.path.join(testdir, "clusters2_radius.txt"))
    print('maxradius:', radiussq)
    nclusters, assignments, overlapped = update_clusters(pts, pts, radiussq)
    plt.title('nclusters: %d' % nclusters)
    # one scatter series per cluster label
    for label in np.unique(assignments):
        cx, cy = pts[assignments == label].transpose()
        plt.scatter(cx, cy)
    plt.savefig('testclustering_2.pdf', bbox_inches='tight')
    plt.close()
def test_clusteringcase_eggbox():
    """Regression: a stored eggbox live-point snapshot must yield a tiny
    maxradius and split into roughly 16 clusters."""
    from ultranest.mlfriends import update_clusters, ScalingLayer, MLFriends
    points = np.loadtxt(os.path.join(here, "eggboxregion.txt"))
    transformLayer = ScalingLayer()
    transformLayer.optimize(points, points)
    region = MLFriends(points, transformLayer)
    maxr = region.compute_maxradiussq(nbootstraps=30)
    # regression window for this specific stored snapshot
    assert 1e-10 < maxr < 5e-10
    print('maxradius:', maxr)
    nclusters, clusteridxs, overlapped_points = update_clusters(points, points, maxr)
    # plt.title('nclusters: %d' % nclusters)
    # for i in np.unique(clusteridxs):
    #     x, y = points[clusteridxs == i].transpose()
    #     plt.scatter(x, y)
    # plt.savefig('testclustering_eggbox.pdf', bbox_inches='tight')
    # plt.close()
    assert 14 < nclusters < 20, nclusters
class MockIntegrator(ReactiveNestedSampler):
    """Minimal stand-in for ReactiveNestedSampler.

    Deliberately does NOT call the parent __init__; it sets up only the
    attributes that _update_region reads, so region updates can be tested
    in isolation.
    """
    def __init__(self):
        # single-process setup: no MPI communication
        self.use_mpi = False
        self.mpi_size = 1
        self.mpi_rank = 0
        # region state is created lazily by _update_region
        self.region = None
        self.transformLayer = None
        self.wrapped_axes = []
        self.log = True
        self.logger = create_logger("mock")
        self.region_class = MLFriends
        self.transform_layer_class = AffineLayer
def test_overclustering_eggbox_txt():
    """Clustering of stored eggbox snapshots must be stable: rebuilding the
    region and re-running update_clusters repeatedly must keep the cluster
    count in the expected 15-19 window."""
    from ultranest.mlfriends import update_clusters, ScalingLayer, MLFriends
    np.random.seed(1)
    for i in [20, 23, 24, 27, 49]:
        print()
        print("==== TEST CASE %d =====================" % i)
        print()
        points = np.loadtxt(os.path.join(here, "overclustered_u_%d.txt" % i))
        for k in range(3):
            # rebuild from scratch to check bootstrap-to-bootstrap stability
            transformLayer = ScalingLayer(wrapped_dims=[])
            transformLayer.optimize(points, points)
            region = MLFriends(points, transformLayer)
            maxr = region.compute_maxradiussq(nbootstraps=30)
            region.maxradiussq = maxr
            nclusters = transformLayer.nclusters
            print("manual: r=%e nc=%d" % (region.maxradiussq, nclusters))
            # assert 1e-10 < maxr < 5e-10
            nclusters, clusteridxs, overlapped_points = update_clusters(points, points, maxr)
            print("reclustered: nc=%d" % (nclusters))
            if False:
                # disabled diagnostic plot
                plt.title('nclusters: %d' % nclusters)
                for k in np.unique(clusteridxs):
                    x, y = points[clusteridxs == k].transpose()
                    plt.scatter(x, y)
                plt.savefig('testoverclustering_eggbox_%d.pdf' % i, bbox_inches='tight')
                plt.close()
            assert 14 < nclusters < 20, (nclusters, i)
            for j in range(3):
                # identical inputs must reproduce the cluster count
                nclusters, clusteridxs, overlapped_points = update_clusters(points, points, maxr)
                assert 14 < nclusters < 20, (nclusters, i)
def test_overclustering_eggbox_update(plot=False):
    """Transition a MockIntegrator region from live-point set u0 to u for
    stored eggbox snapshots: cluster count must stay in the 15-19 window and
    no cluster may shrink to a single (lonely) point."""
    np.random.seed(1)
    for i in [20, 23, 24, 27, 42]:
        print()
        print("==== TEST CASE %d =====================" % i)
        print()
        mock = MockIntegrator()
        print("loading...")
        data = np.load(os.path.join(here, "overclustered_%d.npz" % i))
        print("loading... done")
        nsamples, mock.x_dim = data['u0'].shape
        noverlap = 0
        # NOTE(review): this loop clobbers the test-case index `i`; the
        # assertion messages below therefore report the last row index, not
        # the test case number.
        for i, u1 in enumerate(data['u']):
            assert len((u1 == data['u0']).all(axis=1)) == nsamples
            noverlap += (u1 == data['u0']).all(axis=1).sum()
        print("u0:%d -> u:%d : %d points are common" % (nsamples, nsamples, noverlap))
        mock._update_region(data['u0'], data['u0'])
        nclusters = mock.transformLayer.nclusters
        print("initialised with: r=%e nc=%d" % (mock.region.maxradiussq, nclusters))
        # size of the smallest cluster; 1 would mean an isolated point
        smallest_cluster = min(
            (mock.transformLayer.clusterids == i).sum()
            for i in np.unique(mock.transformLayer.clusterids))
        if smallest_cluster == 1:
            print("found lonely points")
        print(" --- intermediate tests how create_new reacts ---")
        nextTransformLayer = mock.transformLayer.create_new(data['u0'], mock.region.maxradiussq)
        print("updated to (with same data): r=%e nc=%d" % (mock.region.maxradiussq, nclusters))
        smallest_cluster = min((nextTransformLayer.clusterids == i).sum() for i in np.unique(nextTransformLayer.clusterids))
        assert smallest_cluster > 1, ("found lonely points", i, nclusters, np.unique(mock.transformLayer.clusterids, return_counts=True))
        nextTransformLayer = mock.transformLayer.create_new(data['u'], mock.region.maxradiussq)
        nclusters = nextTransformLayer.nclusters
        print("updated to (with new data): r=%e nc=%d" % (mock.region.maxradiussq, nclusters))
        smallest_cluster = min((nextTransformLayer.clusterids == i).sum() for i in np.unique(nextTransformLayer.clusterids))
        if smallest_cluster > 1:
            # this happens because mock.region.maxradiussq is not valid anymore
            # when nlive changes
            print("found lonely points", i, nclusters, np.unique(mock.transformLayer.clusterids, return_counts=True))
            if plot:
                # visualize how each live point moved between u0 and u
                for xi0, yi0, xi, yi in zip(data['u0'][:,0], data['u0'][:,1], data['u'][:,0], data['u'][:,1]):
                    plt.plot([xi0, xi], [yi0, yi], 'x-', ms=2)
                plt.savefig('testoverclustering_eggbox_%d_diff.pdf' % i, bbox_inches='tight')
                plt.close()
        print(" --- end ---")
        if len(data['u']) < nsamples or True:
            # maxradius has to be invalidated if live points change
            print("setting maxradiussq to None")
            mock.region.maxradiussq = None
        updated = mock._update_region(data['u'], data['u'])
        nclusters = mock.transformLayer.nclusters
        print("transitioned to : r=%e nc=%d %s" % (mock.region.maxradiussq, nclusters, updated))
        smallest_cluster = min((mock.transformLayer.clusterids == i).sum() for i in np.unique(mock.transformLayer.clusterids))
        if smallest_cluster == 1:
            print("found lonely points")
            for k in np.unique(mock.transformLayer.clusterids):
                x, y = mock.region.u[mock.transformLayer.clusterids == k].transpose()
                print('cluster %d/%d: %d points @ %.5f +- %.5f , %.5f +- %.5f' % (k, nclusters, len(x), x.mean(), x.std(), y.mean(), y.std()))
        if plot:
            plt.title('nclusters: %d' % nclusters)
            for k in np.unique(mock.transformLayer.clusterids):
                x, y = mock.region.u[mock.transformLayer.clusterids == k].transpose()
                plt.scatter(x, y, s=2)
            plt.savefig('testoverclustering_eggbox_%d.pdf' % i, bbox_inches='tight')
            plt.close()
        assert 14 < nclusters < 20, (nclusters, i)
        assert smallest_cluster > 1, (i, nclusters, np.unique(mock.transformLayer.clusterids, return_counts=True))
if __name__ == '__main__':
    # only the update test runs interactively; the others are disabled here
    # test_clustering()
    # test_clusteringcase()
    test_overclustering_eggbox_update(plot=True)
| 8,779 | 41.415459 | 138 | py |
UltraNest | UltraNest-master/tests/test_transforms.py | from ultranest.mlfriends import ScalingLayer, AffineLayer
import numpy as np
import matplotlib.pyplot as plt
def genpoints_following_cov(covmatrix, size=1000):
    """Draw up to *size* points uniformly filling the ellipse x^T C x <= 1.

    Candidates are sampled uniformly in [-5, 5]^2 and rejected outside the
    ellipse, so for very small ellipses fewer than *size* points may be
    returned.
    """
    candidates = np.random.uniform(-5, 5, size=(100000, 2))
    quadform = np.einsum('ij,jk,ik->i', candidates, covmatrix, candidates)
    inside = candidates[quadform <= 1, :]
    return inside[:size, :]
def test_transform():
    """ScalingLayer round trip must be bitwise exact for correlated point
    sets (and for a single point) across correlations and scale ratios."""
    np.random.seed(1)
    corrs = np.arange(-1, 1, 0.1)
    # keep correlations strictly inside (-1, 1) so covariances stay valid
    corrs *= 0.999
    for corr in corrs:
        for scaleratio in [1, 0.001]:
            covmatrix = np.array([[1., corr], [corr, 1.]])
            points = np.random.multivariate_normal(np.zeros(2), covmatrix, size=1000)
            print(corr, scaleratio, covmatrix.flatten(), points.shape)
            # squeeze into the unit cube around 0.5, optionally with very unequal axis scales
            points[:,0] = points[:,0] * 0.01 * scaleratio + 0.5
            points[:,1] = points[:,1] * 0.01 + 0.5
            layer = ScalingLayer()
            layer.optimize(points, points)
            tpoints = layer.transform(points)
            assert tpoints.shape == points.shape, (tpoints.shape, points.shape)
            points2 = layer.untransform(tpoints)
            assert tpoints.shape == points2.shape, (tpoints.shape, points2.shape)
            # exact (==) round trip expected for a pure per-axis scaling
            assert (points2 == points).all(), (points, tpoints, points2)
            # transform a single point
            points = points[0]
            tpoints = layer.transform(points)
            assert tpoints.shape == points.shape, (tpoints.shape, points.shape)
            points2 = layer.untransform(tpoints)
            assert tpoints.shape == points2.shape, (tpoints.shape, points2.shape)
            assert (points2 == points).all(), (points, tpoints, points2)
def test_affine_transform(plot=False):
    """AffineLayer round trip must be exact for correlated, uniformly-filled
    ellipse samples (and for a single point)."""
    np.random.seed(1)
    corrs = [0, 0.6, 0.95, 0.999]
    for corr in corrs:
        for scaleratio in [1]: #, 0.001]:
            covmatrix = np.array([[1., corr], [corr, 1.]])
            # should draw uniformly sampled points
            points = genpoints_following_cov(covmatrix, size=400)
            print('settings: corr:', corr, 'scaleratio:', scaleratio, 'covmatrix:', covmatrix.flatten(), points.shape)
            # squeeze into the unit cube around 0.5
            points[:,0] = points[:,0] * 0.01 * scaleratio + 0.5
            points[:,1] = points[:,1] * 0.01 + 0.5
            layer = AffineLayer()
            layer.optimize(points, points)
            # points3: an uncorrelated sample mapped back through the layer,
            # used only for the visual comparison below
            points3 = layer.untransform(genpoints_following_cov(np.diag([1,1]), size=400))
            #print('cov:', layer.cov, 'covmatrix:', covmatrix, 'ratio:', layer.cov / covmatrix)
            tpoints = layer.transform(points)
            assert tpoints.shape == points.shape, (tpoints.shape, points.shape)
            points2 = layer.untransform(tpoints)
            assert tpoints.shape == points2.shape, (tpoints.shape, points2.shape)
            if plot and scaleratio == 1:
                plt.figure(figsize=(9,4))
                plt.subplot(1, 2, 1)
                plt.scatter(points[:,0], points[:,1])
                plt.scatter(points2[:,0], points2[:,1], marker='x')
                plt.scatter(points3[:,0], points3[:,1], marker='+')
                plt.subplot(1, 2, 2)
                plt.scatter(tpoints[:,0], tpoints[:,1])
                # equal axis limits for the transformed-space panel
                lo, hi = plt.xlim()
                lo2, hi2 = plt.ylim()
                lo, hi = min(lo, lo2), max(hi, hi2)
                plt.xlim(lo, hi)
                plt.ylim(lo, hi)
                plt.savefig("testtransform_affine_corr%s_scale%s.pdf" % (corr, scaleratio), bbox_inches='tight')
                plt.close()
            # exact (==) round trip expected
            assert (points2 == points).all(), (points, tpoints, points2)
            # transform a single point
            points = points[0]
            tpoints = layer.transform(points)
            assert tpoints.shape == points.shape, (tpoints.shape, points.shape)
            points2 = layer.untransform(tpoints)
            assert tpoints.shape == points2.shape, (tpoints.shape, points2.shape)
            assert (points2 == points).all(), (points, tpoints, points2)
def test_wrap(plot=False):
    """ScalingLayer/AffineLayer round trips with periodic (wrapped) dimensions.

    The point cloud is shifted onto the 0/1 boundary of each wrapped axis to
    exercise the periodic part of the transforms.
    """
    np.random.seed(1)
    for Npoints in 10, 100, 1000:
        for wrapids in [[], [0], [1], [0,1]]:
            print("Npoints=%d wrapped_dims=%s" % (Npoints, wrapids))
            #wrapids = np.array(wrapids)
            points = np.random.normal(0.5, 0.01, size=(Npoints, 2))
            for wrapi in wrapids:
                # move the cloud onto the periodic boundary of this axis
                points[:,wrapi] = np.fmod(points[:,wrapi] + 0.5, 1)
            assert (points > 0).all(), points
            assert (points < 1).all(), points
            layer = ScalingLayer(wrapped_dims=wrapids)
            layer.optimize(points, points)
            tpoints = layer.transform(points)
            assert tpoints.shape == points.shape, (tpoints.shape, points.shape)
            points2 = layer.untransform(tpoints)
            assert tpoints.shape == points2.shape, (tpoints.shape, points2.shape)
            if plot:
                plt.subplot(1, 2, 1)
                plt.scatter(points[:,0], points[:,1])
                plt.scatter(points2[:,0], points2[:,1], marker='x')
                plt.subplot(1, 2, 2)
                plt.scatter(tpoints[:,0], tpoints[:,1])
                plt.savefig("testtransform_%d_wrap%d.pdf" % (Npoints, len(wrapids)), bbox_inches='tight')
                plt.close()
            assert np.allclose(points2, points), (points, tpoints, points2)
            layer = AffineLayer(wrapped_dims=wrapids)
            layer.optimize(points, points)
            tpoints = layer.transform(points)
            assert tpoints.shape == points.shape, (tpoints.shape, points.shape)
            points2 = layer.untransform(tpoints)
            # NOTE(review): unlike the ScalingLayer branch above, the AffineLayer
            # round trip is only shape-checked, never value-checked; consider
            # adding np.allclose(points2, points) once confirmed it holds for
            # wrapped affine transforms.
            assert tpoints.shape == points2.shape, (tpoints.shape, points2.shape)
if __name__ == '__main__':
    # only the affine round-trip check runs interactively; others are disabled
    test_affine_transform(plot=True)
    #test_wrap(plot=True)
    #test_transform()
UltraNest | UltraNest-master/tests/test_samplingpath.py | import numpy as np
import matplotlib.pyplot as plt
from ultranest.mlfriends import AffineLayer, MLFriends
from ultranest.samplingpath import SamplingPath, ContourSamplingPath
from ultranest.samplingpath import box_line_intersection, nearest_box_intersection_line, linear_steps_with_reflection, angle, get_sphere_tangents, norm
from numpy.testing import assert_allclose
def test_horizontal():
    """Axis-aligned rays must exit the unit cube on the expected faces."""
    cases = [
        # start, direction, crossed axis, entry point, exit point
        ([0.5, 0.5], [0, 1.], 1, [0.5, 0], [0.5, 1]),
        ([0.3, 0.3], [1, 0.], 0, [0, 0.3], [1, 0.3]),
    ]
    for origin, heading, axis, entry, exitpt in cases:
        (c1, _, ax1), (c2, _, ax2) = box_line_intersection(np.array(origin), np.array(heading))
        print((c1, ax1), (c2, ax2))
        assert ax1 == axis
        np.testing.assert_allclose(c1, entry)
        assert ax2 == axis
        np.testing.assert_allclose(c2, exitpt)
def test_corner():
    """Follow a ray into the (1,1) corner and check two successive reflections."""
    start, direction = np.array([0.6, 0.5]), np.array([0.4, 0.5])
    print("starting ray:", start, direction)
    (c1, _, ax1), (c2, _, ax2) = box_line_intersection(start, direction)
    print((c1, ax1), (c2, ax2))
    np.testing.assert_allclose(c2, [1, 1])
    # reflect: restart from the exit point with the crossed axis flipped
    start = c2
    direction[ax2] *= -1
    print("restarting ray:", start, direction)
    (c1, _, ax1), (c2, _, ax2) = box_line_intersection(start, direction)
    print((c1, ax1), (c2, ax2))
    np.testing.assert_allclose(c1, [1., 1.])
    np.testing.assert_allclose(c2, [0.2, 0.])
    # second reflection off the bottom face
    start = c2
    direction[ax2] *= -1
    (c1, _, ax1), (c2, _, ax2) = box_line_intersection(start, direction)
    print((c1, ax1), (c2, ax2))
    np.testing.assert_allclose(c1, [0.2, 0.])
    np.testing.assert_allclose(c2, [0., 0.25])
def test_wrap():
    """linear_steps_with_reflection under all four wrap configurations:
    unwrapped axes reflect at the cube faces, wrapped axes pass through
    periodically. Expected positions are enumerated step by step."""
    # no wrapping: both axes reflect at the faces
    start, direction = np.array([0.1, 0.89]), np.array([0.2, 0.1])
    wrap = np.array([False, False])
    newpoint, _ = linear_steps_with_reflection(start, direction, 0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.1, 0.89])
    newpoint, _ = linear_steps_with_reflection(start, direction, 1.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.3, 0.99])
    newpoint, _ = linear_steps_with_reflection(start, direction, 2.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.5, 0.91])
    newpoint, _ = linear_steps_with_reflection(start, direction, 3.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.7, 0.81])
    newpoint, _ = linear_steps_with_reflection(start, direction, 4.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.9, 0.71])
    newpoint, _ = linear_steps_with_reflection(start, direction, 5.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.9, 0.61])
    # x wrapped: x passes through 1.0 back to 0.1 at the last step
    start, direction = np.array([0.1, 0.89]), np.array([0.2, 0.1])
    wrap = np.array([True, False])
    newpoint, _ = linear_steps_with_reflection(start, direction, 0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.1, 0.89])
    newpoint, _ = linear_steps_with_reflection(start, direction, 1.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.3, 0.99])
    newpoint, _ = linear_steps_with_reflection(start, direction, 2.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.5, 0.91])
    newpoint, _ = linear_steps_with_reflection(start, direction, 3.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.7, 0.81])
    newpoint, _ = linear_steps_with_reflection(start, direction, 4.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.9, 0.71])
    newpoint, _ = linear_steps_with_reflection(start, direction, 5.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.1, 0.61])
    # y wrapped: y passes through 1.0 instead of reflecting
    start, direction = np.array([0.1, 0.89]), np.array([0.2, 0.1])
    wrap = np.array([False, True])
    newpoint, _ = linear_steps_with_reflection(start, direction, 0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.1, 0.89])
    newpoint, _ = linear_steps_with_reflection(start, direction, 1.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.3, 0.99])
    newpoint, _ = linear_steps_with_reflection(start, direction, 2.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.5, 0.09])
    newpoint, _ = linear_steps_with_reflection(start, direction, 3.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.7, 0.19])
    newpoint, _ = linear_steps_with_reflection(start, direction, 4.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.9, 0.29])
    newpoint, _ = linear_steps_with_reflection(start, direction, 5.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.9, 0.39])
    # both wrapped: each axis passes through its boundary periodically
    start, direction = np.array([0.1, 0.89]), np.array([0.2, 0.1])
    wrap = np.array([True, True])
    newpoint, _ = linear_steps_with_reflection(start, direction, 0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.1, 0.89])
    newpoint, _ = linear_steps_with_reflection(start, direction, 1.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.3, 0.99])
    newpoint, _ = linear_steps_with_reflection(start, direction, 2.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.5, 0.09])
    newpoint, _ = linear_steps_with_reflection(start, direction, 3.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.7, 0.19])
    newpoint, _ = linear_steps_with_reflection(start, direction, 4.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.9, 0.29])
    newpoint, _ = linear_steps_with_reflection(start, direction, 5.0, wrapped_dims=wrap)
    assert_allclose(newpoint, [0.1, 0.39])
def test_random():
    """Randomized checks: box_line_intersection must be symmetric under
    direction reversal, and reflected stepping must compose and stay finite
    across dimensionalities 1..100."""
    for i in range(100):
        ndim = 1 + i
        start = np.random.uniform(size=ndim)
        direction = np.random.normal(size=ndim)
        direction /= (direction**2).sum()**0.5
        # occasionally aim some components straight at the origin corner
        reset = np.random.binomial(1, 0.1, size=ndim) == 1
        direction[reset] = -start[reset]
        # check that the returned result is symmetric to the direction
        (c1, _, ax1), (c2, _, ax2) = box_line_intersection(start, direction)
        (b1, _, ax1), (b2, _, ax2) = box_line_intersection(start, -direction)
        np.testing.assert_allclose(c1, b2)
        np.testing.assert_allclose(b1, c2)
        # check that the i+j step is consistent with making i steps and then j steps
        # BUG FIX: arguments were swapped (binomial(0.5, 1) truncates n to 0 and
        # always yields False); binomial(1, 0.5) gives the intended random mask.
        wrapped_dims = np.random.binomial(1, 0.5, size=ndim).astype(bool)
        a, b = linear_steps_with_reflection(start, direction, 1 * 0.04, wrapped_dims=wrapped_dims)
        c, d = linear_steps_with_reflection(a, b, 1 * 0.04, wrapped_dims=wrapped_dims)
        e, f = linear_steps_with_reflection(start, direction, 2 * 0.04, wrapped_dims=wrapped_dims)
        # NOTE(review): c/e and d/f are computed but never compared (cf.
        # test_forward); confirm composition holds under wrapping before
        # asserting here.
        # check that the length is not infinite
        wrapped_dims = np.ones(ndim, dtype=bool)
        a, b = linear_steps_with_reflection(start, direction, 1000000, wrapped_dims=wrapped_dims)
def test_forward(plot=False):
    """Reflected stepping from random starts: the track stays strictly inside
    the unit cube, steps have bounded length, and composing two single steps
    equals one double step."""
    np.random.seed(1)
    for j in range(40):
        # alternate between explicit no-wrap and the default (None)
        if j % 2 == 0:
            wrapped_dims = np.array([False, False])
        else:
            wrapped_dims = None
        start = np.random.uniform(size=2)
        direction = np.random.normal(size=2)
        direction /= (direction**2).sum()**0.5
        points = []
        for i in range(100):
            newpoint, _ = linear_steps_with_reflection(start, direction, i * 0.04, wrapped_dims=wrapped_dims)
            points.append(newpoint)
        points = np.array(points)
        # composing 1-step + 1-step must match a single 2-step extrapolation
        a, b = linear_steps_with_reflection(start, direction, 1 * 0.04, wrapped_dims=wrapped_dims)
        c, d = linear_steps_with_reflection(a, b, 1 * 0.04, wrapped_dims=wrapped_dims)
        e, f = linear_steps_with_reflection(start, direction, 2 * 0.04, wrapped_dims=wrapped_dims)
        assert_allclose(c, e)
        assert_allclose(d, f)
        np.testing.assert_allclose(points[0], start)
        if plot:
            plt.plot(start[0], start[1], 'o ')
            plt.plot(points[:,0], points[:,1], 'x-')
            plt.xlim(0, 1)
            plt.ylim(0, 1)
            plt.savefig('flatnuts_test_forward_%02d.png' % j, bbox_inches='tight')
            plt.close()
        assert np.isfinite(points).all(), (j, points)
        # the reflected track must remain strictly inside the unit cube
        assert (points > 0).all(), (j, points)
        assert (points < 1).all(), (j, points)
        # consecutive points are at most one step length (0.04) apart
        delta = ((points[1:,:] - points[:-1,:])**2).sum(axis=1)**0.5
        #print(delta.max(), delta.min(), direction)
        assert (delta <= 0.04001).all(), (j, delta, np.where(delta > 0.1), points)
def test_samplingpath():
    """SamplingPath bookkeeping: interpolation at stored indices, KeyError
    outside the recorded range, and likelihood handling inside a gap."""
    x0 = np.array([0.5, 0.5])
    v0 = np.array([0.1, 0.0])
    L0 = 0.
    path = SamplingPath(x0, v0, L0)
    assert path.interpolate(0) == (x0, v0, L0, True)
    # indices outside the recorded range must raise KeyError
    try:
        path.interpolate(1)
        assert False
    except KeyError:
        pass
    try:
        path.interpolate(-1)
        assert False
    except KeyError:
        pass
    path.add(-1, x0 - v0, v0, 1.0)
    x1, v1, L1, on_path = path.interpolate(-1)
    assert_allclose(x1, x0 - v0)
    assert_allclose(v1, v0)
    assert_allclose(L1, 1.0)
    assert on_path
    path.add(4, x0 + 4*v0, v0, 4.0)
    # index 1 lies in the gap between stored indices 0 and 4:
    # position is interpolated, but the likelihood is unknown (None)
    x1, v1, L1, on_path = path.interpolate(1)
    assert_allclose(x1, x0 + v0)
    assert_allclose(v1, v0)
    assert L1 is None, L1
    assert on_path
def test_samplingpath_cubereflect():
    """A point behind the start can be recorded without error near the cube edge."""
    origin = np.array([0.1, 0.1])
    velocity = np.array([0.1, 0.01])
    track = SamplingPath(origin, velocity, 0.)
    track.add(-1, origin - velocity, velocity, 1.0)
def test_samplingpath_oddcase():
    """Extrapolation must succeed at every nonzero index for a recorded
    problematic 10d starting point."""
    start = np.array([0.19833663, 0.49931288, 0.62744967, 0.47308545, 0.48858042, 0.49025685,
        0.48481497, 0.49068977, 0.49562456, 0.51102634] )
    step = np.array([-0.00053468, -0.00106889, 0.0012165, 0.00737494, 0.00152363, -0.00164736,
        0.00371493, 0.02057758, -0.00260349, 0.01266826])
    track = SamplingPath(start, step, 0.)
    # every index from -10 to 10, skipping the origin itself
    for index in [i for i in range(-10, 11) if i != 0]:
        track.extrapolate(index)
def get_reflection_angles(normal, v):
    """Determine which surface normals can reflect direction *v* reversibly.

    A normal qualifies when v approaches the surface (negative projection)
    and the reflected-then-reversed direction also approaches it.
    Returns (mask_forward, angles, anglesnew).
    """
    angles = (normal * (v / norm(v))).sum(axis=1)
    #mask_forward1 = angles < 0
    # additionally, the reverse should work:
    vnew = -(v.reshape((1, -1)) - 2 * angles.reshape((-1, 1)) * normal)
    anglesnew = (normal * (vnew / norm(vnew, axis=1).reshape((-1, 1)))).sum(axis=1)
    assert anglesnew.shape == (len(normal),), (anglesnew.shape, normal.shape)
    mask_forward = np.logical_and(angles < 0, anglesnew < 0)
    return mask_forward, angles, anglesnew
def test_reversible_gradient(plot=False):
    """Reflections off the likelihood contour must be reversible: reflecting
    a direction twice off the same surface normal recovers the original
    direction, both for hand-computed normals and for ContourSamplingPath's
    gradient."""
    def loglike(x):
        # anisotropic Gaussian centred at (0, 0.5)
        x, y = x.transpose()
        return -0.5 * (x**2 + ((y - 0.5)/0.2)**2)
    def transform(u):
        return u
    Lmin = -0.5
    # seed 84 first: a recorded problematic case with fixed v/reflpoint below
    for i in [84] + list(range(1, 100)):
        print("setting seed = %d" % i)
        np.random.seed(i)
        points = np.random.uniform(size=(10000, 2))
        L = loglike(points)
        mask = L > Lmin
        points = points[mask,:][:100,:]
        active_u = points
        active_values = L[mask][:100]
        transformLayer = AffineLayer(wrapped_dims=[])
        transformLayer.optimize(points, points)
        region = MLFriends(points, transformLayer)
        region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
        region.create_ellipsoid()
        nclusters = transformLayer.nclusters
        assert nclusters == 1
        assert np.allclose(region.unormed, region.transformLayer.transform(points)), "transform should be reproducible"
        assert region.inside(points).all(), "live points should lie near live points"
        # recorded problem cases use fixed direction and reflection point
        if i == 84:
            v = np.array([0.03477044, -0.01977415])
            reflpoint = np.array([0.09304075, 0.29114574])
        elif i == 4:
            v = np.array([0.03949306, -0.00634806])
            reflpoint = np.array([0.9934771, 0.55358031])
        else:
            v = np.random.normal(size=2)
            v /= (v**2).sum()**0.5
            v *= 0.04
            j = np.random.randint(len(active_u))
            reflpoint = np.random.normal(active_u[j,:], 0.04)
            # NOTE(review): `and` here requires BOTH bounds checks to fail
            # before skipping; `or` looks like the intended condition
            # (skip whenever the point leaves the unit cube) -- confirm.
            if not (reflpoint < 1).all() and not (reflpoint > 0).all():
                continue
        # surface normals at each live point, pointing towards reflpoint's sphere
        bpts = region.transformLayer.transform(reflpoint).reshape((1, -1))
        tt = get_sphere_tangents(region.unormed, bpts)
        t = region.transformLayer.untransform(tt * 1e-3 + region.unormed) - region.u
        # compute new vector
        normal = t / norm(t, axis=1).reshape((-1, 1))
        print("reflecting at ", reflpoint, "with direction", v)
        mask_forward1, angles, anglesnew = get_reflection_angles(normal, v)
        if mask_forward1.any():
            # pick the usable normal closest to the reflection point
            j = np.argmin(((region.unormed[mask_forward1,:] - bpts)**2).sum(axis=1))
            k = np.arange(len(normal))[mask_forward1][j]
            angles_used = angles[k]
            normal_used = normal[k,:]
            print("chose normal", normal_used, k)
            #chosen_point = region.u[k,:]
            vnew = -(v - 2 * angles_used * normal_used)
            assert vnew.shape == v.shape
            mask_forward2, angles2, anglesnew2 = get_reflection_angles(normal, vnew)
            #j2 = np.argmin(((region.unormed[mask_forward2,:] - bpts)**2).sum(axis=1))
            #chosen_point2 = region.u[mask_forward2,:][0,:]
            #assert j2 == j, (j2, j)
            assert mask_forward2[k]
            #assert_allclose(chosen_point, chosen_point2)
            #for m, a, b, m2, a2, b2 in zip(mask_forward1, angles, anglesnew, mask_forward2, angles2, anglesnew2):
            #    if m != m2:
            #        print('   ', m, a, b, m2, a2, b2)
            #print("using normal", normal)
            #print("changed v from", v, "to", vnew)
            #angles2 = -(normal * (vnew / norm(vnew))).sum(axis=1)
            #mask_forward2 = angles < 0
            if plot:
                plt.figure(figsize=(5,5))
                plt.title('%d' % mask_forward1.sum())
                plt.plot((reflpoint + v)[0], (reflpoint + v)[1], '^', color='orange')
                plt.plot((reflpoint + vnew)[:,0], (reflpoint + vnew)[:,1], '^ ', color='lime')
                plt.plot(reflpoint[0], reflpoint[1], '^ ', color='r')
                plt.plot(region.u[:,0], region.u[:,1], 'x ', ms=2, color='k')
                plt.plot(region.u[mask_forward1,0], region.u[mask_forward1,1], 'o ', ms=6, mfc='None', mec='b')
                plt.plot(region.u[mask_forward2,0], region.u[mask_forward2,1], 's ', ms=8, mfc='None', mec='g')
                plt.xlim(0, 1)
                plt.ylim(0, 1)
                plt.savefig('test_flatnuts_reversible_gradient_%d.png' % i, bbox_inches='tight')
                plt.close()
            assert mask_forward1[k] == mask_forward2[k], (mask_forward1[k], mask_forward2[k])
            print("reflecting at ", reflpoint, "with direction", v)
            # make that step, then try to go back
            j = np.arange(len(normal))[mask_forward1][0]
            normal = normal[j,:]
            angles = (normal * (v / norm(v))).sum()
            v2 = v - 2 * angle(normal, v) * normal
            print("reflecting with", normal, "new direction", v2)
            #newpoint = reflpoint + v2
            #angles2 = (normal * (v2 / norm(v2))).sum()
            # reflecting twice must recover the original direction
            v3 = v2 - 2 * angle(normal, v2) * normal
            print("re-reflecting gives direction", v3)
            assert_allclose(v3, v)
        print()
        print("FORWARD:", v, reflpoint)
        # same reversibility check through ContourSamplingPath's gradient
        samplingpath = SamplingPath(reflpoint - v, v, active_values[0])
        contourpath = ContourSamplingPath(samplingpath, region)
        normal = contourpath.gradient(reflpoint)
        if normal is not None:
            assert normal.shape == v.shape, (normal.shape, v.shape)
            print("BACKWARD:", v, reflpoint)
            v2 = -(v - 2 * angle(normal, v) * normal)
            # the gradient must be reproducible at the same point
            normal2 = contourpath.gradient(reflpoint)
            assert_allclose(normal, normal2)
            normal2 = normal
            v3 = -(v2 - 2 * angle(normal2, v2) * normal2)
            assert_allclose(v3, v)
if __name__ == '__main__':
    test_forward()
    test_horizontal()
    test_corner()
    test_random()
    test_samplingpath()
    test_samplingpath_cubereflect()
    test_reversible_gradient(plot=True)
    import sys
    if len(sys.argv) > 1:
        # estimate how many reflections we have before we u-turn
        ndim = int(sys.argv[1])
        seq = []
        tseq = []
        for j in range(100):
            start = np.random.uniform(size=ndim)
            initial_direction = np.random.normal(size=ndim)
            initial_direction /= (initial_direction**2).sum()**0.5
            direction = initial_direction.copy()
            _, t_initial, _ = nearest_box_intersection_line(start, direction, fwd=True)
            t_total = 0
            # keep reflecting off cube faces until the direction u-turns
            # (the loop variable i is deliberately overwritten with the
            # index of the face axis that was crossed)
            for i in range(10000):
                start, t, i = nearest_box_intersection_line(start, direction, fwd=True)
                direction[i] *= -1
                t_total += t
                if (direction * initial_direction).sum() <= 0:
                    break
            seq.append(i)
            tseq.append(t_total / t_initial)
        # print number of reflections before u-turn and distance compared to a slice sampling distance
        # the numbers are ~ndim/2 and ~ndim
        # which means that the track is a very long coherent walk!
        print(np.mean(seq), np.mean(tseq))
| 17,244 | 42.54798 | 151 | py |
UltraNest | UltraNest-master/tests/test_viz.py | import numpy as np
from ultranest.viz import round_parameterlimits
def wrap_single_test(vlo, vhi, plo_expected, phi_expected):
    """Check that round_parameterlimits maps (vlo, vhi) to the expected limits."""
    assert vlo < vhi
    msg = 'for input values (%s, %s)' % (vlo, vhi)
    lo, hi, _ = round_parameterlimits(np.asarray([vlo]), np.asarray([vhi]))
    np.testing.assert_allclose(lo, plo_expected, err_msg=msg)
    np.testing.assert_allclose(hi, phi_expected, err_msg=msg)
def wrap_single_fmt_test(vlo, vhi, fmt_expected):
    """Check the format string chosen for (vlo, vhi), and that it prints the
    two limits distinguishably."""
    assert vlo < vhi
    lo, hi, fmts = round_parameterlimits(np.asarray([vlo]), np.asarray([vhi]), [(vlo, vhi)])
    assert fmts[0] == fmt_expected, (fmts, fmt_expected)
    chosen = fmts[0]
    # the rounded limits must not collapse to the same printed value
    assert chosen % lo != chosen % hi, (chosen, lo, hi, chosen % lo, chosen % hi)
def test_rounding_pos():
    """Positive-only ranges should round outward to simple decade limits."""
    cases = [
        (0.00003, 0.001, 0, 0.001),
        (0.1, 0.9, 0, 1),
        (1.5, 150, 0, 1000),
        (20000, 100000, 0, 100000),
    ]
    for vlo, vhi, lo_expected, hi_expected in cases:
        wrap_single_test(vlo, vhi, lo_expected, hi_expected)
def test_rounding_u():
    """Ranges inside the unit interval should snap to [0, 1]."""
    # test 0-1 range
    for vlo, vhi in [(0, 1.), (0.0001, 0.99), (0.001, 0.99), (0.01, 0.9999)]:
        wrap_single_test(vlo, vhi, 0, 1)
def test_rounding_negpos():
    """Sign-crossing ranges should round to symmetric decade limits."""
    cases = [
        (-0.1, 0.9, -1, 1),
        (-1.5, 150, -1000, 1000),
        (-20000, 100000, -100000, 100000),
    ]
    for vlo, vhi, lo_expected, hi_expected in cases:
        wrap_single_test(vlo, vhi, lo_expected, hi_expected)
def test_rounding_withguess():
    """With plausible-range guesses supplied, guessed limits should be honoured where they fit."""
    cases = [
        ([-3.14, 0.01, 3000], [0.9, 0.3, 100000],
         [(-3.14, 3.14), (0, 1.0), (-2000, 10000)],
         [-3.14, 0, 0], [3.14, 1, 100000]),
        ([1.4, 24.0, 0.4], [2.6, 25.5, 7.99],
         [(1, 3), (20, 26), (0.1, 8.0)],
         [1, 20, 0.1], [3, 26, 8]),
    ]
    for vlo, vhi, guesses, lo_expected, hi_expected in cases:
        plo, phi, fmts = round_parameterlimits(np.asarray(vlo), np.asarray(vhi), guesses)
        assert np.allclose(plo, lo_expected), plo
        assert np.allclose(phi, hi_expected), phi
def test_fmt():
    """Check format strings chosen for representative small/medium/large ranges."""
    for vlo, vhi, expected_fmt in [
        (-4.14, -4.13, "%+.3f"),
        (1.0, 3.0, "%+.1f"),
        (2000, 50000, "%+.1e"),
    ]:
        wrap_single_fmt_test(vlo, vhi, expected_fmt)
if __name__ == '__main__':
    # manual test runner: invoke every test defined in this module.
    # Fix: test_fmt() was defined above but never called here.
    test_rounding_u()
    test_rounding_pos()
    test_rounding_negpos()
    test_rounding_withguess()
    test_fmt()
| 2,151 | 33.709677 | 94 | py |
UltraNest | UltraNest-master/tests/test_run.py | import os
import numpy as np
import shutil
import tempfile
import pytest
import json
import pandas
from ultranest import NestedSampler, ReactiveNestedSampler, read_file
from ultranest.integrator import warmstart_from_similar_file
import ultranest.mlfriends
from numpy.testing import assert_allclose
def test_run():
    """Run the classic NestedSampler on a bimodal 2-d problem and check call accounting."""
    def loglike(y):
        # bimodal likelihood: logaddexp of two shifted gaussians in log10 space
        z = np.log10(y)
        a = np.array([-0.5 * sum([((xi - 0.83456 + i*0.1)/0.5)**2 for i, xi in enumerate(x)]) for x in z])
        b = np.array([-0.5 * sum([((xi - 0.43456 - i*0.1)/0.5)**2 for i, xi in enumerate(x)]) for x in z])
        loglike.ncalls += len(a)
        return np.logaddexp(a, b)
    loglike.ncalls = 0

    def transform(x):
        # map unit cube to 10**[-5, 5]
        return 10**(10. * x - 5.)

    paramnames = ['Hinz', 'Kunz']

    sampler = NestedSampler(paramnames, loglike, transform=transform, num_live_points=400, vectorized=True)
    r = sampler.run(log_interval=50)

    # sum likelihood calls over MPI ranks and compare against the reported count
    ncalls = loglike.ncalls
    if sampler.mpi_size > 1:
        ncalls = sampler.comm.gather(ncalls, root=0)
        if sampler.mpi_rank == 0:
            print("ncalls on the different MPI ranks:", ncalls)
        ncalls = sum(sampler.comm.bcast(ncalls, root=0))
    assert abs(r['ncall'] - ncalls) <= 2 * sampler.mpi_size, (r['ncall'], ncalls)
    # append logz for cross-run comparison
    open('nestedsampling_results.txt', 'a').write("%.3f\n" % r['logz'])
    sampler.plot()
def test_dlogz_reactive_run_SLOW():
    """Check that the dlogz stopping criterion tightens logzerr below the requested level."""
    def loglike(y):
        # narrow gaussian centred at 0.5
        return -0.5 * np.sum(((y - 0.5)/0.001)**2, axis=1)

    paramnames = ['Hinz', 'Kunz']

    sampler = ReactiveNestedSampler(paramnames, loglike, vectorized=True)
    print("running for ess")
    # first run targets effective sample size only; logzerr is allowed to stay large
    firstresults = sampler.run(min_num_live_points=50, cluster_num_live_points=0, max_num_improvement_loops=3, min_ess=10000, viz_callback=None)
    print()
    print({k:v for k, v in firstresults.items() if 'logzerr' in k})
    print()
    assert firstresults['logzerr'] > 0.1 * 2
    print("running again for logz")
    # second run targets dlogz=0.1; iterate until the accuracy goal is reached
    for niter, results in enumerate(sampler.run_iter(min_num_live_points=1, cluster_num_live_points=0, max_num_improvement_loops=10, dlogz=0.1, viz_callback=None, region_class=ultranest.mlfriends.RobustEllipsoidRegion)):
        print("logzerr in iteration %d" % niter, results['logzerr'])
    print()
    print({k:v for k, v in results.items() if 'logzerr' in k})
    assert results['logzerr'] < 0.1 * 2
def test_reactive_run():
    """Run ReactiveNestedSampler on a bimodal problem; verify unique-call accounting and summary values."""
    np.random.seed(1)
    evals = set()

    def loglike(z):
        #print(loglike.ncalls, z[0,0])
        # record the first coordinate of every evaluated point to detect duplicates
        [evals.add(str(x[0])) for x in z]
        a = np.array([-0.5 * sum([((xi - 0.83456 + i*0.1)/0.5)**2 for i, xi in enumerate(x)]) for x in z])
        b = np.array([-0.5 * sum([((xi - 0.43456 - i*0.1)/0.5)**2 for i, xi in enumerate(x)]) for x in z])
        loglike.ncalls += len(a)
        return np.logaddexp(a, b)
    loglike.ncalls = 0

    def transform(x):
        return 10. * x - 5.

    paramnames = ['Hinz', 'Kunz']

    sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
        draw_multiple=False, vectorized=True)
    r = sampler.run(log_interval=50, min_num_live_points=400)

    # test that the number of likelihood calls is correct
    ncalls = loglike.ncalls
    nunique = len(evals)
    if sampler.mpi_size > 1:
        ncalls = sampler.comm.gather(ncalls, root=0)
        if sampler.mpi_rank == 0:
            print("ncalls on the different MPI ranks:", ncalls)
        ncalls = sum(sampler.comm.bcast(ncalls, root=0))
        allevals = sampler.comm.gather(evals, root=0)
        if sampler.mpi_rank == 0:
            print("evals on the different MPI ranks:", [len(e) for e in allevals])
            allevals = len(set.union(*allevals))
        else:
            allevals = None
        nunique = sampler.comm.bcast(allevals, root=0)
    if sampler.mpi_rank == 0:
        print('ncalls:', ncalls, 'nunique:', nunique)
    assert abs(r['ncall'] - ncalls) <= 2 * sampler.mpi_size, (r['ncall'], ncalls)
    # every likelihood call should have been for a unique point
    assert ncalls == nunique, (ncalls, nunique)
    if sampler.mpi_rank == 0:
        open('nestedsampling_reactive_results.txt', 'a').write("%.3f\n" % r['logz'])
        print(r)
        assert r['niter'] > 100
        assert -10 < r['logz'] < 10
        assert 0.01 < r['logzerr'] < 0.5
        assert 1 < r['ess'] < 10000
        sampler.plot()
def test_plateau_SLOW():
    """Check live point count handling on a likelihood with a flat plateau floor."""
    def loglike(y):
        a = -0.5 * ((y/0.1)**2).sum()
        # everything below -1 collapses to a constant -> likelihood plateau
        if a < -1:
            return -1e100
        return a

    def transform(x):
        return x * 2 - 1

    paramnames = ['Hinz', 'Kunz']
    sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform)
    print(sampler.run(min_num_live_points=400)['logz'])
    print(sampler.run_sequence['nlive'][:-400])
    # once past the plateau, the full 400 live points should be in play
    assert sampler.run_sequence['nlive'][-400] == 400, sampler.run_sequence['nlive'][:-400]
def test_flat():
    """Run the sampler on a completely flat (constant) likelihood."""
    def constant_loglike(y):
        return 0

    sampler = ReactiveNestedSampler(['Hinz', 'Kunz'], constant_loglike)
    result = sampler.run(min_num_live_points=400)
    print(result['logz'])
    print(sampler.run_sequence['nlive'][:-400])
def test_reactive_run_extraparams():
    """Check that derived parameters appended by the transform appear in the samples."""
    np.random.seed(1)

    def loglike(z):
        # likelihood depends only on the last (derived) entry of the transformed vector
        return -0.5 * z[-1].sum()
    loglike.ncalls = 0

    def transform(x):
        z = 10. * x - 5.
        # append a derived quantity: the L1 distance from the origin
        return np.append(z, np.abs(z).sum())

    paramnames = ['Hinz', 'Kunz']

    sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
        derived_param_names=['ctr_distance'])
    r = sampler.run()
    # 2 fitted + 1 derived parameter expected in the sample columns
    assert r['samples'].shape[1] == 3
    sampler.plot()
def test_return_summary():
    """Verify the structure and plausibility of the results dictionary on a simple gaussian."""
    sigma = np.array([0.1, 0.01])
    centers = np.array([0.5, 0.75])
    paramnames = ['a', 'b']
    ndim = len(paramnames)

    def loglike(theta):
        like = -0.5 * (((theta - centers)/sigma)**2) - 0.5 * np.log(2 * np.pi * sigma**2) * ndim
        return like.sum()

    def transform(x):
        return x

    sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform)
    r = sampler.run()
    print(r)
    assert r['paramnames'] == paramnames
    assert r['niter'] > 100
    assert -10 < r['logz'] < 10
    assert 0.01 < r['logzerr'] < 0.5
    assert 1 < r['ess'] < 10000
    # posterior summaries must bracket the known true centers and widths
    assert 0.4 < r['posterior']['mean'][0] < 0.6
    assert 0.74 < r['posterior']['mean'][1] < 0.76
    assert 0.4 < r['posterior']['median'][0] < 0.6
    assert 0.74 < r['posterior']['median'][1] < 0.76
    assert 0.05 < r['posterior']['stdev'][0] < 0.2
    assert 0.005 < r['posterior']['stdev'][1] < 0.02
    assert 0.35 < r['posterior']['errlo'][0] < 0.45
    assert 0.72 < r['posterior']['errlo'][1] < 0.75
    assert 0.55 < r['posterior']['errup'][0] < 0.65
    assert 0.75 < r['posterior']['errup'][1] < 0.78
    # sample arrays must have consistent shapes
    N, ndim2 = r['samples'].shape
    assert ndim2 == ndim
    assert N > 10
    N, ndim2 = r['weighted_samples']['points'].shape
    assert ndim2 == ndim
    assert N > 10
    assert r['weighted_samples']['logw'].shape == (N,)
    assert r['weighted_samples']['weights'].shape == (N,)
    assert r['weighted_samples']['bootstrapped_weights'].shape[0] == N
    assert r['weighted_samples']['logl'].shape == (N,)
@pytest.mark.parametrize("dlogz", [2.0, 0.5, 0.1])
def test_run_resume(dlogz):
    """Run twice with a shared log_dir; the resumed run must reuse stored points and agree on logz."""
    sigma = 0.01
    ndim = 1

    def loglike(theta):
        like = -0.5 * (((theta - 0.5)/sigma)**2).sum(axis=1) - 0.5 * np.log(2 * np.pi * sigma**2) * ndim
        return like

    def transform(x):
        return x

    paramnames = ['a']

    def myadd(row):
        # patched into the point store on the resumed run: adding would mean resume failed
        assert False, (row, 'should not need to add more points in resume')

    last_results = None
    #for dlogz in 0.5, 0.1, 0.01:
    np.random.seed(int(dlogz*100))
    folder = tempfile.mkdtemp()
    try:
        for i in range(2):
            sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
                log_dir=folder, resume=True, vectorized=True)
            r = sampler.run(log_interval=50, dlogz=dlogz, min_num_live_points=400)
            sampler.print_results()
            sampler.pointstore.close()
            if i == 1:
                sampler.pointstore.add = myadd
            del r['weighted_samples']
            del r['samples']
            if last_results is not None:
                print("ran with dlogz:", dlogz)
                print("first run gave:", last_results)
                print("second run gave:", r)
                assert last_results['logzerr'] < 1.0
                assert r['logzerr'] < 1.0
                # both runs must agree on the evidence within half a nat
                assert np.isclose(last_results['logz'], r['logz'], atol=0.5)
            last_results = r
    finally:
        shutil.rmtree(folder, ignore_errors=True)
@pytest.mark.parametrize("storage_backend", ['hdf5', 'tsv', 'csv'])
def test_reactive_run_resume_eggbox(storage_backend):
    """Resume on the eggbox problem and verify on-disk outputs (json, csv, chains) match the results dict."""
    def loglike(z):
        # classic eggbox likelihood
        chi = (np.cos(z / 2.)).prod(axis=1)
        loglike.ncalls += len(z)
        return (2. + chi)**5
    loglike.ncalls = 0

    def transform(x):
        return x * 10 * np.pi

    paramnames = ['a', 'b']
    ndim = len(paramnames)

    #last_results = None
    folder = tempfile.mkdtemp()
    np.random.seed(1)
    try:
        for i in range(2):
            print()
            print("====== Running Eggbox problem [%d] =====" % (i+1))
            print()
            sampler = ReactiveNestedSampler(paramnames,
                loglike, transform=transform,
                log_dir=folder, resume=True, vectorized=True, draw_multiple=False,
                storage_backend=storage_backend)
            initial_ncalls = int(sampler.ncall)
            num_live_points = 100
            loglike.ncalls = 0
            r = sampler.run(max_iters=200 + i*200,
                max_num_improvement_loops=0,
                min_num_live_points=num_live_points,
                cluster_num_live_points=0)
            sampler.print_results()
            if storage_backend == 'hdf5':
                print("pointstore:", sampler.pointstore.fileobj['points'].shape)
            sampler.pointstore.close()
            print(loglike.ncalls, r['ncall'], initial_ncalls)
            # check the call count, summed over MPI ranks and including stored points
            ncalls = loglike.ncalls
            if sampler.mpi_size > 1:
                ncalls = sampler.comm.gather(ncalls, root=0)
                if sampler.mpi_rank == 0:
                    print("ncalls on the different MPI ranks:", ncalls)
                ncalls = sum(sampler.comm.bcast(ncalls, root=0))
            ncalls = ncalls + initial_ncalls
            assert abs(r['ncall'] - ncalls) <= 2 * sampler.mpi_size, (i, r['ncall'], ncalls, r['ncall'] - ncalls)
            assert paramnames == r['paramnames'], 'paramnames should be in results'

            # compare the exported json/csv summaries with the in-memory results
            results2 = json.load(open(folder + '/info/results.json'))
            print('CSV content:')
            print(open(folder + '/info/post_summary.csv').read())
            post_summary = pandas.read_csv(folder + '/info/post_summary.csv')
            print(post_summary, post_summary.columns)
            for k, v in r.items():
                if k in results2:
                    print("checking results[%s] ..." % k)
                    assert results2[k] == r[k], (k, results2[k], r[k])
            assert r['paramnames'] == paramnames

            # chains on disk must be consistent with the returned samples
            samples = np.loadtxt(folder + '/chains/equal_weighted_post.txt', skiprows=1)
            data = np.loadtxt(folder + '/chains/weighted_post.txt', skiprows=1)
            data_u = np.loadtxt(folder + '/chains/weighted_post_untransformed.txt', skiprows=1)
            assert (data[:,:2] == data_u[:,:2]).all()
            assert_allclose(samples.mean(axis=0), r['posterior']['mean'])
            assert_allclose(np.median(samples, axis=0), r['posterior']['median'])
            assert_allclose(np.std(samples, axis=0), r['posterior']['stdev'])
            for k, v in r.items():
                if k == 'posterior':
                    for k1, v1 in v.items():
                        if k1 == 'information_gain_bits':
                            continue
                        for param, value in zip(paramnames, v[k1]):
                            print("checking %s of parameter '%s':" % (k1, param), value)
                            assert np.isclose(post_summary[param + '_' + k1].values, value), (param, k1, post_summary[param + '_' + k1].values, value)
                elif k == 'samples':
                    assert_allclose(samples, r['samples'])
                elif k == 'paramnames':
                    assert v == paramnames
                elif k == 'weighted_samples':
                    print(k, v.keys())
                    assert_allclose(data[:,0], v['weights'])
                    assert_allclose(data[:,1], v['logl'])
                    assert_allclose(data[:,2:], v['points'])
                    assert_allclose(data_u[:,2:], v['upoints'])
                elif k == 'maximum_likelihood':
                    print(k, v.keys())
                    assert_allclose(data[-1,1], v['logl'])
                    assert_allclose(data[-1,2:], v['point'])
                    assert_allclose(data_u[-1,2:], v['point_untransformed'])
                elif k.startswith('logzerr') or '_bs' in k or 'Herr' in k:
                    print(" skipping", k, np.shape(v))
                    #assert_allclose(r[k], v, atol=0.5)
                elif k == 'insertion_order_MWW_test':
                    print('insertion_order_MWW_test:', r[k], v)
                    assert r[k] == v, (r[k], v)
                else:
                    print(" ", k, np.shape(v))
                    assert_allclose(r[k], v)
            logw = r['weighted_samples']['logw']
            v = r['weighted_samples']['points']
            L = r['weighted_samples']['logl']
            assert results2['niter'] == len(r['samples'])

            # the results are not exactly the same, because the sampling adds
            #ncalls = loglike.ncalls
            #sampler = ReactiveNestedSampler(paramnames,
            #    loglike, transform=transform,
            #    log_dir=folder, resume=True, vectorized=True, num_test_samples=0)
            #print("pointstore:", sampler.pointstore.fileobj['points'].shape)
            #assert ncalls == loglike.ncalls, (ncalls, loglike.ncalls)
            if storage_backend == 'hdf5':
                # re-read the run with the file reader and compare against the live results
                sequence, results = read_file(folder, ndim, random=False, num_bootstraps=0)
                print("sampler results: ********************")
                print({k:v for k, v in r.items() if np.asarray(v).size < 20 and k != 'weighted_samples'})
                print("reader results: ********************")
                print({k:v for k, v in results.items() if np.asarray(v).size < 20 and k != 'weighted_samples'})
                for k, v in results.items():
                    if k == 'posterior' or k == 'samples':
                        pass
                    elif k == 'weighted_samples' or k == 'maximum_likelihood':
                        for k2, v2 in results[k].items():
                            if k2 == 'bootstrapped_weights': continue
                            print(" ", k, "::", k2, np.shape(v2))
                            assert_allclose(r[k][k2], v2)
                    elif k.startswith('logzerr') or '_bs' in k or 'Herr' in k:
                        print(" skipping", k, np.shape(v))
                        #assert_allclose(r[k], v, atol=0.5)
                    elif k == 'insertion_order_MWW_test':
                        print('insertion_order_MWW_test:', r[k], v)
                        assert r[k] == v, (r[k], v)
                    else:
                        print(" ", k, np.shape(v))
                        assert_allclose(r[k], v)
                logw = r['weighted_samples']['logw']
                v = r['weighted_samples']['points']
                L = r['weighted_samples']['logl']
                assert sequence['logz'][-1] - r['logz'] < 0.5, (results['logz'][-1], r['logz'])
                assert sequence['logzerr'][-1] <= r['logzerr_single'], (results['logzerr'][-1], r['logzerr'])
                #assert_allclose(sequence['logz_final'], r['logz_single'], atol=0.3)
                #assert_allclose(sequence['logzerr_final'], r['logzerr_single'], atol=0.1)
                assert r['niter'] <= sequence['niter'] <= r['niter'], (sequence['niter'], r['niter'])
                assert results['niter'] == len(sequence['logz']) == len(sequence['logzerr']) == len(sequence['logvol']) == len(sequence['logwt'])
                assert results['niter'] == len(results['samples'])
                data = np.loadtxt(folder + '/chains/weighted_post.txt', skiprows=1)
                assert_allclose(data[:,0], results['weighted_samples']['weights'])
                assert_allclose(data[:,1], results['weighted_samples']['logl'])
                assert_allclose(v, results['weighted_samples']['points'])
                assert_allclose(logw, results['weighted_samples']['logw'])
                assert_allclose(L, results['weighted_samples']['logl'])
                assert_allclose(L, sequence['logl'])
                #assert_allclose(logw + L, sequence['logwt'])
                assert sequence['logvol'].shape == logw.shape == (len(L),), (sequence['logvol'].shape, logw.shape)
                assert sequence['logwt'].shape == logw.shape == (len(L),), (sequence['logwt'].shape, logw.shape)
                #assert_allclose(logw, sequence['logvols'])
                #assert results['samples_untransformed'].shape == v.shape == (len(L), ndim), (results['samples_untransformed'].shape, v.shape)
    finally:
        shutil.rmtree(folder, ignore_errors=True)
def test_reactive_run_warmstart_gauss():
    """Check resume modes when the likelihood changes; warm start must save calls."""
    center = 0

    def loglike(z):
        chi2 = (((z - center)/0.001)**2).sum(axis=1)
        loglike.ncalls += len(z)
        return -0.5 * chi2
    loglike.ncalls = 0

    def transform(x):
        return x * 20000 - 10000

    paramnames = ['a']
    folder = tempfile.mkdtemp()
    np.random.seed(1)
    first_ncalls = None
    resume_ncalls = None
    try:
        for i, resume in enumerate(['overwrite', 'resume', 'resume-similar']):
            print()
            print("====== Running Gauss problem [%d] =====" % (i+1))
            print()
            # shift the likelihood centre slightly each round to simulate a changed model
            center = (i+1) * 1e-4
            try:
                sampler = ReactiveNestedSampler(paramnames,
                    loglike, transform=transform,
                    log_dir=folder, resume=resume, vectorized=True, draw_multiple=False,
                    warmstart_max_tau=0.5)
            except Exception as e:
                # we expect an error for resuming with a changed likelihood
                if resume != 'resume':
                    raise e
                else:
                    assert 'loglikelihood function changed' in str(e), e
                    print("Exception as expected:", e)
                continue
            initial_ncalls = int(sampler.ncall)
            if i == 0:
                assert initial_ncalls == 0
            num_live_points = 100
            loglike.ncalls = 0
            r = sampler.run(
                max_num_improvement_loops=0,
                min_num_live_points=num_live_points,
                cluster_num_live_points=0, viz_callback=None, frac_remain=0.5)
            sampler.print_results()
            print("pointstore:", sampler.pointstore.fileobj['points'].shape)
            sampler.pointstore.close()
            print(loglike.ncalls, r['ncall'], initial_ncalls)
            ncalls = loglike.ncalls
            if sampler.mpi_size > 1:
                ncalls = sampler.comm.gather(ncalls, root=0)
                if sampler.mpi_rank == 0:
                    print("ncalls on the different MPI ranks:", ncalls)
                ncalls = sum(sampler.comm.bcast(ncalls, root=0))
            ncalls = ncalls + initial_ncalls
            if i == 0:
                first_ncalls = ncalls
            if i == 2:
                resume_ncalls = loglike.ncalls
            assert abs(r['ncall'] - ncalls) <= 2 * sampler.mpi_size, (i, r['ncall'], ncalls, r['ncall'] - ncalls)
            assert paramnames == r['paramnames'], 'paramnames should be in results'
    finally:
        shutil.rmtree(folder, ignore_errors=True)

    # make sure warm start is much faster
    assert resume_ncalls < first_ncalls - 800, (resume_ncalls, first_ncalls)
def test_run_compat():
    """Exercise the pymultinest-compatible solve() wrapper on a simple gaussian."""
    from ultranest.solvecompat import pymultinest_solve_compat as solve

    ndim = 2
    sigma = 0.01
    centers = 0.5
    paramnames = ['a', 'b']

    def loglike(theta):
        like = -0.5 * (((theta - centers)/sigma)**2).sum() - 0.5 * np.log(2 * np.pi * sigma**2) * ndim
        return like

    def transform(x):
        # first parameter linear in [-5, 5], second log-uniform
        params = x.copy()
        params[0] = 10 * x[0] - 5.
        params[1] = 10**(x[1] - 1)
        return params

    result = solve(LogLikelihood=loglike, Prior=transform,
        n_dims=ndim, outputfiles_basename=None,
        verbose=True, resume='resume', importance_nested_sampling=False)

    print()
    print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
    print()
    print('parameter values:')
    for name, col in zip(paramnames, result['samples'].transpose()):
        print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
def test_run_warmstart_gauss_SLOW():
    """Check warmstart_from_similar_file: hot starts must recover the posterior with far fewer calls."""
    center = None
    stdev = 0.001

    def loglike(z):
        chi2 = (((z - center) / stdev)**2).sum(axis=1)
        loglike.ncalls += len(z)
        return -0.5 * chi2
    loglike.ncalls = 0

    def transform(x):
        return x * 20000 - 10000

    paramnames = ['a']
    folder = tempfile.mkdtemp()
    np.random.seed(1)
    ncalls = []
    try:
        for i, resume in enumerate(['overwrite', 'resume-hot', 'resume-hot', 'resume-hot']):
            print()
            print("====== Running Gauss problem [%d] =====" % (i+1))
            print()
            # move the true centre around between rounds
            center = [0, 0, stdev, 1][i]
            print("center:", center, "folder:", folder)
            if i == 0:
                # cold start: establishes the posterior file used for warm starting
                sampler = ReactiveNestedSampler(paramnames,
                    loglike, transform=transform,
                    log_dir=folder, resume=resume, vectorized=True)
            else:
                # hot start from the previous run's untransformed posterior file
                aux_param_names, aux_loglike, aux_transform, vectorized = warmstart_from_similar_file(
                    os.path.join(folder, 'chains', 'weighted_post_untransformed.txt'),
                    paramnames, loglike=loglike, transform=transform, vectorized=True,
                )
                sampler = ReactiveNestedSampler(aux_param_names,
                    aux_loglike, transform=aux_transform, vectorized=True)
            sampler.run(viz_callback=None)
            sampler.print_results()
            print("expected posterior:", center, '+-', stdev)
            print(sampler.results.keys())
            print(sampler.results['posterior'].keys())
            print(sampler.results['posterior']['mean'], sampler.results['posterior']['stdev'])
            print(sampler.results['weighted_samples']['upoints'], sampler.results['weighted_samples']['weights'])
            assert center - stdev < sampler.results['posterior']['mean'][0] < center + stdev, (center, sampler.results['posterior'])
            assert stdev * 0.8 < sampler.results['posterior']['stdev'][0] < stdev * 1.2, (center, sampler.results['posterior'])
            ncalls.append(sampler.ncall)
    finally:
        shutil.rmtree(folder, ignore_errors=True)
    print(ncalls)
    # make sure hot start is much faster
    assert ncalls[1] < ncalls[0] - 800, (ncalls)
    assert ncalls[2] < ncalls[0] - 800, (ncalls)
if __name__ == '__main__':
    # manual test runner; uncomment the test you want to run
    #test_run_compat()
    #test_run_resume(dlogz=0.5)
    #test_reactive_run_resume(dlogz=0.5, min_ess=1000)
    #test_reactive_run()
    #test_run()
    #test_reactive_run_warmstart_gauss()
    #test_reactive_run_extraparams()
    #test_reactive_run_resume_eggbox('hdf5')
    #test_dlogz_reactive_run()
    # BUG FIX: the function above is named test_plateau_SLOW;
    # calling test_plateau() raised a NameError.
    test_plateau_SLOW()
| 23,520 | 38.933786 | 220 | py |
UltraNest | UltraNest-master/tests/test_utils.py | import numpy as np
import tempfile
import os
from ultranest.utils import vectorize, is_affine_transform, normalised_kendall_tau_distance, make_run_dir
from ultranest.utils import distributed_work_chunk_size
from numpy.testing import assert_allclose
import pytest
def test_vectorize():
    """vectorize() should apply a scalar function row-wise and accept callables/bound methods."""
    def sum_of_squares(x):
        return (x**2).sum()

    wrapped = vectorize(sum_of_squares)

    single = np.array([1.2, 2.3, 3.4])
    assert_allclose(np.array([sum_of_squares(single)]), wrapped([single]))

    batch = np.array([[1.2, 2.3, 3.4], [1.2, 2.3, 3.4]])
    expected = np.array([sum_of_squares(batch[0]), sum_of_squares(batch[1])])
    assert_allclose(expected, wrapped(batch))

    class CallableObject(object):
        def __call__(self, x):
            return (x**2).sum()
        def foo(self, x):
            return x

    instance = CallableObject()
    # merely check that wrapping callables and bound methods does not raise
    vectorize(instance)
    vectorize(instance.foo)
def test_is_affine_transform():
    """Affine images of random points are detected; a nonlinear map is rejected."""
    npoints = 2**np.random.randint(1, 10)
    ndim = 2**np.random.randint(1, 3)
    pts = np.random.uniform(-1, 1, size=(npoints, ndim))
    # identity, scaling, shift, and combined scale+shift are all affine
    for image in (pts, pts * 2.0, pts - 1, pts * 10000 - 5000.):
        assert is_affine_transform(pts, image)
    # squaring is not affine
    assert not is_affine_transform(pts, pts**2)
def test_tau():
    """Check the normalised Kendall tau distance on known rankings.

    Identical sequences give 0; a known swapped ordering gives 0.4;
    mismatched lengths must be rejected.
    """
    assert normalised_kendall_tau_distance(np.arange(400), np.arange(400)) == 0
    assert normalised_kendall_tau_distance(np.arange(2000), np.arange(2000)) == 0
    a = np.array([1, 2, 3, 4, 5])
    b = np.array([3, 4, 1, 2, 5])
    assert normalised_kendall_tau_distance(a, b) == 0.4
    # passing precomputed index grids must give the same answers
    i, j = np.meshgrid(np.arange(len(a)), np.arange(len(b)))
    assert normalised_kendall_tau_distance(a, b, i, j) == 0.4
    assert normalised_kendall_tau_distance(a, a, i, j) == 0
    # FIX: replace the manual try/raise/except idiom with pytest.raises,
    # which cannot accidentally mask the wrong failure mode
    with pytest.raises(AssertionError):
        normalised_kendall_tau_distance(np.arange(5), np.arange(10))
def test_make_log_dirs():
    """make_run_dir should create run1, run2, ... and refuse beyond max_run_num.

    Fixes: mkdtemp() is now called *before* the try block (previously a
    failure there would trigger a NameError in finally), and the
    assert-False / except-ValueError idiom is replaced by pytest.raises.
    """
    import shutil
    filepath = tempfile.mkdtemp()
    try:
        make_run_dir(filepath, max_run_num=3)
        assert os.path.exists(os.path.join(filepath, 'run1'))
        make_run_dir(filepath, max_run_num=3)
        assert os.path.exists(os.path.join(filepath, 'run2'))
        # exceeding max_run_num must raise
        with pytest.raises(ValueError):
            make_run_dir(filepath, max_run_num=3)
    finally:
        shutil.rmtree(filepath)
@pytest.mark.parametrize("mpi_size", [1, 4, 10, 37, 53, 100, 1000, 513])
@pytest.mark.parametrize("num_live_points_missing", [0, 1, 4, 10, 17, 31, 100, 1000, 513])
def test_distributed_work_chunk_size(mpi_size, num_live_points_missing):
    """Chunks across MPI ranks must sum to the total and be balanced within one unit."""
    chunks = [
        distributed_work_chunk_size(num_live_points_missing, rank, mpi_size)
        for rank in range(mpi_size)
    ]
    assert sum(chunks) == num_live_points_missing
    assert max(chunks) - min(chunks) in {0, 1}
| 2,560 | 29.129412 | 105 | py |
UltraNest | UltraNest-master/docs/gauss.py | import argparse
import numpy as np
from numpy import log  # NOTE(review): unused here (np.log is used below); kept for compatibility

# define command line arguments:
parser = argparse.ArgumentParser()
parser.add_argument('--x_dim', type=int, default=2,
    help="Dimensionality")
parser.add_argument("--num_live_points", type=int, default=400)
parser.add_argument('--sigma', type=float, default=0.1)
parser.add_argument('--slice', action='store_true')
parser.add_argument('--slice_steps', type=int, default=100)
parser.add_argument('--log_dir', type=str, default='logs/loggauss')
args = parser.parse_args()

ndim = args.x_dim
sigma = args.sigma
# place the gaussian centers on a sine pattern well inside the unit cube
width = max(0, 1 - 5 * sigma)
centers = (np.sin(np.arange(ndim)/2.) * width + 1.) / 2.

# Here, we implement a vectorized loglikelihood, which can
# process many points at the same time. This reduces function calls.
def loglike(theta):
    like = -0.5 * (((theta - centers)/sigma)**2).sum(axis=1) - 0.5 * np.log(2 * np.pi * sigma**2) * ndim
    return like

# identity prior transform: parameters live directly in the unit cube
def transform(x):
    return x

paramnames = ['param%d' % (i+1) for i in range(ndim)]

# set up nested sampler:
from ultranest import ReactiveNestedSampler
sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
    log_dir=args.log_dir + 'RNS-%dd' % ndim, resume=True,
    vectorized=True)

if args.slice:
    # set up step sampler. Here, we use a differential evolution slice sampler:
    import ultranest.stepsampler
    sampler.stepsampler = ultranest.stepsampler.SliceSampler(
        nsteps=args.slice_steps,
        generate_direction=ultranest.stepsampler.generate_mixture_random_direction,
    )

# run sampler, with a few custom arguments:
sampler.run(dlogz=0.5 + 0.1 * ndim,
    update_interval_volume_fraction=0.4 if ndim > 20 else 0.2,
    max_num_improvement_loops=3,
    min_num_live_points=args.num_live_points)

sampler.print_results()

if args.slice:
    sampler.stepsampler.plot(filename = args.log_dir + 'RNS-%dd/stepsampler_stats_regionslice.pdf' % ndim)

sampler.plot()
| 1,959 | 30.612903 | 106 | py |
UltraNest | UltraNest-master/docs/modoverview.py | import importlib
# (section title, submodules) pairs used to group the generated API listing
sections = [
    ('Modules commonly used directly', ['integrator', 'hotstart', 'plot', 'stepsampler', 'popstepsampler', 'solvecompat']),
    ('Internally used modules', ['mlfriends', 'netiter', 'ordertest', 'stepfuncs', 'store', 'viz']),
    ('Experimental modules, no guarantees', ['dychmc', 'dyhmc', 'flatnuts', 'pathsampler', 'samplingpath']),
]

# generate docs/API.rst from the module docstrings
fout = open('API.rst', 'w')
fout.write("""API
===
`Full API documentation on one page <ultranest.html>`_
The main interface is :py:class:`ultranest.integrator.ReactiveNestedSampler`,
also available as `ultranest.ReactiveNestedSampler`.
""")

for section, modules in sections:
    fout.write("\n%s:\n%s\n\n" % (section, '-'*80))
    for mod in modules:
        # the first docstring line of each submodule serves as its one-line title
        moddoc = importlib.import_module('ultranest.%s' % mod).__doc__
        modtitle = moddoc.strip().split('\n')[0]
        print('%-15s: %s' % (mod, modtitle))
        fout.write(" * :py:mod:`ultranest.%s`: %s\n" % (mod, modtitle))

fout.write("""
Alphabetical list of submodules
-------------------------------
.. toctree::
:maxdepth: 2
ultranest
""")
| 1,057 | 24.804878 | 120 | py |
UltraNest | UltraNest-master/docs/simple.py | import scipy.stats
# three independent gaussian parameters centred inside the unit cube
paramnames = ['param1', 'param2', 'param3']
centers = [0.4, 0.5, 0.6]
sigma = 0.1

# identity prior transform: the unit cube is the parameter space
def transform(cube):
    return cube

# independent gaussian likelihood around `centers` with width `sigma`
def loglike(theta):
    return scipy.stats.norm(centers, sigma).logpdf(theta).sum()

from ultranest import ReactiveNestedSampler

sampler = ReactiveNestedSampler(paramnames, loglike, transform=transform,
    log_dir='my_gauss', # folder where to store files
    resume=True, # whether to resume from there (otherwise start from scratch)
)

sampler.run(
    min_num_live_points=400,
    dlogz=0.5, # desired accuracy on logz
    min_ess=400, # number of effective samples
    update_interval_volume_fraction=0.4, # how often to update region
    max_num_improvement_loops=3, # how many times to go back and improve
)

sampler.print_results()
sampler.plot()
sampler.plot_trace()
| 820 | 25.483871 | 79 | py |
UltraNest | UltraNest-master/docs/conf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ultranest documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('_ext'))
import sphinx_rtd_theme
import ultranest

# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    'sphinx.ext.mathjax',
    'sphinx.ext.autosectionlabel',
    'nbsphinx',
    'sphinx_rtd_theme',
    'sphinx.ext.napoleon',
    'edit_on_github',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'UltraNest'
copyright = u"2014-2022, Johannes Buchner"
author = u"Johannes Buchner"

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = ultranest.__version__
# The full version, including alpha/beta/rc tags.
release = ultranest.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['build', 'Thumbs.db', '.DS_Store',
    '_build', '**.ipynb_checkpoints']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

autosectionlabel_prefix_document = True

# avoid time-out when running the doc
nbsphinx_timeout = 4 * 60 * 60

nbsphinx_execute_arguments = [
    "--InlineBackend.figure_formats={'svg', 'pdf'}",
    "--InlineBackend.rc=figure.dpi=96",
]

autodoc_member_order = 'bysource'
autoclass_content = 'both'

edit_on_github_project = 'JohannesBuchner/UltraNest'
edit_on_github_branch = 'master'
#edit_on_github_url
# NOTE(review): the edit_on_github extension in docs/_ext registers and reads
# `edit_on_github_src_path`; this sets `edit_on_github_src` — verify the key name.
edit_on_github_src = 'docs/' # optional. default: ''

# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_baseurl = 'https://johannesbuchner.github.io/UltraNest/'

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'style_external_links': True,
    # 'vcs_pageview_mode': 'edit',
    'style_nav_header_background': '#2980B9',
    #'only_logo': False,
}
html_logo = "static/logo.svg"
html_show_sourcelink = False
html_favicon = "static/icon.ico"
html_show_sphinx = False

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']

# -- Options for HTMLHelp output ---------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'ultranestdoc'

# -- Options for LaTeX output ------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'a4paper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'ultranest.tex',
     u'UltraNest Documentation',
     u'Johannes Buchner', 'manual'),
]

# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'ultranest',
     u'UltraNest Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'ultranest',
     u'UltraNest Documentation',
     author,
     'ultranest',
     'One line description of project.',
     'Miscellaneous'),
]
| 5,899 | 27.780488 | 77 | py |
UltraNest | UltraNest-master/docs/_ext/edit_on_github.py | """
Sphinx extension to add ReadTheDocs-style "Edit on GitHub" links to the
sidebar.
Loosely based on https://github.com/astropy/astropy/pull/347
"""
import os
import warnings
__licence__ = 'BSD (3 clause)'
def get_github_url(app, view, path):
return (
'https://github.com/{project}/{view}/{branch}/{src_path}{path}'.format(
project=app.config.edit_on_github_project,
view=view,
branch=app.config.edit_on_github_branch,
src_path=app.config.edit_on_github_src_path,
path=path))
def html_page_context(app, pagename, templatename, context, doctree):
if templatename != 'page.html':
return
if not app.config.edit_on_github_project:
warnings.warn("edit_on_github_project not specified")
return
if not doctree:
warnings.warn("doctree is None")
return
path = os.path.relpath(doctree.get('source'), app.builder.srcdir)
show_url = get_github_url(app, 'blob', path)
edit_url = get_github_url(app, 'edit', path)
context['show_on_github_url'] = show_url
context['edit_on_github_url'] = edit_url
def setup(app):
app.add_config_value('edit_on_github_project', '', True)
app.add_config_value('edit_on_github_branch', 'master', True)
app.add_config_value('edit_on_github_src_path', '', True) # 'eg' "docs/"
app.connect('html-page-context', html_page_context)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| 1,519 | 27.148148 | 79 | py |
UltraNest | UltraNest-master/evaluate/problems.py | import numpy as np
from math import gamma, pi, exp
def random_vector(ndim, length=1):
v = np.random.normal(size=ndim)
return v * length / (v**2).sum()**0.5
def random_point_in_sphere(ndim, radius=1):
return random_vector(ndim, radius * np.random.uniform()**(1. / ndim))
def nsphere_volume(radius, ndim):
return pi**(ndim/2.) / gamma(ndim/2. + 1) * radius**ndim
def gradient_to_center(x, ctr=0.5):
""" return normalised vector pointing to center """
v = ctr - x
v /= (v**2).sum()**0.5
return v
def transform(x): return x
def loglike_gauss(x):
""" gaussian problem (circles) """
return -0.5 * ((x - 0.5)**2).sum()
gradient_gauss = gradient_to_center
def volume_gauss(loglike, ndim):
""" compute volume enclosed at loglike threshold """
sqr_radius = -2 * loglike
radius = sqr_radius**0.5
if radius >= 0.5:
# the volume is still touching the unit cube
return np.nan
# compute volume of a n-sphere
return nsphere_volume(radius, ndim)
def warmup_gauss(ndim):
return 0.5 + random_point_in_sphere(ndim, radius = 0.4)
def generate_asymgauss_problem(ndim):
asym_sigma = 0.1 / (1 + 4*np.arange(ndim))
asym_sigma_max = asym_sigma.max()
def loglike_asymgauss(x):
""" assymmetric gaussian problem"""
return -0.5 * (((x - 0.5)/asym_sigma)**2).sum()
def volume_asymgauss(loglike, ndim):
""" compute volume enclosed at loglike threshold """
sqr_radius = -2 * loglike
radius = sqr_radius**0.5
# assume that all of that is in the asym_sigma_max direction
# how far would that be?
if radius * asym_sigma_max >= 0.5:
# the volume is still touching the unit cube
return np.nan
# compute volume of a n-sphere
return nsphere_volume(radius, ndim) * np.product(asym_sigma / asym_sigma_max)
gradient_asymgauss = gradient_to_center
def warmup_asymgauss(ndim):
return 0.5 + random_point_in_sphere(ndim, radius = asym_sigma)
return loglike_asymgauss, gradient_asymgauss, volume_asymgauss, warmup_asymgauss
def generate_corrgauss_problem(ndim, gamma=0.95):
mean = np.zeros(ndim)
M = np.ones((ndim, ndim)) * gamma
np.fill_diagonal(M, 1)
Minv = np.linalg.inv(M)
Mdet = np.linalg.det(M)
center = np.zeros(ndim)
loglike_asymgauss, gradient_asymgauss, volume_asymgauss, warmup_asymgauss = generate_asymgauss_problem(ndim)
from ultranest.mlfriends import AffineLayer
layer = AffineLayer(center, M, Minv)
def warmup_corrgauss(ndim):
# the gaussian is defined in our aux coordinate system:
y = warmup_asymgauss(ndim)
# so transform to these
return layer.transform(y - 0.5) + 0.5
def loglike_corrgauss(x):
""" gaussian problem """
# transform back to aux coordinate system, where gaussian is nice
y = layer.untransform(x - 0.5) + 0.5
return loglike_asymgauss(y)
def volume_corrgauss(loglike, ndim):
# volume is defined in aux coordinate system
# we hope that no intersection with unit cube happens
return volume_asymgauss(loglike, ndim) / Mdet
def gradient_corrgauss(x):
y = layer.untransform(x - 0.5) + 0.5
return gradient_to_center(y)
return loglike_corrgauss, gradient_corrgauss, volume_corrgauss, warmup_corrgauss
def loglike_pyramid(x):
""" hyper-pyramid problem (squares) """
return -np.abs(x - 0.5).max()**0.01
def gradient_pyramid(x):
j = np.argmax(np.abs(x - 0.5))
v = np.zeros(len(x))
v[j] = -1 if x[j] > 0.5 else 1
return v
def volume_pyramid(loglike, ndim):
""" compute volume enclosed at loglike threshold """
sidelength = (-loglike)**100
return sidelength**ndim
def warmup_pyramid(ndim):
return np.random.uniform(0.4, 0.6, size=ndim)
def loglike_multigauss(x):
""" two-peaked gaussian problem """
a = -0.5 * (((x - 0.4)/0.01)**2).sum()
b = -0.5 * (((x - 0.6)/0.01)**2).sum()
return np.logaddexp(a, b)
def gradient_multigauss(x, plot=False):
va = gradient_to_center(x, ctr=0.4)
vb = gradient_to_center(x, ctr=0.6)
logwa = -0.5 * (((x - 0.4)/0.01)**2).sum()
logwb = -0.5 * (((x - 0.6)/0.01)**2).sum()
logwmax = max(logwa, logwb)
wa = exp(logwa - logwmax)
wb = exp(logwb - logwmax)
v = va * wa + vb * wb
# normalise
v /= (v**2).sum()**0.5
return v
def volume_multigauss(loglike, ndim):
""" compute volume enclosed at loglike threshold """
sqr_radius = -2 * loglike
radius = sqr_radius**0.5 * 0.01
if radius >= 0.5:
# the volume is still touching the unit cube
return np.nan
if radius >= (0.2**2 * ndim)**0.5:
# the two peaks are still touching each other
return np.nan
# compute volume of a n-sphere
return nsphere_volume(radius, ndim)
def warmup_multigauss(ndim):
if np.random.uniform() < 0.5:
ctr = 0.4
else:
ctr = 0.6
return ctr + random_point_in_sphere(ndim, radius = 0.04)
def loglike_shell(x):
""" gaussian shell, tilted """
# square distance from center
r = ((x - 0.5)**2).sum()
# gaussian shell centered at 0.5, radius 0.4, thickness 0.004
L1 = -0.5 * ((r - 0.4**2) / 0.004)**2
return L1
def gradient_shell(x):
r = ((x - 0.5)**2).sum()
# second term gives the vector pointing to the center
# third term is positive if r > 0.4, negative otherwise
# v = -4 * (x - 0.5) * ((r - 0.4))**3
# v /= (v**2).sum()**0.5
# simplified:
v = gradient_to_center(x)
if r < 0.4:
# point outwards if inside
v *= -1
return v
def volume_shell(loglike, ndim):
""" compute volume enclosed at loglike threshold """
sqr_deviation = -2 * loglike * (0.004)**2
# how far are we from the center of the shell?
deviation = sqr_deviation**0.5
if deviation >= 0.1:
# the volume is still touching the unit cube
return np.nan
# so 0.4 +- deviation is the current shell
outer_volume = nsphere_volume(0.4 + deviation, ndim)
if deviation >= 0.4:
# all of the enclosed volume is contained
inner_volume = 0
else:
inner_volume = nsphere_volume(0.4 - deviation, ndim)
volume = outer_volume - inner_volume
return volume
def warmup_shell(ndim):
radius = 0.1
ctr = 0.5
# choose radial distance inside shell
length = 0.4 + np.random.uniform(-radius, radius)
# choose direction
x = ctr + random_vector(ndim, length=length)
return x
def get_problem(problemname, ndim):
if problemname == 'circgauss':
return loglike_gauss, gradient_gauss, volume_gauss, warmup_gauss
elif problemname == 'asymgauss':
return generate_asymgauss_problem(ndim)
elif problemname == 'corrgauss':
return generate_corrgauss_problem(ndim)
elif problemname == 'pyramid':
return loglike_pyramid, gradient_pyramid, volume_pyramid, warmup_pyramid
elif problemname == 'multigauss':
return loglike_multigauss, gradient_multigauss, volume_multigauss, warmup_multigauss
elif problemname == 'shell':
return loglike_shell, gradient_shell, volume_shell, warmup_shell
raise Exception("Problem '%s' unknown" % problemname)
| 7,381 | 30.147679 | 112 | py |
UltraNest | UltraNest-master/evaluate/viz_sampling.py | import numpy as np
import matplotlib.pyplot as plt
from ultranest.mlfriends import ScalingLayer, AffineLayer, MLFriends
from ultranest.stepsampler import RegionMHSampler, CubeMHSampler
from ultranest.stepsampler import CubeSliceSampler, RegionSliceSampler, SamplingPathSliceSampler, SamplingPathStepSampler, OtherSamplerProxy
from ultranest.stepsampler import GeodesicSliceSampler, RegionGeodesicSliceSampler
#from ultranest.stepsampler import DESampler
import tqdm
from problems import transform, get_problem
def prepare_problem(problemname, ndim, nlive, sampler):
loglike, grad, volume, warmup = get_problem(problemname, ndim=ndim)
if hasattr(sampler, 'set_gradient'):
sampler.set_gradient(grad)
np.random.seed(1)
us = np.random.uniform(size=(nlive, ndim))
if ndim > 1:
transformLayer = AffineLayer()
else:
transformLayer = ScalingLayer()
transformLayer.optimize(us, us)
region = MLFriends(us, transformLayer)
region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
region.create_ellipsoid(minvol=1.0)
Ls = np.array([loglike(u) for u in us])
ncalls = 0
nok = 0
i = 0
while True:
if i % int(nlive * 0.2) == 0:
minvol = (1 - 1./nlive)**i
nextTransformLayer = transformLayer.create_new(us, region.maxradiussq, minvol=minvol)
nextregion = MLFriends(us, nextTransformLayer)
nextregion.maxradiussq, nextregion.enlarge = nextregion.compute_enlargement(nbootstraps=30)
if nextregion.estimate_volume() <= region.estimate_volume():
region = nextregion
transformLayer = region.transformLayer
region.create_ellipsoid(minvol=minvol)
# replace lowest likelihood point
j = np.argmin(Ls)
Lmin = float(Ls[j])
while True:
u, v, logl, nc = sampler.__next__(region, Lmin, us, Ls, transform, loglike)
ncalls += nc
if logl is not None:
break
us[j,:] = u
region.u[j,:] = u
region.unormed[j,:] = region.transformLayer.transform(u)
Ls[j] = logl
i = i + 1
#print(i, Lmin, volume(Lmin, ndim))
if np.isfinite(volume(Lmin, ndim)):
nok += 1
if nok > 2 * nlive + 1000:
break
return region, i, Lmin, us, Ls, transform, loglike
class MLFriendsSampler(object):
def __init__(self):
self.ndraw = 40
self.nsteps = -1
def __next__(self, region, Lmin, us, Ls, transform, loglike):
u, father = region.sample(nsamples=self.ndraw)
nu = u.shape[0]
self.starti = np.random.randint(len(us))
if nu > 0:
u = u[0,:]
v = transform(u)
logl = loglike(v)
accepted = logl > Lmin
if accepted:
return u, v, logl, 1
return None, None, None, 1
return None, None, None, 0
def __str__(self):
return 'MLFriends'
def main(args):
nlive = args.num_live_points
ndim = args.x_dim
nsteps = args.nsteps
problemname = args.problem
#num_warmup_steps = nlive * 10
np.random.seed(1)
#sampler = MLFriendsSampler()
#region, it, Lmin, us, Ls, transform, loglike = prepare_problem(problemname, ndim, nlive, sampler)
samplers = [
#('cubemh', CubeMHSampler(nsteps=1)),
#('regionmh', RegionMHSampler(nsteps=1)),
#('cubeslice', CubeSliceSampler(nsteps=1)),
#('regionslice', RegionSliceSampler(nsteps=1)),
#('pathslice', SamplingPathSliceSampler(nsteps=1)),
#('pathstep', SamplingPathStepSampler(nsteps=12, nresets=12, log=True)),
#('stepsampler', OtherSamplerProxy(nsteps=10, sampler='steps')),
('geodesic', GeodesicSliceSampler(nsteps=2)),
('regiongeodesic', RegionGeodesicSliceSampler(nsteps=2)),
]
if args.sampler != 'all':
samplers = [(name, sampler) for name, sampler in samplers if name == args.sampler]
for samplername, sampler in samplers:
print("exploring with %s ..." % sampler)
region, it, Lmin, us, Ls, transform, loglike = prepare_problem(problemname, ndim, nlive, sampler)
nc = 0
starti = 0
startu = us[starti,:]
# take 20 steps
print("taking %d steps..." % nsteps)
sampler.reset()
sampler.path = None
for i in range(nsteps):
ax = plt.figure(figsize=(10,10)).gca()
filename = 'viz_%s_sampler_%s_step%02d.png' % (problemname, samplername, i+1)
# replace lowest likelihood point
plt.plot(us[:,0], us[:,1], 'x', ms=2, color='k')
Lmin = Ls.min()
#sampler.__next__(region, Lmin, us, Ls, transform, loglike, plot=True)
plt.plot(startu[0], startu[1], 'x', ms=6, color='k')
while True:
unew = sampler.move(startu, region, plot=ax)
mask = np.logical_and(unew > 0, unew < 1).all(axis=1)
unew = unew[mask,:]
mask = region.inside(unew)
if not mask.all():
plt.plot(unew[~mask,0], unew[~mask,1], 'v', color='r')
if mask.any():
plt.plot(unew[mask,0], unew[mask,1], '^', color='b')
# choose first
j = np.where(mask)[0][0]
unew = unew[j,:]
pnew = transform(unew)
Lnew = loglike(pnew)
nc += 1
if Lnew >= Lmin:
plt.plot(unew[0], unew[1], 'o ', ms=4, color='g')
plt.plot([startu[0], unew[0]], [startu[1], unew[1]], '--', color='green')
sampler.adjust_accept(True, unew, pnew, Lnew, nc)
startu = unew
break
else:
plt.plot(unew[0], unew[1], 'o', ms=4, color='orange')
sampler.adjust_accept(False, unew, pnew, Lnew, nc)
else:
sampler.adjust_outside_region()
xlo, xhi = plt.xlim()
ylo, yhi = plt.ylim()
lo = min(xlo, ylo)
hi = max(xhi, yhi)
lo, hi = 0, 1
lo, hi = us.min(), us.max()
lo, hi = lo - (hi - lo), hi + (hi - lo)
plt.xlim(lo, hi)
plt.ylim(lo, hi)
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(filename, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--x_dim', type=int, default=2,
help="Dimensionality")
parser.add_argument("--num_live_points", type=int, default=40)
parser.add_argument("--problem",
choices=['circgauss', 'asymgauss', 'pyramid', 'multigauss', 'shell'])
parser.add_argument('--nsteps', type=int, default=20)
parser.add_argument('--sampler', default='all',
choices=['all', 'cubemh', 'regionmh', 'cubeslice', 'regionslice'])
args = parser.parse_args()
main(args)
| 7,322 | 37.340314 | 140 | py |
UltraNest | UltraNest-master/evaluate/evaluate_sampling.py | import numpy as np
import matplotlib.pyplot as plt
from ultranest.mlfriends import ScalingLayer, AffineLayer, RobustEllipsoidRegion
from ultranest.stepsampler import RegionMHSampler, CubeMHSampler
from ultranest.stepsampler import CubeSliceSampler, RegionSliceSampler, RegionBallSliceSampler, RegionSequentialSliceSampler, SpeedVariableRegionSliceSampler
#from ultranest.stepsampler import AHARMSampler
#from ultranest.stepsampler import OtherSamplerProxy, SamplingPathSliceSampler, SamplingPathStepSampler
#from ultranest.stepsampler import GeodesicSliceSampler, RegionGeodesicSliceSampler
import tqdm
import joblib
import warnings, traceback
from problems import transform, get_problem
mem = joblib.Memory('.', verbose=False)
def quantify_step(a, b):
# euclidean step distance
stepsize = np.linalg.norm(a - b)
# assuming a
center = 0.5
da = a - center
db = b - center
ra = np.linalg.norm(da)
rb = np.linalg.norm(db)
# compute angle between vectors da, db
angular_step = np.arccos(np.dot(da, db) / (ra * rb))
# compute step in radial direction
radial_step = np.abs(ra - rb)
return [stepsize, angular_step, radial_step]
@mem.cache
def evaluate_warmed_sampler(problemname, ndim, nlive, nsteps, sampler, seed=1, region_class=RobustEllipsoidRegion):
loglike, grad, volume, warmup = get_problem(problemname, ndim=ndim)
if hasattr(sampler, 'set_gradient'):
sampler.set_gradient(grad)
np.random.seed(seed)
def multi_loglike(xs):
return np.asarray([loglike(x) for x in xs])
us = np.array([warmup(ndim) for i in range(nlive)])
Ls = np.array([loglike(u) for u in us])
vol0 = volume(Ls.min(), ndim)
nwarmup = 3 * nlive
if ndim > 1:
transformLayer = AffineLayer()
else:
transformLayer = ScalingLayer()
transformLayer.optimize(us, us)
region = region_class(us, transformLayer)
region.maxradiussq, region.enlarge = region.compute_enlargement(nbootstraps=30)
region.create_ellipsoid(minvol=vol0)
assert region.ellipsoid_center is not None
sampler.region_changed(Ls, region)
Lsequence = []
stepsequence = []
ncalls = 0
for i in tqdm.trange(nsteps + nwarmup):
if i % int(nlive * 0.2) == 0:
minvol = (1 - 1./nlive)**i * vol0
with warnings.catch_warnings(), np.errstate(all='raise'):
try:
nextTransformLayer = transformLayer.create_new(us, region.maxradiussq, minvol=minvol)
nextregion = region_class(us, nextTransformLayer)
nextregion.maxradiussq, nextregion.enlarge = nextregion.compute_enlargement(nbootstraps=30)
if isinstance(nextregion, RobustEllipsoidRegion) or nextregion.estimate_volume() <= region.estimate_volume():
nextregion.create_ellipsoid(minvol=minvol)
region = nextregion
transformLayer = region.transformLayer
assert region.ellipsoid_center is not None
sampler.region_changed(Ls, region)
except Warning as w:
print("not updating region because: %s" % w)
except FloatingPointError as e:
print("not updating region because: %s" % e)
except np.linalg.LinAlgError as e:
print("not updating region because: %s" % e)
# replace lowest likelihood point
j = np.argmin(Ls)
Lmin = float(Ls[j])
while True:
u, v, logl, nc = sampler.__next__(region, Lmin, us, Ls, transform, multi_loglike)
if i > nwarmup:
ncalls += nc
if logl is not None:
assert np.isfinite(u).all(), u
assert np.isfinite(v).all(), v
assert np.isfinite(logl), logl
break
if i > nwarmup:
Lsequence.append(Lmin)
stepsequence.append(quantify_step(us[sampler.starti,:], u))
us[j,:] = u
Ls[j] = logl
Lsequence = np.asarray(Lsequence)
return Lsequence, ncalls, np.array(stepsequence)
class MLFriendsSampler(object):
def __init__(self):
self.ndraw = 40
self.nsteps = -1
self.adaptive_nsteps = False
def __next__(self, region, Lmin, us, Ls, transform, loglike):
u = region.sample(nsamples=self.ndraw)
nu = u.shape[0]
self.starti = np.random.randint(len(us))
if nu > 0:
u = u[:1,:]
v = transform(u)
logl = loglike(v)[0]
accepted = logl > Lmin
if accepted:
return u[0], v[0], logl, 1
return None, None, None, 1
return None, None, None, 0
def __str__(self):
return 'MLFriends'
def plot(self, filename):
pass
def region_changed(self, Ls, region):
pass
def main(args):
nlive = args.num_live_points
ndim = args.x_dim
nsteps = args.nsteps
problemname = args.problem
samplers = [
#CubeMHSampler(nsteps=16), #CubeMHSampler(nsteps=4), CubeMHSampler(nsteps=1),
#RegionMHSampler(nsteps=16), #RegionMHSampler(nsteps=4), RegionMHSampler(nsteps=1),
##DESampler(nsteps=16), DESampler(nsteps=4), #DESampler(nsteps=1),
CubeSliceSampler(nsteps=2*ndim), #CubeSliceSampler(nsteps=ndim), CubeSliceSampler(nsteps=max(1, ndim//2)),
#RegionSliceSampler(nsteps=ndim), RegionSliceSampler(nsteps=max(1, ndim//2)),
#RegionSliceSampler(nsteps=2), RegionSliceSampler(nsteps=4),
#RegionSliceSampler(nsteps=ndim), RegionSliceSampler(nsteps=4*ndim),
#RegionBallSliceSampler(nsteps=2*ndim), RegionBallSliceSampler(nsteps=ndim), RegionBallSliceSampler(nsteps=max(1, ndim//2)),
# RegionSequentialSliceSampler(nsteps=2*ndim), RegionSequentialSliceSampler(nsteps=ndim), RegionSequentialSliceSampler(nsteps=max(1, ndim//2)),
#SpeedVariableRegionSliceSampler([Ellipsis]*ndim), SpeedVariableRegionSliceSampler([slice(i, ndim) for i in range(ndim)]),
#SpeedVariableRegionSliceSampler([Ellipsis]*ndim + [slice(1 + ndim//2, None)]*ndim),
]
if ndim < 14:
samplers.insert(0, MLFriendsSampler())
colors = {}
linestyles = {1:':', 2:':', 4:'--', 16:'-', 32:'-', 64:'-', -1:'-'}
markers = {1:'x', 2:'x', 4:'^', 16:'o', 32:'s', 64:'s', -1:'o'}
for isteps, ls, m in (max(1, ndim // 2), ':', 'x'), (ndim, '--', '^'), (ndim+1, '--', '^'), (ndim * 2, '-', 'o'), (ndim * 4, '-.', '^'), (ndim * 8, '-', 'v'), (ndim * 16, '-', '>'):
if isteps not in markers:
markers[isteps] = m
if isteps not in linestyles:
linestyles[isteps] = ls
Lsequence_ref = None
label_ref = None
axL = plt.figure('Lseq').gca()
axS = plt.figure('shrinkage').gca()
axspeed = plt.figure('speed').gca()
plt.figure('stepsize', figsize=(14, 6))
axstep1 = plt.subplot(1, 3, 1)
axstep2 = plt.subplot(1, 3, 2)
axstep3 = plt.subplot(1, 3, 3)
lastspeed = None, None, None
for sampler in samplers:
print("evaluating sampler: %s" % sampler)
Lsequence, ncalls, steps = evaluate_warmed_sampler(problemname, ndim, nlive, nsteps, sampler)
loglike, grad, volume, warmup = get_problem(problemname, ndim=ndim)
assert np.isfinite(Lsequence).all(), Lsequence
vol = np.asarray([volume(Li, ndim) for Li in Lsequence])
assert np.isfinite(vol).any(), ("Sampler has not reached interesting likelihoods", vol, Lsequence)
shrinkage = 1 - (vol[np.isfinite(vol)][1:] / vol[np.isfinite(vol)][:-1])**(1. / ndim)
fullsamplername = str(sampler)
samplername = fullsamplername.split('(')[0]
label = fullsamplername + ' %d evals' % ncalls
if Lsequence_ref is None:
label_ref = label
Lsequence_ref = Lsequence
ls = '-'
color = 'pink'
else:
color = colors.get(samplername)
ls = '-' if sampler.adaptive_nsteps else linestyles[sampler.nsteps]
l, = axL.plot(Lsequence_ref, Lsequence, label=label, color=color, linestyle=ls, lw=1)
colors[samplername] = l.get_color()
# convert to a uniformly distributed variable, according to expectations
cdf_expected = 1 - (1 - shrinkage)**(ndim * nlive)
axS.hist(cdf_expected, cumulative=True, density=True,
histtype='step', bins=np.linspace(0, 1, 4000),
label=label, color=color, ls=ls
)
print("%s shrunk %.4f, from %d shrinkage samples" % (fullsamplername, cdf_expected.mean(), len(shrinkage)))
axspeed.plot(cdf_expected.mean(), ncalls, markers[1 if sampler.adaptive_nsteps else sampler.nsteps], label=label, color=color)
if lastspeed[0] == samplername:
axspeed.plot([lastspeed[1], cdf_expected.mean()], [lastspeed[2], ncalls], '-', color=color)
lastspeed = [samplername, cdf_expected.mean(), ncalls]
stepsize, angular_step, radial_step = steps.transpose()
assert len(stepsize) == len(Lsequence), (len(stepsize), len(Lsequence))
# here we estimate the volume differently: from the expected shrinkage per iteration
it = np.arange(len(stepsizesq))
vol = (1 - 1. / nlive)**it
assert np.isfinite(vol).all(), vol
assert (vol > 0).all(), vol
assert (vol <= 1).all(), vol
relstepsize = stepsize / vol**(1. / ndim)
relradial_step = radial_step / vol**(1. / ndim)
axstep1.hist(relstepsize[np.isfinite(relstepsize)], bins=1000, cumulative=True, density=True,
histtype='step',
label=label, color=color, ls=ls)
axstep2.hist(angular_step, bins=1000, cumulative=True, density=True,
histtype='step',
label=label, color=color, ls=ls)
axstep3.hist(relradial_step, bins=1000, cumulative=True, density=True,
histtype='step',
label=label, color=color, ls=ls)
sampler.plot(filename = 'evaluate_sampling_%s_%dd_N%d_%s.png' % (args.problem, ndim, nlive, samplername))
print('range:', Lsequence_ref[0], Lsequence_ref[-1])
axL.plot([Lsequence_ref[0], Lsequence_ref[-1]], [Lsequence_ref[0], Lsequence_ref[-1]], '-', color='k', lw=1, label=label_ref)
axL.set_xlabel('logL (reference)')
axL.set_ylabel('logL')
lo, hi = Lsequence_ref[int(len(Lsequence_ref)*0.1)], Lsequence_ref[-1]
axL.set_xlim(lo, hi)
axL.set_ylim(lo, hi)
axL.legend(loc='best', prop=dict(size=6))
filename = 'evaluate_sampling_%s_%dd_N%d_L.pdf' % (args.problem, ndim, nlive)
print("plotting to %s ..." % filename)
plt.figure('Lseq')
plt.savefig(filename, bbox_inches='tight')
plt.close()
plt.figure('shrinkage')
plt.xlabel('Shrinkage Volume')
plt.ylabel('Cumulative Distribution')
plt.xlim(0, 1)
plt.plot([0,1], [0,1], '--', color='k')
plt.legend(loc='upper left', prop=dict(size=6))
filename = 'evaluate_sampling_%s_%dd_N%d_shrinkage.pdf' % (args.problem, ndim, nlive)
print("plotting to %s ..." % filename)
plt.savefig(filename, bbox_inches='tight')
plt.close()
plt.figure('speed')
plt.xlabel('Bias')
plt.ylabel('# of function evaluations')
plt.yscale('log')
lo, hi = plt.xlim()
hi = max(0.5 - lo, hi - 0.5, 0.04)
plt.xlim(0.5 - hi, 0.5 + hi)
lo, hi = plt.ylim()
plt.vlines(0.5, lo, hi)
plt.ylim(lo, hi)
plt.legend(loc='best', prop=dict(size=6), fancybox=True, framealpha=0.5)
filename = 'evaluate_sampling_%s_%dd_N%d_speed.pdf' % (args.problem, ndim, nlive)
print("plotting to %s ..." % filename)
plt.savefig(filename, bbox_inches='tight')
plt.savefig(filename.replace('.pdf', '.png'), bbox_inches='tight')
plt.close()
plt.figure('stepsize')
axstep1.set_ylabel('Cumulative Distribution')
axstep1.set_xlabel('Euclidean distance')
axstep1.legend(loc='lower right', prop=dict(size=6))
axstep2.set_ylabel('Cumulative Distribution')
axstep2.set_xlabel('Angular distance')
#axstep2.legend(loc='best', prop=dict(size=6))
axstep3.set_ylabel('Cumulative Distribution')
axstep3.set_xlabel('Radial distance')
#axstep3.legend(loc='best', prop=dict(size=6))
filename = 'evaluate_sampling_%s_%dd_N%d_step.pdf' % (args.problem, ndim, nlive)
print("plotting to %s ..." % filename)
plt.savefig(filename, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--x_dim', type=int, default=2,
help="Dimensionality")
parser.add_argument("--num_live_points", type=int, default=200)
parser.add_argument("--problem", type=str, default="circgauss")
parser.add_argument('--nsteps', type=int, default=1000)
args = parser.parse_args()
main(args)
| 13,011 | 42.959459 | 185 | py |
top-SAT-solvers-2021 | top-SAT-solvers-2021-main/experiment_gen_random_SATs.py | import os
import sys
import re
def terminal(cmd):
return os.popen(cmd).read()
def run(clauses, literals, num_vars):
terminal(f'python3 gen_random_SAT.py {clauses} {literals} {num_vars}')
output = terminal('./kissat_gb/build/kissat random_SAT.cnf | grep process-time:')
match = re.match('c process-time:\s+[^\s]+\s+([0-9\.]+)', output)
t1 = float(match.group(1))
t2 = 1000
return (t1, t2)
def header():
line = 'Clauses,'
line += 'Literals per clause,'
line += 'Variables,'
line += 'KISSAT_GB Time (s),'
line += 'BRUTE_FORCE Time (s),'
return line
def log(clauses, literals, num_vars, t1, t2):
line = str(clauses) + ','
line += str(literals) + ','
line += str(num_vars) + ','
line += str(t1) + ','
line += str(t2) + ','
return line
output = open('experiment_output.csv', 'w')
output.write(header() + '\n')
total_clauses = 1000
total_literals = 1000
total_variables = 1000
step = 100
count = 0
num_samples = (total_clauses / step) * (total_literals / step) * (total_variables / step)
for clauses in range(step, total_clauses, step):
for literals in range(step, total_literals, step):
for num_vars in range(step, total_variables, step):
if(count % 10 == 0): print(f'Progress: {count / num_samples}')
count += 1
(t1, t2) = run(clauses, literals, num_vars)
output.write(log(clauses, literals, num_vars, t1, t2) + '\n')
output.close()
print('SUCCESS!') | 1,399 | 22.728814 | 89 | py |
top-SAT-solvers-2021 | top-SAT-solvers-2021-main/gen_random_SAT.py | #!/usr/bin/python
import sys
import random
clauses = None
literals = None
num_vars = None
if len(sys.argv) == 4: # The file name with two arguments
clauses = int(sys.argv[1])
literals = int(sys.argv[2])
num_vars = int(sys.argv[3])
if num_vars == None:
print('Random SAT Generator')
clauses = int(input('How many clauses? '))
literals = int(input('How many literals per? '))
literals = int(input('How many variable values? '))
file = open('random_SAT.cnf', 'w')
file.write('c random_SAT.cnf\n')
file.write('c\n')
file.write(f'p cnf {num_vars} {clauses}\n')
for clause in range(clauses):
line = ''
for literal in range(literals):
v = random.randint(1, num_vars)
if random.randint(0, 1) == 0: v = -v
line += str(v) + ' '
line += '0'
file.write(line + '\n')
file.close()
print('Successfully generated "random_SAT.cnf".') | 842 | 21.184211 | 57 | py |
FML | FML-master/FML/MakePythonWrapper/test.py | import MyLibrary as mylib
import numpy as np
# Call the first C++ function we defined in Main.cpp
mylib.test(123)
# Example of how to pass more complex data from C++ to Python
# We allocate a struct with a double* and an int and provide
# in the InterfaceFile.i how to extract this in python
data = mylib.getData(10)
for i in range(10):
print( data.get_x(i) )
# Free up memory we allocated in C++ (if you feel like it)
mylib.freeData(data)
# Pass numpy arrays to C++ and use them there
x = np.linspace(1,3,3)
y = np.linspace(1,5,5)
mylib.getNumpyArray(x,y)
| 565 | 24.727273 | 61 | py |
FML | FML-master/FML/HaloModel/PythonWrapper/example.py | import numpy as np
from HaloModelWrapper import HaloModel
import matplotlib.pyplot as plt
import time
import gc
# Parameters
filename = "pofk_lin.txt"
OmegaM = 0.3
w0 = -1.0
wa = 0.0
mu0 = 0.0
mua = 0.0
deltacfac = 1.0
DeltaVirfac = 1.0
cofmfac = 1.0
sigma8norm = True
hmcode = True
verbose = True
# Set up halomodel
model = HaloModel(
filename,
OmegaM,
w0,
wa,
mu0,
mua,
verbose,
hmcode,
sigma8norm,
deltacfac,
DeltaVirfac,
cofmfac)
model2 = HaloModel(
filename,
OmegaM,
w0,
wa,
mu0,
mua,
verbose,
False,
sigma8norm,
deltacfac,
DeltaVirfac,
cofmfac)
# Arrays to evaluate pofk and massfunction at
k = np.exp(np.linspace(np.log(1e-4),np.log(10.0),100))
M = np.exp(np.linspace(np.log(1e6),np.log(1e16),100))
zarr = np.linspace(0.0,4.0,100)
# Set redshift to compute at
z = 0.0
# Compute everything at redshift
model.calc_at_redshift(z)
model.info()
# Fetch P(k,z), Plin(k,z), dndlogM(M) and n(M) at given redshift
# (Since we just computed at z this just fetches the data)
pofk_lin, pofk = model.get_pofk(z, k)
pofk_1h, pofk_2h = model.get_pofk_1h_2h(z, k)
dndlogM, nofM = model.get_nofM(z, M)
deltac = model.get_deltac_of_z(zarr)
# Model without HMCode
deltac2 = model2.get_deltac_of_z(zarr)
pofk_lin2, pofk2 = model2.get_pofk(z, k)
pofk2_1h, pofk2_2h = model2.get_pofk_1h_2h(z, k)
dndlogM2, nofM2 = model2.get_nofM(z, M)
# Plot deltac
plt.plot(zarr,deltac)
plt.plot(zarr,deltac2)
plt.show()
# Make a plot of massfunction
plt.xscale('log')
plt.yscale('log')
plt.plot(M,dndlogM)
plt.plot(M,dndlogM2)
plt.show()
# Make a plot of power-spectra
plt.xscale('log')
plt.yscale('log')
plt.plot(k,pofk,label="HMCode")
plt.plot(k,pofk_1h,label="HMCode 1h")
plt.plot(k,pofk_2h,label="HMCode 2h")
plt.plot(k,pofk_lin,label="Linear")
plt.plot(k,pofk2,label="Non HMCode",ls="dashed")
plt.plot(k,pofk2_1h,label="Non HMCode 1h",ls="dashed")
plt.plot(k,pofk2_2h,label="Non HMCode 2h",ls="dashed")
plt.legend()
plt.show()
| 2,070 | 19.71 | 64 | py |
FML | FML-master/FML/HaloModel/PythonWrapper/HaloModelWrapper.py | import numpy as np
import HaloModelCXX as hm
import matplotlib.pyplot as plt
"""
Input: linear power-spectrum at z=0
Run the halomodel and get the power-spectra
P1h, P2h and P(k,z), halo-massfunctions etc.
"""
class HaloModel:
def __init__(self,
filename_pofk_lin,
OmegaM,
w0=-1.0,
wa=0.0,
mu0=0.0,
mua=0.0,
verbose=False,
hmcode=True,
sigma8norm=True,
deltacfac=1.0,
DeltaVirfac=1.0,
cofmfac=1.0):
self.filename_pofk_lin = filename_pofk_lin
self.OmegaM = OmegaM
self.w0 = w0
self.wa = wa
self.mu0 = mu0
self.mua = mua
self.verbose = verbose
self.hmcode = bool(hmcode)
self.sigma8norm = bool(sigma8norm)
self.deltacfac = deltacfac
self.DeltaVirfac = DeltaVirfac
self.cofmfac = cofmfac
self.is_init = False
self.init()
def init(self):
if(self.is_init):
return
self.halomodel = hm.get()
hm.init(self.halomodel,
self.filename_pofk_lin,
self.OmegaM,
self.w0,
self.wa,
self.mu0,
self.mua,
self.verbose,
self.hmcode,
self.sigma8norm,
self.deltacfac,
self.DeltaVirfac,
self.cofmfac)
self.is_init = True
def info(self):
hm.info(self.halomodel)
def calc_at_redshift(self,z):
hm.calc(self.halomodel,z)
def get_pofk(self, z, k):
self.init()
self.calc_at_redshift(z)
pofk_lin = np.zeros_like(k)
pofk = np.zeros_like(k)
hm.get_pofk(self.halomodel, k, pofk_lin, pofk)
return pofk_lin, pofk
def get_pofk_1h_2h(self, z, k):
self.init()
self.calc_at_redshift(z)
pofk_1h = np.zeros_like(k)
pofk_2h = np.zeros_like(k)
hm.get_pofk_1h_2h(self.halomodel, k, pofk_1h, pofk_2h)
return pofk_1h, pofk_2h
def get_nofM(self, z, M):
self.init()
self.calc_at_redshift(z)
dndlogM = np.zeros_like(M)
n = np.zeros_like(M)
hm.get_nofM(self.halomodel, M, dndlogM, n)
return dndlogM, n
def get_deltac_of_z(self, z):
self.init()
deltac = np.zeros_like(z)
hm.get_deltac(self.halomodel, z, deltac)
return deltac
def __del__(self):
hm.free(self.halomodel)
| 2,194 | 21.864583 | 58 | py |
caracal | caracal-master/caracal/main.py | # -*- coding: future_fstrings -*-
from caracal import log
import caracal
import os
import sys
import ruamel.yaml
import pdb
import traceback
import logging
import shutil
from caracal.dispatch_crew import config_parser
from caracal.dispatch_crew import worker_help
import caracal.dispatch_crew.caltables as mkct
from caracal.workers.worker_administrator import WorkerAdministrator
import stimela
from caracal.schema import SCHEMA_VERSION
# Re-export package-level constants for convenient module-local access.
__version__ = caracal.__version__
pckgdir = caracal.PCKGDIR
DEFAULT_CONFIG = caracal.DEFAULT_CONFIG
# Mapping of template nicknames (used by the --get-default-template option)
# to the sample configuration filenames shipped in sample_configurations/.
# NOTE(review): this assignment also overwrites caracal.SAMPLE_CONFIGS at
# import time, and the package __init__ defines the same mapping -- confirm
# the duplication is intentional.
SAMPLE_CONFIGS = caracal.SAMPLE_CONFIGS = {
    "minimal": "minimalConfig.yml",
    "meerkat": "meerkat-defaults.yml",
    "carate": "carateConfig.yml",
    "meerkat_continuum": "meerkat-continuum-defaults.yml",
    "mosaic_basic": "mosaic_basic_config.yml",
}
SCHEMA = caracal.SCHEMA
# Create the log object
####################################################################
# CARACal imports
####################################################################
####################################################################
# Runtime routines
####################################################################
def print_worker_help(worker):
    """Print the option help for a single worker.

    Returns True on success, or None when no schema file exists for
    *worker* (the caller treats that as "unknown worker").
    """
    schema_path = os.path.join(pckgdir, "schema", "{0:s}_schema.yml".format(worker))
    if not os.path.exists(schema_path):
        return None
    with open(schema_path, "r") as stream:
        schema_dict = ruamel.yaml.load(stream, ruamel.yaml.RoundTripLoader, version=(1, 1))
    helper = worker_help.worker_options(worker, schema_dict["mapping"][worker])
    helper.print_worker()
    return True
def get_default(sample, to):
    """Copy the sample configuration named *sample* to the path *to*.

    Parameters:
        sample: key into SAMPLE_CONFIGS selecting the shipped template.
        to: destination filename for the copy.
    """
    log.info(
        "Dumping default configuration to {0:s} as requested. Goodbye!".format(to))
    sample_config = os.path.join(pckgdir, "sample_configurations",
                                 SAMPLE_CONFIGS[sample])
    # Copy with shutil rather than shelling out to 'cp': portable, safe with
    # paths containing spaces, and consistent with how main() copies sample
    # configs further down in this module.
    shutil.copyfile(sample_config, to)
def log_logo():
    """Print the CARACal ASCII-art banner to stdout and log the version.

    The banner is emitted with print() directly; the installed-version line
    goes through the standard logger.  (Older banner variants that lived
    here as large commented-out print() blocks have been removed.)
    """
    print("""
........................................................................................................................
..........................................................................................................Z.~...........
...........................................................................................................Z.O..........
..................,8OOOOOZ==++,...........................................................................ZZOZ..........
...............?OZOOOOOOOO+======..................~=$ZOOO8OOZ~ ............~~....~7ZZOZOZZZOZZOO$.....,ZZZZZ=..........
.............OOOOOOOOOO$.....~=====...........$88888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZZZZZZZZZZ7...........
...........OOOOO$OOOO7..........====,.......88888888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZZZZZZZZZZZ...........
.........~ZOOO77OOOO.............:===~...Z8888888888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZZZZZZZZZZZZ..........
........$OOOZIIOOOO................====88888887Z8888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZZZZZZZZZZZZZ.........
.......OOOOIIIOOOO..................?888888O++.OO888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZZZZZZZZZZZZZ.........
......+OOO7IIOOOO.................:O8888O8,..Z888888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZZ?.,ZZZZZZZZ=........
....,.OOOII7ZOOO.................8888D78888888888888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZZO:......+OZ$Z.........
.....OOOO77IOOO,..................8,.:I8888888888888OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOZZZZZZZZZZZZO.....................
.....OOOI7I7OOO.......................88888888888888OOOOOOOOOOO~....II8OOOOOOOOOOOOOOO$$,ZZZZZZZZZZZ....................
....$OOO77IOOOZ......................888888888888888OOOOOOO?............?IZI$Z$=?I........ZZZZZZZZZZOZ..................
....OOO$II7OOO,.....................?8888O888888888888888...................................ZZZZZZZZZZZ?................
....OOO7I7IOOO.....................,88887+.,,Z8ID88888..........................................IOZZZZZZZ=..............
....OOO7IIIOOO....................OO8888==..........................................................ZZZZZZI.............
....OOO7IIIOOO...................+888888==..........................................................OZZZZZZ.............
....OOO$II7OOO+..................88888O+==............................................................ZZZZZZ............
....?OOOII7$OOZ..................D888:+=+:................................................................O.............
.....OOO7777OOZ.......................==+...............................................................................
.....7OOOI77OOOZ..................$OOOO=+.....OOOOOOZOO?........$OOOO.........IOOOOOOO?.......OOOO=......OOOO...........
......OOOZI77OOZ..................OOOOOO......OOOOOOOOOOO~......OOOOOO......IOOOOOOOOOOO.....ZOOOOO......OOOO...........
.......OOO$7IZOOO................OOOZZOO......OOOO....ZOOO.....OOO8OOO.....?OOOO.....OZ.....=OOOOOOZ.....OOOO...........
........OOOOI7OOOO..............IOOO+ZOOO.....OOOO....OOOO....7OOO.8OOO....OOOO.............OOZ=.OOO~....OOOO...........
.........OOOOI7OOOO.............OOOI==OOOO....OOOO++IOZOOO...:OOO~..OOOO...OOOO............OOOZ..$OOO....OOOO...........
..........8OOOOIOOOO$..........OOOOOOOOOOO+...OOOOOOOOOOZ....OOOOOOOOOOO,..OOOO...........ZOOOOOOOOOOO...OOOO...........
...........,8OOOO$OOOZ........OOOOOOOOOOOOO...OOOO::OOOO....OOOOOOOOOOOOO..:OOOO=....OO...OOOOOOOOOOOO$..OOOO...........
..............OOOOOOOOOOO===++OOO7,.....OOOO..OOOO...OOOO..~OOO=......OOOO...ZOOOOOOOOOO.OOOO......ZOOO..OOOOOOOOOOO....
................~OOOOOOOOO+=+OOOO.......?OOO$.OOOO....ZOOZ.ZOOO.......$ZOO?....ZOOOOO7..ZOOO,.......OOOO.OOOOOOOOOOO....
......................+I7~=~............................................................................................
........................................................................................................................
........................................................................................................................
    """)
    log.info("Version {1:s} installed at {0:s}".format(pckgdir, str(__version__)))
def execute_pipeline(options, config, block):
    """Run the pipeline (or just regenerate reports) described by *config*.

    Parameters:
        options: parsed command-line options (argparse namespace).
        config: validated configuration dictionary.
        block: accepted for interface compatibility; not referenced in this
            body -- TODO confirm whether callers still need it.

    Exits the process with status 1 on any failure.
    """
    # setup piping infractructure to send messages to the parent
    workers_directory = os.path.join(caracal.pckgdir, "workers")
    # Backend comes from the config; an explicit --container-tech option
    # (other than 'default') overrides it.
    backend = config['general']['backend']
    if options.container_tech and options.container_tech != 'default':
        backend = options.container_tech
    def __run(debug=False):
        """ Executes pipeline """
        # with stream_director(log) as director: # stdout and stderr needs to go to the log as well -- nah
        try:
            pipeline = WorkerAdministrator(config,
                                           workers_directory,
                                           add_all_first=False, prefix=options.general_prefix,
                                           configFileName=options.config, singularity_image_dir=options.singularity_image_dir,
                                           container_tech=backend, start_worker=options.start_worker,
                                           end_worker=options.end_worker, generate_reports=not options.no_reports)
            if options.report:
                # -report: rebuild the HTML reports only, do not rerun workers.
                pipeline.regenerate_reports()
            else:
                # OMS: I don't think this is necessary, as it is not used here directly, and loaded on-demand
                # # Obtain some divine knowledge
                # cdb = mkct.calibrator_database()
                pipeline.run_workers()
        except SystemExit as e:
            # if e.code != 0:
            # NOTE(review): any sys.exit() from a worker is reported as an
            # error here regardless of its exit code.
            log.error("A pipeline worker initiated sys.exit({0:}). This is likely a bug, please report.".format(e.code))
            log.info("  More information can be found in the logfile at {0:s}".format(caracal.CARACAL_LOG))
            log.info("  You are running version {0:s}".format(str(__version__)), extra=dict(logfile_only=True))
            if debug:
                log.warning("you are running with -debug enabled, dropping you into pdb. Use Ctrl+D to exit.")
                pdb.post_mortem(sys.exc_info()[2])
            sys.exit(1)  # indicate failure
        except KeyboardInterrupt:
            log.error("Ctrl+C received from user, shutting down. Goodbye!")
        except Exception as exc:
            # Generic failure: summarize on console, full traceback to logfile.
            log.error("{} [{}]".format(exc, type(exc).__name__), extra=dict(boldface=True))
            log.info("  More information can be found in the logfile at {0:s}".format(caracal.CARACAL_LOG))
            log.info("  You are running version {0:s}".format(str(__version__)), extra=dict(logfile_only=True))
            for line in traceback.format_exc().splitlines():
                log.error(line, extra=dict(traceback_report=True))
            if debug:
                log.warning("you are running with -debug enabled, dropping you into pdb. Use Ctrl+D to exit.")
                pdb.post_mortem(sys.exc_info()[2])
            log.info("exiting with error code 1")
            sys.exit(1)  # indicate failure
    return __run(debug=options.debug)
############################################################################
# Driver entrypoint
############################################################################
def driver():
    """Console-script entry point: strip the program name and delegate to main()."""
    cli_args = sys.argv[1:]
    main(cli_args)
def main(argv):
    """Top-level CLI driver: parse arguments, validate config, run pipeline.

    Parameters:
        argv: command-line argument list (without the program name).

    Handles the early-exit modes (-worker-help, --get-default,
    --print-calibrator-standard, missing config) before handing over to
    execute_pipeline().  Exits with status 1 on configuration errors.
    """
    # parse initial arguments to init basic switches and modes
    parser = config_parser.basic_parser(argv)
    options, _ = parser.parse_known_args(argv)
    caracal.init_console_logging(boring=options.boring, debug=options.debug)
    stimela.logger().setLevel(logging.DEBUG if options.debug else logging.INFO)
    # user requests worker help
    if options.worker_help:
        if not print_worker_help(options.worker_help):
            parser.error("unknown worker '{}'".format(options.worker_help))
        return
    caracal.log.info(f"Invoked as {' '.join(sys.argv)}")
    # User requests default config => dump and exit
    if options.get_default:
        sample_config = SAMPLE_CONFIGS.get(options.get_default_template)
        if sample_config is None:
            parser.error("unknown default template '{}'".format(options.get_default_template))
        sample_config_path = os.path.join(pckgdir, "sample_configurations", sample_config)
        if not os.path.exists(sample_config_path):
            raise RuntimeError("Missing sample config file {}. This is a bug, please report".format(sample_config))
        # validate the file
        try:
            # NOTE: rebinding 'parser' shadows the basic argument parser above.
            parser = config_parser.config_parser()
            _, version = parser.validate_config(sample_config_path)
            if version != SCHEMA_VERSION:
                log.warning("Sample config file {} version is {}, current CARACal version is {}.".format(sample_config,
                                                                                                         version,
                                                                                                         SCHEMA_VERSION))
                log.warning("Proceeding anyway, but please notify the CARACal team to ship a newer sample config!")
        except config_parser.ConfigErrors as exc:
            log.error("{}, list of errors follows:".format(exc))
            for section, errors in exc.errors.items():
                print("  {}:".format(section))
                for err in errors:
                    print("    - {}".format(err))
            sys.exit(1)  # indicate failure
        log.info("Initializing {1} from config template '{0}' (schema version {2})".format(options.get_default_template,
                                                                                           options.get_default, version))
        shutil.copyfile(sample_config_path, options.get_default)
        return
    if options.print_calibrator_standard:
        cdb = mkct.calibrator_database()
        log.info("Found the following reference calibrators (in CASA format):")
        log.info(cdb)
        return
    # if config was not specified (i.e. stayed default), print help and exit
    config_file = options.config
    if config_file == caracal.DEFAULT_CONFIG:
        parser.print_help()
        sys.exit(1)
    try:
        # Full schema-aware parser replaces the basic one from here on.
        parser = config_parser.config_parser()
        config, version = parser.validate_config(config_file)
        if version != SCHEMA_VERSION:
            log.warning("Config file {} schema version is {}, current CARACal version is {}".format(config_file,
                                                                                                    version, SCHEMA_VERSION))
            log.warning("Will try to proceed anyway, but please be advised that configuration options may have changed.")
        # populate parser with items from config
        parser.populate_parser(config)
        # reparse arguments
        caracal.log.info("Loading pipeline configuration from {}".format(config_file), extra=dict(color="GREEN"))
        # Command-line overrides are merged into the config here; both
        # 'options' and 'config' are replaced with the merged result.
        options, config = parser.update_config_from_args(config, argv)
        # raise warning on schema version
    except config_parser.ConfigErrors as exc:
        log.error("{}, list of errors follows:".format(exc))
        for section, errors in exc.errors.items():
            print("  {}:".format(section))
            for err in errors:
                print("    - {}".format(err))
        sys.exit(1)  # indicate failure
    except Exception as exc:
        traceback.print_exc()
        log.error("Error parsing arguments or configuration: {}".format(exc))
        if options.debug:
            log.warning("you are running with -debug enabled, dropping you into pdb. Use Ctrl+D to exit.")
            pdb.post_mortem(sys.exc_info()[2])
        sys.exit(1)  # indicate failure
    if options.report and options.no_reports:
        log.error("-report contradicts --no-reports")
        sys.exit(1)
    log_logo()
    # Very good idea to print user options into the log before running:
    parser.log_options(config)
    execute_pipeline(options, config, block=True)
| 19,547 | 60.278997 | 135 | py |
caracal | caracal-master/caracal/__init__.py | # -*- coding: future_fstrings -*-
import logging.handlers
import pkg_resources
import os
import subprocess
import logging
from time import gmtime, strftime
import stimela
import stimela.utils
##############################################################################
# Globals
##############################################################################
class CaracalException(RuntimeError):
    """Base class for pipeline logic errors."""
class PlayingWithFire(RuntimeError):
    """Raised when silly (dangerous) settings are chosen."""
class UserInputError(CaracalException):
    """Something wrong with user input."""
class ConfigurationError(CaracalException):
    """Something wrong with the configuration."""
class BadDataError(CaracalException):
    """Something wrong with the data."""
class ExtraDependencyError(Exception):
    """Optional dependencies are missing."""

    def __init__(self, message=None, extra=None):
        """Build the error text.

        An explicit *message* wins outright (and *extra* is then ignored);
        otherwise a generic re-install hint is produced, followed -- when
        *extra* names the missing extras group -- by a per-package hint.
        """
        if message:
            self.message = message
        else:
            hint = f"or, install the missing package as: \n 'pip install caracal[{extra}]'" if extra else ""
            self.message = ("Pipeline run requires optional dependencies, please re-install caracal as: "
                            "\n 'pip install caracal[all]'" + hint)
        super().__init__(self.message)
def report_version():
    """Best-effort version string for the caracal package.

    Resolution order:
      1. installed distribution metadata via pkg_resources ("dev" if absent);
      2. `git describe --tags` output, when run from a tagged git checkout;
      3. metadata version plus the short commit hash, for untagged checkouts;
      4. the bare metadata version (installed copy, or git unavailable).

    NOTE(review): pkg_resources is deprecated upstream; importlib.metadata
    is the modern replacement -- confirm before migrating.
    """
    # Distutils standard way to do version numbering
    try:
        __version__ = pkg_resources.require("caracal")[0].version
    except pkg_resources.DistributionNotFound:
        __version__ = "dev"
    # perhaps we are in a github with tags; in that case return describe
    path = os.path.dirname(os.path.abspath(__file__))
    try:
        # work round possible unavailability of git -C
        # (shell string interpolates this package's own install dir, not
        # user-supplied input)
        result = subprocess.check_output(
            'cd %s; git describe --tags' % path, shell=True, stderr=subprocess.STDOUT).rstrip().decode()
    except subprocess.CalledProcessError:
        result = None
    if result is not None and 'fatal' not in result:
        # will succeed if tags exist
        return result
    else:
        # perhaps we are in a github without tags? Cook something up if so
        try:
            result = subprocess.check_output(
                'cd %s; git rev-parse --short HEAD' % path, shell=True, stderr=subprocess.STDOUT).rstrip().decode()
        except subprocess.CalledProcessError:
            result = None
        if result is not None and 'fatal' not in result:
            return __version__ + '-' + result
        else:
            # we are probably in an installed version
            return __version__
# Version string resolved once at import time (distribution metadata or git).
__version__ = VERSION = report_version()
# global settings
PCKGDIR = pckgdir = os.path.dirname(os.path.abspath(__file__))
# this gets renamed once the config is read in
CARACAL_LOG = "log-caracal.txt"
# Fallback configuration used when the user supplies no config file.
DEFAULT_CONFIG = os.path.join(
    PCKGDIR, "sample_configurations", "minimalConfig.yml")
# Per-version schema file shipped with the package.
SCHEMA = os.path.join(
    PCKGDIR, "schema", "schema-{0:s}.yml".format(__version__))
# Template nicknames mapped to sample configuration filenames.
SAMPLE_CONFIGS = {
    "minimal": "minimalConfig.yml",
    "meerkat": "meerkat-defaults.yml",
    "carate": "carateConfig.yml",
    "meerkat_continuum": "meerkat-continuum-defaults.yml",
    "mosaic_basic": "mosaic_basic_config.yml",
}
################################################################################
# Logging
################################################################################
class DelayedFileHandler(logging.handlers.MemoryHandler):
    """A MemoryHandler variant that buffers records until a file is chosen.

    While delaying, records accumulate in the memory buffer; setFilename()
    then attaches a real FileHandler, flushes the backlog into it, and from
    that point records pass straight through.  This allows the log file to
    be switched at startup.
    """

    def __init__(self, filename=None, delay=True):
        target = logging.FileHandler(filename, delay=True) if filename else None
        logging.handlers.MemoryHandler.__init__(self, 100000, target=target)
        self._delay = delay

    def shouldFlush(self, record):
        # Flush continuously once delaying has been switched off.
        return not self._delay

    def setFilename(self, filename, delay=False):
        """Point the handler at *filename*, flushing the backlog unless *delay*."""
        self._delay = delay
        file_target = logging.FileHandler(filename)
        # Mirror this handler's formatting, filtering and level on the target.
        file_target.setFormatter(self.formatter)
        for flt in self.filters:
            file_target.addFilter(flt)
        file_target.setLevel(self.level)
        self.setTarget(file_target)
        if not delay:
            self.flush()
# Logger hierarchy names: Stimela logs as a child of the CARACal logger.
LOGGER_NAME = "CARACal"
STIMELA_LOGGER_NAME = "CARACal.Stimela"
# Module-level debug flag; init_console_logging() overwrites it with the
# boolean --debug option.
DEBUG = 0
log = logging.getLogger(LOGGER_NAME)
# these will be set up by create_logger() and init_console_logging() below
log_filehandler = log_console_handler = log_console_formatter = None
def create_logger():
    """Create the CARACal logger and its delayed file handler.

    Called once as a module-import side effect.  Initializes the Stimela
    logger as a child of the CARACal one and attaches a DelayedFileHandler
    that buffers records until a log file name is chosen.

    Raises:
        RuntimeError: if the Stimela logger was already initialized elsewhere.
    """
    global log, log_filehandler
    log.setLevel(logging.DEBUG)
    log.propagate = False
    # init stimela logger as a sublogger
    if stimela.is_logger_initialized():
        raise RuntimeError("Stimela logger already initialized. This is a bug: you must have an incompatible version of Stimela.")
    stimela.logger(STIMELA_LOGGER_NAME, propagate=True, console=False)
    # File output stays buffered until DelayedFileHandler.setFilename is called.
    log_filehandler = DelayedFileHandler()
    log_filehandler.setFormatter(stimela.log_boring_formatter)
    log_filehandler.setLevel(logging.INFO)
    log.addHandler(log_filehandler)
def init_console_logging(boring=False, debug=False):
    """Sets up console logging.

    Parameters:
        boring: use the plain (non-colour) formatter instead of the colourful one.
        debug: log at DEBUG level to file and skip the console record filter.
    """
    global log_console_handler, log_console_formatter, log_filehandler, DEBUG
    DEBUG = debug
    log_filehandler.setLevel(logging.DEBUG if debug else logging.INFO)
    log_console_formatter = stimela.log_boring_formatter if boring else stimela.log_colourful_formatter
    log_console_handler = logging.StreamHandler()
    log_console_handler.setLevel(logging.INFO)
    log_console_handler.setFormatter(log_console_formatter)
    # add filter to console handler:
    # (the logfile still gets all the messages)
    if not debug:
        def _console_filter(rec):
            # traceback dumps don't go to console
            if getattr(rec, 'traceback_report', None) or getattr(rec, 'logfile_only', None):
                return False
            # for Stimela messages at level <=INFO, only allow through subprocess output and job state
            if rec.name.startswith(STIMELA_LOGGER_NAME) and rec.levelno <= logging.INFO:
                if hasattr(rec, 'stimela_subprocess_output') and rec.stimela_subprocess_output[1] != 'start':
                    return True
                elif hasattr(rec, 'stimela_job_state'):
                    return True
                return False
            return True
        log_console_handler.addFilter(_console_filter)
    log.addHandler(log_console_handler)
def remove_log_handler(hndl):
    """Detach the given handler from the CARACal logger."""
    log.removeHandler(hndl)
def add_log_handler(hndl):
    """Attach the given handler to the CARACal logger."""
    log.addHandler(hndl)
create_logger()
| 6,844 | 30.837209 | 132 | py |
caracal | caracal-master/caracal/tests/__init__.py | 0 | 0 | 0 | py | |
caracal | caracal-master/caracal/schema/__init__.py | SCHEMA_VERSION = "1.1.1"
| 25 | 12 | 24 | py |
caracal | caracal-master/caracal/utils/requires.py | import importlib
from caracal import ExtraDependencyError
def checkimport(package):
    """Return True if *package* is importable in the current environment.

    Uses importlib.util.find_spec, which locates the module without
    actually importing it.
    """
    # Imported locally so the lookup works even when the module top only
    # did "import importlib" -- importlib.util is a submodule that must be
    # imported explicitly to be guaranteed present.
    import importlib.util
    return importlib.util.find_spec(package) is not None
def extras(packages):
    """Decorator factory: require importable *packages* before each call.

    Parameters:
        packages: a module name or list of module names; each is checked
            with checkimport() at call time.

    Raises (from the wrapped call):
        ExtraDependencyError: naming the first missing package as the
            caracal extras group to install.
    """
    from functools import wraps  # local import keeps this module dependency-light
    if isinstance(packages, str):
        packages = [packages]
    def mydecorator(func):
        @wraps(func)  # preserve the wrapped function's name/docstring for introspection
        def inner_func(*args, **kw):
            for package in packages:
                if not checkimport(package):
                    raise ExtraDependencyError(extra=package)
            return func(*args, **kw)
        return inner_func
    return mydecorator
| 635 | 21.714286 | 61 | py |
caracal | caracal-master/caracal/utils/__init__.py | 0 | 0 | 0 | py | |
caracal | caracal-master/caracal/utils/tests/__init__.py | 0 | 0 | 0 | py | |
caracal | caracal-master/caracal/notebooks/__init__.py | import os.path
import glob
import shutil
import jinja2
import traceback
import time
import caracal
from caracal import log
from stimela.utils import xrun, StimelaCabRuntimeError
_j2env = None
SOURCE_NOTEBOOK_DIR = os.path.dirname(__file__)
def setup_default_notebooks(notebooks, output_dir, prefix, config):
    """Install the standard CARACal notebooks (and logo images) into *output_dir*.

    For each name in *notebooks*, either renders the packaged Jinja2
    template "<name>.ipynb.j2" with *config*, or copies a plain
    "<name>.ipynb" shipped with the package.  An existing destination is
    only overwritten when the packaged source is newer.
    """
    # setup logos
    logodir = os.path.join(output_dir, "reports")
    if not os.path.exists(logodir):
        os.mkdir(logodir)
    for png in glob.glob(os.path.join(SOURCE_NOTEBOOK_DIR, "*.png")):
        shutil.copyfile(png, os.path.join(logodir, os.path.basename(png)))
    for notebook in notebooks:
        nbfile = notebook + ".ipynb"
        nbdest = os.path.join(output_dir, "{}-{}".format(prefix, nbfile) if prefix else nbfile)
        # overwrite destination only if source is newer
        dest_mtime = os.path.getmtime(nbdest) if os.path.exists(nbdest) else 0
        # if source is a template, invoke jinja
        nbsrc = os.path.join(SOURCE_NOTEBOOK_DIR, nbfile + ".j2")
        if os.path.exists(nbsrc):
            if os.path.getmtime(nbsrc) > dest_mtime:
                global _j2env
                # lazily create the Jinja2 environment on first use
                if _j2env is None:
                    _j2env = jinja2.Environment(loader=jinja2.PackageLoader('caracal', 'notebooks'),
                                                autoescape=jinja2.select_autoescape(['html', 'xml']))
                template = _j2env.get_template(nbfile + ".j2")
                log.info("Creating standard notebook {} from template".format(nbdest))
                with open(nbdest, "wt") as file:
                    try:
                        print(template.render(**config), file=file)
                    except jinja2.TemplateError as exc:
                        # template failure is logged but deliberately non-fatal
                        log.error("Error rendering notebook template: {}".format(exc), extra=dict(boldface=True))
                        log.info("  More information can be found in the logfile at {0:s}".format(caracal.CARACAL_LOG))
                        for line in traceback.format_exc().splitlines():
                            log.error(line, extra=dict(traceback_report=True))
                        log.info("This is not fatal, continuing")
            else:
                log.info("Standard notebook {} already exists, won't overwrite".format(nbdest))
            continue
        # if source exists as is, copy
        nbsrc = os.path.join(SOURCE_NOTEBOOK_DIR, nbfile)
        if os.path.exists(nbsrc):
            if os.path.getmtime(nbsrc) > dest_mtime:
                log.info("Creating standard notebook {}".format(nbdest))
                shutil.copyfile(nbsrc, nbdest)
            else:
                log.info("Standard notebook {} already exists, won't overwrite".format(nbdest))
            continue
        log.error("Standard notebook {} does not exist".format(nbsrc))
_radiopadre_updated = False
def generate_report_notebooks(notebooks, output_dir, prefix, container_tech):
    """Render the given notebooks to HTML reports via run-radiopadre.

    Parameters:
        notebooks: base notebook names (without extension) to render.
        output_dir: directory containing the .ipynb files and receiving the .html.
        prefix: optional filename prefix ("{prefix}-{notebook}").
        container_tech: "docker" or "singularity"; anything else is skipped
            with a warning, since only those two are handled here.
    """
    opts = ["--non-interactive", "--auto-init"]
    if container_tech == "docker":
        opts.append("--docker")
    elif container_tech == "singularity":
        opts.append("--singularity")
    else:
        # BUGFIX: the '{}' placeholder was never filled in (missing .format
        # call), so the warning used to print a literal '{}'.
        log.warning("Container technology '{}' not supported by radiopadre, skipping report rendering".format(container_tech))
        return
    if caracal.DEBUG:
        opts += ['-v', '2', '--container-debug']
    # disabling as per https://github.com/caracal-pipeline/caracal/issues/1161
    # # first time run with -u
    # global _radiopadre_updated
    # if not _radiopadre_updated:
    #     opts.append('--update')
    #     _radiopadre_updated = True
    start_time = time.time()
    log.info("Rendering report(s)")
    for notebook in notebooks:
        if prefix:
            notebook = "{}-{}".format(prefix, notebook)
        nbdest = os.path.join(output_dir, notebook + ".ipynb")
        nbhtml = os.path.join(output_dir, notebook + ".html")
        if os.path.exists(nbdest):
            try:
                xrun("run-radiopadre", opts + ["--nbconvert", nbdest], log=log)
            except StimelaCabRuntimeError as exc:
                log.warning("Report {} failed to render ({}). HTML report will not be available.".format(nbhtml, exc))
            # check that HTML file actually showed up (sometimes the container doesn't report an error)
            if os.path.exists(nbhtml) and os.path.getmtime(nbhtml) >= start_time:
                log.info("Rendered report {}".format(nbhtml))
            else:
                log.warning("Report {} failed to render".format(nbhtml))
        else:
            log.warning("Report notebook {} not found, skipping report rendering".format(nbdest))
| 4,613 | 39.473684 | 119 | py |
caracal | caracal-master/caracal/workers/ddcal_worker.py | # -*- coding: future_fstrings -*-
import os
import copy
import numpy as np
import sys
import caracal
import stimela.dismissable as sdm
from caracal.dispatch_crew import utils
from stimela.pathformatter import pathformatter as spf
from caracal.utils.requires import extras
NAME = 'Direction-dependent Calibration'
LABEL = "ddcal"
@extras(packages=["astropy", "regions"])
def worker(pipeline, recipe, config):
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs import WCS
from regions import PixCoord, write_ds9, PolygonPixelRegion
npix = config['image_dd']['npix']
cell = config['image_dd']['cell']
use_mask = config['image_dd']['use_mask']
ddsols_t = config['calibrate_dd']['dd_dd_timeslots_int']
ddsols_f = config['calibrate_dd']['dd_dd_chan_int']
dist_ncpu = config['calibrate_dd']['dist_ncpu']
label = config['label_in']
USEPB = config['use_pb']
prefix = pipeline.prefix
INPUT = pipeline.input
DD_DIR = "3GC"
OUTPUT = os.path.join(pipeline.output, DD_DIR)
DDF_LSM = "DDF_lsm.lsm.html"
shared_mem = str(config['shared_mem']) + 'gb'
all_targets, all_msfile, ms_dict = pipeline.get_target_mss(label)
caracal.log.info("All_targets", all_targets)
caracal.log.info("All_msfiles", all_msfile)
if not os.path.exists(OUTPUT):
os.mkdir(OUTPUT)
de_sources_mode = config['calibrate_dd']['de_sources_mode']
if de_sources_mode == 'manual':
de_targets = config['calibrate_dd']['de_target_manual']
de_sources = config['calibrate_dd']['de_sources_manual']
if len(de_targets) != len(de_sources):
caracal.log.error("The number of targets for de calibration does not match sources, please recheck.")
sys.exit(1)
de_dict = dict(zip(de_targets, de_sources))
else:
de_targets = all_targets
dd_image_opts = {
"Data-MS": all_msfile,
"Data-ColName": config['image_dd']['data_colname'],
"Data-ChunkHours": config['image_dd']['data_chunkhours'],
"Output-Mode": config['image_dd']['output_mode'],
"Output-Name": prefix + "-DD-precal",
"Output-Images": 'dmcrioekzp',
"Image-NPix": npix,
"Image-Cell": cell,
"Facets-NFacets": config['image_dd']['facets_nfacets'],
"Weight-ColName": config['image_dd']['weight_col'],
"Weight-Mode": config['image_dd']['weight_mode'],
"Weight-Robust": config['image_dd']['weight_robust'],
"Freq-NBand": config['image_dd']['freq_nband'],
"Freq-NDegridBand": config['image_dd']['freq_ndegridband'],
"Deconv-RMSFactor": config['image_dd']['deconv_rmsfactor'],
"Deconv-PeakFactor": config['image_dd']['deconv_peakfactor'],
"Deconv-Mode": config['image_dd']['deconv_mode'],
"Deconv-MaxMinorIter": config['image_dd']['deconv_maxminoriter'],
"Deconv-Gain": config['image_dd']['deconv_gain'],
"Deconv-FluxThreshold": config['image_dd']['deconv_fluxthr'],
"Deconv-AllowNegative": config['image_dd']['deconv_allownegative'],
"Hogbom-PolyFitOrder": config['image_dd']['hogbom_polyfitorder'],
"Parallel-NCPU": config['image_dd']['parallel_ncpu'],
"Predict-ColName": config['image_dd']["predict_colname"],
"Log-Memory": config['image_dd']["log_memory"],
"Cache-Reset": config['image_dd']["cache_reset"],
"Log-Boring": config["image_dd"]["log_boring"], }
    def make_primary_beam():
        """Queue an Eidos step generating MeerKAT primary-beam images.

        Closes over recipe/prefix/INPUT/OUTPUT/shared_mem from worker().
        """
        eidos_opts = {
            "prefix": prefix,
            "pixels": 256,
            "freq": "850 1715 30",
            "diameter": 4.0,
            "coeff": 'me',
            "coefficients-file": "meerkat_beam_coeffs_em_zp_dct.npy", }
        recipe.add("cab/eidos", "make-pb", eidos_opts,
                   input=INPUT,
                   output=OUTPUT,
                   label="make-pb:: Generate primary beams from Eidos",
                   shared_memory=shared_mem)
        def dd_precal_image(field, ms_list):
            """Make the initial (pre-calibration) DDFacet image of `field`.

            If masking is enabled, first produces a "-DD-masking" image,
            derives a clean mask from it, then reruns DDFacet with that mask
            to produce the "-DD-precal" image used for dE tagging/calibration.
            The recipe is run (and its job list cleared) after each imaging
            pass because later steps need the previous pass's products on disk.
            """
            dd_image_opts_precal = copy.deepcopy(dd_image_opts)
            outdir = field + "_ddcal/"
            image_prefix_precal = "/" + outdir + "/" + prefix + "_" + field  # Add the output subdirectory to the imagename
            dd_ms_list = {"Data-MS": ms_list}
            dd_image_opts_precal.update(dd_ms_list)
            if (use_mask):
                dd_imagename = {"Output-Name": image_prefix_precal + "-DD-masking"}  # Add the mask image prefix
                dd_image_opts_precal.update(dd_imagename)
                recipe.add("cab/ddfacet", "ddf_image-for_mask-{0:s}".format(field), dd_image_opts_precal,
                           input=INPUT,
                           output=OUTPUT,
                           label="ddf_image-for_mask-{0:s}:: DDFacet image for masking".format(field),
                           shared_memory=shared_mem)
                imname = '{0:s}{1:s}.app.restored.fits'.format(image_prefix_precal, "-DD-masking")
                output_folder = "/" + outdir
                recipe.add("cab/cleanmask", "mask_ddf-precal-{0:s}".format(field), {
                    'image': '{0:s}:output'.format(imname),
                    'output': '{0:s}mask_ddf_precal_{1:s}.fits'.format(output_folder, field),
                    'sigma': config['image_dd']['mask_sigma'],
                    'boxes': config['image_dd']['mask_boxes'],
                    'iters': config['image_dd']['mask_niter'],
                    'overlap': config['image_dd']['mask_overlap'],
                    'no-negative': True,
                    'tolerance': config['image_dd']['mask_tol'],
                }, input=INPUT, output=OUTPUT, label='mask_ddf-precal-{0:s}:: Make a mask for the initial ddf image'.format(field), shared_memory=shared_mem)
                # Execute queued steps now so the mask exists for the next pass.
                recipe.run()
                recipe.jobs = []
            dd_imagename = {"Output-Name": image_prefix_precal + "-DD-precal"}
            dd_image_opts_precal.update(dd_imagename)
            if use_mask:
                dd_maskopt = {"Mask-External": "{0:s}mask_ddf_precal_{1:s}.fits:output".format(output_folder, field)}
                dd_image_opts_precal.update(dd_maskopt)
            recipe.add("cab/ddfacet", "ddf_image-{0:s}".format(field), dd_image_opts_precal,
                       input=INPUT,
                       output=OUTPUT,
                       label="ddf_image-{0:s}:: DDFacet initial image for DD calibration".format(field), shared_memory=shared_mem)
            recipe.run()
            recipe.jobs = []
def dd_postcal_image(field, ms_list):
dd_image_opts_postcal = copy.deepcopy(dd_image_opts)
outdir = field + "_ddcal/"
image_prefix_postcal = "/" + outdir + "/" + prefix + "_" + field
dd_ms_list = {"Data-MS": ms_list}
dd_image_opts_postcal.update(dd_ms_list)
caracal.log.info("Imaging", ms_list)
postcal_datacol = config['image_dd']['data_colname_postcal']
dd_imagecol = {"Data-ColName": postcal_datacol}
dd_image_opts_postcal.update(dd_imagecol)
if (use_mask):
dd_imagename = {"Output-Name": image_prefix_postcal + "-DD-masking"}
dd_image_opts_postcal.update(dd_imagename)
recipe.add("cab/ddfacet", "ddf_image-postcal-{0:s}".format(field), dd_image_opts_postcal,
input=INPUT,
output=OUTPUT,
label="ddf_image-postcal-{0:s}:: Primary beam corrected image".format(field),
shared_memory=shared_mem)
imname = '{0:s}{1:s}.app.restored.fits'.format(image_prefix_postcal, "-DD-masking")
output_folder = "/" + outdir
recipe.add("cab/cleanmask", "mask_ddf-postcal-{0:s}".format(field), {
'image': '{0:s}:output'.format(imname),
'output': '{0:s}mask_ddf_postcal_{1:s}.fits:output'.format(output_folder, field),
'sigma': config['image_dd']['mask_sigma'],
'boxes': config['image_dd']['mask_boxes'],
'iters': config['image_dd']['mask_niter'],
'overlap': config['image_dd']['mask_overlap'],
'no-negative': True,
'tolerance': config['image_dd']['mask_tol'],
}, input=INPUT, output=OUTPUT, label='mask_ddf-postcal-{0:s}:: Make a mask for the initial ddf image'.format(field), shared_memory=shared_mem)
recipe.run()
recipe.jobs = []
dd_imagename = {"Output-Name": image_prefix_postcal + "-DD-postcal"}
dd_image_opts_postcal.update(dd_imagename)
if use_mask:
dd_maskopt = {"Mask-External": "{0:s}mask_ddf_postcal_{1:s}.fits:output".format(output_folder, field)}
dd_image_opts_postcal.update(dd_maskopt)
dd_beamopts = {"Beam-Model": "FITS", "Beam-FITSFile": prefix + "'_$(corr)_$(reim).fits':output", "Beam-FITSLAxis": 'px', "Beam-FITSMAxis": "py", "Output-Images": 'dmcriDMCRIPMRIikz'}
if USEPB:
dd_image_opts_postcal.update(dd_beamopts)
recipe.add("cab/ddfacet", "ddf_image-postcal-{0:s}".format(field), dd_image_opts_postcal,
input=INPUT,
output=OUTPUT,
label="ddf_image-postcal-{0:s}:: Primary beam corrected image".format(field),
shared_memory=shared_mem)
# def sfind_intrinsic():
# DDF_INT_IMAGE = prefix+"-DD-precal.int.restored.fits:output"
# DDF_APP_IMAGE = prefix+"-DD-precal.app.restored.fits:output"
# if usepb:
# main_image = DDF_INT_IMAGE
# else:
# main_image = DDF_APP_IMAGE
#
# recipe.add("cab/pybdsm", "intrinsic_sky_model",{
# "filename" : main_image,
# "outfile" : "DDF_lsm",
# "detection_image" : DDF_APP_IMAGE,
# "thresh_pix" : 100,
# "clobber" : True,
# "thresh_isl" : 30,
# "port2tigger" : True,
# "clobber" : True,
# "adaptive_rms_box" : True,
# "spectralindex_do" : False,
# },
# input=INPUT,
# output=OUTPUT,
# label="intrinsic_sky_model:: Find sources in the beam-corrected image")
def dagga(field):
"function to tag sources for dd calibration, very smoky"
key = 'calibrate_dd'
# make a skymodel with only dE taggable sources.
# de_only_model = 'de-only-model.txt'
de_sources_mode = config[key]['de_sources_mode']
print("de_sources_mode:", de_sources_mode)
# if usepb:
# model_cube = prefix+"-DD-precal.cube.int.model.fits"
# else:
# model_cube = prefix+"-DD-precal.cube.app.model.fits"
outdir = field + "_ddcal"
if de_sources_mode == 'auto':
caracal.log.info("Carrying out automatic source taggig for direction dependent calibration")
caracal.log.info('Carrying out automatic dE tagging')
catdagger_opts = {
"ds9-reg-file": "de-{0:s}.reg:output".format(field),
"ds9-tag-reg-file": "de-clusterleads-{0:s}.reg:output".format(field),
"noise-map": prefix + "_" + field + "-DD-precal.app.residual.fits",
"sigma": config[key]['sigma'],
"min-distance-from-tracking-centre": config[key]['min_dist_from_phcentre'],
}
recipe.add('cab/catdagger', 'tag_sources-auto_mode', catdagger_opts, input=INPUT,
output=OUTPUT + "/" + outdir, label='tag_sources-auto_mode::Tag dE sources with CatDagger', shared_memory=shared_mem)
if de_sources_mode == 'manual':
img = prefix + "_" + field + "-DD-precal.app.restored.fits"
imagefile = os.path.join(pipeline.output, DD_DIR, outdir, img)
# print("Imagefile",imagefile)
# print("Pipeline output", pipeline.output)
w = WCS(imagefile)
# coords = config[key]['de_sources_manual']
print(de_dict)
sources_to_tag = de_dict[field.replace("_", "-")]
reg = []
for j in range(len(sources_to_tag.split(";"))):
coords = sources_to_tag.split(";")[j]
size = coords.split(",")[2]
coords_str = coords.split(",")[0] + " " + coords.split(",")[1]
# print("Coordinate String", coords_str)
centre = SkyCoord(coords_str, unit='deg')
separation = int(size) * u.arcsec
# print("Size",separation)
xlist = []
ylist = []
for i in range(5):
ang_sep = (306 / 5) * i * u.deg
p = centre.directional_offset_by(ang_sep, separation)
pix = PixCoord.from_sky(p, w)
xlist.append(pix.x)
ylist.append(pix.y)
vertices = PixCoord(x=xlist, y=ylist)
region_dd = PolygonPixelRegion(vertices=vertices)
reg.append(region_dd)
regfile = "de-{0:s}.reg".format(field)
ds9_file = os.path.join(OUTPUT, outdir, regfile)
# This needs to be rewritten. write_ds9 does not exist any more
write_ds9(reg, ds9_file, coordsys='physical')
def dd_calibrate(field, mslist):
key = 'calibrate_dd'
outdir = field + "_ddcal"
dicomod = prefix + "_" + field + "-DD-precal.DicoModel"
dereg = "de-{0:s}.reg".format(field)
output_cubical = OUTPUT + "/" + outdir
test_path = spf("MODEL_DATA")
for ms in mslist:
mspref = os.path.splitext(ms)[0].replace('-', '_')
step = 'dd_calibrate-{0:s}-{1:s}'.format(mspref, field)
recipe.add('cab/cubical_ddf', step, {
"data-ms": ms,
"data-column": config[key]['dd_data_col'],
"out-column": config[key]['dd_out_data_col'],
"weight-column": config[key]['dd_weight_col'],
"sol-jones": "G,DD", # Jones terms to solve
"sol-min-bl": config[key]['sol_min_bl'], # only solve for |uv| > 300 m
"sol-stall-quorum": config[key]['dd_sol_stall_quorum'],
"g-type": config[key]['dd_g_type'],
"g-clip-high": config[key]['dd_g_clip_high'],
"g-clip-low": config[key]['dd_g_clip_low'],
"g-solvable": True,
"g-update-type": config[key]['dd_g_update_type'],
"g-max-prior-error": config[key]['dd_g_max_prior_error'],
"dd-max-prior-error": config[key]['dd_dd_max_prior_error'],
"g-max-post-error": config[key]['dd_g_max_post_error'],
"dd-max-post-error": config[key]['dd_dd_max_post_error'],
"g-time-int": config[key]['dd_g_timeslots_int'],
"g-freq-int": config[key]['dd_g_chan_int'],
"dist-ncpu": config[key]['dist_ncpu'],
"dist-nworker": config[key]['dist_nworker'],
"dist-max-chunks": config[key]['dist_nworker'],
"dist-max-chunks": config[key]['dist_nworker'],
# "model-beam-pattern": prefix+"'_$(corr)_$(reim).fits':output",
# "montblanc-feed-type": "linear",
# "model-beam-l-axis" : "px",
# "model-beam-m-axis" : "py",
"g-save-to": "g_final-cal_{0:s}_{1:s}.parmdb".format(mspref, field),
"dd-save-to": "dd_cal_final_{0:s}_{1:s}.parmdb".format(mspref, field),
"dd-type": "complex-2x2",
"dd-clip-high": 0.0,
"dd-clip-low": 0.0,
"dd-solvable": True,
"dd-time-int": ddsols_t,
"dd-freq-int": ddsols_f,
"dd-dd-term": True,
"dd-prop-flags": 'always',
"dd-fix-dirs": "0",
"out-subtract-dirs": "1:",
"model-list": spf("MODEL_DATA+-{{}}{}@{{}}{}:{{}}{}@{{}}{}".format(dicomod, dereg, dicomod, dereg), "output", "output", "output", "output"),
"out-name": prefix + "dE_sub",
"out-mode": 'sr',
"out-model-column": "MODEL_OUT",
# "data-freq-chunk" : 1*ddsols_f,
# "data-time-chunk" : 1*ddsols_t,
"data-time-chunk": ddsols_t * int(min(1, config[key]['dist_nworker'])) if (ddsols_f == 0 or config[key]['dd_g_chan_int'] == 0) else ddsols_t * int(min(1, np.sqrt(config[key]['dist_nworker']))),
"data-freq-chunk": 0 if (ddsols_f == 0 or config[key]['dd_g_chan_int'] == 0) else ddsols_f * int(min(1, np.sqrt(config[key]['dist_nworker']))),
"sol-term-iters": "[50,90,50,90]",
"madmax-plot": False,
"out-plots": True,
"madmax-enable": config[key]['madmax_enable'],
"madmax-threshold": config[key]['madmax_thr'],
"madmax-global-threshold": config[key]['madmax_global_thr'],
"madmax-estimate": "corr",
# "out-casa-gaintables" : True,
"degridding-NDegridBand": config['image_dd']['freq_ndegridband'],
'degridding-MaxFacetSize': 0.15,
},
input=INPUT,
# output=OUTPUT+"/"+outdir,
output=output_cubical,
shared_memory=shared_mem,
label='dd_calibrate-{0:s}-{1:s}:: Carry out DD calibration'.format(mspref, field))
def cp_data_column(field, mslist):
outdir = field + "_ddcal"
for ms in mslist:
mspref = os.path.splitext(ms)[0].replace('-', '_')
step = 'cp_datacol-{0:s}-{1:s}'.format(mspref, field)
recipe.add('cab/msutils', step, {
"command": 'copycol',
"msname": ms,
"fromcol": 'SUBDD_DATA',
"tocol": 'CORRECTED_DATA',
},
input=INPUT,
output=OUTPUT + "/" + outdir,
label='cp_datacol-{0:s}-{1:s}:: Copy SUBDD_DATA to CORRECTED_DATA'.format(mspref, field), shared_memory=shared_mem)
def img_wsclean(mslist, field):
key = 'image_wsclean'
outdir = field + "_ddcal"
imweight = config[key]['img_ws_weight']
pref = "DD_wsclean"
mspref = os.path.splitext(mslist[0])[0].replace('-', '_')
step = 'img_wsclean-{0:s}-{1:s}'.format(mspref, field)
recipe.add('cab/wsclean', step, {
"msname": mslist,
"column": config[key]['img_ws_col'],
"weight": imweight if not imweight == 'briggs' else 'briggs {}'.format(config[key]['img_ws_robust']),
"nmiter": sdm.dismissable(config[key]['img_ws_nmiter']),
"npix": config[key]['img_ws_npix'],
"padding": config[key]['img_ws_padding'],
"scale": config[key]['img_ws_cell'],
"prefix": '{0:s}_{1:s}'.format(pref, field),
"niter": config[key]['img_ws_niter'],
"mgain": config[key]['img_ws_mgain'],
"pol": config[key]['img_ws_stokes'],
"taper-gaussian": sdm.dismissable(config[key]['img_ws_uvtaper']),
"channelsout": config[key]['img_ws_nchans'],
"joinchannels": config[key]['img_ws_joinchans'],
"local-rms": config[key]['img_ws_local_rms'],
"fit-spectral-pol": config[key]['img_ws_specfit_nrcoeff'],
"auto-threshold": config[key]['img_ws_auto_thr'],
"auto-mask": config[key]['img_ws_auto_mask'],
"multiscale": config[key]['img_ws_multi_scale'],
"multiscale-scales": sdm.dismissable(config[key]['img_ws_multi_scale_scales']),
"savesourcelist": True if config[key]['img_ws_niter'] > 0 else False,
},
input=INPUT,
output=OUTPUT + "/" + outdir,
version='2.6' if config[key]['img_ws_multi_scale'] else None,
label='img_wsclean-{0:s}-{1:s}:: Image DD-calibrated data with WSClean'.format(mspref, field), shared_memory=shared_mem)
def run_crystalball(mslist, field):
key = 'transfer_model_dd'
outdir = field + "_ddcal"
pref = "DD_wsclean"
crystalball_model = '{0:s}_{1:s}-sources.txt'.format(pref, field)
for ms in mslist:
mspref = os.path.splitext(ms)[0].replace('-', '_')
step = 'crystalball-{0:s}-{1:s}'.format(mspref, field)
recipe.add('cab/crystalball', step, {
"ms": ms,
"sky-model": crystalball_model + ':output',
"row-chunks": config[key]['dd_row_chunks'],
"model-chunks": config[key]['dd_model_chunks'],
"within": sdm.dismissable(config[key]['dd_within'] or None),
"points-only": config[key]['dd_points_only'],
"num-sources": sdm.dismissable(config[key]['dd_num_sources']),
"num-workers": sdm.dismissable(config[key]['dd_num_workers']),
"memory-fraction": config[key]['dd_mem_frac'],
},
input=INPUT,
output=OUTPUT + "/" + outdir, shared_memory=shared_mem,
label='crystalball-{0:s}-{1:s}:: Run Crystalball'.format(mspref, field))
for target in de_targets:
mslist = ms_dict[target]
field = utils.filter_name(target)
caracal.log.info("Processing field", field, "for de calibration:")
if USEPB:
make_primary_beam()
if pipeline.enable_task(config, 'image_dd'):
dd_precal_image(field, mslist)
dagga(field)
if pipeline.enable_task(config, 'calibrate_dd'):
dd_calibrate(field, mslist)
if pipeline.enable_task(config, 'image_dd'):
dd_postcal_image(field, mslist)
if pipeline.enable_task(config, 'copy_data'):
cp_data_column(field, mslist)
if pipeline.enable_task(config, 'image_wsclean'):
img_wsclean(mslist, field)
if pipeline.enable_task(config, 'transfer_model_dd'):
run_crystalball(mslist, field)
| 22,035 | 48.855204 | 209 | py |
caracal | caracal-master/caracal/workers/obsconf_worker.py | # -*- coding: future_fstrings -*-
import caracal.dispatch_crew.utils as utils
import caracal
import sys
import numpy as np
import os
import shutil
from caracal import log
NAME = 'Automatically Categorize Observed Fields'
LABEL = 'obsconf'
def repeat_val(val, n):
    """Return a list containing *val* repeated *n* times.

    Uses list multiplication instead of the original append loop; note that,
    exactly as before, all n slots reference the same object.
    """
    return [val] * n
def worker(pipeline, recipe, config):
    """Categorize observed fields and collect per-MS observation metadata.

    First queues optional obsinfo steps per MS (listobs, summary json,
    sunblocker "vampirisms" report, elevation plots) and runs them, then
    parses the generated obsinfo products to populate per-MS attributes on
    ``pipeline``: field categories (target/gcal/fcal/bpcal/xcal) with their
    RA/Dec/IDs, channelisation, spectral frame and start/end dates.

    Raises:
        caracal.BadDataError: if the MSs mix positive and negative channel
            increments.
        RuntimeError: if no field can be found for a required category.
    """
    recipe.msdir = pipeline.rawdatadir
    recipe.output = pipeline.msdir
    step = None
    for i, (msname, msroot, prefix) in enumerate(zip(pipeline.msnames, pipeline.msbasenames, pipeline.prefix_msbases)):
        # filenames generated
        obsinfo = f'{msroot}-obsinfo.txt'
        summary = f'{msroot}-summary.json'
        elevplot = f'{msroot}-elevation-tracks.png'
        if pipeline.enable_task(config, 'obsinfo'):
            if config['obsinfo']['listobs']:
                if os.path.exists(os.path.join(pipeline.msdir, obsinfo)):
                    caracal.log.info(f"obsinfo file {obsinfo} exists, not regenerating")
                else:
                    step = f'listobs-ms{i}'
                    recipe.add('cab/casa_listobs', step,
                               {
                                   "vis": msname,
                                   "listfile": obsinfo,
                                   "overwrite": True,
                               },
                               input=pipeline.input,
                               output=pipeline.msdir,
                               label='{0:s}:: Get observation information ms={1:s}'.format(step, msname))
            if config['obsinfo']['summary_json']:
                if os.path.exists(os.path.join(pipeline.msdir, summary)):
                    caracal.log.info(f"summary file {summary} exists, not regenerating")
                else:
                    step = f'summary_json-ms{i}'
                    recipe.add('cab/msutils', step,
                               {
                                   "msname": msname,
                                   "command": 'summary',
                                   "display": False,
                                   "outfile": summary,
                               },
                               input=pipeline.input,
                               output=pipeline.msdir,
                               label='{0:s}:: Get observation information as a json file ms={1:s}'.format(step, msname))
            if config['obsinfo']['vampirisms']:
                step = 'vampirisms-ms{0:d}'.format(i)
                recipe.add('cab/sunblocker', step,
                           {
                               "command": 'vampirisms',
                               "inset": msname,
                               "dryrun": True,
                               "nononsoleil": True,
                               "verb": True,
                           },
                           input=pipeline.input,
                           output=pipeline.msdir,
                           label='{0:s}:: Note sunrise and sunset'.format(step))
            if pipeline.enable_task(config['obsinfo'], 'plotelev'):
                if os.path.exists(os.path.join(pipeline.msdir, elevplot)):
                    caracal.log.info(f"elevation plot {elevplot} exists, not regenerating")
                else:
                    step = "elevation-plots-ms{:d}".format(i)
                    if config['obsinfo']["plotelev"]["plotter"] in ["plotms"]:
                        recipe.add("cab/casa_plotms", step, {
                            "vis": msname,
                            "xaxis": "hourangle",
                            "yaxis": "elevation",
                            "coloraxis": "field",
                            "plotfile": elevplot,
                            "overwrite": True,
                        },
                            input=pipeline.input,
                            output=pipeline.msdir,
                            label="{:s}:: Plotting elevation tracks".format(step))
                    elif config['obsinfo']["plotelev"]["plotter"] in ["owlcat"]:
                        recipe.add("cab/owlcat_plotelev", step, {
                            "msname": msname,
                            "output-name": elevplot
                        },
                            input=pipeline.input,
                            output=pipeline.msdir,
                            label="{:s}:: Plotting elevation tracks".format(step))
    # if any steps at all were inserted, run the recipe
    if step is not None:
        recipe.run()
        recipe.jobs = []
    # initialise per-MS attribute lists from the config defaults
    for item in 'xcal fcal bpcal gcal target refant minbase maxdist'.split():
        val = config[item]
        for attr in ["", "_ra", "_dec", "_id"]:
            setattr(pipeline, item + attr, repeat_val(val, pipeline.nobs))
    setattr(pipeline, 'nchans', repeat_val(None, pipeline.nobs))
    setattr(pipeline, 'firstchanfreq', repeat_val(None, pipeline.nobs))
    setattr(pipeline, 'lastchanfreq', repeat_val(None, pipeline.nobs))
    setattr(pipeline, 'chanwidth', repeat_val(None, pipeline.nobs))
    setattr(pipeline, 'specframe', repeat_val(None, pipeline.nobs))
    setattr(pipeline, 'startdate', repeat_val(None, pipeline.nobs))
    setattr(pipeline, 'enddate', repeat_val(None, pipeline.nobs))
    for i, (msname, msroot, prefix) in enumerate(zip(pipeline.msnames, pipeline.msbasenames, pipeline.prefix_msbases)):
        caracal.log.info(f"MS #{i}: {msname}")
        msdict = pipeline.get_msinfo(msname)
        obsinfo = f'{msroot}-obsinfo.txt'
        summary = f'{msroot}-summary.json'
        elevplot = f'{msroot}-elevation-tracks.png'
        # copy these to obsinfo dir if needed
        for filename in obsinfo, summary, elevplot:
            src, dest = os.path.join(pipeline.msdir, filename), os.path.join(pipeline.obsinfo, filename)
            if os.path.exists(src) and (not os.path.exists(dest) or os.path.getmtime(dest) < os.path.getmtime(src)):
                # BUG FIX: message previously did not name the copied file
                caracal.log.info(f"generated new obsinfo/{filename}")
                shutil.copyfile(src, dest)
        # get the actual date stamp for the start and end of the observations.
        # This info appears to not be present in the json file (just totals and
        # start times without slew times) so we get it from the txt file.
        with open(os.path.join(pipeline.msdir, obsinfo), 'r') as stdr:
            content = stdr.readlines()
            for line in content:
                info_on_line = [x for x in line.split() if x != '']
                if len(info_on_line) > 2:
                    if info_on_line[0].lower() == 'observed' and info_on_line[1].lower() == 'from':
                        calender_month_abbr = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov',
                                               'dec']
                        startdate, starttime = info_on_line[2].split('/')
                        hr, minute, sec = starttime.split(':')
                        day, month_abbr, year = startdate.split('-')
                        month_num = '{:02d}'.format(calender_month_abbr.index(month_abbr.lower()) + 1)
                        correct_date = ''.join([year, month_num, day, hr, minute, sec])
                        pipeline.startdate[i] = float(correct_date)
                        enddate, endtime = info_on_line[4].split('/')
                        hr, minute, sec = endtime.split(':')
                        day, month_abbr, year = enddate.split('-')
                        month_num = '{:02d}'.format(calender_month_abbr.index(month_abbr.lower()) + 1)
                        correct_date = ''.join([year, month_num, day, hr, minute, sec])
                        pipeline.enddate[i] = float(correct_date)
        # get reference antenna LEAVING THIS LINE HERE
        # FOR WHEN WE COME UP WITH A WAY TO AUTOSELECT
        # if config.get('refant') == 'auto':
        #     pipeline.refant[i] = '0'
        # Get channels in MS
        spw = msdict['SPW']['NUM_CHAN']
        pipeline.nchans[i] = spw
        caracal.log.info(' {0:d} spectral windows, with NCHAN={1:s}'.format(
            len(spw), ','.join(map(str, spw))))
        # Get first chan, last chan, chan width
        chfr = msdict['SPW']['CHAN_FREQ']
        firstchanfreq = [ss[0] for ss in chfr]
        lastchanfreq = [ss[-1] for ss in chfr]
        chanwidth = [(ss[-1] - ss[0]) / (len(ss) - 1) for ss in chfr]
        pipeline.firstchanfreq[i] = firstchanfreq
        pipeline.lastchanfreq[i] = lastchanfreq
        pipeline.chanwidth[i] = chanwidth
        caracal.log.info(' CHAN_FREQ from {0:s} Hz to {1:s} Hz with average channel width of {2:s} Hz'.format(
            ','.join(map(str, firstchanfreq)), ','.join(map(str, lastchanfreq)), ','.join(map(str, chanwidth))))
        if i == pipeline.nobs - 1 and np.max(pipeline.chanwidth) > 0 and np.min(pipeline.chanwidth) < 0:
            # BUG FIX: caracal.log has no "err" method; use "error" as elsewhere
            caracal.log.error('Some datasets have a positive channel increment, some negative. This will lead to errors. Exiting')
            raise caracal.BadDataError("MSs with mixed channel ordering not supported")
        # Get spectral frame
        pipeline.specframe[i] = msdict['SPW']['MEAS_FREQ_REF']
        targetinfo = msdict['FIELD']
        intents = utils.categorize_fields(msdict)
        # Save all fields in a list
        all_fields = msdict["FIELD"]["NAME"]
        # The order of fields here is important
        for term in "target gcal fcal bpcal xcal".split():
            conf_fields = getattr(pipeline, term)[i]
            conf_fields_str = ','.join(conf_fields)
            label, fields = intents[term]
            label = ",".join(label) or term
            # no fields set for this term -- make empty and continue
            if not conf_fields:
                found_fields = []
            # check if user set fields manually
            elif conf_fields_str == "all":
                found_fields = fields
            elif conf_fields_str == "longest":
                f = utils.observed_longest(msdict, fields)
                found_fields = [f] if f else []
            elif conf_fields_str == "nearest":
                f = utils.select_gcal(msdict, fields, mode="nearest")
                found_fields = [f] if f else []
            else:
                found_fields = set(all_fields).intersection(conf_fields)
            getattr(pipeline, term)[i] = list(found_fields)
            if not found_fields:
                # complain if not found, unless it's 'xcal', which is only for polcal, so let that worker complain
                if term != "xcal":
                    raise RuntimeError(f"Can't find an appropriate FIELD for obsinfo: {term}: {conf_fields_str}. "
                                       "Please check this config setting. It may also be that your MS scan intents "
                                       f"are not populated correctly, in which case you must set {term} to a list of explicit field names.")
                continue
            caracal.log.info(f" {term} ({label}):")
            _ra = []
            _dec = []
            _fid = []
            for f in found_fields:
                fid = utils.get_field_id(msdict, f)[0]
                targetpos = targetinfo['REFERENCE_DIR'][fid][0]
                # REFERENCE_DIR is stored in radians; convert to degrees
                ra = targetpos[0] / np.pi * 180
                dec = targetpos[1] / np.pi * 180
                _ra.append(ra)
                _dec.append(dec)
                _fid.append(fid)
                tobs = utils.field_observation_length(msdict, f) / 60.0
                caracal.log.info(
                    ' {0:s} (ID={1:d}) : {2:.2f} minutes | RA={3:.2f} deg, Dec={4:.2f} deg'.format(f, fid, tobs, ra, dec))
            getattr(pipeline, term + "_ra")[i] = _ra
            getattr(pipeline, term + "_dec")[i] = _dec
            getattr(pipeline, term + "_id")[i] = _fid
| 12,255 | 48.220884 | 157 | py |
caracal | caracal-master/caracal/workers/prep_worker.py | # -*- coding: future_fstrings -*-
import os
import sys
import caracal
from caracal.dispatch_crew.utils import closeby
import caracal.dispatch_crew.caltables as mkct
import numpy as np
from caracal.workers.utils import manage_flagsets as manflags
from caracal.dispatch_crew import utils
from caracal.utils.requires import extras
NAME = "Prepare Data for Processing"
LABEL = 'prep'
def getfield_coords(info, field, db, tol=2.9E-3, tol_diff=4.8481E-6):
    """Cross-match `field` against a calibrator database by position.

    Parameters:
        info (dict): obsinfo dictionary (as read by yaml) with a FIELD table
        field (str): field name
        db: calibrator database as returned by calibrator_database()
        tol (float): match radius in radians
        tol_diff (float): coordinate-agreement tolerance in radians

    Returns (key, ra, decl) of the first database calibrator within `tol`
    of the field position whose coordinates differ from the MS by more than
    `tol_diff` (i.e. a match whose MS coordinates need correcting). Returns
    (None, None, None) if there is no match, or if the matched coordinates
    already agree to within `tol_diff`.
    """
    # Get position of field in msinfo
    ind = info['FIELD']['NAME'].index(field)
    firade = info['FIELD']['DELAY_DIR'][ind][0]
    firade[0] = np.mod(firade[0], 2 * np.pi)
    dbcp = db.db
    caracal.log.info("Checking for crossmatch")
    # BUG FIX: caracal.log.info() is not print(); pass a single message
    caracal.log.info("Database keys: {}".format(list(dbcp.keys())))
    for key in dbcp.keys():
        carade = [dbcp[key]['ra'], dbcp[key]['decl']]
        if closeby(carade, firade, tol=tol):
            if not closeby(carade, firade, tol=tol_diff):
                return key, dbcp[key]['ra'], dbcp[key]['decl']
            else:
                caracal.log.info("Calibrator coordinates match within the specified tolerance.")
                return None, None, None
    return None, None, None
def worker(pipeline, recipe, config):
    """Prepare MSs for processing.

    Per MS: verify the file exists, check (and optionally fix, via CASA
    fixvis) calibrator coordinates against the calibrator databases, fix UVW
    coordinates, manage flag versions (legacy save or restore), optionally
    reset MODEL_DATA with clearcal, and set up spectral weights.

    Raises:
        IOError: if an input MS does not exist.
        RuntimeError: on flag-version conflicts or an unknown specweights mode.
    """
    label = config['label_in']
    wname = pipeline.CURRENT_WORKER
    field_name = config["field"]
    msdir = pipeline.msdir
    for i in range(pipeline.nobs):
        prefix_msbase = pipeline.prefix_msbases[i]
        mslist = pipeline.get_mslist(i, label, target=(field_name == "target"))
        for msname in mslist:
            if not os.path.exists(os.path.join(msdir, msname)):
                caracal.log.error(f"MS file {msdir}/{msname} does not exist. Please check that is where it should be.")
                raise IOError
            tol = config["tol"]
            tol_diff = config["tol_diff"]
            # Convert tolerance from arcseconds to radians:
            tol = tol * np.pi / (180.0 * 3600.0)
            tol_diff = tol_diff * np.pi / (180.0 * 3600.0)
            db = mkct.calibrator_database()
            dbc = mkct.casa_calibrator_database()
            msdict = pipeline.get_msinfo(msname)
            ra_corr = None
            dec_corr = None
            if field_name != 'target':
                for f in pipeline.bpcal[i]:
                    fielddb, ra_corr, dec_corr = getfield_coords(msdict, f, db, tol=tol, tol_diff=tol_diff)
                    if fielddb is None:
                        caracal.log.info("Checking the CASA database of calibrators.")
                        fielddb, ra_corr, dec_corr = getfield_coords(msdict, f, dbc, tol=tol, tol_diff=tol_diff)
                    if fielddb is not None:
                        caracal.log.info("The coordinates of calibrator {0:s} in the MS are offset. This is a known problem for some vintage MeerKAT MSs.".format(f))
                        if pipeline.enable_task(config, 'fixcalcoords'):
                            caracal.log.info("We will now attempt to fix this by rephasing the visibilities using the CASA fixvis task.")
                            # database coordinates are radians; fixvis wants degrees
                            ra_corr = float(ra_corr * 180.0 / np.pi)
                            dec_corr = float(dec_corr * 180.0 / np.pi)

                            @extras("astropy")
                            def needs_astropy():
                                from astropy.coordinates import SkyCoord
                                return SkyCoord(ra_corr, dec_corr, unit='deg')
                            c = needs_astropy()
                            coordstring = 'J2000 ' + c.to_string('hmsdms')
                            step = 'fixuvw-ms{0:d}-{1:s}'.format(i, f)
                            recipe.add('cab/casa_fixvis', step,
                                       {
                                           "vis": msname,
                                           "field": f,
                                           "phasecenter": coordstring,
                                           "reuse": False,
                                           "outputvis": msname,
                                       },
                                       input=pipeline.input,
                                       output=pipeline.output,
                                       label='{0:s}:: Fix bpcal coordinates ms={1:s}'.format(step, msname))
                        else:
                            caracal.log.error("###### WE RECOMMEND SWITCHING ON THE fixcalcoords OPTION #######")
            if pipeline.enable_task(config, 'fixuvw'):
                step = 'fixuvw-ms{:d}'.format(i)
                recipe.add('cab/casa_fixvis', step,
                           {
                               "vis": msname,
                               "reuse": False,
                               "outputvis": msname,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Fix UVW coordinates ms={1:s}'.format(step, msname))
            if pipeline.enable_task(config, "manage_flags"):
                mode = config["manage_flags"]["mode"]
                available_flagversions = manflags.get_flags(pipeline, msname)
                if mode == "legacy":
                    version = "caracal_legacy"
                    if version not in available_flagversions:
                        caracal.log.info('The file {0:s} does not yet have a flag version called "caracal_legacy". Saving the current FLAG column to "caracal_legacy".'.format(msname))
                        step = "save-legacy-{0:s}-ms{1:d}".format(wname, i)
                        manflags.add_cflags(pipeline, recipe, version, msname, cab_name=step)
                    else:
                        caracal.log.info('The file {0:s} already has a flag version called "caracal_legacy". Restoring it.'.format(msname))
                        version = "caracal_legacy"
                        step = "restore-flags-{0:s}-ms{1:d}".format(wname, i)
                        manflags.restore_cflags(pipeline, recipe, version,
                                                msname, cab_name=step)
                        # drop any flag versions made after the one we restored
                        if available_flagversions[-1] != version:
                            step = 'delete-flag_versions-after-{0:s}-ms{1:d}'.format(version, i)
                            manflags.delete_cflags(pipeline, recipe,
                                                   available_flagversions[available_flagversions.index(version) + 1],
                                                   msname, cab_name=step)
                elif mode == "restore":
                    version = config["manage_flags"]["version"]
                    if version == 'auto':
                        version = '{0:s}_{1:s}_before'.format(pipeline.prefix, wname)
                    if version in available_flagversions:
                        step = "restore-flags-{0:s}-ms{1:d}".format(wname, i)
                        manflags.restore_cflags(pipeline, recipe, version,
                                                msname, cab_name=step)
                        if available_flagversions[-1] != version:
                            step = 'delete-flag_versions-after-{0:s}-ms{1:d}'.format(version, i)
                            manflags.delete_cflags(pipeline, recipe,
                                                   available_flagversions[available_flagversions.index(version) + 1],
                                                   msname, cab_name=step)
                    else:
                        caracal.log.error('The flag version {0:s} you asked to restore does not exist for {1:s}.'.format(version, msname))
                        if version == "caracal_legacy":
                            caracal.log.error('You may actually want to create that "caracal legacy" flag version with:')
                            caracal.log.error('    prepare_data: manage_flags: mode: save_legacy_flags')
                        raise RuntimeError('Flag version conflicts')
            if config["clearcal"]:
                step = 'clearcal-ms{:d}'.format(i)
                fields = set(pipeline.fcal[i] + pipeline.bpcal[i])
                recipe.add('cab/casa_clearcal', step,
                           {
                               "vis": msname,
                               "field": ",".join(fields),
                               "addmodel": config['clearcal']['addmodel']
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Reset MODEL_DATA ms={1:s}'.format(step, msname))
            if pipeline.enable_task(config, "specweights"):
                specwts = config['specweights']["mode"]
                if specwts == "uniform":
                    step = 'init_ws-ms{:d}'.format(i)
                    recipe.add('cab/casa_script', step,
                               {
                                   "vis": msname,
                                   "script": "vis = os.path.join(os.environ['MSDIR'], '{:s}')\n"
                                             "initweights(vis=vis, wtmode='weight', dowtsp=True)".format(msname),
                               },
                               input=pipeline.input,
                               output=pipeline.output,
                               label='{0:s}:: Adding Spectral weights using MeerKAT noise specs ms={1:s}'.format(step, msname))
                elif specwts == "calculate":
                    _config = config["specweights"]
                    step = 'calculate_ws-ms{:d}'.format(i)
                    recipe.add('cab/msutils', step,
                               {
                                   "msname": msname,
                                   "command": 'estimate_weights',
                                   "stats_data": _config['calculate']['statsfile'],
                                   "weight_columns": _config['calculate']['weightcols'],
                                   "noise_columns": _config['calculate']['noisecols'],
                                   "write_to_ms": _config['calculate']['apply'],
                                   "plot_stats": prefix_msbase + '-noise_weights.png',
                               },
                               input=pipeline.input,
                               output=pipeline.diagnostic_plots,
                               label='{0:s}:: Adding Spectral weights using MeerKAT noise specs ms={1:s}'.format(step, msname))
                elif specwts == "delete":
                    step = 'delete_ws-ms{:d}'.format(i)
                    recipe.add('cab/casa_script', step,
                               {
                                   "vis": msname,
                                   "script": "vis = os.path.join(os.environ['MSDIR'], '{msname:s}') \n"
                                             "colname = '{colname:s}' \n"
                                             "tb.open(vis, nomodify=False) \n"
                                             "try: tb.colnames().index(colname) \n"
                                             "except ValueError: pass \n"
                                             "finally: tb.close(); quit \n"
                                             "tb.open(vis, nomodify=False) \n"
                                             "try: tb.removecols(colname) \n"
                                             "except RuntimeError: pass \n"
                                             "finally: tb.close()".format(msname=msname, colname="WEIGHT_SPECTRUM"),
                               },
                               input=pipeline.input,
                               output=pipeline.output,
                               label='{0:s}:: deleting WEIGHT_SPECTRUM if it exists ms={1:s}'.format(step, msname))
                else:
                    raise RuntimeError("Specified specweights [{0:s}] mode is unknown".format(specwts))
| 12,765 | 53.7897 | 183 | py |
caracal | caracal-master/caracal/workers/line_worker.py | # -*- coding: future_fstrings -*-
import sys
import os
import glob
import stimela.dismissable as sdm
import stimela.recipe
import shutil
import itertools
import json
import psutil
import re
import datetime
import numpy as np
import caracal
from caracal.dispatch_crew import utils, noisy
from caracal.workers.utils import manage_flagsets as manflags
from caracal import log
from caracal.workers.utils import remove_output_products
from caracal.workers.utils import image_contsub
from caracal.workers.utils import flag_Uzeros
from casacore.tables import table
from caracal.utils.requires import extras
NAME = 'Process and Image Line Data'
LABEL = 'line'
def get_relative_path(path, pipeline):
    """Return *path* relative to the pipeline output directory
    (e.g. cubes/<dir> given output/cubes/<dir>)."""
    base = pipeline.output
    return os.path.relpath(path, base)
def add_ms_label(msname, label="mst"):
    """Insert ``_<label>`` into an MS name just before its extension.

    E.g. ``foo.ms`` becomes ``foo_mst.ms`` with the default label.
    """
    root, extension = os.path.splitext(msname)
    return "".join((root, "_", label, extension))
@extras("astropy")
def freq_to_vel(filename, reverse):
    """Convert the spectral axis of a FITS cube in place between frequency and radio velocity.

    With ``reverse=False`` a frequency axis (CTYPE3 containing 'FREQ') is
    converted to radio velocity (VRAD, implicit FITS default units m/s); with
    ``reverse=True`` a VRAD axis is converted back to frequency (implicit
    units Hz). The rest frequency is read from the 'restfreq' header keyword
    if present, otherwise the HI line frequency is assumed; either way it is
    (re)written to the header. Missing files and cubes whose spectral axis
    does not match the requested direction are skipped with a warning.
    """
    from astropy.io import fits
    C = 2.99792458e+8  # speed of light (m/s)
    HI = 1.4204057517667e+9  # HI line rest frequency (Hz)
    if not os.path.exists(filename):
        caracal.log.warn(
            'Skipping conversion for {0:s}. File does not exist.'.format(filename))
    else:
        with fits.open(filename, mode='update') as cube:
            headcube = cube[0].header
            if 'restfreq' in headcube:
                restfreq = float(headcube['restfreq'])
            else:
                restfreq = HI
            # add rest frequency to FITS header (written back even if it was present)
            headcube['restfreq'] = restfreq
            # convert from frequency to radio velocity
            # (radio convention: v = C * (1 - f / f_rest))
            if (headcube['naxis'] > 2) and ('FREQ' in headcube['ctype3']) and not reverse:
                headcube['cdelt3'] = -C * float(headcube['cdelt3']) / restfreq
                headcube['crval3'] = C * \
                    (1 - float(headcube['crval3']) / restfreq)
                # FITS standard for radio velocity as per
                # https://fits.gsfc.nasa.gov/standard40/fits_standard40aa-le.pdf
                headcube['ctype3'] = 'VRAD'
                if 'cunit3' in headcube:
                    # delete cunit3 because we adopt the default units = m/s
                    del headcube['cunit3']
            # convert from radio velocity to frequency
            # (the original condition tested naxis > 2 twice; duplicate removed)
            elif (headcube['naxis'] > 2) and ('VRAD' in headcube['ctype3']) and reverse:
                headcube['cdelt3'] = -restfreq * float(headcube['cdelt3']) / C
                headcube['crval3'] = restfreq * \
                    (1 - float(headcube['crval3']) / C)
                headcube['ctype3'] = 'FREQ'
                if 'cunit3' in headcube:
                    # delete cunit3 because we adopt the default units = Hz
                    del headcube['cunit3']
            else:
                if not reverse:
                    caracal.log.warn(
                        'Skipping conversion for {0:s}. Input is not a cube or not in frequency.'.format(filename))
                else:
                    caracal.log.warn(
                        'Skipping conversion for {0:s}. Input is not a cube or not in velocity.'.format(filename))
@extras("astropy")
def remove_stokes_axis(filename):
    """Drop a degenerate 4th (STOKES) axis from a FITS cube, in place.

    Files that do not exist, have fewer than 4 axes, or whose 4th axis is
    not STOKES are left untouched and a warning is logged.
    """
    from astropy.io import fits
    if not os.path.exists(filename):
        caracal.log.warn(
            'Skipping Stokes axis removal for {0:s}. File does not exist.'.format(filename))
        return
    with fits.open(filename, mode='update') as cube:
        hdr = cube[0].header
        if not ((hdr['naxis'] == 4) and (hdr['ctype4'] == 'STOKES')):
            caracal.log.warn(
                'Skipping Stokes axis removal for {0:s}. Input cube has less than 4 axis or the 4th axis type is not "STOKES".'.format(filename))
            return
        caracal.log.info('Working on {}'.format(filename))
        # drop the leading (Stokes) plane and the matching WCS keywords
        cube[0].data = cube[0].data[0]
        for keyword in ('cdelt4', 'crpix4', 'crval4', 'ctype4'):
            del hdr[keyword]
        if 'cunit4' in hdr:
            del hdr['cunit4']
@extras("astropy")
def fix_specsys_ra(filename, specframe):
    """Fix the spectral reference frame keyword and wrap negative CRVAL1.

    Replaces any SPECSYS keyword with SPECSYS3 set from the measurement-set
    reference-frame code(s) in *specframe*, and shifts a negative RA
    reference value (CRVAL1) into the [0, 360) deg range.
    """
    from astropy.io import fits
    # Reference frame codes below from http://www.eso.org/~jagonzal/telcal/Juan-Ramon/SDMTables.pdf, Sec. 2.50 and
    # FITS header notation from
    # https://fits.gsfc.nasa.gov/standard40/fits_standard40aa-le.pdf
    frame_names = {
        0: 'LSRD',
        1: 'LSRK',
        2: 'GALACTOC',
        3: 'BARYCENT',
        4: 'GEOCENTR',
        5: 'TOPOCENT',
    }
    frame_code = np.unique(np.array(specframe))[0]
    specsys3 = frame_names[frame_code]
    if not os.path.exists(filename):
        caracal.log.warn(
            'Skipping SPECSYS fix for {0:s}. File does not exist.'.format(filename))
        return
    with fits.open(filename, mode='update') as cube:
        hdr = cube[0].header
        if 'specsys' in hdr:
            del hdr['specsys']
        hdr['specsys3'] = specsys3
        # wrap a negative RA reference value into [0, 360)
        if hdr['CRVAL1'] < 0:
            hdr['CRVAL1'] += 360.
@extras("astropy")
def make_pb_cube(filename, apply_corr, typ, dish_size, cutoff):
    """Create a primary-beam (PB) cube matching the WCS of *filename*.

    The PB is evaluated per channel as either a Gaussian (typ='gauss') or a
    cosine-squared model (typ='mauch') for a dish of diameter *dish_size*
    (m), and blanked (NaN) where it falls below *cutoff*. The PB cube is
    written next to the input, replacing 'image.fits' with 'pb.fits' in the
    filename; if *apply_corr* is set, the PB-corrected image is also written
    with suffix 'pb_corr.fits'. Missing input files are skipped with a warning.
    """
    from astropy.io import fits
    C = 2.99792458e+8  # m/s
    HI = 1.4204057517667e+9  # Hz
    if not os.path.exists(filename):
        caracal.log.warn(
            'Skipping primary beam cube for {0:s}. File does not exist.'.format(filename))
    else:
        with fits.open(filename) as cube:
            headcube = cube[0].header
            # Radial distance of each pixel from the reference pixel, in
            # degrees (scaled by |cdelt1|), replicated along the spectral axis.
            datacube = np.indices(
                (headcube['naxis2'], headcube['naxis1']), dtype=np.float32)
            datacube[0] -= (headcube['crpix2'] - 1)
            datacube[1] -= (headcube['crpix1'] - 1)
            datacube = np.sqrt((datacube**2).sum(axis=0))
            datacube.resize((1, datacube.shape[0], datacube.shape[1]))
            datacube = np.repeat(datacube,
                                 headcube['naxis3'],
                                 axis=0) * np.abs(headcube['cdelt1'])
            cdelt3 = float(headcube['cdelt3'])
            crval3 = float(headcube['crval3'])
            # Convert radio velocity to frequency if required
            if 'VRAD' in headcube['ctype3']:
                if 'restfreq' in headcube:
                    restfreq = float(headcube['restfreq'])
                else:
                    restfreq = HI  # assume HI if no rest frequency in header
                cdelt3 = - restfreq * cdelt3 / C
                crval3 = restfreq * (1 - crval3 / C)
            # Per-channel frequency axis (Hz) from the (possibly converted) WCS
            freq = (crval3 + cdelt3 * (np.arange(headcube['naxis3'], dtype=np.float32) -
                                       headcube['crpix3'] + 1))
            if typ == 'gauss':
                # Gaussian PB with FWHM = 17.52 deg / (freq/GHz) / dish_size
                # (17.52 ~ 1.02 * (c / 1 GHz) in deg); /2.355 converts FWHM to sigma
                sigma_pb = 17.52 / (freq / 1e+9) / dish_size / 2.355
                sigma_pb.resize((sigma_pb.shape[0], 1, 1))
                datacube = np.exp(-datacube**2 / 2 / sigma_pb**2)
            elif typ == 'mauch':
                # NOTE(review): cosine-squared beam model with a hard-coded
                # FWHM of 57.5 arcmin at 1.5 GHz — presumably the Mauch et al.
                # MeerKAT L-band fit; confirm reference. Note dish_size is
                # NOT used in this branch.
                FWHM_pb = (57.5 / 60) * (freq / 1.5e9)**-1
                FWHM_pb.resize((FWHM_pb.shape[0], 1, 1))
                datacube = (np.cos(1.189 * np.pi * (datacube / FWHM_pb)) / (
                    1 - 4 * (1.189 * datacube / FWHM_pb)**2))**2
            # blank the PB below the requested cutoff level
            datacube[datacube < cutoff] = np.nan
            # restore a degenerate leading (Stokes) axis if the input had one
            if headcube['naxis'] == 4:
                datacube = np.expand_dims(datacube, 0)
            fits.writeto(filename.replace('image.fits', 'pb.fits'),
                         datacube, header=headcube, overwrite=True)
            if apply_corr:
                fits.writeto(filename.replace('image.fits', 'pb_corr.fits'),
                             cube[0].data / datacube, header=headcube, overwrite=True)  # Applying the primary beam correction
            caracal.log.info('Created primary beam cube FITS {0:s}'.format(
                filename.replace('image.fits', 'pb.fits')))
@extras("astropy")
def calc_rms(filename, linemaskname):
    """Return the RMS of a FITS cube, optionally excluding line emission.

    If *linemaskname* is None the RMS is computed over all non-NaN voxels of
    *filename* (returns None, after a log message, if the file is missing).
    Otherwise only channels in which the mask cube contains nonzero pixels
    are considered, and within those channels only voxels where the mask is
    zero, i.e. outside the detected line emission.
    """
    from astropy.io import fits
    if linemaskname is None:
        if not os.path.exists(filename):
            caracal.log.info(
                'Noise not determined in cube for {0:s}. File does not exist.'.format(filename))
        else:
            with fits.open(filename) as cube:
                datacube = cube[0].data
                y = datacube[~np.isnan(datacube)]
                # root-mean-square over all finite voxels
                return np.sqrt(np.sum(y * y, dtype=np.float64) / y.size)
    else:
        with fits.open(filename) as cube:
            datacube = cube[0].data
        with fits.open(linemaskname) as mask:
            datamask = mask[0].data
        # select channels that contain any nonzero mask pixel
        # (assumes 4D data/mask with axes [stokes, chan, y, x], summing over
        # the sky plane — TODO confirm against how the mask cube is produced)
        selchans = datamask.sum(axis=(2, 3)) > 0
        newcube = datacube[selchans]
        newmask = datamask[selchans]
        # RMS over unmasked voxels of the selected channels, ignoring NaNs
        y2 = newcube[newmask == 0]
        return np.sqrt(np.nansum(y2 * y2, dtype=np.float64) / y2.size)
@extras(packages="astropy")
def worker(pipeline, recipe, config):
import astropy
from astropy.io import fits
# Modules useful to calculate common barycentric frequency grid
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy.coordinates import EarthLocation
from astropy import constants
import astropy.units as units
wname = pipeline.CURRENT_WORKER
flags_before_worker = '{0:s}_{1:s}_before'.format(pipeline.prefix, wname)
flags_after_worker = '{0:s}_{1:s}_after'.format(pipeline.prefix, wname)
flag_main_ms = (pipeline.enable_task(config, 'flag_u_zeros') or pipeline.enable_task(config, 'sunblocker')) and not config['sunblocker']['use_mstransform']
flag_mst_ms = (pipeline.enable_task(config, 'sunblocker') and config['sunblocker']['use_mstransform']) or (pipeline.enable_task(config, 'flag_u_zeros') and config['flag_u_zeros']['use_mstransform']) or pipeline.enable_task(config, 'flag_mst_errors')
rewind_main_ms = config['rewind_flags']["enable"] and (config['rewind_flags']['mode'] == 'reset_worker' or config['rewind_flags']["version"] != 'null')
rewind_mst_ms = config['rewind_flags']["enable"] and (config['rewind_flags']['mode'] == 'reset_worker' or config['rewind_flags']["mstransform_version"] != 'null')
label = config['label_in']
line_name = config['line_name']
if label != '':
flabel = label
else:
flabel = label
all_targets, all_msfiles, ms_dict = pipeline.get_target_mss(flabel)
RA, Dec = [], []
firstchanfreq_all, chanw_all, lastchanfreq_all = [], [], []
restfreq = config['restfreq']
# distributed deconvolution settings
ncpu = config['ncpu']
if ncpu == 0:
ncpu = psutil.cpu_count()
else:
ncpu = min(ncpu, psutil.cpu_count())
nrdeconvsubimg = ncpu if config['make_cube']['wscl_nrdeconvsubimg'] == 0 else config['make_cube']['wscl_nrdeconvsubimg']
if nrdeconvsubimg == 1:
wscl_parallel_deconv = None
else:
wscl_parallel_deconv = int(np.ceil(max(config['make_cube']['npix']) / np.sqrt(nrdeconvsubimg)))
for i, msfile in enumerate(all_msfiles):
# Update pipeline attributes (useful if, e.g., channel averaging was
# performed by the split_data worker)
msinfo = pipeline.get_msinfo(msfile)
spw = msinfo['SPW']['NUM_CHAN']
caracal.log.info('MS #{0:d}: {1:s}'.format(i, msfile))
caracal.log.info(' {0:d} spectral windows, with NCHAN={1:s}'.format(len(spw), ','.join(map(str, spw))))
# Get first chan, last chan, chan width
chfr = msinfo['SPW']['CHAN_FREQ']
# To be done: add user selected spw
firstchanfreq = [ss[0] for ss in chfr]
lastchanfreq = [ss[-1] for ss in chfr]
chanwidth = [(ss[-1] - ss[0]) / (len(ss) - 1) for ss in chfr]
firstchanfreq_all.append(firstchanfreq), chanw_all.append(
chanwidth), lastchanfreq_all.append(lastchanfreq)
caracal.log.info(' CHAN_FREQ from {0:s} Hz to {1:s} Hz with average channel width of {2:s} Hz'.format(
','.join(map(str, firstchanfreq)), ','.join(map(str, lastchanfreq)), ','.join(map(str, chanwidth))))
tinfo = msinfo['FIELD']
targetpos = tinfo['REFERENCE_DIR']
while len(targetpos) == 1:
targetpos = targetpos[0]
tRA = targetpos[0] / np.pi * 180.
tDec = targetpos[1] / np.pi * 180.
RA.append(tRA)
Dec.append(tDec)
caracal.log.info(' Target RA, Dec for Doppler correction: {0:.3f} deg, {1:.3f} deg'.format(RA[i], Dec[i]))
# Find common barycentric frequency grid for all input .MS, or set it as
# requested in the config file
if pipeline.enable_task(config, 'mstransform') and pipeline.enable_task(config['mstransform'],
'doppler') and config['mstransform']['doppler']['changrid'] == 'auto':
firstchanfreq = list(itertools.chain.from_iterable(firstchanfreq_all))
chanw = list(itertools.chain.from_iterable(chanw_all))
lastchanfreq = list(itertools.chain.from_iterable(lastchanfreq_all))
teldict = {
'meerkat': [21.4430, -30.7130],
'gmrt': [73.9723, 19.1174],
'vla': [-107.6183633, 34.0783584],
'wsrt': [52.908829698, 6.601997592],
'atca': [-30.307665436, 149.550164466],
'askap': [116.5333, -16.9833],
}
tellocation = teldict[config['mstransform']['doppler']['telescope']]
telloc = EarthLocation.from_geodetic(tellocation[0], tellocation[1])
firstchanfreq_dopp, chanw_dopp, lastchanfreq_dopp = firstchanfreq, chanw, lastchanfreq
corr_order = False
if len(chanw) > 1:
if np.max(chanw) > 0 and np.min(chanw) < 0:
corr_order = True
for i, msfile in enumerate(all_msfiles):
msinfo = '{0:s}/{1:s}-obsinfo.txt'.format(pipeline.msdir, os.path.splitext(msfile)[0])
with open(msinfo, 'r') as searchfile:
for longdatexp in searchfile:
if "Observed from" in longdatexp:
dates = longdatexp
matches = re.findall(
r'(\d{2}[- ](\d{2}|January|Jan|February|Feb|March|Mar|April|Apr|May|May|June|Jun|July|Jul|August|Aug|September|Sep|October|Oct|November|Nov|December|Dec)[\- ]\d{2,4})',
dates)
obsstartdate = str(matches[0][0])
obsdate = datetime.datetime.strptime(
obsstartdate, '%d-%b-%Y').strftime('%Y-%m-%d')
targetpos = SkyCoord(
RA[i], Dec[i], frame='icrs', unit='deg')
v = targetpos.radial_velocity_correction(
kind='barycentric', obstime=Time(obsdate), location=telloc).to('km/s')
corr = np.sqrt((constants.c - v) / (constants.c + v))
if corr_order:
if chanw_dopp[0] > 0.:
firstchanfreq_dopp[i], chanw_dopp[i], lastchanfreq_dopp[i] = lastchanfreq_dopp[i] * \
corr, chanw_dopp[i] * corr, firstchanfreq_dopp[i] * corr
else:
firstchanfreq_dopp[i], chanw_dopp[i], lastchanfreq_dopp[i] = firstchanfreq_dopp[i] * \
corr, chanw_dopp[i] * corr, lastchanfreq_dopp[i] * corr
else:
firstchanfreq_dopp[i], chanw_dopp[i], lastchanfreq_dopp[i] = firstchanfreq_dopp[i] * \
corr, chanw_dopp[i] * corr, lastchanfreq_dopp[i] * corr # Hz, Hz, Hz
# WARNING: the following line assumes a single SPW for the line data
# being processed by this worker!
if np.min(chanw_dopp) < 0:
comfreq0, comfreql, comchanw = np.min(firstchanfreq_dopp), np.max(
lastchanfreq_dopp), -1 * np.max(np.abs(chanw_dopp))
# safety measure to avoid wrong Doppler settings due to change of
# Doppler correction during a day
comfreq0 += comchanw
# safety measure to avoid wrong Doppler settings due to change of
# Doppler correction during a day
comfreql -= comchanw
else:
comfreq0, comfreql, comchanw = np.max(firstchanfreq_dopp), np.min(
lastchanfreq_dopp), np.max(chanw_dopp)
# safety measure to avoid wrong Doppler settings due to change of
# Doppler correction during a day
comfreq0 += comchanw
# safety measure to avoid wrong Doppler settings due to change of
# Doppler correction during a day
comfreql -= comchanw
nchan_dopp = int(np.floor(((comfreql - comfreq0) / comchanw))) + 1
comfreq0 = '{0:.3f}Hz'.format(comfreq0)
comchanw = '{0:.3f}Hz'.format(comchanw)
caracal.log.info(
'Calculated common Doppler-corrected channel grid for all input .MS: {0:d} channels starting at {1:s} and with channel width {2:s}.'.format(
nchan_dopp, comfreq0, comchanw))
if pipeline.enable_task(config, 'make_cube') and config['make_cube']['image_with'] == 'wsclean' and corr_order:
caracal.log.error('wsclean requires a consistent ordering of the frequency axis across multiple MSs')
caracal.log.error('(all increasing or all decreasing). Use casa_image if this is not the case.')
raise caracal.BadDataError("inconsistent frequency axis ordering across MSs")
elif pipeline.enable_task(config, 'mstransform') and pipeline.enable_task(config['mstransform'], 'doppler') and config['mstransform']['doppler']['changrid'] != 'auto':
if len(config['mstransform']['doppler']['changrid'].split(',')) != 3:
caracal.log.error(
'Incorrect format for mstransform:doppler:changrid in the .yml config file.')
caracal.log.error(
'Current setting is mstransform:doppler:changrid:"{0:s}"'.format(
config['mstransform']['doppler']['changrid']))
caracal.log.error(
'Expected "nchan,chan0,chanw" (note the commas) where nchan is an integer, and chan0 and chanw must include units appropriate for the chosen mstransform:mode')
raise caracal.ConfigurationError("can't parse mstransform:doppler:changrid setting")
nchan_dopp, comfreq0, comchanw = config['mstransform']['doppler']['changrid'].split(
',')
nchan_dopp = int(nchan_dopp)
caracal.log.info(
'Set requested Doppler-corrected channel grid for all input .MS: {0:d} channels starting at {1:s} and with channel width {2:s}.'.format(
nchan_dopp, comfreq0, comchanw))
elif pipeline.enable_task(config, 'mstransform'):
nchan_dopp, comfreq0, comchanw = None, None, None
for i, msname in enumerate(all_msfiles):
# Write/rewind flag versions only if flagging tasks are being
# executed on these .MS files, or if the user asks to rewind flags
if flag_main_ms or rewind_main_ms:
available_flagversions = manflags.get_flags(pipeline, msname)
if rewind_main_ms:
if config['rewind_flags']['mode'] == 'reset_worker':
version = flags_before_worker
stop_if_missing = False
elif config['rewind_flags']['mode'] == 'rewind_to_version':
version = config['rewind_flags']['version']
if version == 'auto':
version = flags_before_worker
stop_if_missing = True
if version in available_flagversions:
if flags_before_worker in available_flagversions and available_flagversions.index(flags_before_worker) < available_flagversions.index(version) and not config['overwrite_flagvers']:
manflags.conflict('rewind_too_little', pipeline, wname, msname, config, flags_before_worker, flags_after_worker)
substep = 'version-{0:s}-ms{1:d}'.format(version, i)
manflags.restore_cflags(pipeline, recipe, version, msname, cab_name=substep)
if version != available_flagversions[-1]:
substep = 'delete-flag_versions-after-{0:s}-ms{1:d}'.format(version, i)
manflags.delete_cflags(pipeline, recipe,
available_flagversions[available_flagversions.index(version) + 1],
msname, cab_name=substep)
if version != flags_before_worker and flag_main_ms:
substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
manflags.add_cflags(pipeline, recipe, flags_before_worker,
msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
elif stop_if_missing:
manflags.conflict('rewind_to_non_existing', pipeline, wname, msname, config, flags_before_worker, flags_after_worker)
elif flag_main_ms:
substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
manflags.add_cflags(pipeline, recipe, flags_before_worker,
msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
else:
if flags_before_worker in available_flagversions and not config['overwrite_flagvers']:
manflags.conflict('would_overwrite_bw', pipeline, wname, msname, config, flags_before_worker, flags_after_worker)
else:
substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
manflags.add_cflags(pipeline, recipe, flags_before_worker,
msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
if pipeline.enable_task(config, 'subtractmodelcol'):
# Check if a model subtraction has already been done
t = table('{0:s}/{1:s}'.format(pipeline.msdir, msname), readonly=False)
try:
nModelSub = t.getcolkeyword('CORRECTED_DATA', 'modelSub')
except RuntimeError:
nModelSub = 0
if (nModelSub <= -1) & (config['subtractmodelcol']['force'] == False):
caracal.log.error(f'The model has been subtracted {np.abs(nModelSub)} times.')
caracal.log.error('Exiting CARACal.')
raise caracal.PlayingWithFire("I am very confident you shouldn't be doing this. If you know better, use the 'force' option.")
if (nModelSub <= -1) & (config['subtractmodelcol']['force']):
caracal.log.warn(f'The model has been subtracted {np.abs(nModelSub)} times.')
caracal.log.warn('You have chosen to force another model subtraction.')
caracal.log.warn('God speed!')
step = 'modelsub-ms{:d}'.format(i)
recipe.add('cab/msutils', step,
{
"command": 'sumcols',
"msname": msname,
"subtract": True,
"col1": 'CORRECTED_DATA',
"col2": 'MODEL_DATA',
"column": 'CORRECTED_DATA'
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Subtract model column'.format(step))
t.putcolkeyword('CORRECTED_DATA', 'modelSub', nModelSub - 1)
t.close()
if pipeline.enable_task(config, 'addmodelcol'):
# Check if a model addition has already been done
t = table('{0:s}/{1:s}'.format(pipeline.msdir, msname), readonly=False)
try:
nModelSub = t.getcolkeyword('CORRECTED_DATA', 'modelSub')
except RuntimeError:
nModelSub = 0
if (nModelSub >= 0) & (config['addmodelcol']['force'] == False):
caracal.log.error(f'The model has been added {np.abs(nModelSub)} times.')
caracal.log.error('Exiting CARACal.')
raise caracal.PlayingWithFire("I am very confident you shouldn't be doing this. If you know better, use the 'force' option.")
if (nModelSub >= 0) & (config['addmodelcol']['force']):
caracal.log.warn(f'The model has been added {np.abs(nModelSub)} times.')
caracal.log.warn('You have chosen to force another model addition.')
caracal.log.warn('God speed!')
step = 'modeladd-ms{:d}'.format(i)
recipe.add('cab/msutils', step,
{
"command": 'sumcols',
"msname": msname,
"col1": 'CORRECTED_DATA',
"col2": 'MODEL_DATA',
"column": 'CORRECTED_DATA'
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Add model column'.format(step))
t.putcolkeyword('CORRECTED_DATA', 'modelSub', nModelSub + 1)
t.close()
msname_mst = add_ms_label(msname, "mst")
msname_mst_base = os.path.splitext(msname_mst)[0]
flagv = msname_mst + ".flagversions"
summary_file = f'{msname_mst_base}-summary.json'
obsinfo_file = f'{msname_mst_base}-obsinfo.txt'
if pipeline.enable_task(config, 'mstransform'):
# Set UVLIN fit channel range
if pipeline.enable_task(config['mstransform'], 'uvlin') and config['mstransform']['uvlin']['exclude_known_sources']:
C = 2.99792458e+5 # km/s
chanfreqs = np.arange(firstchanfreq_all[i][0], lastchanfreq_all[i][0] + chanw_all[i][0], chanw_all[i][0])
chanids = np.arange(chanfreqs.shape[0])
linechans = chanids < 0 # Array of False's used to build the fitspw settings
line_id, line_ra, line_dec, line_vmin, line_vmax, line_flux = np.loadtxt('{0:s}/{1:s}'.format(pipeline.input, config['mstransform']['uvlin']['known_sources_cat']), dtype='str', unpack=True)
line_ra = astropy.coordinates.Angle(line_ra, unit='hour').degree
line_dec = astropy.coordinates.Angle(line_dec, unit='degree').degree
line_flux = line_flux.astype(float)
line_fmax = (units.Quantity(config['restfreq']) / ((line_vmin.astype(float) - config['mstransform']['uvlin']['known_sources_dv']) / C + 1)).to_value(units.hertz)
line_fmin = (units.Quantity(config['restfreq']) / ((line_vmax.astype(float) + config['mstransform']['uvlin']['known_sources_dv']) / C + 1)).to_value(units.hertz)
distance = 180 / np.pi * np.arccos(np.sin(Dec[i] / 180 * np.pi) * np.sin(line_dec / 180 * np.pi) +
np.cos(Dec[i] / 180 * np.pi) * np.cos(line_dec / 180 * np.pi) * np.cos((RA[i] - line_ra) / 180 * np.pi))
# Select line sources:
# within the search radius;
# above the line flux threashold (not PB-corrected);
# and (at least partly) within the MS frequency range.
line_selected = (distance < config['mstransform']['uvlin']['known_sources_radius']) *\
(line_flux >= config['mstransform']['uvlin']['known_sources_flux']) *\
((line_fmin >= chanfreqs.min()) * (line_fmin <= chanfreqs.max()) +
(line_fmax >= chanfreqs.min()) * (line_fmax <= chanfreqs.max()))
line_id, line_fmin, line_fmax = line_id[line_selected], line_fmin[line_selected], line_fmax[line_selected]
line_chanmin, line_chanmax = [], []
caracal.log.info('Excluding the following line sources and channel intervals from the UVLIN fit:')
for ll in range(line_id.shape[0]):
if line_fmin[ll] < chanfreqs[0]:
line_chanmin.append(chanids[0])
else:
line_chanmin.append(chanids[chanfreqs < line_fmin[ll]].max())
if line_fmax[ll] > chanfreqs[-1]:
line_chanmax.append(chanids[-1])
else:
line_chanmax.append(chanids[chanfreqs > line_fmax[ll]].min())
caracal.log.info(' {0:20s}: {1:5d} - {2:5d}'.format(line_id[ll], line_chanmin[ll], line_chanmax[ll]))
linechans += (chanids >= line_chanmin[ll]) * (chanids <= line_chanmax[ll])
autofitchans = ~linechans
if config['mstransform']['uvlin']['fitspw']:
caracal.log.info('Combining the above channel intervals with the user input {0:s}'.format(config['mstransform']['uvlin']['fitspw']))
userfitchans = [qq.split(';') for qq in config['mstransform']['uvlin']['fitspw'].split(':')[1::2]]
while len(userfitchans) > 1:
userfitchans[0] = userfitchans[0] + userfitchans[1]
del (userfitchans[1])
userfitchans = [list(map(int, qq.split('~'))) for qq in userfitchans[0]]
userfitchans = np.array([(chanids >= qq[0]) * (chanids <= qq[1]) for qq in userfitchans]).sum(axis=0).astype('bool')
autofitchans *= userfitchans
fitspw = '0~' if autofitchans[0] else ''
for cc in chanids[1:]:
if not autofitchans[cc - 1] and autofitchans[cc] and (not fitspw or fitspw[-1] == ';'):
fitspw += '{0:d}~'.format(cc)
elif autofitchans[cc - 1] and not autofitchans[cc]:
fitspw += '{0:d};'.format(cc - 1)
if not fitspw:
raise caracal.BadDataError('No channels available for UVLIN fit.')
elif fitspw[-1] == '~':
fitspw += '{0:d}'.format(chanids[-1])
elif fitspw[-1] == ';':
fitspw = fitspw[:-1]
fitspw = '0:{0:s}'.format(fitspw)
caracal.log.info('The UVLIN fit will be executed on the channels {0:s}'.format(fitspw))
else:
fitspw = config['mstransform']['uvlin']['fitspw']
# If the output of this run of mstransform exists, delete it first
remove_output_products((msname_mst, flagv, summary_file, obsinfo_file), directory=pipeline.msdir, log=log)
col = config['mstransform']['col']
step = 'mstransform-ms{:d}'.format(i)
recipe.add('cab/casa_mstransform',
step,
{"msname": msname,
"outputvis": msname_mst,
"regridms": pipeline.enable_task(config['mstransform'], 'doppler'),
"mode": config['mstransform']['doppler']['mode'],
"nchan": sdm.dismissable(nchan_dopp),
"start": sdm.dismissable(comfreq0),
"width": sdm.dismissable(comchanw),
"interpolation": 'nearest',
"datacolumn": col,
"restfreq": restfreq,
"outframe": config['mstransform']['doppler']['frame'],
"veltype": config['mstransform']['doppler']['veltype'],
"douvcontsub": pipeline.enable_task(config['mstransform'], 'uvlin'),
"fitspw": sdm.dismissable(fitspw),
"fitorder": config['mstransform']['uvlin']['fitorder'],
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Doppler tracking corrections'.format(step))
substep = 'save-{0:s}-ms{1:d}'.format('caracal_legacy', i)
manflags.add_cflags(pipeline, recipe, 'caracal_legacy', msname_mst,
cab_name=substep, overwrite=False)
if config['mstransform']['obsinfo']:
step = 'listobs-ms{:d}'.format(i)
recipe.add('cab/casa_listobs',
step,
{"vis": msname_mst,
"listfile": '{0:s}-obsinfo.txt:msfile'.format(msname_mst_base),
"overwrite": True,
},
input=pipeline.input,
output=pipeline.obsinfo,
label='{0:s}:: Get observation information ms={1:s}'.format(step,
msname_mst))
step = 'summary_json-ms{:d}'.format(i)
recipe.add(
'cab/msutils',
step,
{
"msname": msname_mst,
"command": 'summary',
"display": False,
"outfile": '{0:s}-summary.json:msfile'.format(msname_mst_base),
},
input=pipeline.input,
output=pipeline.obsinfo,
label='{0:s}:: Get observation information as a json file ms={1:s}'.format(
step,
msname_mst))
recipe.run()
recipe.jobs = []
if os.path.exists('{0:s}/{1:s}'.format(pipeline.msdir, msname_mst)):
mst_exist = True
else:
mst_exist = False
# Write/rewind flag versions of the mst .MS files only if they have just
# been created, their FLAG is being changed, or the user asks to rewind flags
if mst_exist and (pipeline.enable_task(config, 'mstransform') or flag_mst_ms or rewind_mst_ms):
available_flagversions = manflags.get_flags(pipeline, msname_mst)
if rewind_mst_ms:
if config['rewind_flags']['mode'] == 'reset_worker':
version = flags_before_worker
stop_if_missing = False
elif config['rewind_flags']['mode'] == 'rewind_to_version':
version = config['rewind_flags']['mstransform_version']
if version == 'auto':
version = flags_before_worker
stop_if_missing = True
if version in available_flagversions:
if flags_before_worker in available_flagversions and available_flagversions.index(flags_before_worker) < available_flagversions.index(version) and not config['overwrite_flagvers']:
manflags.conflict('rewind_too_little', pipeline, wname, msname_mst, config, flags_before_worker, flags_after_worker, read_version='mstransform_version')
substep = 'version_{0:s}_ms{1:d}'.format(version, i)
manflags.restore_cflags(pipeline, recipe, version, msname_mst, cab_name=substep)
if version != available_flagversions[-1]:
substep = 'delete-flag_versions-after-{0:s}-ms{1:d}'.format(version, i)
manflags.delete_cflags(pipeline, recipe,
available_flagversions[available_flagversions.index(version) + 1],
msname_mst, cab_name=substep)
if version != flags_before_worker and flag_mst_ms:
substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
manflags.add_cflags(pipeline, recipe, flags_before_worker,
msname_mst, cab_name=substep, overwrite=config['overwrite_flagvers'])
elif stop_if_missing:
manflags.conflict('rewind_to_non_existing', pipeline, wname, msname_mst, config, flags_before_worker, flags_after_worker, read_version='mstransform_version')
elif flag_mst_ms:
substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
manflags.add_cflags(pipeline, recipe, flags_before_worker,
msname_mst, cab_name=substep, overwrite=config['overwrite_flagvers'])
else:
if flags_before_worker in available_flagversions and not config['overwrite_flagvers']:
manflags.conflict('would_overwrite_bw', pipeline, wname, msname_mst, config, flags_before_worker, flags_after_worker, read_version='mstransform_version')
else:
substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
manflags.add_cflags(pipeline, recipe, flags_before_worker,
msname_mst, cab_name=substep, overwrite=config['overwrite_flagvers'])
if pipeline.enable_task(config, 'flag_mst_errors'):
step = 'flag_mst_errors-ms{0:d}'.format(i)
recipe.add('cab/autoflagger',
step,
{"msname": msname_mst,
"column": 'DATA',
"strategy": config['flag_mst_errors']['strategy'],
"indirect-read": True if config['flag_mst_errors']['readmode'] == 'indirect' else False,
"memory-read": True if config['flag_mst_errors']['readmode'] == 'memory' else False,
"auto-read-mode": True if config['flag_mst_errors']['readmode'] == 'auto' else False,
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: file ms={1:s}'.format(step, msname_mst))
recipe.run()
recipe.jobs = []
if pipeline.enable_task(config, 'flag_u_zeros'):
uZeros = flag_Uzeros.UzeroFlagger(config)
if config['flag_u_zeros']['use_mstransform']:
msname_Flag = msname_mst
else:
msname_Flag = msname
uZeros.run_flagUzeros(pipeline, all_targets, msname_Flag)
if pipeline.enable_task(config, 'sunblocker'):
if config['sunblocker']['use_mstransform']:
msnamesb = msname_mst
else:
msnamesb = msname
step = 'sunblocker-ms{0:d}'.format(i)
recipe.add("cab/sunblocker", step,
{
"command": "phazer",
"inset": msnamesb,
"outset": msnamesb,
"imsize": config['sunblocker']['imsize'],
"cell": config['sunblocker']['cell'],
"pol": 'i',
"threshmode": 'mad',
"threshold": config['sunblocker']['thr'],
"mode": 'all',
"radrange": 0,
"angle": 0,
"show": pipeline.prefix + '.sunblocker.svg',
"verb": True,
"dryrun": False,
"uvmax": config['sunblocker']['uvmax'],
"uvmin": config['sunblocker']['uvmin'],
"vampirisms": config['sunblocker']['vampirisms'],
"flagonlyday": config['sunblocker']['flagonlyday'],
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Block out sun'.format(step))
if flag_main_ms:
substep = 'save-{0:s}-ms{1:d}'.format(flags_after_worker, i)
manflags.add_cflags(pipeline, recipe, flags_after_worker, msname,
cab_name=substep, overwrite=config['overwrite_flagvers'])
if mst_exist and flag_mst_ms:
substep = 'save-{0:s}-mst{1:d}'.format(flags_after_worker, i)
manflags.add_cflags(pipeline, recipe, flags_after_worker, msname_mst,
cab_name=substep, overwrite=config['overwrite_flagvers'])
recipe.run()
recipe.jobs = []
# Move the sunblocker plots to the diagnostic_plots
if pipeline.enable_task(config, 'sunblocker'):
sunblocker_plots = glob.glob(
"{0:s}/*_{1:s}.sunblocker.svg".format(pipeline.output, pipeline.prefix))
for plot in sunblocker_plots:
shutil.copyfile(plot, '{0:s}/{1:s}'.format(pipeline.diagnostic_plots, os.path.basename(plot)))
os.remove(plot)
if pipeline.enable_task(config, 'predict_noise'):
tsyseff = config['predict_noise']['tsyseff']
diam = config['predict_noise']['diam']
kB = 1380.6 # Boltzmann constant (Jy m^2 / K)
Aant = np.pi * (diam / 2)**2 # collecting area of 1 antenna (m^2)
SEFD = 2 * kB * tsyseff / Aant # system equivalent flux density (Jy)
caracal.log.info('Predicting natural noise of line cubes (Stokes I, single channel of MS file) for Tsys/eff = {0:.1f} K, diam = {1:.1f} m -> SEFD = {2:.1f} Jy'.format(tsyseff, diam, SEFD))
for tt, target in enumerate(all_targets):
if config['make_cube']['use_mstransform']:
mslist = [add_ms_label(ms, "mst") for ms in ms_dict[target]]
else:
mslist = ms_dict[target]
caracal.log.info(' Target #{0:d}: {1:}, files {2:}'.format(tt, target, mslist))
noisy.PredictNoise(['{0:s}/{1:s}'.format(pipeline.msdir, mm) for mm in mslist], str(tsyseff), diam, target, verbose=2)
if pipeline.enable_task(config, 'make_cube') and config['make_cube']['image_with'] == 'wsclean':
nchans_all, specframe_all = [], []
label = config['label_in']
if label != '':
flabel = label
else:
flabel = label
caracal.log.info('Collecting spectral info on MS files being imaged')
if config['make_cube']['use_mstransform']:
for i, msfile in enumerate(all_msfiles):
# Get channelisation of _mst.ms file
msbase, ext = os.path.splitext(msfile)
msinfo = pipeline.get_msinfo(f"{msbase}_mst{ext}")
spw = msinfo['SPW']['NUM_CHAN']
nchans = spw
nchans_all.append(nchans)
caracal.log.info('MS #{0:d}: {1:s}'.format(i, msfile.replace('.ms', '_mst.ms')))
caracal.log.info(' {0:d} spectral windows, with NCHAN={1:s}'.format(
len(spw), ','.join(map(str, spw))))
# Get first chan, last chan, chan width
chfr = msinfo['SPW']['CHAN_FREQ']
firstchanfreq = [ss[0] for ss in chfr]
lastchanfreq = [ss[-1] for ss in chfr]
chanwidth = [(ss[-1] - ss[0]) / (len(ss) - 1) for ss in chfr]
caracal.log.info(' CHAN_FREQ from {0:s} Hz to {1:s} Hz with average channel width of {2:s} Hz'.format(
','.join(map(str, firstchanfreq)), ','.join(map(str, lastchanfreq)), ','.join(map(str, chanwidth))))
# Get spectral reference frame
specframe = msinfo['SPW']['MEAS_FREQ_REF']
specframe_all.append(specframe)
caracal.log.info(' The spectral reference frame is {0:}'.format(specframe))
else:
for i, msfile in enumerate(all_msfiles):
msinfo = pipeline.get_msinfo(msfile)
spw = msinfo['SPW']['NUM_CHAN']
nchans = spw
nchans_all.append(nchans)
caracal.log.info('MS #{0:d}: {1:s}'.format(i, msfile))
caracal.log.info(' {0:d} spectral windows, with NCHAN={1:s}'.format(
len(spw), ','.join(map(str, spw))))
specframe = msinfo['SPW']['MEAS_FREQ_REF']
specframe_all.append(specframe)
caracal.log.info(
' The spectral reference frame is {0:}'.format(specframe))
spwid = config['make_cube']['spwid']
nchans = config['make_cube']['nchans']
if nchans == 0:
# Assuming user wants same spw for all msfiles and they have same
# number of channels
nchans = nchans_all[0][spwid]
# Assuming user wants same spw for all msfiles and they have same
# specframe
specframe_all = [ss[spwid] for ss in specframe_all][0]
firstchan = config['make_cube']['firstchan']
binchans = config['make_cube']['binchans']
channelrange = [firstchan, firstchan + nchans * binchans]
npix = config['make_cube']['npix']
if len(npix) == 1:
npix = [npix[0], npix[0]]
# Construct weight specification
if config['make_cube']['weight'] == 'briggs':
weight = 'briggs {0:.3f}'.format(
config['make_cube']['robust'])
else:
weight = config['make_cube']['weight']
wscl_niter = config['make_cube']['wscl_sofia_niter']
wscl_tol = config['make_cube']['wscl_sofia_converge']
line_image_opts = {
"weight": weight,
"taper-gaussian": str(config['make_cube']['taper']),
"pol": config['make_cube']['stokes'],
"npix": npix,
"padding": config['make_cube']['padding'],
"scale": config['make_cube']['cell'],
"channelsout": nchans,
"channelrange": channelrange,
"niter": config['make_cube']['niter'] if not config['make_cube']['wscl_onlypsf'] else 1,
"gain": config['make_cube']['gain'],
"mgain": config['make_cube']['wscl_mgain'],
"auto-threshold": config['make_cube']['wscl_auto_thr'],
"multiscale": config['make_cube']['wscl_multiscale'],
"multiscale-scale-bias": config['make_cube']['wscl_multiscale_bias'],
"parallel-deconvolution": sdm.dismissable(wscl_parallel_deconv),
"no-update-model-required": config['make_cube']['wscl_noupdatemod'],
"make-psf-only": config['make_cube']['wscl_onlypsf'],
}
if config['make_cube']['wscl_multiscale_scales']:
line_image_opts.update({"multiscale-scales": list(map(int, config['make_cube']['wscl_multiscale_scales'].split(',')))})
if config['make_cube']['wscl_beam'] != [0, 0, 0]:
line_image_opts.update({"beamshape": config['make_cube']['wscl_beam']})
for tt, target in enumerate(all_targets):
caracal.log.info('Starting to make line cube for target {0:}'.format(target))
if config['make_cube']['use_mstransform']:
mslist = [add_ms_label(ms, "mst") for ms in ms_dict[target]]
else:
mslist = ms_dict[target]
field = utils.filter_name(target)
line_clean_mask_file = None
rms_values = []
if 'fitsmask' in line_image_opts:
del (line_image_opts['fitsmask'])
if 'auto-mask' in line_image_opts:
del (line_image_opts['auto-mask'])
for j in range(1, wscl_niter + 1):
cube_path = "{0:s}/cube_{1:d}".format(
pipeline.cubes, j)
if not os.path.exists(cube_path):
os.mkdir(cube_path)
cube_dir = '{0:s}/cube_{1:d}'.format(
get_relative_path(pipeline.cubes, pipeline), j)
line_image_opts.update({
"msname": mslist,
"prefix": '{0:s}/{1:s}_{2:s}_{3:s}_{4:d}'.format(
cube_dir, pipeline.prefix, field, line_name, j)
})
if j == 1:
own_line_clean_mask = config['make_cube']['wscl_user_clean_mask']
if own_line_clean_mask:
'''
MAKE HDR FILE FOR REGRIDDING THE USER SUPPLIED MASK
'''
doProj = False
doSpec = False
C = 2.99792458e+8 # m/s
femit = [r.strip() for r in re.split('([-+]?\d+\.\d+)|([-+]?\d+)', restfreq.strip()) if r is not None and r.strip() != '']
femit = (eval(femit[0]) * units.Unit(femit[1])).to(units.Hz).value # Hz
t = summary_file if config['make_cube']['use_mstransform'] else summary_file.replace('_mst', '')
with open('{}/{}'.format(pipeline.msdir, t)) as f:
obsDict = json.load(f)
raTarget = np.round(obsDict['FIELD']['REFERENCE_DIR'][0][0][0] / np.pi * 180, 5)
decTarget = np.round(obsDict['FIELD']['REFERENCE_DIR'][0][0][1] / np.pi * 180, 5)
cubeHeight = config['make_cube']['npix'][0]
cubeWidth = config['make_cube']['npix'][1] if len(config['make_cube']['npix']) == 2 else cubeHeight
preGridMask = own_line_clean_mask
caracal.log.info('+++++++++++++++++++++++++++++')
caracal.log.info('Checking Mask dimensions')
caracal.log.info('doProj = {}'.format(doProj))
caracal.log.info('RA = {}'.format(raTarget))
caracal.log.info('Dec = {}'.format(decTarget))
caracal.log.info('CubeHeight (px) = {}'.format(cubeHeight))
caracal.log.info('CubeWidht (px) = {}'.format(cubeWidth))
postGridMask = preGridMask.replace('.fits', '_{}_regrid.fits'.format(pipeline.prefix))
with fits.open('{}/{}'.format(pipeline.masking, preGridMask)) as hdul:
if hdul[0].header["CRVAL1"] < 0:
hdul[0].header["CRVAL1"] += 360.
caracal.log.info('MaskRA = {}'.format(hdul[0].header["CRVAL1"]))
caracal.log.info('MaskDec = {}'.format(hdul[0].header["CRVAL2"]))
caracal.log.info('MaskWidth = {}'.format(hdul[0].header["NAXIS1"]))
caracal.log.info('MaskHeight = {}'.format(hdul[0].header["NAXIS2"]))
if hdul[0].header["NAXIS1"] != cubeWidth:
caracal.log.info('NAXIS1')
doProj = True
if hdul[0].header["NAXIS2"] != cubeHeight:
caracal.log.info('NAXIS2')
doProj = True
if np.round(hdul[0].header["CRVAL1"], 5) != np.round(raTarget, 5):
caracal.log.info('CRVAL1')
doProj = True
if np.round(hdul[0].header["CRVAL2"], 5) != np.round(decTarget, 5):
doProj = True
caracal.log.info('CRVAL2')
if int(hdul[0].header['NAXIS3']) > int(nchans):
doSpec = True
else:
dpSpec = None # this should work in both a request for a subset, and if the cube is to be binned.
if 'FREQ' in hdul[0].header['CTYPE3']:
cdelt = round(hdul[0].header['CDELT3'], 2)
else:
cdelt = round(hdul[0].header['CDELT3'] * femit / (-C), 2)
if np.round(cdelt,1) > np.round(chanwidth[0],1):
doSpec = True
elif doProj:
pass
else:
doSpec = None # likely will fail/produce incorrect result in the case that the ms file and mask were not created with the same original spectral grid.
if doProj:
ax3param = []
for key in ['NAXIS3', 'CTYPE3', 'CRPIX3', 'CRVAL3', 'CDELT3']:
ax3param.append(hdul[0].header[key])
caracal.log.info('doSpecProj = {}'.format(doSpec))
caracal.log.info('+++++++++++++++++++++++++++++')
caracal.log.info('doSpaceProj = {}'.format(doProj))
caracal.log.info('+++++++++++++++++++++++++++++')
if doProj:
'''
MAKE HDR FILE FOR REGRIDDING THE USER SUPPLIED MASK AND REPROJECT
'''
with open('{}/tmp.hdr'.format(pipeline.masking), 'w') as file:
file.write('SIMPLE = T\n')
file.write('BITPIX = -64\n')
file.write('NAXIS = 2\n')
file.write('NAXIS1 = {}\n'.format(cubeWidth))
file.write('CTYPE1 = \'RA---SIN\'\n')
file.write('CRVAL1 = {}\n'.format(raTarget))
file.write('CRPIX1 = {}\n'.format(cubeWidth / 2 + 1))
file.write('CDELT1 = {}\n'.format(-1 * config['make_cube']['cell'] / 3600.))
file.write('NAXIS2 = {}\n'.format(cubeHeight))
file.write('CTYPE2 = \'DEC--SIN\'\n')
file.write('CRVAL2 = {}\n'.format(decTarget))
file.write('CRPIX2 = {}\n'.format(cubeHeight / 2 + 1))
file.write('CDELT2 = {}\n'.format(config['make_cube']['cell'] / 3600.))
file.write('EXTEND = T\n')
file.write('EQUINOX = 2000.0\n')
file.write('END\n')
if os.path.exists('{}/{}'.format(pipeline.masking, postGridMask)):
os.remove('{}/{}'.format(pipeline.masking, postGridMask))
with fits.open('{}/{}'.format(pipeline.masking, preGridMask)) as hdul:
if np.amax(hdul[0].data) > 1:
mask = np.where(hdul[0].data > 0)
hdul[0].data[mask] = 1
preGridMaskNew = preGridMask.replace('.fits', '_01.fits')
hdul.writeto('{}/{}'.format(pipeline.masking, preGridMaskNew), overwrite=True)
preGridMask = preGridMaskNew
caracal.log.info('Reprojecting mask {} to match the grid of the cube.'.format(preGridMask))
step = 'reprojectMask-field{}'.format(tt)
recipe.add('cab/mProjectCube', step,
{
"in.fits": preGridMask,
"out.fits": postGridMask,
"hdr.template": 'tmp.hdr',
},
input=pipeline.masking,
output=pipeline.masking,
label='{0:s}:: Reprojecting user input mask {1:s} to match the grid of the cube'.format(step, preGridMask))
# In order to make sure that we actually find stuff in the images we execute the rec ipe here
recipe.run()
# Empty job que after execution
recipe.jobs = []
if not os.path.exists('{}/{}'.format(pipeline.masking, postGridMask)):
raise IOError(
"The regridded mask {0:s} does not exist. The original mask likely has no overlap with the cube.".format(postGridMask))
with fits.open('{}/{}'.format(pipeline.masking, postGridMask), mode='update') as hdul:
for i, key in enumerate(['NAXIS3', 'CTYPE3', 'CRPIX3', 'CRVAL3', 'CDELT3']):
hdul[0].header[key] = ax3param[i]
axDict = {'1': [2, cubeWidth],
'2': [1, cubeHeight]}
for i in ['1', '2']:
cent, nax = hdul[0].header['CRPIX' + i], hdul[0].header['NAXIS' + i]
if cent < axDict[i][1] / 2 + 1:
delt = int(axDict[i][1] / 2 + 1 - cent)
if i == '1':
toAdd = np.zeros([hdul[0].header['NAXIS3'], hdul[0].data.shape[1], delt])
else:
toAdd = np.zeros([hdul[0].header['NAXIS3'], delt, hdul[0].data.shape[2]])
hdul[0].data = np.concatenate([toAdd, hdul[0].data], axis=axDict[i][0])
hdul[0].header['CRPIX' + i] = cent + delt
if hdul[0].data.shape[axDict[i][0]] < axDict[i][1]:
delt = int(axDict[i][1] - hdul[0].data.shape[axDict[i][0]])
if i == '1':
toAdd = np.zeros([hdul[0].header['NAXIS3'], hdul[0].data.shape[1], delt])
else:
toAdd = np.zeros([hdul[0].header['NAXIS3'], delt, hdul[0].data.shape[2]])
hdul[0].data = np.concatenate([hdul[0].data, toAdd], axis=axDict[i][0])
                                    if hdul[0].data.shape[axDict[i][0]] > axDict[i][1]:
                                        delt = int(hdul[0].data.shape[axDict[i][0]] - axDict[i][1])
                                        # NOTE(review): '-delt' without a colon is integer indexing —
                                        # it selects a single plane and drops an axis, turning the
                                        # 3-D mask array into 2-D. Trimming 'delt' excess pixels
                                        # presumably needs a slice (e.g. ':-delt' to mirror the
                                        # zero-padding appended above) — confirm intended crop side
                                        # and fix.
                                        hdul[0].data = hdul[0].data[:, :, -delt] if i == '1' else hdul[0].data[:, -delt, :]
if cent > axDict[i][1] / 2 + 1:
hdul[0].header['CRPIX' + i] = hdul[0].data.shape[axDict[i][0]] / 2 + 1
hdul[0].data = np.around(hdul[0].data.astype(np.float32)).astype(np.int16)
try:
del hdul[0].header['EN']
except KeyError:
pass
hdul.flush()
line_image_opts.update({"fitsmask": '{0:s}/{1:s}:output'.format(
get_relative_path(pipeline.masking, pipeline), postGridMask.split('/')[-1])})
else:
if not doSpec:
line_image_opts.update({"fitsmask": '{0:s}/{1:s}:output'.format(
get_relative_path(pipeline.masking, pipeline), preGridMask.split('/')[-1])})
else:
pass
if doSpec:
gridMask = postGridMask if doProj else preGridMask
caracal.log.info('Reprojecting mask {} to match the spectral axis of the cube.'.format(gridMask))
if doProj:
hdul = fits.open('{}/{}'.format(pipeline.masking, postGridMask), mode='update')
else:
hdul = fits.open('{}/{}'.format(pipeline.masking, preGridMask))
if os.path.exists('{}/{}'.format(pipeline.masking, postGridMask)):
os.remove('{}/{}'.format(pipeline.masking, postGridMask))
if 'FREQ' in hdul[0].header['CTYPE3']:
# all in Hz
crval = firstchanfreq[0] + chanwidth[0] * firstchan
cdeltm = hdul[0].header['CDELT3']
cdelte = crval + nchans * binchans * chanwidth[0]
else:
# all in m/s
crval = C * (femit - (firstchanfreq[0] + chanwidth[0] * firstchan)) / femit
cdeltm = hdul[0].header['CDELT3'] * femit / (-C)
crvale = C * (femit - (firstchanfreq[0] + chanwidth[0] * firstchan + nchans * binchans * chanwidth[0])) / femit
cdelt = chanwidth[0] * binchans # in Hz
hdr = hdul[0].header
ax3 = np.arange(hdr['CRVAL3'] - hdr['CDELT3'] * (hdr['CRPIX3'] - 1), hdr['CRVAL3'] + hdr['CDELT3'] * (hdr['NAXIS3'] - hdr['CRPIX3'] + 1), hdr['CDELT3'])
if (np.max([crval, crvale]) <= np.max([ax3[0], ax3[-1]])) & (np.min([crval, crvale]) >= np.min([ax3[0], ax3[-1]])):
caracal.log.info("Requested channels are contained in mask {}.".format(gridMask))
idx = np.argmin(abs(ax3 - crval))
ide = np.argmin(abs(ax3 - crvale))
if cdelt > cdeltm:
hdul[0].data = hdul[0].data[idx:ide]
hdul[0].header['CRPIX3'] = 1
hdul[0].header['CRVAL3'] = crval
hdul[0].header['NAXIS3'] = nchans
hdul[0].header['CDELT3'] = hdul[0].header['CDELT3'] * binchans
if binchans > 1:
if (nchans % binchans) > 0:
rdata = (hdul[0].data[:-(nchans % binchans)]).reshape((nchans - 1 * (nchans % binchans), binchans, hdul[0].header['NAXIS1'], hdul[0].header['NAXIS2']))
rdata = np.nansum(rdata, axis=1)
rdata = np.concatenate((rdata, np.nansum(hdul[0].data[-(nchans % binchans):])), axis=0)
else:
rdata = (hdul[0].data).reshape(nchans, binchans, hdul[0].header['NAXIS1'], hdul[0].header['NAXIS2'])
rdata = np.nansum(rdata, axis=1)
rdata[rdata > 0] = 1
hdul[0].data = rdata
else:
pass
else:
rdata = np.zeros((nchans, hdr['NAXIS1'], hdr['NAXIS2']))
rr = int(cdeltm / cdelt)
for nn in range(nchans):
rdata[nn] = hdul[0].data[idx + nn // rr]
hdul[0].header['NAXIS3'] = nchans
hdul[0].header['CRPIX3'] = 1
hdul[0].header['CRVAL3'] = crval
hdul[0].header['CDELT3'] = hdul[0].header['CDELT3'] / rr
hdul[0].data = rdata
hdul[0].data = np.around(hdul[0].data.astype(np.float32)).astype(np.int16)
if doProj:
hdul.flush()
else:
hdul.writeto('{}/{}'.format(pipeline.masking, postGridMask))
line_image_opts.update({"fitsmask": '{0:s}/{1:s}:output'.format(
get_relative_path(pipeline.masking, pipeline), gridMask.split('/')[-1])})
else:
raise IOError("Requested channels are not contained in mask {}.".format(gridMask))
else:
if not doProj:
line_image_opts.update({"fitsmask": '{0:s}/{1:s}:output'.format(
get_relative_path(pipeline.masking, pipeline), preGridMask.split('/')[-1])})
else:
pass
step = 'make_cube-{0:s}-field{1:d}-iter{2:d}-with_user_mask'.format(line_name, tt, j)
else:
line_image_opts.update({"auto-mask": config['make_cube']['wscl_auto_mask']})
step = 'make_cube-{0:s}-field{1:d}-iter{2:d}-with_automasking'.format(line_name, tt, j)
else:
step = 'make_sofia_mask-field{0:d}-iter{1:d}'.format(tt, j - 1)
line_clean_mask = '{0:s}_{1:s}_{2:s}_{3:d}.image_clean_mask.fits:output'.format(
pipeline.prefix, field, line_name, j)
line_clean_mask_file = '{0:s}/{1:s}_{2:s}_{3:s}_{4:d}.image_clean_mask.fits'.format(
cube_path, pipeline.prefix, field, line_name, j)
cubename = '{0:s}_{1:s}_{2:s}_{3:d}.image.fits:input'.format(
pipeline.prefix, field, line_name, j - 1)
cubename_file = '{0:s}/{1:s}_{2:s}_{3:s}_{4:d}.image.fits'.format(
cube_path, pipeline.prefix, field, line_name, j - 1)
outmask = '{0:s}_{1:s}_{2:s}_{3:d}.image_clean'.format(
pipeline.prefix, field, line_name, j)
recipe.add('cab/sofia', step,
{
"import.inFile": cubename,
"steps.doFlag": False,
"steps.doScaleNoise": True,
"steps.doSCfind": True,
"steps.doMerge": True,
"steps.doReliability": False,
"steps.doParameterise": False,
"steps.doWriteMask": True,
"steps.doMom0": False,
"steps.doMom1": False,
"steps.doWriteCat": False,
"flag.regions": [],
"scaleNoise.statistic": 'mad',
"SCfind.threshold": 4,
"SCfind.rmsMode": 'mad',
"merge.radiusX": 3,
"merge.radiusY": 3,
"merge.radiusZ": 3,
"merge.minSizeX": 2,
"merge.minSizeY": 2,
"merge.minSizeZ": 2,
"writeCat.basename": outmask,
},
input=pipeline.cubes + '/cube_' + str(j - 1),
output=pipeline.output + '/' + cube_dir,
label='{0:s}:: Make SoFiA mask'.format(step))
recipe.run()
recipe.jobs = []
if not os.path.exists(line_clean_mask_file):
caracal.log.info(
'Sofia mask_' + str(j - 1) + ' was not found. Exiting and saving the cube')
j -= 1
break
step = 'make_cube-{0:s}-field{1:d}-iter{2:d}-with_SoFiA_mask'.format(line_name, tt, j)
line_image_opts.update({"fitsmask": '{0:s}/{1:s}'.format(cube_dir, line_clean_mask)})
if 'auto-mask' in line_image_opts:
del (line_image_opts['auto-mask'])
recipe.add('cab/wsclean',
step, line_image_opts,
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Image Line'.format(step))
recipe.run()
recipe.jobs = []
# delete line "MFS" images made by WSclean by averaging all channels
for mfs in glob.glob('{0:s}/{1:s}/{2:s}_{3:s}_{4:s}_{5:d}-MFS*fits'.format(
pipeline.output, cube_dir, pipeline.prefix, field, line_name, j)):
os.remove(mfs)
# Stack channels together into cubes and fix spectral frame
if config['make_cube']['wscl_make_cube']:
if config['make_cube']['wscl_onlypsf']:
imagetype = ['psf',]
elif not config['make_cube']['niter']:
imagetype = ['dirty', 'image']
else:
imagetype = ['dirty', 'image', 'psf', 'residual', 'model']
if config['make_cube']['wscl_mgain'] < 1.0:
imagetype.append('first-residual')
for mm in imagetype:
step = '{0:s}-cubestack-field{1:d}-iter{2:d}'.format(
mm.replace('-', '_'), tt, j)
if not os.path.exists('{6:s}/{0:s}/{1:s}_{2:s}_{3:s}_{4:d}-0000-{5:s}.fits'.format(
cube_dir, pipeline.prefix, field, line_name, j, mm, pipeline.output)):
caracal.log.warn('Skipping container {0:s}. Single channels do not exist.'.format(step))
else:
stacked_cube = '{0:s}/{1:s}_{2:s}_{3:s}_{4:d}.{5:s}.fits'.format(cube_dir,
pipeline.prefix, field, line_name, j, mm)
recipe.add(
'cab/fitstool',
step,
{
"file_pattern": '{0:s}/{1:s}_{2:s}_{3:s}_{4:d}-*-{5:s}.fits:output'.format(
cube_dir, pipeline.prefix, field, line_name,
j, mm),
"output": stacked_cube,
"stack": True,
"delete-files": True,
"fits-axis": 'FREQ',
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Make {1:s} cube from wsclean {1:s} channels'.format(
step,
mm.replace('-', '_')))
recipe.run()
recipe.jobs = []
# Replace channels that are single-valued (usually zero-ed) in the dirty cube with blanks
# in all cubes assuming that channels run along numpy axis 1 (axis 0 is for Stokes)
if not config['make_cube']['wscl_onlypsf']:
with fits.open('{0:s}/{1:s}'.format(pipeline.output, stacked_cube)) as stck:
cubedata = stck[0].data
cubehead = stck[0].header
if mm == 'dirty':
tobeblanked = (cubedata == np.nanmean(cubedata, axis=(0, 2, 3)).reshape((
1, cubedata.shape[1], 1, 1))).all(axis=(0, 2, 3))
cubedata[:, tobeblanked] = np.nan
fits.writeto('{0:s}/{1:s}'.format(pipeline.output, stacked_cube), cubedata, header=cubehead, overwrite=True)
caracal.log.info('Fixing the spectral system of all cubes for target {0:d}, iteration {1:d}'.format(tt, j))
for ss in ['dirty', 'psf', 'first-residual', 'residual', 'model', 'image']:
cubename = '{6:s}/{0:s}/{1:s}_{2:s}_{3:s}_{4:d}.{5:s}.fits'.format(
cube_dir, pipeline.prefix, field, line_name, j, ss, pipeline.output)
recipe.add(fix_specsys_ra,
'fixspecsysra-{0:s}-cube-field{1:d}-iter{2:d}'.format(ss.replace("_", "-"), tt, j),
{'filename': cubename,
'specframe': specframe_all, },
input=pipeline.input,
output=pipeline.output,
label='Fix spectral reference frame for cube {0:s}'.format(cubename))
recipe.run()
recipe.jobs = []
if not config['make_cube']['wscl_onlypsf']:
cubename_file = '{0:s}/cube_{1:d}/{2:s}_{3:s}_{4:s}_{1:d}.image.fits'.format(
pipeline.cubes, j, pipeline.prefix, field, line_name)
rms_values.append(calc_rms(cubename_file, line_clean_mask_file))
caracal.log.info('RMS = {0:.3e} Jy/beam for {1:s}'.format(rms_values[-1], cubename_file))
# if the RMS has decreased by a factor < wscl_tol compared to the previous cube then cleaning is no longer improving the cube and we can stop
if len(rms_values) > 1 and wscl_tol and rms_values[-2] / rms_values[-1] <= wscl_tol:
caracal.log.info('The cube RMS noise has decreased by a factor <= {0:.3f} compared to the previous WSclean iteration. Noise convergence achieved.'.format(wscl_tol))
break
# If the RMS has decreased by a factor > wscl_tol compared to the previous cube then cleaning is still improving the cube and it's worth continuing with a new SoFiA + WSclean iteration
elif len(rms_values) > 1 and wscl_tol and rms_values[-2] / rms_values[-1] > wscl_tol:
# rms_old = rms_new
caracal.log.info('The cube RMS noise has decreased by a factor > {0:.3f} compared to the previous WSclean iteration. The noise has not converged yet and we should continue iterating SoFiA + WSclean.'.format(wscl_tol))
if j == wscl_niter:
caracal.log.info('Stopping anyway. Maximum number of SoFiA + WSclean iterations reached.')
else:
caracal.log.info('Starting a new SoFiA + WSclean iteration.')
# Out of SoFiA + WSclean loop -- prepare final data products
for ss in ['dirty', 'psf', 'first-residual', 'residual', 'model', 'image']:
if 'dirty' in ss:
caracal.log.info('Preparing final cubes.')
cubename = '{0:s}/{1:s}_{2:s}_{3:s}_{4:d}.{5:s}.fits'.format(
cube_path, pipeline.prefix, field, line_name, j, ss)
finalcubename = '{0:s}/{1:s}_{2:s}_{3:s}.{4:s}.fits'.format(
cube_path, pipeline.prefix, field, line_name, ss)
line_clean_mask_file = '{0:s}/{1:s}_{2:s}_{3:s}_{4:d}.image_clean_mask.fits'.format(
cube_path, pipeline.prefix, field, line_name, j)
final_line_clean_mask_file = '{0:s}/{1:s}_{2:s}_{3:s}.image_clean_mask.fits'.format(
cube_path, pipeline.prefix, field, line_name)
MFScubename = '{0:s}/{1:s}_{2:s}_{3:s}_{4:d}-MFS-{5:s}.fits'.format(
cube_path, pipeline.prefix, field, line_name, j, ss)
finalMFScubename = '{0:s}/{1:s}_{2:s}_{3:s}-MFS-{4:s}.fits'.format(
cube_path, pipeline.prefix, field, line_name, ss)
if os.path.exists(cubename):
os.rename(cubename, finalcubename)
if os.path.exists(line_clean_mask_file):
os.rename(line_clean_mask_file, final_line_clean_mask_file)
if os.path.exists(MFScubename):
os.rename(MFScubename, finalMFScubename)
for j in range(1, wscl_niter):
if config['make_cube']['wscl_removeintermediate']:
for ss in ['dirty', 'psf', 'first-residual', 'residual', 'model', 'image']:
cubename = '{0:s}/{1:s}_{2:s}_{3:s}_{4:d}.{5:s}.fits'.format(
pipeline.cubes, pipeline.prefix, field, line_name, j, ss)
line_clean_mask_file = '{0:s}/{1:s}_{2:s}_{3:s}_{4:d}.image_clean_mask.fits'.format(
pipeline.cubes, pipeline.prefix, field, line_name, j)
MFScubename = '{0:s}/{1:s}_{2:s}_{3:s}_{4:d}-MFS-{5:s}.fits'.format(
pipeline.cubes, pipeline.prefix, field, line_name, j, ss)
if os.path.exists(cubename):
os.remove(cubename)
if os.path.exists(line_clean_mask_file):
os.remove(line_clean_mask_file)
if os.path.exists(MFScubename):
os.remove(MFScubename)
if pipeline.enable_task(config, 'make_cube') and config['make_cube']['image_with'] == 'casa':
cube_dir = get_relative_path(pipeline.cubes, pipeline)
nchans_all, specframe_all = [], []
label = config['label_in']
if label != '':
flabel = '_' + label
else:
flabel = label
caracal.log.info('Collecting spectral info on MS files being imaged')
if config['make_cube']['use_mstransform']:
for i, msfile in enumerate(all_msfiles):
if not pipeline.enable_task(config, 'mstransform'):
msinfo = pipeline.get_msinfo(msfile)
spw = msinfo['SPW']['NUM_CHAN']
nchans = spw
nchans_all.append(nchans)
caracal.log.info('MS #{0:d}: {1:s}'.format(i, msfile))
caracal.log.info(' {0:d} spectral windows, with NCHAN={1:s}'.format(
len(spw), ','.join(map(str, spw))))
# Get first chan, last chan, chan width
chfr = msinfo['SPW']['CHAN_FREQ']
firstchanfreq = [ss[0] for ss in chfr]
lastchanfreq = [ss[-1] for ss in chfr]
chanwidth = [(ss[-1] - ss[0]) / (len(ss) - 1)
for ss in chfr]
caracal.log.info(' CHAN_FREQ from {0:s} Hz to {1:s} Hz with average channel width of {2:s} Hz'.format(
','.join(map(str, firstchanfreq)), ','.join(map(str, lastchanfreq)), ','.join(map(str, chanwidth))))
specframe = msinfo['SPW']['MEAS_FREQ_REF']
specframe_all.append(specframe)
caracal.log.info(
' The spectral reference frame is {0:}'.format(specframe))
elif pipeline.enable_task(config['mstransform'], 'doppler'):
nchans_all[i] = [nchan_dopp for kk in chanw_all[i]]
specframe_all.append([{'lsrd': 0, 'lsrk': 1, 'galacto': 2, 'bary': 3, 'geo': 4, 'topo': 5}[
config['mstransform']['doppler']['frame']] for kk in chanw_all[i]])
else:
for i, msfile in enumerate(all_msfiles):
msinfo = pipeline.get_msinfo(msfile)
spw = msinfo['SPW']['NUM_CHAN']
nchans = spw
nchans_all.append(nchans)
caracal.log.info('MS {0:d}: {1:s}'.format(i, msfile))
caracal.log.info(' {0:d} spectral windows, with NCHAN={1:s}'.format(
len(spw), ','.join(map(str, spw))))
specframe = msinfo['SPW']['MEAS_FREQ_REF']
specframe_all.append(specframe)
caracal.log.info(
' The spectral reference frame is {0:}'.format(specframe))
spwid = config['make_cube']['spwid']
nchans = config['make_cube']['nchans']
if nchans == 0:
# Assuming user wants same spw for all msfiles and they have same
# number of channels
nchans = nchans_all[0][spwid]
# Assuming user wants same spw for all msfiles and they have same
# specframe
specframe_all = [ss[spwid] for ss in specframe_all][0]
firstchan = config['make_cube']['firstchan']
binchans = config['make_cube']['binchans']
channelrange = [firstchan, firstchan + nchans * binchans]
# Construct weight specification
if config['make_cube']['weight'] == 'briggs':
weight = 'briggs {0:.3f}'.format(
config['make_cube']['robust'])
else:
weight = config['make_cube']['weight']
for tt, target in enumerate(all_targets):
if config['make_cube']['use_mstransform']:
mslist = [add_ms_label(ms, "mst") for ms in ms_dict[target]]
else:
mslist = ms_dict[target]
field = utils.filter_name(target)
step = 'make_line_cube-field{0:d}'.format(tt)
image_opts = {
"msname": mslist,
"prefix": '{0:s}/{1:s}_{2:s}_{3:s}'.format(cube_dir, pipeline.prefix, field, line_name),
"mode": 'channel',
"nchan": nchans,
"start": config['make_cube']['firstchan'],
"interpolation": 'nearest',
"niter": config['make_cube']['niter'],
"gain": config['make_cube']['gain'],
"psfmode": 'hogbom',
"threshold": config['make_cube']['casa_thr'],
"npix": config['make_cube']['npix'],
"cellsize": config['make_cube']['cell'],
"weight": config['make_cube']['weight'],
"robust": config['make_cube']['robust'],
"stokes": config['make_cube']['stokes'],
"port2fits": config['make_cube']['casa_port2fits'],
"restfreq": restfreq,
}
if config['make_cube']['taper'] != '':
image_opts.update({
"uvtaper": True,
"outertaper": config['make_cube']['taper'],
})
recipe.add('cab/casa_clean', step, image_opts,
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Image Line'.format(step))
recipe.run()
recipe.jobs = []
# This prevents multiple running of imcontsub if the
# targets are specified explicitly
rancsonce = False
# Once all cubes have been made fix the headers etc.
# Search cubes and cubes/cubes_*/ for cubes whose header should be fixed
cube_dir = get_relative_path(pipeline.cubes, pipeline)
for tt, target in enumerate(all_targets):
field = utils.filter_name(target)
casa_cube_list = glob.glob('{0:s}/{1:s}/{2:s}_{3:s}_{4:s}*.fits'.format(
pipeline.output, cube_dir, pipeline.prefix, field, line_name))
wscl_cube_list = glob.glob('{0:s}/{1:s}/cube_*/{2:s}_{3:s}_{4:s}*.fits'.format(
pipeline.output, cube_dir, pipeline.prefix, field, line_name))
cube_list = casa_cube_list + wscl_cube_list
image_cube_list = [cc for cc in cube_list if 'image.fits' in cc]
dirty_cube_list = [cc for cc in cube_list if 'dirty.fits' in cc]
image_mask_list = [cc for cc in cube_list if 'image_mask.fits' in cc]
image_clean_mask_list = [cc for cc in cube_list if 'image_clean_mask.fits' in cc]
if pipeline.enable_task(config, 'pb_cube'):
caracal.log.info('Will create primary beam cube for target {0:d}'.format(tt))
for uu in range(len(image_cube_list)):
recipe.add(make_pb_cube,
'make pb_cube-{0:d}'.format(uu),
{'filename': image_cube_list[uu],
'apply_corr': config['pb_cube']['apply_pb'],
'typ': config['pb_cube']['pb_type'],
'dish_size': config['pb_cube']['dish_size'],
'cutoff': config['pb_cube']['cutoff'],
},
input=pipeline.input,
output=pipeline.output,
label='Make primary beam cube for {0:s}'.format(image_cube_list[uu]))
cube_list.append(image_cube_list[uu].replace('image.fits', 'pb.fits'))
if pipeline.enable_task(config, 'remove_stokes_axis'):
caracal.log.info('Will remove Stokes axis of all cubes/images of target {0:d}'.format(tt))
for uu in range(len(cube_list)):
recipe.add(remove_stokes_axis,
'remove_cube_stokes_axis-{0:d}'.format(uu),
{'filename': cube_list[uu], },
input=pipeline.input,
output=pipeline.output,
label='Remove Stokes axis for cube {0:s}'.format(cube_list[uu]))
if pipeline.enable_task(config, 'freq_to_vel'):
if not config['freq_to_vel']['reverse']:
caracal.log.info(
'Will convert spectral axis of all cubes from frequency to radio velocity for target {0:d}'.format(tt))
else:
caracal.log.info(
'Will convert spectral axis of all cubes from radio velocity to frequency for target {0:d}'.format(tt))
for uu in range(len(cube_list)):
recipe.add(freq_to_vel,
'convert-spectral_header-cube{0:d}'.format(uu),
{'filename': cube_list[uu],
'reverse': config['freq_to_vel']['reverse'], },
input=pipeline.input,
output=pipeline.output,
label='Convert spectral axis from frequency to radio velocity for cube {0:s}'.format(cube_list[uu]))
recipe.run()
recipe.jobs = []
if pipeline.enable_task(config, 'sofia'):
if config['sofia']['imcontsub']:
simage_cube_list = []
for uu in range(len(image_cube_list)):
icsname = image_cube_list[uu].replace(
'.image.fits', '.imcontsub.fits')
if len(glob.glob(icsname)) > 0:
simage_cube_list.append(icsname)
else:
simage_cube_list.append(image_cube_list[uu])
else:
simage_cube_list = image_cube_list
for uu in range(len(image_cube_list)):
step = 'sofia-source_finding-{0:d}'.format(uu)
recipe.add(
'cab/sofia',
step,
{
"import.inFile": simage_cube_list[uu].split('/')[-1] + ':input',
"steps.doFlag": config['sofia']['flag'],
"steps.doScaleNoise": True,
"steps.doSCfind": True,
"steps.doMerge": config['sofia']['merge'],
"steps.doReliability": False,
"steps.doParameterise": False,
"steps.doWriteMask": True,
"steps.doMom0": config['sofia']['mom0'],
"steps.doMom1": config['sofia']['mom1'],
"steps.doCubelets": config['sofia']['cubelets'],
"steps.doWriteCat": False,
"flag.regions": config['sofia']['flagregion'],
"scaleNoise.statistic": config['sofia']['rmsMode'],
"SCfind.threshold": config['sofia']['thr'],
"SCfind.rmsMode": config['sofia']['rmsMode'],
"merge.radiusX": config['sofia']['mergeX'],
"merge.radiusY": config['sofia']['mergeY'],
"merge.radiusZ": config['sofia']['mergeZ'],
"merge.minSizeX": config['sofia']['minSizeX'],
"merge.minSizeY": config['sofia']['minSizeY'],
"merge.minSizeZ": config['sofia']['minSizeZ'],
},
input='/'.join(simage_cube_list[uu].split('/')[:-1]),
output='/'.join(simage_cube_list[uu].split('/')[:-1]),
label='{0:s}:: Make SoFiA mask and images for cube {1:s}'.format(step, simage_cube_list[uu]))
# Again, in some cases this should run once
if rancsonce:
pass
else:
if pipeline.enable_task(config, 'imcontsub'):
caracal.log.info(
'Subtracting continuum in the image domain for target {0:d}'.format(tt))
# Using highest cube directory
dirlist = glob.glob('{0:s}/{1:s}/cube_*'.format(pipeline.output, cube_dir))
poopoo = max([int(gi[-1]) for gi in dirlist])
if config['imcontsub']['lastiter']:
wscl_cube_list = glob.glob(
'{0:s}/{1:s}/cube_{2:d}/{3:s}_{4:s}_{5:s}*.fits'.format(
pipeline.output, cube_dir, poopoo,
pipeline.prefix, field, line_name))
else:
wscl_cube_list = glob.glob(
'{0:s}/{1:s}/cube_*/{2:s}_{3:s}_{4:s}*.fits'.format(
pipeline.output, cube_dir,
pipeline.prefix, field, line_name))
# Hoping that the order is the same for all suffixes
wimage_cube_list = [cc for cc in wscl_cube_list if 'image.fits' in cc]
wdirty_cube_list = [cc for cc in wscl_cube_list if 'dirty.fits' in cc]
wimage_mask_list = [cc for cc in wscl_cube_list if 'image_mask.fits' in cc]
wimage_clean_mask_list = [cc for cc in wscl_cube_list if 'image_clean_mask.fits' in cc]
# See comment below
runonce = False
if len(config['imcontsub']['incubus']) == 0 or len(config['imcontsub']['incubus'][0]) == 0:
if len(wimage_cube_list):
contsincubelist = wimage_cube_list
rsuffix = '.image.fits'
else:
contsincubelist = wdirty_cube_list
rsuffix = '.dirty.fits'
else:
# Run only once if the cubes are specified explicitly
# Otherwise we'd do the same thing number of targers times
runonce = True
contsincubelist = config['imcontsub']['incubus']
rsuffix = '.fits'
outputlist = [i.replace(rsuffix, '.imcontsub.fits') for i in contsincubelist]
if config['imcontsub']['mask'] == '':
if len(config['imcontsub']['masculin']) == 0 or len(config['imcontsub']['masculin'][0]) == 0:
maskimc = []
else:
maskimc = config['imcontsub']['masculin']
elif config['imcontsub']['mask'] == 'clean':
maskimc = wimage_clean_mask_list
elif config['imcontsub']['mask'] == 'sofia':
maskimc = wimage_mask_list
else:
maskimc = []
if len(maskimc) == 0:
maskimc = [None for i in contsincubelist]
caracal.log.info(
'Not using mask for image subtraction of target {0:d}'.format(tt))
if config['imcontsub']['outfit']:
outfitlist = [i.replace(rsuffix, '.contsfit.fits') for i in contsincubelist]
else:
outfitlist = [None for i in contsincubelist]
if config['imcontsub']['outfitcon']:
outconlist = [i.replace(rsuffix, '.contsfitcon.fits') for i in contsincubelist]
else:
outconlist = [None for i in contsincubelist]
# outconlist = [i.replace('dirty.fits', 'imcontsub.fits') for i in dirty_cube_list]
for uu in range(len(contsincubelist)):
image_contsub.imcontsub(
incubus=contsincubelist[uu], outcubus=outputlist[uu],
fitmode=config['imcontsub']['fitmode'],
polyorder=config['imcontsub']['polyorder'],
length=config['imcontsub']['length'],
mask=maskimc[uu],
sgiters=config['imcontsub']['sgiters'],
kertyp=config['imcontsub']['kertyp'],
kersiz=config['imcontsub']['kersiz'],
fitted=outfitlist[uu],
confit=outconlist[uu],
clobber=True,
)
if runonce:
rancsonce = True
break
if pipeline.enable_task(config, 'sharpener'):
for uu in range(len(image_cube_list)):
step = 'continuum-spectral_extraction-{0:d}'.format(uu)
params = {"enable_spec_ex": True,
"enable_source_catalog": True,
"enable_abs_plot": True,
"enable_source_finder": False,
"cubename": image_cube_list[uu] + ':output',
"channels_per_plot": config['sharpener']['chans_per_plot'],
"workdir": '{0:s}/'.format(stimela.recipe.CONT_IO["output"]),
"label": config['sharpener']['label'],
}
runsharp = False
if config['sharpener']['catalog'] == 'PYBDSF':
catalogs = []
nimages = glob.glob("{0:s}/image_*".format(pipeline.continuum))
for ii in range(0, len(nimages)):
catalog = glob.glob("{0:s}/image_{1:d}/{2:s}_{3:s}_*.lsm.html".format(
pipeline.continuum, ii + 1, pipeline.prefix, field))
catalogs.append(catalog)
catalogs = sorted(catalogs)
catalogs = [cat for catalogs in catalogs for cat in catalogs]
# Right now, this is the last catalog made
if len(catalogs):
catalog_file = catalogs[-1].split('output/')[-1]
params["catalog_file"] = '{0:s}:output'.format(catalog_file)
else:
catalog_file = []
if len(catalog_file) > 0:
runsharp = True
params["catalog"] = "PYBDSF"
recipe.add('cab/sharpener',
step,
params,
input='/'.join('{0:s}/{1:s}'.format(pipeline.output, image_cube_list[uu]).split('/')[:-1]),
output=pipeline.output,
label='{0:s}:: Continuum Spectral Extraction'.format(step))
else:
caracal.log.warn(
'No PyBDSM catalogs found. Skipping continuum spectral extraction.')
elif config['sharpener']['catalog'] == 'NVSS':
runsharp = True
params["thr"] = config['sharpener']['thr']
params["width"] = config['sharpener']['width']
params["catalog"] = "NVSS"
recipe.add('cab/sharpener',
step,
params,
input='/'.join('{0:s}/{1:s}'.format(pipeline.output, image_cube_list[uu]).split('/')[:-1]),
output=pipeline.output,
label='{0:s}:: Continuum Spectral Extraction'.format(step))
recipe.run()
recipe.jobs = []
# Move the sharpener output to diagnostic_plots
if runsharp:
sharpOut = '{0:s}/{1:s}'.format(pipeline.output, 'sharpOut')
finalsharpOut = '{0:s}/{1:s}_{2:s}_{3:s}'.format(
pipeline.diagnostic_plots, pipeline.prefix, field, 'sharpOut')
if os.path.exists(finalsharpOut):
shutil.rmtree(finalsharpOut)
shutil.move(sharpOut, finalsharpOut)
| 98,559 | 55.127563 | 253 | py |
caracal | caracal-master/caracal/workers/mask_worker.py | # -*- coding: future_fstrings -*-
import os
import sys
import shutil
import glob
import yaml
import numpy as np
from caracal.dispatch_crew import utils
from caracal.utils.requires import extras
NAME = 'Make Masks'
LABEL = 'mask'
@extras("astropy")
def worker(pipeline, recipe, config):
################################################################################
# Worker's MODULES
################################################################################
from astropy import units as u
import astropy.coordinates as coord
from astropy import wcs
from astropy.io import fits, ascii
from astropy.table import Table, Column, MaskedColumn
def ra2deg(ra_hms):
    """Convert a right ascension string 'HH:MM:SS' to degrees.

    Parameters
    ----------
    ra_hms : str
        Right ascension in colon-separated sexagesimal hours.

    Returns
    -------
    float
        Right ascension in degrees (15 degrees per hour of RA).

    The original docstring claimed a radian value was also returned;
    only degrees are computed and returned.
    """
    hours, minutes, seconds = (float(part) for part in ra_hms.split(':'))
    # 1 hour = 15 deg, 1 minute = 15/60 deg, 1 second = 15/3600 deg
    return hours * 15 + (minutes / 60) * 15 + (seconds / 3600) * 15
def dec2deg(dec_dms):
    """Convert a declination string 'DD:MM:SS' to degrees.

    Parameters
    ----------
    dec_dms : str
        Declination in colon-separated sexagesimal degrees; may carry a
        leading '-' sign.

    Returns
    -------
    float
        Declination in signed degrees.

    The sign is taken from the string prefix rather than from
    ``float(dd) >= 0``: the original test mapped '-00:30:00' to +0.5
    because ``float('-00') >= 0`` is True, losing the sign for
    declinations between 0 and -1 degree.  A dead ``return`` after the
    if/else has also been removed.
    """
    dd, mm, ss = dec_dms.split(':')
    magnitude = abs(float(dd)) + float(mm) / 60 + float(ss) / 3600
    if dd.strip().startswith('-'):
        return -magnitude
    return magnitude
def nvss_pbcorr(ra_deg, dec_deg, centre, cell, imsize, obs_freq, flux):
    '''Attenuate a catalogue flux by a Gaussian primary-beam model.

    Builds a SIN-projection WCS centred on `centre` (hourangle/deg strings),
    converts the source position to pixel coordinates, and scales `flux`
    (mJy, converted to Jy via the 1e-3 factor) by a Gaussian whose FWHM is
    the 1.02*lambda/D beam for a 13.5 m dish at `obs_freq` (Hz).

    Returns (attenuated flux in Jy, pixel x, pixel y).
    '''
    # Build the celestial WCS for the mask image: cell arcsec -> deg,
    # reference pixel at the image centre.
    w = wcs.WCS(naxis=2)
    centre = coord.SkyCoord(centre[0], centre[1], unit=(
        u.hourangle, u.deg), frame='icrs')
    cell /= 3600.
    # Primary-beam FWHM in degrees, then in pixels; Gaussian sigma from FWHM.
    pb_fwhm = 1.02 * (2.99792458E8) / obs_freq / 13.5 / np.pi * 180.
    pb_fwhm_pix = pb_fwhm / cell
    sigma, mu = pb_fwhm_pix / 2.35482, 0.0
    w.wcs.crpix = [imsize / 2, imsize / 2]
    w.wcs.cdelt = np.array([-cell, cell])
    w.wcs.crval = [centre.ra.deg, centre.dec.deg]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN"]
    px, py = w.wcs_world2pix(ra_deg, dec_deg, 0)
    # NOTE(review): d is the distance from pixel (0, 0), while the beam
    # centre sits at crpix = imsize/2 — presumably the intent is distance
    # from the pointing centre; confirm against build_beam, which measures
    # from the image centre.
    d = np.sqrt(px * px + py * py)
    gauss = np.exp(-((d - mu)**2 / (2.0 * sigma**2)))
    new_flux = flux * 1e-3 * gauss
    return new_flux, px, py
@extras("astroquery")
def query_catalog_sumss(catalog_table, centre, width_im, cat_name):
    """Query Vizier for `cat_name` sources around `centre` and write the
    first returned table to `catalog_table` in ascii format."""
    from astroquery.vizier import Vizier
    Vizier.ROW_LIMIT = -1
    pointing = coord.SkyCoord(centre[0], centre[1],
                              unit=(u.hourangle, u.deg), frame='icrs')
    result = Vizier.query_region(pointing, width=width_im, catalog=cat_name)
    ascii.write(result[0], catalog_table, overwrite=True)
@extras("astroquery")
def query_catalog_nvss(catalog_table, centre, width_im, cell, imsize, obs_freq, cat_name, thresh):
    """Query Vizier for NVSS sources, primary-beam-correct their fluxes,
    drop sources below `thresh` Jy (after attenuation), and write the
    resulting table to `catalog_table`.
    """
    from astroquery.vizier import Vizier
    Vizier.ROW_LIMIT = -1
    p = Vizier.query_region(coord.SkyCoord(centre[0], centre[1], unit=(u.hourangle, u.deg), frame='icrs'),
                            width=width_im, catalog=cat_name)
    tab = p[0]
    # First write: the raw query result (overwritten below after filtering).
    ascii.write(tab, catalog_table, overwrite=True)
    tab = Table(tab, masked=True)
    ra_deg = np.empty([len(tab['RAJ2000'])])
    dec_deg = np.empty([len(tab['RAJ2000'])])
    flux_corr = np.empty([len(tab['RAJ2000'])])
    pix_x = np.empty([len(tab['RAJ2000'])])
    pix_y = np.empty([len(tab['RAJ2000'])])
    # Normalise the sexagesimal strings to colon separators, convert to
    # degrees, and attenuate each source's S1.4 flux by the beam model.
    for i in range(0, len(tab['RAJ2000'])):
        tab['RAJ2000'][i] = tab['RAJ2000'][i].replace(' ', ':')
        ra_deg[i] = ra2deg(tab['RAJ2000'][i])
        tab['DEJ2000'][i] = tab['DEJ2000'][i].replace(' ', ':')
        dec_deg[i] = dec2deg(tab['DEJ2000'][i])
        flux_corr[i], pix_x[i], pix_y[i] = nvss_pbcorr(
            ra_deg[i], dec_deg[i], centre, cell, imsize, obs_freq, tab['S1.4'][i])
    ra_deg = Column(ra_deg)
    dec_deg = Column(dec_deg)
    pix_x = Column(pix_x)
    pix_y = Column(pix_y)
    flux_corr = Column(flux_corr)
    tab.add_column(pix_x, name='PixX')
    tab.add_column(pix_y, name='PixY')
    tab.add_column(flux_corr, name='Flux_pbcorr')
    tab.add_column(ra_deg, name='RADEG')
    tab.add_column(dec_deg, name='DECDEG')
    flux14 = np.array(flux_corr, dtype=float)
    below_thresh = flux14 < thresh
    # Blank every column except the first for sub-threshold rows, then
    # drop those rows via the NaN'd Flux_pbcorr column.
    for i in range(1, len(tab.colnames)):
        tab[tab.colnames[i]][below_thresh] = np.nan
    tab = tab[~np.isnan(tab['Flux_pbcorr'])]
    tab = Table(tab, masked=True)
    ascii.write(tab, catalog_table, overwrite=True)
def set_mosaic_files(catalog_table, mask_dir, fields_dir):
    """Stage survey FITS fields referenced by the catalogue for mosaicking.

    Reads `catalog_table`, copies each unique 'Mosaic' field image from
    `fields_dir` into `<mask_dir>/formosaic/`, and removes any stale
    `<mask_dir>/mosaic/` output from a previous run.
    """
    tab = ascii.read(catalog_table)
    # Only the distinct field names matter; counts were computed but unused.
    unique = np.unique(tab['Mosaic'])
    mosaic_tmpdir = mask_dir + '/formosaic/'
    mosaic_outdir = mask_dir + '/mosaic/'
    if not os.path.exists(mosaic_tmpdir):
        os.mkdir(mosaic_tmpdir)
    if os.path.exists(mosaic_outdir):
        shutil.rmtree(mosaic_outdir)
    for field_name in unique:
        summsfield = fields_dir + str(field_name) + '.FITS'
        outfield = mosaic_tmpdir + str(field_name) + '.FITS'
        shutil.copy(summsfield, outfield)
def cleanup_mosaic_files(catalog_name, mask_dir):
    """Remove temporary Montage output and CASA intermediate images
    produced while building the mosaic mask."""
    montage_tmpdir = pipeline.output + '/mask_mosaic'
    if os.path.exists(montage_tmpdir):
        shutil.rmtree(montage_tmpdir)
    for casa_image in glob.glob(mask_dir + '/*.image'):
        shutil.rmtree(casa_image)
def move_files(catalog_name, mask_dir):
    """Rename the generic Montage mosaic products in the output directory
    to catalogue-specific filenames inside `mask_dir`."""
    renames = [
        (pipeline.output + '/mosaic.fits',
         mask_dir + catalog_name + '_mosaic.fits'),
        (pipeline.output + '/mosaic_area.fits',
         mask_dir + catalog_name + '_mosaic_area.fits'),
    ]
    for source_path, destination_path in renames:
        if os.path.exists(source_path):
            shutil.move(source_path, destination_path)
def build_beam(obs_freq, centre, cell, imsize, out_beam):
    """Write a Gaussian primary-beam image to `out_beam`.

    The beam is centred on `centre` (hourangle/deg strings), sampled on an
    `imsize` x `imsize` grid with `cell` arcsec pixels, with FWHM
    1.02*lambda/D for a 13.5 m dish at `obs_freq` (Hz).
    """
    # if copy_head == True:
    # hdrfile = fits.open(headfile)
    # hdr = hdrfile[0].header
    # elif copy_head == False:
    w = wcs.WCS(naxis=2)
    centre = coord.SkyCoord(centre[0], centre[1], unit=(
        u.hourangle, u.deg), frame='icrs')
    cell /= 3600.
    w.wcs.crpix = [imsize / 2, imsize / 2]
    w.wcs.cdelt = np.array([-cell, cell])
    w.wcs.crval = [centre.ra.deg, centre.dec.deg]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN"]
    hdr = w.to_header()
    # Force a minimal, 2-D FITS header; CUNITs are dropped because some
    # downstream tools choke on them.
    hdr['SIMPLE'] = 'T'
    hdr['BITPIX'] = -32
    hdr['NAXIS'] = 2
    hdr.set('NAXIS1', imsize, after='NAXIS')
    hdr.set('NAXIS2', imsize, after='NAXIS1')
    if 'CUNIT1' in hdr:
        del hdr['CUNIT1']
    if 'CUNIT2' in hdr:
        del hdr['CUNIT2']
    # FWHM in degrees -> pixels, then evaluate a centred 2-D Gaussian.
    pb_fwhm = 1.02 * (2.99792458E8) / obs_freq / 13.5 / np.pi * 180.
    pb_fwhm_pix = pb_fwhm / hdr['CDELT2']
    x, y = np.meshgrid(np.linspace(-hdr['NAXIS2'] / 2., hdr['NAXIS2'] / 2., hdr['NAXIS2']),
                       np.linspace(-hdr['NAXIS1'] / 2., hdr['NAXIS1'] / 2., hdr['NAXIS1']))
    d = np.sqrt(x * x + y * y)
    sigma, mu = pb_fwhm_pix / 2.35482, 0.0
    gauss = np.exp(-((d - mu)**2 / (2.0 * sigma**2)))
    fits.writeto(out_beam, gauss, hdr, overwrite=True)
def pbcorr(beam, mosaic_regrid, mosaic_pbcorr):
    """Multiply the regridded mosaic by the primary-beam image.

    Writes the attenuated mosaic to `mosaic_pbcorr` using the mosaic's
    header, after stripping keywords that upset downstream tools.

    Context managers replace the manual open/close pair so the FITS
    handles are released even if `writeto` raises; the local previously
    named `pbcorr` (shadowing this function) is gone.
    """
    with fits.open(beam) as pblist, fits.open(mosaic_regrid) as mlist:
        pbdata = pblist[0].data
        mdata = mlist[0].data
        mhead = mlist[0].header
        mhead['EPOCH'] = 2000
        for key in ('LONPOLE', 'LATPOLE', 'RADESYS'):
            if key in mhead:
                del mhead[key]
        fits.writeto(mosaic_pbcorr, np.multiply(mdata, pbdata), mhead,
                     overwrite=True)
def change_header(filename, copy_head, headfile):
    """Rewrite `filename` in place with a cleaned 2-D header.

    If `copy_head` is true the header comes from `headfile` (third axis
    removed, data squeezed to 2-D); otherwise the file's own header is
    kept minus ORIGIN/CUNIT keywords that upset downstream tools.

    Leftover debug prints (including the 'CHANGE THEE HEADER' typo) have
    been removed, and the `elif copy_head == False` — which left `head`
    unbound for e.g. copy_head=None — is now a plain else.
    """
    with fits.open(filename) as pblist:
        dat = pblist[0].data
        if copy_head:
            with fits.open(headfile) as hdrfile:
                head = hdrfile[0].header
            if 'NAXIS3' in head:
                del head['NAXIS3']
            head['NAXIS'] = 2
            dat = np.squeeze(dat)
        else:
            head = pblist[0].header
            for key in ('ORIGIN', 'CUNIT1', 'CUNIT2'):
                if key in head:
                    del head[key]
        fits.writeto(filename, dat, head, overwrite=True)
def make_mask(mosaic_pbcorr, mask, contour):
    """Threshold an image into a 0/1 FITS mask.

    Pixels with value >= `contour` become 1, everything else (NaNs are
    treated as 0) becomes 0.  The original applied the `<= contour`
    selection after the `>= contour` one, so pixels exactly at the
    threshold ended up excluded; they are now included, matching the
    usual ">= threshold" mask semantics.
    """
    with fits.open(mosaic_pbcorr) as moslist:
        mosdata = moslist[0].data
        moshead = moslist[0].header
        mosdata[np.isnan(mosdata)] = 0.0
        binary = (mosdata >= contour).astype(mosdata.dtype)
        fits.writeto(mask, binary, moshead, overwrite=True)
def make_mask_nvss(catalog_table, centre, imsize, cell, mask):
    """Rasterise the NVSS catalogue into a 0/1 FITS mask.

    Builds a SIN-projection header centred on `centre`, then for each
    catalogue source paints an ellipse (or a single pixel when the source
    is smaller than one cell) of ones into an `imsize` x `imsize` image.
    """
    w = wcs.WCS(naxis=2)
    centre = coord.SkyCoord(centre[0], centre[1], unit=(
        u.hourangle, u.deg), frame='icrs')
    cell /= 3600.
    w.wcs.crpix = [imsize / 2, imsize / 2]
    w.wcs.cdelt = np.array([-cell, cell])
    w.wcs.crval = [centre.ra.deg, centre.dec.deg]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN"]
    hdr = w.to_header()
    hdr['SIMPLE'] = 'T'
    hdr['BITPIX'] = -32
    hdr['NAXIS'] = 2
    hdr['EQUINOX'] = 2000.
    hdr.set('NAXIS1', imsize, after='NAXIS')
    hdr.set('NAXIS2', imsize, after='NAXIS1')
    if 'CUNIT1' in hdr:
        del hdr['CUNIT1']
    if 'CUNIT2' in hdr:
        del hdr['CUNIT2']
    data = np.zeros([hdr['NAXIS2'], hdr['NAXIS1']])
    tab = ascii.read(catalog_table)
    major = tab['MajAxis']
    minor = tab['MinAxis']
    # NOTE(review): minor is immediately overwritten with major, turning
    # every ellipse into a circle of the major axis — presumably a
    # deliberate circular approximation; confirm before "fixing".
    minor = major
    ra = tab['RADEG']
    dec = tab['DECDEG']
    pix_x = tab['PixX']
    pix_y = tab['PixY']
    # Position angle fixed at zero (axes aligned with the pixel grid).
    angle1 = np.radians(0.0)
    cosangle1 = np.cos(angle1)
    sinangle1 = np.sin(angle1)
    xnum = np.linspace(0, hdr['NAXIS1'], hdr['NAXIS1'])
    ynum = np.linspace(0, hdr['NAXIS2'], hdr['NAXIS2'])
    x, y = np.meshgrid(xnum, ynum)
    for i in range(0, len(pix_x)):
        xc = pix_x[i]
        yc = pix_y[i]
        # MajAxis/MinAxis are in arcsec; only draw an ellipse when the
        # source is resolved at the mask's cell size.
        if minor[i] / 3600. >= float(hdr['CDELT2']) and major[i] / 3600. >= float(hdr['CDELT2']):
            a = major[i] / 3600. / float(hdr['CDELT2']) / 2.
            b = minor[i] / 3600. / float(hdr['CDELT2']) / 2.
            ell = np.power(x - xc, 2) / np.power(a, 2) + \
                np.power(y - yc, 2) / np.power(b, 2)
            index_ell = np.where(np.less_equal(ell, 1))
            data[index_ell] = 1
        else:
            data[int(yc), int(xc)] = 1
    fits.writeto(mask, data, hdr, overwrite=True)
def merge_masks(extended_mask, catalog_mask, end_mask):
    """Combine an extended-emission mask with a catalogue mask.

    Pixels that are NaN or < 1 in the extended mask take their value from
    the catalogue mask; the merged image is written to `end_mask` with the
    catalogue mask's header.

    Fixes: the extended data was squeezed twice (duplicate line removed),
    the unused `forhead` read is gone, and the FITS files are opened via
    context managers so handles are closed even on error.
    """
    with fits.open(catalog_mask) as catlist, fits.open(extended_mask) as forlist:
        catdata = catlist[0].data
        cathead = catlist[0].header
        fordata = np.squeeze(forlist[0].data)
        nan_pixels = np.isnan(fordata)
        fordata[nan_pixels] = catdata[nan_pixels]
        below_one = np.where(fordata < 1)
        fordata[below_one] = catdata[below_one]
        fits.writeto(end_mask, fordata, cathead, overwrite=True)
################################################################################
# MAIN
################################################################################
# mask_dir = pipeline.output+'/masking/'
# if os.path.exists(mask_dir) != True:
#     os.mkdir(mask_dir)
mask_dir = pipeline.masking + '/'
centre = config['centre_coord']
flabel = config['label_in']
all_targets, all_msfiles, ms_dict = pipeline.get_target_mss(flabel)
msinfo = pipeline.get_msinfo(all_msfiles[0])
# One mask per target field.
for target in all_targets:
    mslist = ms_dict[target]
    field = utils.filter_name(target)
    # Placeholder centre coordinates: fall back to the MS reference
    # direction (stored in radians), converted to hms/dms.
    if centre[0] == 'HH:MM:SS' and centre[1] == 'DD:MM:SS':
        targetpos = msinfo['REFERENCE_DIR']
        while len(targetpos) == 1:
            targetpos = targetpos[0]
        coords = [targetpos[0] / np.pi * 180., targetpos[1] / np.pi * 180.]
        centreCoord = coord.SkyCoord(
            coords[0], coords[1], frame='icrs', unit=(u.deg, u.deg))
        centre[0] = centreCoord.ra.hms
        centre[1] = centreCoord.dec.dms
    mask_cell = config['cell_size']
    mask_imsize = config['mask_size']
    final_mask = mask_dir + str(config['label_out']) + '_' + str(target) + '.fits'
    catalog_name = config['catalog_query']['catalog']
    catalog_tab = mask_dir + catalog_name + '_' + pipeline.prefix + '_catalog.txt'
    if catalog_name == 'SUMSS':
        if pipeline.enable_task(config, 'catalog_query'):
            key = 'catalog_query'
            recipe.add(query_catalog_sumss, 'query_source_catalog',
                       {
                           'centre': centre,
                           'width_im': config[key]['image_width'],
                           'cat_name': catalog_name,
                           'catalog_table': catalog_tab,
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Catalog pulled')
            # read catalog table
            fields_dir = pipeline.input + '/fields/'
            step = 'prepare'  # set directories
            recipe.add(set_mosaic_files, step,
                       {
                           'catalog_table': catalog_tab,
                           'mask_dir': mask_dir,
                           'fields_dir': fields_dir,
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Preparing folders')
            step = 'mosaic'
            recipe.add('cab/montage', step,
                       {
                           'input_dir': 'masking/formosaic' + ':output',
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Mosaicing catalog')
            step = 'cleanup'  # cleanup
            recipe.add(move_files, step,
                       {
                           'catalog_name': catalog_name,
                           'mask_dir': mask_dir,
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Cleanup folders')
    elif catalog_name == 'NVSS':
        if pipeline.enable_task(config, 'catalog_query'):
            key = 'catalog_query'
            catalog_tab = mask_dir + catalog_name + '_' + pipeline.prefix + '_catalog.txt'
            recipe.add(query_catalog_nvss, 'query-nvss',
                       {
                           'centre': centre,
                           'width_im': config[key]['image_width'],
                           'cat_name': catalog_name,
                           'thresh': config[key]['nvss_thr'],
                           'cell': mask_cell,
                           'imsize': mask_imsize,
                           'obs_freq': config['pbcorr']['frequency'],
                           'catalog_table': catalog_tab,
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Catalog pulled')
        if pipeline.enable_task(config, 'make_mask'):
            # Without a later merge step the catalogue mask IS the final mask.
            if pipeline.enable_task(config, 'merge_with_extended') == False:
                cat_mask = final_mask
            else:
                cat_mask = mask_dir + '/' + config['label_out'] + '_' + str(target) + '.fits'
            catalog_tab = mask_dir + catalog_name + '_' + pipeline.prefix + '_catalog.txt'
            recipe.add(make_mask_nvss, 'make_mask-nvss',
                       {
                           "catalog_table": catalog_tab,
                           "centre": centre,
                           'cell': mask_cell,
                           'imsize': mask_imsize,
                           "mask": cat_mask,
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Mask from catalog')
    # SUMSS masks are thresholded from the (optionally pb-corrected) mosaic.
    if pipeline.enable_task(config, 'make_mask') and catalog_name == 'SUMSS':
        if pipeline.enable_task(config, 'pbcorr'):
            recipe.add(build_beam, 'make_pb',
                       {
                           'obs_freq': config['pbcorr']['frequency'],
                           'centre': centre,
                           'cell': mask_cell,
                           'imsize': mask_imsize,
                           'out_beam': mask_dir + '/gauss_pbeam.fits',
                       },
                       input=pipeline.input,
                       output=pipeline.output)
            # Round-trip through CASA to regrid the mosaic onto the beam grid.
            mosaic = 'masking/' + catalog_name + '_mosaic.fits'
            mosaic_casa = 'masking/mosaic_casa.image'
            beam = 'masking/gauss_pbeam.fits'
            beam_casa = 'masking/gauss_pbeam.image'
            mosaic_regrid_casa = 'masking/mosaic_regrid.image'
            mosaic_regrid = 'masking/' + catalog_name + '_mosaic_regrid.fits'
            mosaic_pbcorr = 'masking/' + catalog_name + '_mosaic_pbcorr.fits'
            step = 'import-mosaic'
            recipe.add('cab/casa_importfits', step,
                       {
                           "fitsimage": mosaic + ':output',
                           "imagename": mosaic_casa + ":output",
                           "overwrite": True,
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Mosaic in casa format')
            step = 'import-beam'
            recipe.add('cab/casa_importfits', step,
                       {
                           "fitsimage": beam + ':output',
                           "imagename": beam_casa,
                           "overwrite": True,
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Beam in casa format')
            step = 'regrid'
            recipe.add('cab/casa_imregrid', step,
                       {
                           "template": beam_casa + ':output',
                           "imagename": mosaic_casa + ':output',
                           "output": mosaic_regrid_casa,
                           "overwrite": True,
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Regridding mosaic to size and projection of dirty image')
            step = 'export'
            recipe.add('cab/casa_exportfits', step,
                       {
                           "fitsimage": mosaic_regrid + ':output',
                           "imagename": mosaic_regrid_casa + ':output',
                           "overwrite": True,
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Extracted regridded mosaic')
            recipe.add(pbcorr, 'correct_pb',
                       {
                           "mosaic_regrid": pipeline.output + '/' + mosaic_regrid,
                           "mosaic_pbcorr": pipeline.output + '/' + mosaic_pbcorr,
                           "beam": pipeline.output + '/' + beam,
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Correcting mosaic for primary beam')
        if config['make_mask']['input_image'] == 'pbcorr':
            in_image = 'masking/' + catalog_name + '_mosaic_pbcorr.fits'
        else:
            in_image = 'masking/' + config['make_mask']['input_image']
        if config['make_mask']['mask_method'] == 'thresh':
            if pipeline.enable_task(config, 'merge_with_extended') == False:
                cat_mask = final_mask
            else:
                cat_mask = mask_dir + '/' + catalog_name + '_mask.fits'
            recipe.add(make_mask, 'make_mask-mosaic',
                       {
                           "mosaic_pbcorr": pipeline.output + '/' + in_image,
                           "mask": cat_mask,
                           "contour": config['make_mask']['thr_lev'],
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Mask done')
        elif config['make_mask']['mask_method'] == 'sofia':
            # SoFiA source finding with per-window noise scaling.
            imagename = in_image
            def_kernels = [[3, 3, 0, 'b'], [6, 6, 0, 'b'], [15, 15, 0, 'b']]
            image_opts = {
                "import.inFile": imagename,
                "steps.doFlag": True,
                "steps.doScaleNoise": True,
                "steps.doSCfind": True,
                "steps.doMerge": False,
                "steps.doReliability": False,
                "steps.doParameterise": False,
                "steps.doWriteMask": True,
                "steps.doMom0": False,
                "steps.doMom1": False,
                "steps.doWriteCat": False,
                "SCfind.kernelUnit": 'pixel',
                "SCfind.kernels": def_kernels,
                "SCfind.threshold": config['make_mask']['thr_lev'],
                "SCfind.rmsMode": 'mad',
                "SCfind.edgeMode": 'constant',
                "SCfind.fluxRange": 'all',
                "scaleNoise.statistic": 'mad',
                "scaleNoise.windowSpatial": config['make_mask']['scale_noise_window'],
                "scaleNoise.windowSpectral": 1,
                "scaleNoise.method": 'local',
                "scaleNoise.fluxRange": 'all',
                "scaleNoise.scaleX": True,
                "scaleNoise.scaleY": True,
                "scaleNoise.scaleZ": False,
                "writeCat.basename": str(config['label_out']),
            }
            step = "make_mask-sofia"
            recipe.add('cab/sofia', 'make_mask-sofia',
                       image_opts,
                       input=pipeline.output,
                       output=pipeline.output + '/masking/',
                       label='{0:s}:: Make SoFiA mask'.format(step))
            recipe.add(change_header, 'extract-mosaic',
                       {
                           "filename": final_mask,
                           "headfile": pipeline.output + '/' + imagename,
                           "copy_head": True,
                       },
                       input=pipeline.output,
                       output=pipeline.output,
                       label='Extracted regridded mosaic')
    if pipeline.enable_task(config, 'merge_with_extended'):
        key = 'merge_with_extended'
        ext_name = config[key]['extended_source_map']
        extended = 'fields/' + ext_name
        extended_casa = 'masking/extended.image'
        extended_regrid_casa = 'masking/Fornaxa_vla_regrid.image'
        extended_regrid = 'masking/Fornaxa_vla_regrid.fits'
        beam = 'masking/gauss_pbeam.fits'
        # Build / import the beam only if an earlier step has not already.
        if os.path.exists(pipeline.output + '/' + beam) == False:
            recipe.add(build_beam, 'build_pb',
                       {
                           'obs_freq': config['pbcorr']['frequency'],
                           'centre': centre,
                           'cell': mask_cell,
                           'imsize': mask_imsize,
                           'out_beam': mask_dir + '/gauss_pbeam.fits',
                       },
                       input=pipeline.input,
                       output=pipeline.output)
        beam_casa = 'masking/gauss_pbeam.image'
        if os.path.exists(pipeline.output + '/' + beam_casa) == False:
            recipe.add('cab/casa_importfits', 'import-pb',
                       {
                           "fitsimage": beam + ':output',
                           "imagename": beam_casa,
                           "overwrite": True,
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Importing beam for extended mask')
        ext_name_root = ext_name.split('.')[0]
        extended_pbcorr = 'masking/' + ext_name_root + '_pbcorr.fits'
        extended_mask = '/masking/' + ext_name_root + '_mask.fits'
        recipe.add('cab/casa_importfits', 'import-mosaic',
                   {
                       "fitsimage": extended + ":input",
                       "imagename": extended_casa + ":output",
                       "overwrite": True,
                   },
                   input=pipeline.input,
                   output=pipeline.output,
                   label='Importing extended mask')
        recipe.add('cab/casa_imregrid', 'regrid',
                   {
                       "imagename": extended_casa + ":output",
                       "template": beam_casa + ":output",
                       "output": extended_regrid_casa,
                       "overwrite": True,
                   },
                   input=pipeline.input,
                   output=pipeline.output,
                   label='Regridding extended mask')
        recipe.add('cab/casa_exportfits', 'export',
                   {
                       "fitsimage": extended_regrid + ":output",
                       "imagename": extended_regrid_casa + ":output",
                       "overwrite": True,
                   },
                   input=pipeline.input,
                   output=pipeline.output,
                   label='Exporting extended mask')
        recipe.add(pbcorr, 'correct_pb',
                   {
                       "mosaic_regrid": pipeline.output + '/' + extended_regrid,
                       "mosaic_pbcorr": pipeline.output + '/' + extended_pbcorr,
                       "beam": pipeline.output + '/' + beam,
                   },
                   input=pipeline.input,
                   output=pipeline.output,
                   label='Correcting mask for primary beam')
        if config['merge_with_extended']['mask_method'] == 'thresh':
            recipe.add(make_mask, 'make_mask-extend',
                       {
                           "mosaic_pbcorr": pipeline.output + '/' + extended_pbcorr,
                           "mask": pipeline.output + extended_mask,
                           "contour": config['merge_with_extended']['thr_lev'],
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='Mask done')
        # NOTE(review): this assigns a config *value* as the catalogue-mask
        # path handed to merge_masks — presumably it should be the mask
        # file built above; confirm against the config schema.
        cat_mask = config['mask_prefix']['mask_method']
        recipe.add(merge_masks, 'make_mask-merge',  # 'Merging VLA Fornax into catalog mask',
                   {
                       "extended_mask": pipeline.output + extended_mask,
                       "catalog_mask": cat_mask,
                       "end_mask": final_mask,
                   },
                   input=pipeline.input,
                   output=pipeline.output,
                   label='Total mask done')
    recipe.add(cleanup_mosaic_files, 'cleanup',
               {
                   'catalog_name': catalog_name,
                   'mask_dir': mask_dir,
               },
               input=pipeline.input,
               output=pipeline.output,
               label='Cleanup folders')
| 29,775 | 36.73891 | 110 | py |
caracal | caracal-master/caracal/workers/getdata_worker.py | # -*- coding: future_fstrings -*-
import os
import sys
import subprocess
import itertools
import caracal
import stimela.dismissable as sdm
import warnings
NAME = "Get Data"
LABEL = 'getdata'
def worker(pipeline, recipe, config):
    """Getdata worker: resolve the configured dataids into MS names and,
    if enabled, schedule untarring of each `<msname>.tar` found in the raw
    data directory.
    """
    pipeline.init_names(config["dataid"])
    if pipeline.nobs == 0:
        raise RuntimeError(f'No MS files matching any of {pipeline.dataid} were found at {pipeline.rawdatadir}. '
                           'Please make sure that general: msdir , getdata: dataid, and (optionally) general: '
                           'rawdatadir are set properly.')
    for i, msname in enumerate(pipeline.msnames):
        if pipeline.enable_task(config, 'untar'):
            step = 'untar-{:d}'.format(i)
            # tar_options is a single option string passed verbatim as one
            # tar argument (e.g. 'xvf').
            tar_options = config['untar']['tar_options']

            # Function to untar Ms from .tar file
            def untar(ms):
                mspath = os.path.abspath(pipeline.rawdatadir)
                subprocess.check_call(['tar', tar_options,
                                       os.path.join(mspath, ms + '.tar'),
                                       '-C', mspath])
            # add function to recipe; `ms` is bound per-step via the recipe
            # parameters rather than through the closure.
            recipe.add(untar, step,
                       {
                           "ms": msname,
                       },
                       label='{0:s}:: Get MS from tarbal ms={1:s}'.format(step, msname),
                       output=pipeline.rawdatadir,
                       input=pipeline.input)
| 1,463 | 35.6 | 113 | py |
caracal | caracal-master/caracal/workers/__init__.py | 0 | 0 | 0 | py | |
caracal | caracal-master/caracal/workers/polcal_worker.py | # -*- coding: future_fstrings -*-
from collections import OrderedDict
import pickle
import sys
import os
import caracal.dispatch_crew.utils as utils
import caracal
import yaml
import stimela.dismissable as sdm
from caracal.workers.utils import manage_flagsets as manflags
from caracal.workers.utils import manage_antennas as manants
from caracal.workers.utils import callibs
import copy
import re
import json
import glob
import glob
import shutil
import numpy
from casacore.tables import table as tb
NAME = "Polarization calibration"
LABEL = 'polcal'
def get_dir_path(string, pipeline):
    """Return `string` relative to the pipeline output directory,
    without the leading path separator."""
    relative = string.split(pipeline.output)[1]
    return relative[1:]
def exists(outdir, path):
    """True if `path`, resolved inside `outdir`, exists on disk."""
    candidate = os.path.join(outdir, path)
    return os.path.exists(candidate)
def scan_length(msinfo, field):
    """Mean scan length for `field`: total observation time divided by
    the number of scans recorded for that field id."""
    field_id = utils.get_field_id(msinfo, field)[0]
    n_scans = len(msinfo['SCAN'][str(field_id)])
    return float(utils.field_observation_length(msinfo, field)) / n_scans
def xcal_model_fcal_leak(msname, msinfo, prefix_msbase, recipe, config, pipeline, i, prefix, ref, polarized_calibrators, caltablelist,
                         gainfieldlist, interplist, calwtlist, applylist, leak_caltablelist,
                         leak_gainfieldlist, leak_interplist, leak_calwtlist, leak_applylist):
    """Polarisation calibration using a known polarised calibrator for the
    crosshand terms (Gpol1/Kcrs/X) and a separate (unpolarised) leakage
    calibrator for the D terms.

    When `freqsel` is set, the X and D solutions are first anchored on that
    RFI-free subband (Xref/Dref) before solving over the full band.
    Existing tables are reused when `reuse_existing_tables` is set and all
    expected tables are present; the resulting calibration library is saved
    and optionally applied to the fields in `applyto`.
    """
    field = ",".join(getattr(pipeline, config["pol_calib"])[i])
    leak_field = ",".join(getattr(pipeline, config["leakage_calib"])[i])
    freqsel = config.get("freqsel")
    gain_solint = config.get("gain_solint")
    time_solint = config.get("time_solint")
    # Table chain applied downstream; parallel lists of interpolation mode,
    # gainfield mapping, calwt and apply-field per table.
    gaintables = [prefix + '.Gpol1', prefix + '.Kcrs', prefix + '.Xf', prefix + '.Df']
    interps = ['linear', 'nearest', 'nearest', 'nearest']
    fields = [field, '', '', '']
    calwts = [True, False, False, False]
    applyfields = [field, '', '', '']
    gfields = [field, field, field, leak_field]
    terms = ['G', 'KCROSS', 'Xf', 'Df']
    if freqsel != '':
        # With a reference subband, extra Xref/Dref tables are produced.
        gaintables = [prefix + '.Gpol1', prefix + '.Kcrs', prefix + '.Xref', prefix + '.Xf', prefix + '.Dref',
                      prefix + '.Df']
        interps = ['linear', 'nearest', 'nearest', 'nearest', 'nearest', 'nearest']
        fields = [field, '', '', '', '', '']
        calwts = [True, False, False, False, False, False]
        applyfields = [field, '', '', '', '', '']
        gfields = [field, field, field, field, leak_field, leak_field]
        terms = ['G', 'KCROSS', 'Xref', 'Xf', 'Dref', 'Df']
    docal = config['reuse_existing_tables']
    if docal:
        # Only reuse if every expected table is actually on disk.
        for cal in gaintables:
            if not os.path.exists(os.path.join(pipeline.caltables, cal)):
                caracal.log.info("No polcal table found in %s" % str(os.path.join(pipeline.caltables, cal)))
                docal = False
    if not docal:
        if pipeline.enable_task(config, 'set_model_leakage'):
            if config['set_model_leakage']['no_verify']:
                opts = {
                    "vis": msname,
                    "field": leak_field,
                    "scalebychan": True,
                    "usescratch": True,
                }
            else:
                modelsky = utils.find_in_native_calibrators(msinfo, leak_field, mode='sky')
                modelpoint = utils.find_in_native_calibrators(msinfo, leak_field, mode='mod')
                standard = utils.find_in_casa_calibrators(msinfo, leak_field)
                if config['set_model_leakage']['meerkat_skymodel'] and modelsky:
                    # use local sky model of calibrator field if exists
                    opts = {
                        "skymodel": modelsky,
                        "msname": msname,
                        "field-id": utils.get_field_id(msinfo, leak_field)[0],
                        "threads": config["set_model_leakage"]['threads'],
                        "mode": "simulate",
                        "tile-size": config["set_model_leakage"]["tile_size"],
                        "column": "MODEL_DATA",
                    }
                elif modelpoint:  # spectral model if specified in our standard
                    opts = {
                        "vis": msname,
                        "field": leak_field,
                        "standard": "manual",
                        "fluxdensity": modelpoint['I'],
                        "reffreq": '{0:f}GHz'.format(modelpoint['ref'] / 1e9),
                        "spix": [modelpoint[a] for a in 'abcd'],
                        "scalebychan": True,
                        "usescratch": True,
                    }
                elif standard:  # NRAO model otherwise
                    opts = {
                        "vis": msname,
                        "field": leak_field,
                        "standard": standard,
                        "usescratch": True,
                        "scalebychan": True,
                    }
                else:
                    raise RuntimeError('The flux calibrator field "{}" could not be '
                                       'found in our database or in the CASA NRAO database'.format(leak_field))
            step = 'set_model_cal-{0:d}'.format(i)
            cabtouse = 'cab/casa_setjy'
            # A sky model requires the simulator cab instead of setjy.
            recipe.add(cabtouse if "skymodel" not in opts else 'cab/simulator', step,
                       opts,
                       input=pipeline.input,
                       output=pipeline.output,
                       label='{0:s}:: Set jansky ms={1:s}'.format(step, msname))
        # Inject the full-Stokes model of the polarised crosshand calibrator.
        recipe.add("cab/casa_setjy", "set_model_%d" % 0,
                   {
                       "msname": msname,
                       "usescratch": True,
                       "field": field,
                       "standard": polarized_calibrators[field]["standard"],
                       "fluxdensity": polarized_calibrators[field]["fluxdensity"],
                       "spix": polarized_calibrators[field]["spix"],
                       "reffreq": polarized_calibrators[field]["reffreq"],
                       "polindex": polarized_calibrators[field]["polindex"],
                       "polangle": polarized_calibrators[field]["polangle"],
                       "rotmeas": polarized_calibrators[field]["rotmeas"],
                   },
                   input=pipeline.input, output=pipeline.output,
                   label="set_model_%d" % 0)
        gain_opts = {
            "vis": msname,
            "caltable": prefix + '.Gpol1:output',
            "field": field,
            "uvrange": config["uvrange"],
            "refant": ref,
            "solint": gain_solint,
            "combine": "",
            "parang": True,
            "gaintype": "G",
            "calmode": "p",
            "spw": '',
        }
        if caltablelist:
            gain_opts.update({
                "gaintable": ["%s:output" % ct for ct in caltablelist],
                "gainfield": gainfieldlist,
                "interp": interplist,
            })
        # Phaseup diagonal of crosshand cal if available
        recipe.add("cab/casa_gaincal", "gain_xcal",
                   gain_opts,
                   input=pipeline.input, output=pipeline.caltables,
                   label="gain_xcal")
        tmp_gtab = caltablelist + [prefix + '.Gpol1']
        tmp_field = gainfieldlist + ['']
        tmp_interp = interplist + ['linear']
        recipe.add("cab/casa_gaincal", "crosshand_delay",
                   {
                       "vis": msname,
                       "caltable": prefix + '.Kcrs:output',
                       "field": field,
                       "uvrange": config["uvrange"],
                       "refant": ref,
                       "solint": time_solint,
                       "combine": "",
                       "parang": True,
                       "gaintype": "KCROSS",
                       "spw": '',
                       "gaintable": ["%s:output" % ct for ct in tmp_gtab],
                       "gainfield": tmp_field,
                       "interp": tmp_interp,
                   },
                   input=pipeline.input, output=pipeline.caltables,
                   label="crosshand_delay")
        # Solve for the absolute angle (phase) between the feeds
        # Solve first in a subband free of RFIs and then over the whole bw
        tmp_gtab = caltablelist + [prefix + '.Gpol1', prefix + '.Kcrs']
        tmp_field = gainfieldlist + ['', '']
        tmp_interp = interplist + ['linear', 'nearest']
        if freqsel != '':
            recipe.add("cab/casa_polcal", "crosshand_phase_ref",
                       {
                           "vis": msname,
                           "caltable": prefix + '.Xref:output',
                           "field": field,
                           "uvrange": config["uvrange"],
                           "solint": time_solint,
                           "combine": "",
                           "poltype": "Xf",
                           "refant": ref,
                           "spw": freqsel,
                           "gaintable": ["%s:output" % ct for ct in tmp_gtab],
                           "gainfield": tmp_field,
                           "interp": tmp_interp,
                       },
                       input=pipeline.input, output=pipeline.caltables,
                       label="crosshand_phase_ref")
            tmp_gtab = caltablelist + [prefix + '.Gpol1', prefix + '.Kcrs', prefix + '.Xref']
            tmp_field = gainfieldlist + ['', '', '']
            tmp_interp = interplist + ['linear', 'nearest', 'nearest']
        recipe.add("cab/casa_polcal", "crosshand_phase_freq",
                   {
                       "vis": msname,
                       "caltable": prefix + '.Xf:output',
                       "field": field,
                       "uvrange": config["uvrange"],
                       "solint": time_solint,
                       "combine": "scan",
                       "poltype": "Xf",
                       "refant": ref,
                       "gaintable": ["%s:output" % ct for ct in tmp_gtab],
                       "gainfield": tmp_field,
                       "interp": tmp_interp,
                   },
                   input=pipeline.input, output=pipeline.caltables,
                   label="crosshand_phase_freq")
        # Smooth the solution
        recipe.add("cab/casa_flagdata", "flag_phase",
                   {
                       "vis": prefix + '.Xf:msfile',
                       "mode": 'tfcrop',
                       "ntime": '60s',
                       "combinescans": True,
                       "datacolumn": 'CPARAM',
                       "usewindowstats": "both",
                       "flagbackup": False,
                   },
                   input=pipeline.input, output=pipeline.caltables, msdir=pipeline.caltables,
                   label="flag_phase_freq")
        # Solve for leakages (off-diagonal terms) using the unpolarized source
        # - first remove the DC of the frequency response and combine scans
        #   if necessary to achieve desired SNR
        tmp_gtab = leak_caltablelist + [prefix + '.Kcrs', prefix + '.Xf']
        tmp_field = leak_gainfieldlist + ['', '']
        tmp_interp = leak_interplist + ['nearest', 'nearest']
        if freqsel != '':
            tmp_gtab = leak_caltablelist + [prefix + '.Kcrs', prefix + '.Xref', prefix + '.Xf']
            tmp_field = leak_gainfieldlist + ['', '', '']
            tmp_interp = leak_interplist + ['nearest', 'nearest', 'nearest']
            recipe.add("cab/casa_polcal", "leakage_ref",
                       {
                           "vis": msname,
                           "caltable": prefix + '.Dref:output',
                           "field": leak_field,
                           "uvrange": config["uvrange"],
                           "solint": time_solint,
                           "combine": "",
                           "poltype": "D",
                           "refant": ref,
                           "spw": freqsel,
                           "gaintable": ["%s:output" % ct for ct in tmp_gtab],
                           "gainfield": tmp_field,
                           "interp": tmp_interp,
                       },
                       input=pipeline.input, output=pipeline.caltables,
                       label="leakage_ref")
            tmp_gtab = leak_caltablelist + [prefix + '.Kcrs', prefix + '.Xref', prefix + '.Xf', prefix + '.Dref']
            tmp_field = leak_gainfieldlist + ['', '', '', '']
            tmp_interp = leak_interplist + ['nearest', 'nearest', 'nearest', 'nearest']
        recipe.add("cab/casa_polcal", "leakage_freq",
                   {
                       "vis": msname,
                       "caltable": prefix + '.Df:output',
                       "field": leak_field,
                       "uvrange": config["uvrange"],
                       "solint": time_solint,
                       "combine": "scan",
                       "poltype": "Df",
                       "refant": ref,
                       "gaintable": ["%s:output" % ct for ct in tmp_gtab],
                       "gainfield": tmp_field,
                       "interp": tmp_interp,
                   },
                   input=pipeline.input, output=pipeline.caltables,
                   label="leakage_freq")
        if config['plotgains']:
            # Plot the leakage solutions before clipping, keeping the
            # pre-flag plots under a *_before_flag name.
            plotdir = os.path.join(pipeline.diagnostic_plots, "polcal")
            if not os.path.exists(plotdir):
                os.mkdir(plotdir)
            plotgains(recipe, pipeline, plotdir, leak_field, prefix + '.Df', i, 'Df')
            recipe.run()
            recipe.jobs = []
            if os.path.exists(os.path.join(plotdir, prefix + '.Df.html')):
                os.rename(os.path.join(plotdir, prefix + '.Df.html'),
                          os.path.join(plotdir, prefix + '.Df_before_flag.html'))
            if os.path.exists(os.path.join(plotdir, prefix + '.Df.png')):
                os.rename(os.path.join(plotdir, prefix + '.Df.png'),
                          os.path.join(plotdir, prefix + '.Df_before_flag.png'))
        # Clip solutions
        recipe.add("cab/casa_flagdata", "flag_leakage",
                   {
                       "vis": prefix + '.Df:msfile',
                       "mode": 'clip',
                       "clipminmax": [-0.6, 0.6],
                       "datacolumn": 'CPARAM',
                       "flagbackup": False,
                   },
                   input=pipeline.input, output=pipeline.caltables, msdir=pipeline.caltables,
                   label="flag_leakage")
        recipe.run()
        recipe.jobs = []
    else:
        caracal.log.info("Reusing existing tables as requested")
    # Persist the applycal recipe library for this prefix.
    applycal_recipes = callibs.new_callib()
    for _gt, _fldmap, _interp, _calwt, _field in zip(gaintables, fields, interps, calwts, applyfields):
        callibs.add_callib_recipe(applycal_recipes, _gt, _interp, _fldmap, calwt=_calwt, field=_field)
    pipeline.save_callib(applycal_recipes, prefix)
    if config['plotgains']:
        plotdir = os.path.join(pipeline.diagnostic_plots, "polcal")
        if not os.path.exists(plotdir):
            os.mkdir(plotdir)
        for ix, gt in enumerate(gfields):
            plotgains(recipe, pipeline, plotdir, gfields[ix], gaintables[ix], i, terms[ix])
    if config['apply_pcal']:
        # Apply the OTF calibration library plus the new polcal tables to
        # each configured field set.
        for ff in config["applyto"]:
            fld = ",".join(getattr(pipeline, ff)[i])
            _, (caltablelist, gainfieldlist, interplist, calwtlist, applylist) = \
                callibs.resolve_calibration_library(pipeline, prefix_msbase,
                                                    config['otfcal']['callib'],
                                                    config['otfcal']['label_cal'], [fld],
                                                    default_interpolation_types=config['otfcal']['interpolation'])
            _, (pcaltablelist, pgainfieldlist, pinterplist, pcalwtlist, papplylist) = \
                callibs.resolve_calibration_library(pipeline, prefix_msbase,
                                                    '',
                                                    config['label_cal'], [fld])
            pcal = caltablelist + pcaltablelist
            pgain = gainfieldlist + pgainfieldlist
            pinter = interplist + pinterplist
            pcalwt = calwtlist + pcalwtlist
            recipe.add("cab/casa_applycal", "apply_caltables_" + str(ff),
                       {
                           "vis": msname,
                           "field": fld,
                           "calwt": pcalwt,
                           "gaintable": ["%s:output" % ct for ct in pcal],
                           "gainfield": pgain,
                           "interp": pinter,
                           "parang": True,
                       },
                       input=pipeline.input, output=pipeline.caltables,
                       label="Apply_caltables_" + str(ff))
def xcal_model_xcal_leak(msname, msinfo, prefix_msbase, recipe, config, pipeline, i, prefix, ref, polarized_calibrators, caltablelist,
                         gainfieldlist, interplist, calwtlist, applylist):
    """Calibrate crosshand phase/delay AND leakage against one known polarized calibrator.

    Solution sequence (CASA tasks queued on *recipe*):
      .Gpol1  - parallactic-angle-aware gain on the pol calibrator
      .Kcrs   - crosshand delay
      .Xref   - crosshand-phase reference over config['freqsel'] (only when freqsel is set)
      .Xf     - frequency-resolved crosshand phase (tfcrop-flagged afterwards)
      .Df0gen - Dflls leakage, solved against the analytic Stokes model S

    If config['reuse_existing_tables'] is set and every expected table exists on
    disk, the solve is skipped and the tables are only re-registered in the
    calibration library.  When config['apply_pcal'] is on, the combined on-the-fly
    plus polcal tables are applied to each field set in config['applyto'].
    """
    field = ",".join(getattr(pipeline, config["pol_calib"])[i])
    # Scan length is used as the pre-averaging interval of the leakage solve.
    scandur = scan_length(msinfo, field)
    freqsel = config.get("freqsel")
    gain_solint = config.get("gain_solint")
    time_solint = config.get("time_solint")
    # Tables produced below and how they will later be applied (interpolation,
    # calwt and gainfield mapping, one entry per table).
    gaintables = [prefix + '.Gpol1', prefix + '.Kcrs', prefix + '.Xf', prefix + '.Df0gen']
    interps = ['linear', 'nearest', 'nearest', 'nearest']
    fields = [field, '', '', '']
    calwts = [True, False, False, False]
    applyfields = [field, '', '', '']
    gfields = [field, field, field, field]
    terms = ['G', 'KCROSS', 'Xf', 'Df0gen']
    if freqsel != '':
        # With a channel selection an extra crosshand-phase reference table
        # (.Xref) is solved before the frequency-resolved .Xf.
        gaintables = [prefix + '.Gpol1', prefix + '.Kcrs', prefix + '.Xref', prefix + '.Xf', prefix + '.Df0gen']
        interps = ['linear', 'nearest', 'nearest', 'nearest', 'nearest']
        fields = [field, '', '', '', '']
        calwts = [True, False, False, False, False]
        applyfields = [field, '', '', '', '']
        gfields = [field, field, field, field, field]
        terms = ['G', 'KCROSS', 'Xref', 'Xf', 'Df0gen']
    # Reuse existing tables only if ALL of them are present on disk.
    docal = config['reuse_existing_tables']
    if docal:
        for cal in gaintables:
            if not os.path.exists(os.path.join(pipeline.caltables, cal)):
                caracal.log.info("No polcal table found in %s" % str(os.path.join(pipeline.caltables, cal)))
                docal = False
    if not docal:
        # Evaluate the calibrator Stokes model [I, Q, U, V] at the band centre
        # from the spectral-index polynomial and the polarization properties.
        msdict = pipeline.get_msinfo(msname)
        chfr = msdict['SPW']['CHAN_FREQ']
        firstchanfreq = [ss[0] for ss in chfr]
        lastchanfreq = [ss[-1] for ss in chfr]
        meanchanfreq = (firstchanfreq[0] + lastchanfreq[0]) / 2.0 / 1.e9
        # reffreq is e.g. "1.47GHz" -> strip the trailing unit before converting.
        normfreq = (meanchanfreq / float(polarized_calibrators[field]["reffreq"][:-3]))
        spix = (polarized_calibrators[field]["spix"])
        index = 0
        polindex = (polarized_calibrators[field]["polindex"][0])
        polangle = (polarized_calibrators[field]["polangle"][0])
        for n in range(0, len(spix)):
            index += numpy.sum(spix[n] * pow(numpy.log(normfreq), n))
        c = numpy.sqrt(pow(numpy.tan(2 * polangle), 2) + 1)
        istokes = (polarized_calibrators[field]["fluxdensity"][0]) * pow(normfreq, index)
        qstokes = polindex * istokes / c
        ustokes = polindex * istokes * numpy.tan(2 * polangle) / c
        vstokes = 0
        S = [istokes, qstokes, ustokes, vstokes]
        # NOTE(review): the step index is hard-coded to 0 here, unlike the
        # per-MS indexing used elsewhere — confirm this is intended.
        recipe.add("cab/casa_setjy", "set_model_%d" % 0,
                   {
                       "msname": msname,
                       "usescratch": True,
                       "field": field,
                       "standard": polarized_calibrators[field]["standard"],
                       "fluxdensity": polarized_calibrators[field]["fluxdensity"],
                       "spix": spix,
                       "reffreq": polarized_calibrators[field]["reffreq"],
                       "polindex": polindex,
                       "polangle": polangle,
                   },
                   input=pipeline.input, output=pipeline.output,
                   label="set_model_%d" % 0)
        # Parang-aware amplitude+phase gain on the pol calibrator.
        gain_opts = {
            "vis": msname,
            "caltable": prefix + '.Gpol1:output',
            "field": field,
            "uvrange": config["uvrange"],
            "refant": ref,
            "solint": gain_solint,
            "combine": "",
            "parang": True,
            "gaintype": "G",
            "calmode": "ap",
            "spw": '',
        }
        if caltablelist:
            # Pre-apply the on-the-fly calibration tables, if any.
            gain_opts.update({
                "gaintable": ["%s:output" % ct for ct in caltablelist],
                "gainfield": gainfieldlist,
                "interp": interplist,
            })
        recipe.add("cab/casa_gaincal", "gain_xcal",
                   gain_opts,
                   input=pipeline.input, output=pipeline.caltables,
                   label="gain_xcal")
        # Crosshand delay, pre-applying the gain just solved.
        tmp_gtab = caltablelist + [prefix + '.Gpol1']
        tmp_field = gainfieldlist + ['']
        tmp_interp = interplist + ['linear']
        recipe.add("cab/casa_gaincal", "crosshand_delay",
                   {
                       "vis": msname,
                       "caltable": prefix + '.Kcrs:output',
                       "field": field,
                       "uvrange": config["uvrange"],
                       "refant": ref,
                       "solint": time_solint,
                       "combine": "",
                       "parang": True,
                       "gaintype": "KCROSS",
                       "spw": '',
                       "gaintable": ["%s:output" % ct for ct in tmp_gtab],
                       "gainfield": tmp_field,
                       "interp": tmp_interp,
                   },
                   input=pipeline.input, output=pipeline.caltables,
                   label="crosshand_delay")
        tmp_gtab = caltablelist + [prefix + '.Gpol1', prefix + '.Kcrs']
        tmp_field = gainfieldlist + ['', '']
        tmp_interp = interplist + ['linear', 'nearest']
        if freqsel != '':
            # Crosshand-phase reference restricted to the selected channels.
            recipe.add("cab/casa_polcal", "crosshand_phase_ref",
                       {
                           "vis": msname,
                           "caltable": prefix + '.Xref:output',
                           "field": field,
                           "uvrange": config["uvrange"],
                           "solint": time_solint,
                           "combine": "",
                           "poltype": "Xf",
                           "refant": ref,
                           "spw": freqsel,
                           "gaintable": ["%s:output" % ct for ct in tmp_gtab],
                           "gainfield": tmp_field,
                           "interp": tmp_interp,
                       },
                       input=pipeline.input, output=pipeline.caltables,
                       label="crosshand_phase_ref")
            # .Xref is pre-applied to the frequency-resolved solve below.
            tmp_gtab = caltablelist + [prefix + '.Gpol1', prefix + '.Kcrs', prefix + '.Xref']
            tmp_field = gainfieldlist + ['', '', '']
            tmp_interp = interplist + ['linear', 'nearest', 'nearest']
        # Frequency-resolved crosshand phase over the full band.
        recipe.add("cab/casa_polcal", "crosshand_phase_freq",
                   {
                       "vis": msname,
                       "caltable": prefix + '.Xf:output',
                       "field": field,
                       "uvrange": config["uvrange"],
                       "solint": time_solint,
                       "combine": "scan",
                       "poltype": "Xf",
                       "refant": ref,
                       "gaintable": ["%s:output" % ct for ct in tmp_gtab],
                       "gainfield": tmp_field,
                       "interp": tmp_interp,
                   },
                   input=pipeline.input, output=pipeline.caltables,
                   label="crosshand_phase_freq")
        # Automatic flagging of outlier crosshand-phase solutions.
        # NOTE(review): step name is "flag_phase" but the label is
        # "flag_phase_freq" — confirm the mismatch is intentional.
        recipe.add("cab/casa_flagdata", "flag_phase",
                   {
                       "vis": prefix + '.Xf:msfile',
                       "mode": 'tfcrop',
                       "ntime": '60s',
                       "combinescans": True,
                       "datacolumn": 'CPARAM',
                       "usewindowstats": "both",
                       "flagbackup": False,
                   },
                   input=pipeline.input, output=pipeline.caltables, msdir=pipeline.caltables,
                   label="flag_phase_freq")
        tmp_gtab = caltablelist + [prefix + '.Gpol1', prefix + '.Kcrs', prefix + '.Xf']
        tmp_field = gainfieldlist + ['', '', '']
        tmp_interp = interplist + ['linear', 'nearest', 'nearest']
        if freqsel != '':
            tmp_gtab = caltablelist + [prefix + '.Gpol1', prefix + '.Kcrs', prefix + '.Xref', prefix + '.Xf']
            tmp_field = gainfieldlist + ['', '', '', '']
            tmp_interp = interplist + ['linear', 'nearest', 'nearest', 'nearest']
        # Dflls leakage against the analytic Stokes model S; combined over
        # observations and scans to sample parallactic-angle coverage.
        recipe.add("cab/casa_polcal", "leakage",
                   {
                       "vis": msname,
                       "caltable": prefix + '.Df0gen:output',
                       "field": field,
                       "uvrange": config["uvrange"],
                       "solint": time_solint,
                       "spw": '',
                       "combine": 'obs,scan',
                       "preavg": scandur,
                       "poltype": 'Dflls',
                       "refant": '',
                       "smodel": S,
                       "gaintable": ["%s:output" % ct for ct in tmp_gtab],
                       "gainfield": tmp_field,
                       "interp": tmp_interp,
                   },
                   input=pipeline.input, output=pipeline.caltables,
                   label="leakage")
        if config['plotgains']:
            plotdir = os.path.join(pipeline.diagnostic_plots, "polcal")
            if not os.path.exists(plotdir):
                os.mkdir(plotdir)
            plotgains(recipe, pipeline, plotdir, field, prefix + '.Df0gen', i, 'Df0gen')
            # Run now so the pre-flag plots exist before being renamed below.
            recipe.run()
            recipe.jobs = []
            if os.path.exists(os.path.join(plotdir, prefix + '.Df0gen.html')):
                os.rename(os.path.join(plotdir, prefix + '.Df0gen.html'),
                          os.path.join(plotdir, prefix + '.Df0gen_before_flag.html'))
            if os.path.exists(os.path.join(plotdir, prefix + '.Df0gen.png')):
                os.rename(os.path.join(plotdir, prefix + '.Df0gen.png'),
                          os.path.join(plotdir, prefix + '.Df0gen_before_flag.png'))
            # Clip solutions
            # NOTE(review): the leakage-solution clipping sits in the plotgains
            # branch — confirm it should not also run when plotting is disabled.
            recipe.add("cab/casa_flagdata", "flag_leakage",
                       {
                           "vis": prefix + '.Df0gen:msfile',
                           "mode": 'clip',
                           "clipminmax": [-0.9, 0.9],
                           "datacolumn": 'CPARAM',
                           "flagbackup": False,
                       },
                       input=pipeline.input, output=pipeline.caltables, msdir=pipeline.caltables,
                       label="flag_leakage")
    else:
        caracal.log.info("Reusing existing tables as requested")
    # Register the tables (fresh or reused) in a new calibration library.
    applycal_recipes = callibs.new_callib()
    for _gt, _fldmap, _interp, _calwt, _field in zip(gaintables, fields, interps, calwts, applyfields):
        callibs.add_callib_recipe(applycal_recipes, _gt, _interp, _fldmap, calwt=_calwt, field=_field)
    pipeline.save_callib(applycal_recipes, prefix)
    if config['plotgains']:
        plotdir = os.path.join(pipeline.diagnostic_plots, "polcal")
        if not os.path.exists(plotdir):
            os.mkdir(plotdir)
        for ix, gt in enumerate(gfields):
            plotgains(recipe, pipeline, plotdir, gfields[ix], gaintables[ix], i, terms[ix])
    if config['apply_pcal']:
        # Apply the combined OTF + polcal tables to each requested field set.
        for ff in config["applyto"]:
            fld = ",".join(getattr(pipeline, ff)[i])
            _, (caltablelist, gainfieldlist, interplist, calwtlist, applylist) = \
                callibs.resolve_calibration_library(pipeline, prefix_msbase,
                                                    config['otfcal']['callib'],
                                                    config['otfcal']['label_cal'], [fld],
                                                    default_interpolation_types=config['otfcal']['interpolation'])
            _, (pcaltablelist, pgainfieldlist, pinterplist, pcalwtlist, papplylist) = \
                callibs.resolve_calibration_library(pipeline, prefix_msbase,
                                                    '',
                                                    config['label_cal'], [fld])
            pcal = caltablelist + pcaltablelist
            pgain = gainfieldlist + pgainfieldlist
            pinter = interplist + pinterplist
            pcalwt = calwtlist + pcalwtlist
            recipe.add("cab/casa_applycal", "apply_caltables_" + str(ff),
                       {
                           "vis": msname,
                           "field": fld,
                           "calwt": pcalwt,
                           "gaintable": ["%s:output" % ct for ct in pcal],
                           "gainfield": pgain,
                           "interp": pinter,
                           "parang": True,
                       },
                       input=pipeline.input, output=pipeline.caltables,
                       label="Apply_caltables_" + str(ff))
def xcal_from_pa_xcal_leak(msname, msinfo, prefix_msbase, recipe, config, pipeline, i, prefix, ref, caltablelist, gainfieldlist, interplist,
                           calwtlist, applylist):
    """Calibrate crosshand phase/delay and leakage with a polarized calibrator
    of UNKNOWN model, deriving the model from parallactic-angle coverage.

    Strategy:
      1. Solve a parang-free gain (.Gpol1) and fit Q,U from its X/Y gain ratio
         versus parallactic angle (polfromgain -> S1).
      2. Solve the crosshand delay (.Kcrs) on the scan with the strongest
         expected X-Y signal.
      3. Refine the model with poltype 'Xfparang+QU' (.Xfparang -> S2).
      4. Re-solve the gain with the refined model (.Gpol2), then Dflls
         leakage (.Df0gen) and a normalized X/Y amplitude-ratio gain for the
         target (.Gxyamp).

    Requires at least three scans of the calibrator (checked by the caller).
    Tables are reused wholesale when config['reuse_existing_tables'] is set.
    """
    field = ",".join(getattr(pipeline, config["pol_calib"])[i])
    # Scan length is used as the pre-averaging interval of the polcal solves.
    scandur = scan_length(msinfo, field)
    gain_solint = config.get("gain_solint")
    time_solint = config.get("time_solint")
    # Final table set and how each will be applied.  .Gxyamp is meant for the
    # other fields (fcal/bpcal/gcal/target), the rest for the calibrator.
    gaintables = [prefix + '.Gpol2', prefix + '.Gxyamp', prefix + '.Kcrs', prefix + '.Xfparang', prefix + '.Df0gen']
    interps = ['linear', 'linear', 'nearest', 'nearest', 'nearest']
    fields = ['', '', '', '', '']
    calwts = [True, True, False, False, False]
    applyfields = [field, ",".join(set(pipeline.fcal[i] + pipeline.bpcal[i] + pipeline.gcal[i] + pipeline.target[i])),
                   '', '', '']
    gfields = [field, field, field, field, field]
    terms = ['G', 'G', 'KCROSS', 'Xf', 'Df0gen']
    # Reuse existing tables only if ALL of them are present on disk.
    docal = config['reuse_existing_tables']
    if docal:
        for cal in gaintables:
            if not os.path.exists(os.path.join(pipeline.caltables, cal)):
                caracal.log.info("No polcal table found in %s" % str(os.path.join(pipeline.caltables, cal)))
                docal = False
    if not docal:
        # First-pass gain WITHOUT parang correction, against an unpolarized
        # placeholder model: the pol signal then shows up in the X/Y gains.
        gain_opts = {
            "vis": msname,
            "caltable": prefix + '.Gpol1:output',
            "field": field,
            "uvrange": config["uvrange"],
            "refant": ref,
            "solint": gain_solint,
            "combine": "",
            "parang": False,
            "gaintype": 'G',
            "calmode": 'ap',
            "spw": '',
            "refantmode": 'strict',
            "smodel": ['1', '0', '0', '0'],
        }
        if caltablelist:
            gain_opts.update({
                "gaintable": ["%s:output" % ct for ct in caltablelist],
                "gainfield": gainfieldlist,
                "interp": interplist,
            })
        recipe.add("cab/casa_gaincal", "gain_xcal_1",
                   gain_opts,
                   input=pipeline.input, output=pipeline.caltables,
                   label="gain_xcal_1")
        # Remove stale output of a previous run before polfromgain writes it.
        shutil.rmtree(os.path.join(pipeline.caltables, prefix + '.Gpol1a'), ignore_errors=True)
        # Extrapolate QU by fitting the gain at different PAs, save results in prefix + '_S1_from_QUfit:output'
        recipe.add("cab/casa_polfromgain",
                   "QU_from_gain",
                   {
                       "vis": msname,
                       "tablein": prefix + '.Gpol1:output',
                       "caltable": prefix + '.Gpol1a:output',
                       "save_result": prefix + '_S1_from_QUfit:output',
                   },
                   input=pipeline.input, output=pipeline.caltables,
                   label="QU_from_gain")
        recipe.run()
        recipe.jobs = []
        # We search for the scan where the polarization signal is minimum in XX and YY
        # (i.e., maximum in XY and YX):
        with tb(os.path.join(pipeline.caltables, prefix + '.Gpol1')) as t:
            scans = t.getcol('SCAN_NUMBER')
            gains = numpy.squeeze(t.getcol('CPARAM'))
            # NOTE(review): explicit close inside a `with` block is redundant
            # (the context manager closes the table on exit) — confirm harmless.
            t.close()
        scanlist = numpy.array(list(set(scans)))
        ratios = numpy.zeros(len(scanlist))
        for si, s in enumerate(scanlist):
            filt = scans == s
            # RMS deviation of |gX|/|gY| from 1 within the scan; smallest
            # deviation = pol signal rotated into the crosshands.
            ratio = numpy.sqrt(
                numpy.average(numpy.power(numpy.abs(gains[filt, 0]) / numpy.abs(gains[filt, 1]) - 1.0, 2.)))
            ratios[si] = ratio
        bestscidx = numpy.argmin(ratios)
        bestscan = scanlist[bestscidx]
        caracal.log.info('Scan with highest expected X-Y signal: ' + str(bestscan))
        recipe.run()
        recipe.jobs = []
        # Kcross
        tmp_gtab = caltablelist + [prefix + '.Gpol1']
        tmp_field = gainfieldlist + ['']
        tmp_interp = interplist + ['linear']
        recipe.add("cab/casa_gaincal", "crosshand_delay",
                   {
                       "vis": msname,
                       "caltable": prefix + '.Kcrs:output',
                       "field": field,
                       "uvrange": config["uvrange"],
                       "refant": ref,
                       "refantmode": 'strict',
                       "solint": time_solint,
                       "scan": str(bestscan),
                       "gaintype": 'KCROSS',
                       "smodel": ['1', '0', '1', '0'],
                       "selectdata": True,
                       "spw": '',
                       "gaintable": ["%s:output" % ct for ct in tmp_gtab],
                       "gainfield": tmp_field,
                       "interp": tmp_interp,
                   },
                   input=pipeline.input, output=pipeline.caltables,
                   label="crosshand_delay")
        recipe.run()
        recipe.jobs = []
        # Read the smodel=[1,Q,U,0] of xcal from prefix + '_S1_from_QUfit'
        if os.path.isfile(pipeline.output + '/caltables/' + prefix + '_S1_from_QUfit'):
            with open(pipeline.output + '/caltables/' + prefix + '_S1_from_QUfit', 'rb') as stdr:
                S1 = pickle.load(stdr, encoding='latin1')
            S1 = S1[field]['SpwAve']
            caracal.log.info("First [I,Q,U,V] fitted model (with I=1 and Q, U fractional): %s" % S1)
        else:
            raise RuntimeError("Cannot find S1")
        # Calibrate the abs phase and a better smodel for xcal, saved in prefix + '_S2_from_polcal'
        tmp_gtab = caltablelist + [prefix + '.Gpol1', prefix + '.Kcrs']
        tmp_field = gainfieldlist + ['', '']
        tmp_interp = interplist + ['linear', 'nearest']
        recipe.add("cab/casa_polcal", "crosshand_phase_QU_fit",
                   {
                       "vis": msname,
                       "caltable": prefix + '.Xfparang:output',
                       "field": field,
                       "uvrange": config["uvrange"],
                       "spw": '',
                       "poltype": 'Xfparang+QU',
                       "solint": time_solint,
                       "combine": 'scan,obs',
                       "preavg": scandur,
                       "smodel": S1,
                       "save_result": prefix + '_S2_from_polcal:output',
                       "gaintable": ["%s:output" % ct for ct in tmp_gtab],
                       "gainfield": tmp_field,
                       "interp": tmp_interp,
                   },
                   input=pipeline.input, output=pipeline.caltables,
                   label="crosshand_phase_QU_fit")
        # Smooth the solutions
        recipe.add("cab/casa_flagdata", "flag_phase",
                   {
                       "vis": prefix + '.Xfparang:msfile',
                       "mode": 'tfcrop',
                       "ntime": '60s',
                       "combinescans": True,
                       "datacolumn": 'CPARAM',
                       "usewindowstats": "both",
                       "flagbackup": False,
                   },
                   input=pipeline.input, output=pipeline.caltables, msdir=pipeline.caltables,
                   label="flag_phase_freq")
        recipe.run()
        recipe.jobs = []
        # Read the new xcal smodel
        if os.path.isfile(pipeline.output + '/caltables/' + prefix + '_S2_from_polcal'):
            with open(pipeline.output + '/caltables/' + prefix + '_S2_from_polcal', 'rb') as stdr:
                S2 = pickle.load(stdr, encoding='latin1')
            S2 = S2[field]['SpwAve'].tolist()
            caracal.log.info("Second [I,Q,U,V] fitted model (with I=1 and Q, U fractional): %s" % S2)
        else:
            raise RuntimeError("Cannot find " + pipeline.output + "/caltables/" + prefix + "_S2_from_polcal")
        # Re-calibrate the gain amp and phase of xcal assuming the last smodel
        gain2_opts = {
            "vis": msname,
            "caltable": prefix + '.Gpol2:output',
            "field": field,
            "uvrange": config["uvrange"],
            "refant": ref,
            "solint": gain_solint,
            "combine": "",
            "parang": True,
            "gaintype": 'G',
            "calmode": 'ap',
            "spw": '',
            "refantmode": 'strict',
            "smodel": S2,
        }
        if caltablelist:
            gain2_opts.update({
                "gaintable": ["%s:output" % ct for ct in caltablelist],
                "gainfield": gainfieldlist,
                "interp": interplist,
            })
        recipe.add("cab/casa_gaincal", "gain_xcal_2",
                   gain2_opts,
                   input=pipeline.input, output=pipeline.caltables,
                   label="gain_xcal_2")
        # LEAKAGE
        tmp_gtab = caltablelist + [prefix + '.Gpol2', prefix + '.Kcrs', prefix + '.Xfparang']
        tmp_field = gainfieldlist + ['', '', '']
        tmp_interp = interplist + ['linear', 'nearest', 'nearest']
        recipe.add("cab/casa_polcal", "leakage",
                   {
                       "vis": msname,
                       "caltable": prefix + '.Df0gen:output',
                       "field": field,
                       "uvrange": config["uvrange"],
                       "solint": time_solint,
                       "spw": '',
                       "combine": 'obs,scan',
                       "preavg": scandur,
                       "poltype": 'Dflls',
                       "refant": '',
                       "smodel": S2,
                       "gaintable": ["%s:output" % ct for ct in tmp_gtab],
                       "gainfield": tmp_field,
                       "interp": tmp_interp,
                   },
                   input=pipeline.input, output=pipeline.caltables,
                   label="leakage")
        if config['plotgains']:
            plotdir = os.path.join(pipeline.diagnostic_plots, "polcal")
            if not os.path.exists(plotdir):
                os.mkdir(plotdir)
            plotgains(recipe, pipeline, plotdir, field, prefix + '.Df0gen', i, 'Df0gen')
            # Run now so the pre-flag plots exist before being renamed below.
            recipe.run()
            recipe.jobs = []
            if os.path.exists(os.path.join(plotdir, prefix + '.Df0gen.html')):
                os.rename(os.path.join(plotdir, prefix + '.Df0gen.html'),
                          os.path.join(plotdir, prefix + '.Df0gen_before_flag.html'))
            if os.path.exists(os.path.join(plotdir, prefix + '.Df0gen.png')):
                os.rename(os.path.join(plotdir, prefix + '.Df0gen.png'),
                          os.path.join(plotdir, prefix + '.Df0gen_before_flag.png'))
            # Clip solutions
            # NOTE(review): the leakage-solution clipping sits in the plotgains
            # branch — confirm it should not also run when plotting is disabled.
            recipe.add("cab/casa_flagdata", "flag_leakage",
                       {
                           "vis": prefix + '.Df0gen:msfile',
                           "mode": 'clip',
                           "clipminmax": [-0.9, 0.9],
                           "datacolumn": 'CPARAM',
                           "flagbackup": False,
                       },
                       input=pipeline.input, output=pipeline.caltables, msdir=pipeline.caltables,
                       label="flag_leakage")
        # solve for global normalized gain amp (to get X/Y ratios) on xcal (TO APPLY ON TARGET)
        # amp-only and normalized, so only X/Y amp ratios matter
        tmp_gtab = caltablelist + [prefix + '.Kcrs', prefix + '.Xfparang', prefix + '.Df0gen']
        tmp_field = gainfieldlist + ['', '', '']
        tmp_interp = interplist + ['nearest', 'nearest', 'nearest']
        recipe.add("cab/casa_gaincal", "norm_gain_for_target",
                   {
                       "vis": msname,
                       "caltable": prefix + '.Gxyamp:output',
                       "field": field,
                       "uvrange": config["uvrange"],
                       "refant": ref,
                       "refantmode": 'strict',
                       "solint": 'inf',
                       "combine": 'scan,obs',
                       "gaintype": 'G',
                       "smodel": S2,
                       "calmode": 'a',
                       "gaintable": ["%s:output" % ct for ct in tmp_gtab],
                       "gainfield": tmp_field,
                       "interp": tmp_interp,
                       "solnorm": True,
                       "parang": True,
                   },
                   input=pipeline.input, output=pipeline.caltables,
                   label="norm_gain_for_target")
        recipe.run()
        recipe.jobs = []
    else:
        caracal.log.info("Reusing existing tables as requested")
    # Register the tables (fresh or reused) in a new calibration library.
    applycal_recipes = callibs.new_callib()
    for _gt, _fldmap, _interp, _calwt, _field in zip(gaintables, fields, interps, calwts, applyfields):
        callibs.add_callib_recipe(applycal_recipes, _gt, _interp, _fldmap, calwt=_calwt, field=_field)
    pipeline.save_callib(applycal_recipes, prefix)
    if config['plotgains']:
        plotdir = os.path.join(pipeline.diagnostic_plots, "polcal")
        if not os.path.exists(plotdir):
            os.mkdir(plotdir)
        for ix, gt in enumerate(gfields):
            plotgains(recipe, pipeline, plotdir, gfields[ix], gaintables[ix], i, terms[ix])
    if config['apply_pcal']:
        # Apply the combined OTF + polcal tables to each requested field set.
        for ff in config["applyto"]:
            fld = ",".join(getattr(pipeline, ff)[i])
            _, (caltablelist, gainfieldlist, interplist, calwtlist, applylist) = \
                callibs.resolve_calibration_library(pipeline, prefix_msbase,
                                                    config['otfcal']['callib'],
                                                    config['otfcal']['label_cal'], [fld],
                                                    default_interpolation_types=config['otfcal']['interpolation'])
            _, (pcaltablelist, pgainfieldlist, pinterplist, pcalwtlist, papplylist) = \
                callibs.resolve_calibration_library(pipeline, prefix_msbase,
                                                    '',
                                                    config['label_cal'], [fld])
            pcal = caltablelist + pcaltablelist
            pgain = gainfieldlist + pgainfieldlist
            pinter = interplist + pinterplist
            pcalwt = calwtlist + pcalwtlist
            recipe.add("cab/casa_applycal", "apply_caltables_" + str(ff),
                       {
                           "vis": msname,
                           "field": fld,
                           "calwt": pcalwt,
                           "gaintable": ["%s:output" % ct for ct in pcal],
                           "gainfield": pgain,
                           "interp": pinter,
                           "parang": True,
                       },
                       input=pipeline.input, output=pipeline.caltables,
                       label="Apply_caltables_" + str(ff))
def calib_only_leakage(msname, msinfo, prefix_msbase, recipe, config, pipeline, i,
                       prefix, ref, leak_caltablelist, leak_gainfieldlist, leak_interplist, leak_calwtlist, leak_applylist):
    """Calibrate ONLY the leakage (.Df) against an unpolarized calibrator.

    Optionally sets the calibrator flux model first (local sky model,
    point-source spectral model, or CASA standard, in that preference order),
    then solves a frequency-resolved Df leakage table.  Existing tables are
    reused when config['reuse_existing_tables'] is set.  The table is saved
    into a calibration library and, when config['apply_pcal'] is on, applied
    to the fields in config['applyto'].
    """
    leak_field = ",".join(getattr(pipeline, config["leakage_calib"])[i])
    time_solint = config.get("time_solint")
    # Single leakage table and how it will later be applied.
    gaintables = [prefix + '.Df']
    interps = ['nearest']
    fields = ['']
    calwts = [False]
    applyfields = ['']
    gfields = [leak_field]
    terms = ['Df']
    # Reuse the existing table only if it is present on disk.
    docal = config['reuse_existing_tables']
    if docal:
        for cal in gaintables:
            if not os.path.exists(os.path.join(pipeline.caltables, cal)):
                caracal.log.info("No polcal table found in %s" % str(os.path.join(pipeline.caltables, cal)))
                docal = False
    if not docal:
        if pipeline.enable_task(config, 'set_model_leakage'):
            if config['set_model_leakage']['no_verify']:
                # Let setjy pick the model without verifying against a standard.
                opts = {
                    "vis": msname,
                    "field": leak_field,
                    "scalebychan": True,
                    "usescratch": True,
                }
            else:
                # Preference order: local MeerKAT sky model, point-source
                # spectral model from our database, then the CASA standard.
                modelsky = utils.find_in_native_calibrators(msinfo, leak_field, mode='sky')
                modelpoint = utils.find_in_native_calibrators(msinfo, leak_field, mode='mod')
                standard = utils.find_in_casa_calibrators(msinfo, leak_field)
                if config['set_model_leakage']['meerkat_skymodel'] and modelsky:
                    # use local sky model of calibrator field if exists
                    opts = {
                        "skymodel": modelsky,
                        "msname": msname,
                        "field-id": utils.get_field_id(msinfo, leak_field)[0],
                        "threads": config["set_model_leakage"]['threads'],
                        "mode": "simulate",
                        "tile-size": config["set_model_leakage"]["tile_size"],
                        "column": "MODEL_DATA",
                    }
                elif modelpoint:  # spectral model if specified in our standard
                    opts = {
                        "vis": msname,
                        "field": leak_field,
                        "standard": "manual",
                        "fluxdensity": modelpoint['I'],
                        "reffreq": '{0:f}GHz'.format(modelpoint['ref'] / 1e9),
                        "spix": [modelpoint[a] for a in 'abcd'],
                        "scalebychan": True,
                        "usescratch": True,
                    }
                elif standard:  # NRAO model otherwise
                    opts = {
                        "vis": msname,
                        "field": leak_field,
                        "standard": standard,
                        "usescratch": True,
                        "scalebychan": True,
                    }
                else:
                    raise RuntimeError('The flux calibrator field "{}" could not be '
                                       'found in our database or in the CASA NRAO database'.format(leak_field))
            step = 'set_model_cal-{0:d}'.format(i)
            cabtouse = 'cab/casa_setjy'
            # A sky model requires the simulator cab instead of setjy.
            recipe.add(cabtouse if "skymodel" not in opts else 'cab/simulator', step,
                       opts,
                       input=pipeline.input,
                       output=pipeline.output,
                       label='{0:s}:: Set jansky ms={1:s}'.format(step, msname))
        # Frequency-resolved leakage solve, pre-applying the OTF tables.
        recipe.add("cab/casa_polcal", "leakage_freq",
                   {
                       "vis": msname,
                       "caltable": prefix + '.Df:output',
                       "field": leak_field,
                       "uvrange": config["uvrange"],
                       "solint": time_solint,
                       "combine": "scan",
                       "poltype": "Df",
                       "refant": ref,
                       "gaintable": ["%s:output" % ct for ct in leak_caltablelist],
                       "gainfield": leak_gainfieldlist,
                       "interp": leak_interplist,
                   },
                   input=pipeline.input, output=pipeline.caltables,
                   label="leakage_freq")
        if config['plotgains']:
            plotdir = os.path.join(pipeline.diagnostic_plots, "polcal")
            if not os.path.exists(plotdir):
                os.mkdir(plotdir)
            plotgains(recipe, pipeline, plotdir, leak_field, prefix + '.Df', i, 'Df')
            # Run now so the pre-flag plots exist before being renamed below.
            recipe.run()
            recipe.jobs = []
            if os.path.exists(os.path.join(plotdir, prefix + '.Df.html')):
                os.rename(os.path.join(plotdir, prefix + '.Df.html'),
                          os.path.join(plotdir, prefix + '.Df_before_flag.html'))
            if os.path.exists(os.path.join(plotdir, prefix + '.Df.png')):
                os.rename(os.path.join(plotdir, prefix + '.Df.png'),
                          os.path.join(plotdir, prefix + '.Df_before_flag.png'))
            # Clip solutions
            # NOTE(review): the leakage-solution clipping sits in the plotgains
            # branch — confirm it should not also run when plotting is disabled.
            recipe.add("cab/casa_flagdata", "flag_leakage",
                       {
                           "vis": prefix + '.Df:msfile',
                           "mode": 'clip',
                           "clipminmax": [-0.6, 0.6],
                           "datacolumn": 'CPARAM',
                           "flagbackup": False,
                       },
                       input=pipeline.input, output=pipeline.caltables, msdir=pipeline.caltables,
                       label="flag_leakage")
            recipe.run()
            recipe.jobs = []
    else:
        caracal.log.info("Reusing existing tables as requested")
    # Register the table (fresh or reused) in a new calibration library.
    applycal_recipes = callibs.new_callib()
    for _gt, _fldmap, _interp, _calwt, _field in zip(gaintables, fields, interps, calwts, applyfields):
        callibs.add_callib_recipe(applycal_recipes, _gt, _interp, _fldmap, calwt=_calwt, field=_field)
    pipeline.save_callib(applycal_recipes, prefix)
    if config['plotgains']:
        plotdir = os.path.join(pipeline.diagnostic_plots, "polcal")
        if not os.path.exists(plotdir):
            os.mkdir(plotdir)
        for ix, gt in enumerate(gfields):
            plotgains(recipe, pipeline, plotdir, gfields[ix], gaintables[ix], i, terms[ix])
    if config['apply_pcal']:
        # Apply the combined OTF + polcal tables to each requested field set.
        for ff in config["applyto"]:
            fld = ",".join(getattr(pipeline, ff)[i])
            _, (caltablelist, gainfieldlist, interplist, calwtlist, applylist) = \
                callibs.resolve_calibration_library(pipeline, prefix_msbase,
                                                    config['otfcal']['callib'],
                                                    config['otfcal']['label_cal'], [fld])
            _, (pcaltablelist, pgainfieldlist, pinterplist, pcalwtlist, papplylist) = \
                callibs.resolve_calibration_library(pipeline, prefix_msbase,
                                                    '',
                                                    config['label_cal'], [fld])
            pcal = caltablelist + pcaltablelist
            pgain = gainfieldlist + pgainfieldlist
            pinter = interplist + pinterplist
            pcalwt = calwtlist + pcalwtlist
            recipe.add("cab/casa_applycal", "apply_caltables_" + str(ff),
                       {
                           "vis": msname,
                           "field": fld,
                           "calwt": pcalwt,
                           "gaintable": ["%s:output" % ct for ct in pcal],
                           "gainfield": pgain,
                           "interp": pinter,
                           "parang": True,
                       },
                       input=pipeline.input, output=pipeline.caltables,
                       label="Apply_caltables_" + str(ff))
def plotgains(recipe, pipeline, plotdir, field_id, gtab, i, term):
    """Queue a ragavi-gains plotting step for the calibration table *gtab*.

    Frequency-resolved terms are plotted against channel and the reference
    leakage term against antenna; any other term uses ragavi's default x axis.
    """
    step = "plotgains-%s-%d-%s" % (term, i, gtab)
    # x-axis override per solution term; terms not listed keep the default.
    xaxis_for_term = {
        'Xf': 'channel',
        'Df0gen': 'channel',
        'Dffls': 'channel',
        'Dref': 'antenna1',
    }
    opts = {
        "table": gtab + ":msfile",
        "corr": '',
        "htmlname": gtab,
        "field": field_id,
    }
    if term in xaxis_for_term:
        opts["xaxis"] = xaxis_for_term[term]
    recipe.add('cab/ragavi', step, opts,
               input=pipeline.input, msdir=pipeline.caltables, output=plotdir,
               label='{0:s}:: Plot gaincal phase'.format(step))
def worker(pipeline, recipe, config):
    """Entry point of the polarization-calibration worker.

    For each MS: picks a reference antenna, checks the data have linear feeds,
    resolves the on-the-fly calibration library, optionally rotates the
    RECEPTOR_ANGLE, manages flag versions, optionally extends flags on the
    calibrators, and dispatches to one of three strategies:
      * unpolarized leakage calibrator + known polarized angle calibrator
        -> xcal_model_fcal_leak
      * single polarized calibrator with a known model -> xcal_model_xcal_leak
      * single polarized calibrator with an unknown model (>=3 scans)
        -> xcal_from_pa_xcal_leak
      * leakage-only calibration -> calib_only_leakage

    Fixes relative to the previous revision (behavior-affecting):
      * the auto-refant log message now interpolates the antenna name,
      * the linear-feed check actually fires (the old `set & {...} == 0`
        comparison was always False, since a set never equals 0),
      * feed_angle_rotation == '' no longer crashes in float(''),
      * the second flag-extension step gets a distinct recipe label.
    """
    wname = pipeline.CURRENT_WORKER
    flags_before_worker = '{0:s}_{1:s}_before'.format(pipeline.prefix, wname)
    flags_after_worker = '{0:s}_{1:s}_after'.format(pipeline.prefix, wname)
    label = config["label_cal"]
    label_in = config["label_in"]
    # define pol and unpol calibrators, P&B2017 + updated pol properties from NRAO web site (https://science.nrao.edu/facilities/vla/docs/manuals/obsguide/modes/pol, Table 7.2.7)
    polarized_calibrators = {"3C138": {"standard": "manual",
                                       "fluxdensity": [8.33843],
                                       "spix": [-0.4981, -0.1552, -0.0102, 0.0223],
                                       "reffreq": "1.47GHz",
                                       "polindex": [0.078],
                                       "polangle": [-0.16755],
                                       "rotmeas": 0.0},
                             "3C286": {"standard": "manual",
                                       "fluxdensity": [14.7172],
                                       "spix": [-0.4507, -0.1798, 0.0357],
                                       "reffreq": "1.47GHz",
                                       "polindex": [0.098],
                                       "polangle": [0.575959],
                                       "rotmeas": 0.0},
                             "J1130-1449": {"standard": "manual",
                                            "fluxdensity": [4.940],
                                            "spix": 0,
                                            "reffreq": "1.35GHz",
                                            "polindex": [0.03],
                                            "polangle": [-0.202893],
                                            "rotmeas": 33},
                             }
    # J-name aliases of the two classical angle calibrators.
    polarized_calibrators["J1331+3030"] = polarized_calibrators["3C286"]
    polarized_calibrators["J0521+1638"] = polarized_calibrators["3C138"]
    unpolarized_calibrators = ["PKS1934-63", "J1939-6342", "J1938-6341", "PKS 1934-638", "PKS 1934-63", "PKS1934-638",
                               "PKS0408-65", "J0408-6545", "J0408-6544", "PKS 0408-65", "0407-658", "0408-658", "PKS 0408-658", "0408-65"]
    # loop over all MSs for this label
    for i, (msbase, prefix_msbase) in enumerate(zip(pipeline.msbasenames, pipeline.prefix_msbases)):
        msname = pipeline.form_msname(msbase, label_in)
        msinfo = pipeline.get_msinfo(msname)
        prefix = f"{pipeline.prefix_msbases[i]}-{label}"
        fields = []
        if pipeline.refant[i] in ['auto']:
            refant = manants.get_refant(pipeline, recipe,
                                        prefix, msname, fields,
                                        pipeline.minbase[i],
                                        pipeline.maxdist[i], i)
            if refant:
                # BUGFIX: interpolate the selected antenna into the message
                # (the old f-string had no braces and logged the word "refant").
                caracal.log.info(f"Auto selected ref antenna(s): {refant}")
            else:
                caracal.log.error("Cannot auto-select ref antenna(s). Set it manually.")
        else:
            refant = pipeline.refant[i]
        # Check if feeds are linear.
        # BUGFIX: the previous test compared a set to 0 (`set & {...} == 0`),
        # which is always False, so circular-feed data slipped through.
        if not set(msinfo['CORR']['CORR_TYPE']) & {'XX', 'XY', 'YX', 'YY'}:
            raise RuntimeError(
                "Cannot calibrate polarization! Allowed strategies are for linear feed data but correlation is: " + str(
                    [
                        'XX', 'XY', 'YX', 'YY']))
        if config["pol_calib"] != 'none':
            pol_calib = ",".join(getattr(pipeline, config["pol_calib"])[i])
            if pol_calib == 'J1130-1449':
                caracal.log.info("CARACal knows only bandwidth averaged properties of J1130-1449 based on https://archive-gw-1.kat.ac.za/public/meerkat/MeerKAT-L-band-Polarimetric-Calibration.pdf")
        else:
            pol_calib = 'none'
        leakage_calib = ",".join(getattr(pipeline, config["leakage_calib"])[i])
        # check if cross_callib needs to be applied
        if config['otfcal']:
            if pol_calib != 'none':
                _, (caltablelist, gainfieldlist, interplist, calwtlist, applylist) = \
                    callibs.resolve_calibration_library(pipeline, prefix_msbase,
                                                        config['otfcal']['callib'],
                                                        config['otfcal']['label_cal'], [pol_calib],
                                                        default_interpolation_types=config['otfcal']['interpolation'])
            _, (leak_caltablelist, leak_gainfieldlist, leak_interplist, leak_calwtlist, leak_applylist) = \
                callibs.resolve_calibration_library(pipeline, prefix_msbase,
                                                    config['otfcal']['callib'],
                                                    config['otfcal']['label_cal'], [leakage_calib],
                                                    default_interpolation_types=config['otfcal']['interpolation'])
        else:
            _, (caltablelist, gainfieldlist, interplist, calwtlist, applylist) = \
                None, ([],) * 5
            _, (leak_caltablelist, leak_gainfieldlist, leak_interplist, leak_calwtlist, leak_applylist) = \
                None, ([],) * 5
        # Set -90 deg receptor angle rotation [if we are using MeerKAT data]
        # BUGFIX: the previous check `float(config['feed_angle_rotation']) != ''`
        # was always True and raised ValueError when the setting was '' — now
        # the rotation is applied only when a value is actually configured.
        if config['feed_angle_rotation'] != '':
            with tb("%s::FEED" % os.path.join(pipeline.msdir, msname), readonly=False) as t:
                ang = t.getcol("RECEPTOR_ANGLE")
                ang[:, 0] = numpy.deg2rad(float(config['feed_angle_rotation']))
                ang[:, 1] = numpy.deg2rad(float(config['feed_angle_rotation']))
                t.putcol("RECEPTOR_ANGLE", ang)
            caracal.log.info('RECEPTOR_ANGLE has been rotated by %s degrees' % config['feed_angle_rotation'])
        # save flags before and after
        if {"xcal", "gcal", "fcal", "target"}.intersection(config["applyto"]):
            # Write/rewind flag versions
            available_flagversions = manflags.get_flags(pipeline, msname)
            if config['rewind_flags']['enable']:
                if config['rewind_flags']['mode'] == 'reset_worker':
                    version = flags_before_worker
                    stop_if_missing = False
                elif config['rewind_flags']['mode'] == 'rewind_to_version':
                    version = config['rewind_flags']['version']
                    if version == 'auto':
                        version = flags_before_worker
                    stop_if_missing = True
                if version in available_flagversions:
                    if flags_before_worker in available_flagversions and available_flagversions.index(
                            flags_before_worker) < available_flagversions.index(version) and not config[
                            'overwrite_flagvers']:
                        manflags.conflict('rewind_too_little', pipeline, wname, msname, config, flags_before_worker,
                                          flags_after_worker)
                    substep = 'version-{0:s}-ms{1:d}'.format(version, i)
                    manflags.restore_cflags(pipeline, recipe, version, msname, cab_name=substep)
                    # Drop any versions newer than the one restored.
                    if version != available_flagversions[-1]:
                        substep = 'delete-flag_versions-after-{0:s}-ms{1:d}'.format(version, i)
                        manflags.delete_cflags(pipeline, recipe,
                                               available_flagversions[available_flagversions.index(version) + 1],
                                               msname, cab_name=substep)
                    if version != flags_before_worker:
                        substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
                        manflags.add_cflags(pipeline, recipe, flags_before_worker,
                                            msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
                elif stop_if_missing:
                    manflags.conflict('rewind_to_non_existing', pipeline, wname, msname, config, flags_before_worker,
                                      flags_after_worker)
                else:
                    substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
                    manflags.add_cflags(pipeline, recipe, flags_before_worker,
                                        msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
            else:
                if flags_before_worker in available_flagversions and not config['overwrite_flagvers']:
                    manflags.conflict('would_overwrite_bw', pipeline, wname, msname, config, flags_before_worker,
                                      flags_after_worker)
                else:
                    substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
                    manflags.add_cflags(pipeline, recipe, flags_before_worker,
                                        msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
        # preliminary flags
        if config['extendflags'] and pol_calib != 'none':
            recipe.add("cab/casa_flagdata",
                       "extend_flags_polcal",
                       {
                           "vis": msname,
                           "mode": 'extend',
                           "field": pol_calib,
                           "ntime": '60s',
                           "combinescans": True,
                           "growtime": 80.0,
                           "growfreq": 80.0,
                           "growaround": True,
                           "flagnearfreq": True,
                           "flagneartime": True,
                           "flagbackup": False,
                       },
                       input=pipeline.input, output=pipeline.output,
                       label="extend_flags_polcal")
            if pol_calib != leakage_calib:
                # BUGFIX: give the leakage-calibrator extension its own step
                # name/label instead of duplicating "extend_flags_polcal".
                recipe.add("cab/casa_flagdata",
                           "extend_flags_leakage",
                           {
                               "vis": msname,
                               "mode": 'extend',
                               "field": leakage_calib,
                               "ntime": '60s',
                               "combinescans": True,
                               "growtime": 80.0,
                               "growfreq": 80.0,
                               "growaround": True,
                               "flagnearfreq": True,
                               "flagneartime": True,
                               "flagbackup": False,
                           },
                           input=pipeline.input, output=pipeline.output,
                           label="extend_flags_leakage")
        # choose the strategy according to config parameters
        if leakage_calib in unpolarized_calibrators:
            if pol_calib in polarized_calibrators:
                caracal.log.info(
                    "You decided to calibrate the polarized angle with a polarized calibrator assuming a model for the calibrator and the leakage with an unpolarized calibrator.")
                xcal_model_fcal_leak(msname, msinfo, prefix_msbase, recipe, config, pipeline, i, prefix, refant, polarized_calibrators,
                                     caltablelist, gainfieldlist, interplist, calwtlist, applylist,
                                     leak_caltablelist, leak_gainfieldlist, leak_interplist, leak_calwtlist, leak_applylist)
            elif pol_calib == 'none':
                caracal.log.info(
                    "You decided to calibrate only the leakage with an unpolarized calibrator. This is experimental.")
                calib_only_leakage(msname, msinfo, prefix_msbase, recipe, config, pipeline, i,
                                   prefix, refant, leak_caltablelist, leak_gainfieldlist, leak_interplist, leak_calwtlist, leak_applylist)
            else:
                raise RuntimeError(f"Unable to determine pol_calib={config['pol_calib']}. Is your obsconf section configured properly?"
                                   f"""Your setting of pol_calib={config['pol_calib']} selects {pol_calib}.
                Supported calibrators are {', '.join(polarized_calibrators.keys())}.
                Alternatively, you can calibrate both leakage and polarization using a (known or unknown) polarized source
                observed at several parallactic angles. Configure this source as obsconf:xcal, and leakage_calib=pol_calib=xcal.""")
        elif leakage_calib == pol_calib:
            caracal.log.info(
                "You decided to calibrate the polarized angle and leakage with a polarized calibrator.")
            idx = utils.get_field_id(msinfo, leakage_calib)[0]
            if config['set_model_pol']:
                caracal.log.info("Using a known model for the polarized calibrator.")
                xcal_model_xcal_leak(msname, msinfo, prefix_msbase, recipe, config, pipeline, i,
                                     prefix, refant, polarized_calibrators, caltablelist, gainfieldlist, interplist,
                                     calwtlist, applylist)
            else:
                # The model-free strategy needs parallactic-angle coverage:
                # require at least three scans of the calibrator.
                if len(msinfo['SCAN'][str(idx)]) >= 3:
                    caracal.log.info("The model for the polarized calibrator will be derived from data.")
                    xcal_from_pa_xcal_leak(msname, msinfo, prefix_msbase, recipe, config, pipeline, i,
                                           prefix, refant, caltablelist, gainfieldlist, interplist, calwtlist, applylist)
                else:
                    raise RuntimeError(
                        "Cannot calibrate polarization! Insufficient number of scans for the pol calibrator.")
        else:
            raise RuntimeError(f"""Unable to determine a polarization calibration strategy. Supported strategies are:
            1. Calibrate leakage using an unpolarized source ({', '.join(unpolarized_calibrators)}), and
               polarization angle using a known polarized source ({', '.join(polarized_calibrators.keys())}).
               This is usually achieved by setting leakage_cal=bpcal, pol_cal=xcal.
            2. Calibrate both leakage and polarized angle with a (known or unknown) polarized source observed at
               different parallactic angles. This is usually achieved by setting leakage_cal=xcal, pol_cal=xcal.
               If the polarized source is unknown at least three scans are required.""")
        if pipeline.enable_task(config, 'summary') and pol_calib != 'none':
            step = 'summary-{0:s}-{1:d}'.format(label, i)
            recipe.add('cab/casa_flagdata', step,
                       {
                           "vis": msname,
                           "mode": 'summary',
                           "field": pol_calib,
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='{0:s}:: Flagging summary ms={1:s}'.format(step, msname))
        recipe.run()
        recipe.jobs = []
| 67,720 | 47.47602 | 197 | py |
caracal | caracal-master/caracal/workers/flag_worker.py | # -*- coding: future_fstrings -*-
from caracal.workers.utils import manage_fields as manfields
from caracal.workers.utils import manage_flagsets as manflags
import os
from caracal.dispatch_crew import utils
import stimela.dismissable as sdm
import yaml
import re
import caracal
import sys
import glob
import fnmatch
import numpy as np
# import casacore.tables as tables
# Worker registration metadata: human-readable name and the config-section label.
NAME = 'Flag'
LABEL = 'flag'
def worker(pipeline, recipe, config):
    """Flagging worker: queue and run all enabled flagging steps on every MS.

    For each observation and each MS therein this worker:
      1. optionally rewinds the flag versions (``rewind_flags``),
      2. saves a ``<prefix>_flag_before`` flag version,
      3. adds the enabled flagging cabs to the Stimela recipe
         (unflag, flag_autopowerspec, flag_autocorr, flag_quack,
         flag_elevation, flag_shadow, flag_spw, flag_time, flag_scan,
         flag_antennas, flag_mask, flag_manual, flag_rfi, inspect, summary),
      4. runs the recipe and saves a ``<prefix>_flag_after`` flag version.

    Parameters
    ----------
    pipeline : worker administrator object holding global pipeline state
    recipe : Stimela recipe that cabs are added to and run from
    config : dict-like configuration section for this worker
    """
    label = config['label_in']
    wname = pipeline.CURRENT_WORKER
    # Flag-version names bracketing this worker's changes.
    flags_before_worker = '{0:s}_{1:s}_before'.format(pipeline.prefix, wname)
    flags_after_worker = '{0:s}_{1:s}_after'.format(pipeline.prefix, wname)
    nobs = pipeline.nobs
    # Running index over all MSs of all observations; used to keep step names unique.
    msiter = 0
    for i in range(nobs):
        prefix_msbase = pipeline.prefix_msbases[i]
        # Flag either the target MSs or the calibrator MSs depending on config['field'].
        mslist = pipeline.get_mslist(i, label, target=(config['field'] == "target"))
        target_ls = pipeline.target[i] if config['field'] == "target" else []
        for j, msname in enumerate(mslist):
            msdict = pipeline.get_msinfo(msname)
            prefix = os.path.splitext(msname)[0]
            if not os.path.exists(os.path.join(pipeline.msdir, msname)):
                raise IOError("MS file {0:s} does not exist. Please check that is where it should be.".format(msname))

            # ---- Write/rewind flag versions -------------------------------
            available_flagversions = manflags.get_flags(pipeline, msname)
            if config['rewind_flags']['enable']:
                if config['rewind_flags']['mode'] == 'reset_worker':
                    version = flags_before_worker
                    stop_if_missing = False
                elif config['rewind_flags']['mode'] == 'rewind_to_version':
                    version = config['rewind_flags']['version']
                    if version == 'auto':
                        version = flags_before_worker
                    stop_if_missing = True
                if version in available_flagversions:
                    # Refuse to rewind to a version newer than this worker's own
                    # "before" checkpoint unless overwriting versions is allowed.
                    if flags_before_worker in available_flagversions and available_flagversions.index(flags_before_worker) < available_flagversions.index(version) and not config['overwrite_flagvers']:
                        manflags.conflict('rewind_too_little', pipeline, wname, msname, config, flags_before_worker, flags_after_worker)
                    substep = 'version-{0:s}-ms{1:d}'.format(version, msiter)
                    manflags.restore_cflags(pipeline, recipe, version, msname, cab_name=substep)
                    if version != available_flagversions[-1]:
                        # Drop every flag version newer than the one just restored.
                        substep = 'delete-flag_versions-after-{0:s}-ms{1:d}'.format(version, msiter)
                        manflags.delete_cflags(pipeline, recipe,
                                               available_flagversions[available_flagversions.index(version) + 1],
                                               msname, cab_name=substep)
                    if version != flags_before_worker:
                        substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, msiter)
                        manflags.add_cflags(pipeline, recipe, flags_before_worker,
                                            msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
                elif stop_if_missing:
                    manflags.conflict('rewind_to_non_existing', pipeline, wname, msname, config, flags_before_worker, flags_after_worker)
                else:
                    substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, msiter)
                    manflags.add_cflags(pipeline, recipe, flags_before_worker,
                                        msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
            else:
                if flags_before_worker in available_flagversions and not config['overwrite_flagvers']:
                    manflags.conflict('would_overwrite_bw', pipeline, wname, msname, config, flags_before_worker, flags_after_worker)
                else:
                    substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, msiter)
                    manflags.add_cflags(pipeline, recipe, flags_before_worker,
                                        msname, cab_name=substep, overwrite=config['overwrite_flagvers'])

            # Define fields and field_ids to be used to only flag the fields selected with
            # flagging:field (either 'target' or 'calibrators') and with
            # flagging:calfields (for further selection among the calibrators)
            if config['field'] == 'target':
                fields = [target_ls[j]]
            else:
                fields = []
                fld_string = config['calfields']
                if fld_string == "auto":
                    iter_fields = "gcal bpcal xcal fcal".split()
                else:
                    iter_fields = fld_string.split(",")
                for item in iter_fields:
                    if hasattr(pipeline, item):
                        tfld = getattr(pipeline, item)[i]
                    else:
                        raise ValueError("Field given is invalid. Options are 'xcal bpcal gcal fcal'.")
                    if tfld:
                        fields += tfld
                # De-duplicate: the same field may serve several calibrator roles.
                fields = list(set(fields))
            field_ids = utils.get_field_id(msdict, fields)
            fields = ",".join(fields)

            # ---- unflag: clear all flags on the selected fields -----------
            if pipeline.enable_task(config, 'unflag'):
                step = '{0:s}-unflag-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/casa_flagdata', step,
                           {
                               "vis": msname,
                               "mode": 'unflag',
                               "field": fields,
                               "flagbackup": False,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Unflag ms={1:s}'.format(step, msname))

            # flag antennas automatically based on drifts in the scan average of the
            # auto correlation spectra per field. This doesn't strictly require any calibration. It is also
            # not field structure dependent, since it is just based on the DC of the field
            # Compares scan to median power of scans per field per channel
            # Also compares antenna to median of the array per scan per field per channel
            # This should catch any antenna with severe temperature problems
            if pipeline.enable_task(config, 'flag_autopowerspec'):
                step = '{0:s}-autopowerspec-ms{1:d}'.format(wname, msiter)
                recipe.add("cab/politsiyakat_autocorr_amp", step,
                           {
                               "msname": msname,
                               "field": ",".join([str(id) for id in field_ids]),
                               "cal_field": ",".join([str(id) for id in field_ids]),
                               "scan_to_scan_threshold": config["flag_autopowerspec"]["scan_thr"],
                               "antenna_to_group_threshold": config["flag_autopowerspec"]["ant_group_thr"],
                               "dpi": 300,
                               "plot_size": 6,
                               "nproc_threads": config['flag_autopowerspec']['threads'],
                               "data_column": config['flag_autopowerspec']['col']
                           },
                           input=pipeline.input, output=pipeline.output,
                           label="{0:s}:: Flag out antennas with drifts in autocorrelation powerspectra ms={1:s}".format(step, msname))

            # ---- flag auto-correlations -----------------------------------
            if pipeline.enable_task(config, 'flag_autocorr'):
                step = '{0:s}-autocorr-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/casa_flagdata', step,
                           {
                               "vis": msname,
                               "mode": 'manual',
                               "autocorr": True,
                               "field": fields,
                               "flagbackup": False,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Flag auto-correlations ms={1:s}'.format(step, msname))

            # ---- quack: flag scan edges -----------------------------------
            if pipeline.enable_task(config, 'flag_quack'):
                step = '{0:s}-quack-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/casa_flagdata', step,
                           {
                               "vis": msname,
                               "mode": 'quack',
                               "quackinterval": config['flag_quack']['interval'],
                               "quackmode": config['flag_quack']['mode'],
                               "field": fields,
                               "flagbackup": False,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Quack flagging ms={1:s}'.format(step, msname))

            # ---- elevation range flagging ---------------------------------
            if pipeline.enable_task(config, 'flag_elevation'):
                step = '{0:s}-elevation-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/casa_flagdata', step,
                           {
                               "vis": msname,
                               "mode": 'elevation',
                               "lowerlimit": config['flag_elevation']['low'],
                               "upperlimit": config['flag_elevation']['high'],
                               "field": fields,
                               "flagbackup": False,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Flag elevation ms={1:s}'.format(step, msname))

            # ---- shadowed-antenna flagging --------------------------------
            if pipeline.enable_task(config, 'flag_shadow'):
                if config['flag_shadow']['full_mk64']:
                    # Build a list of the MeerKAT antennas NOT in the current
                    # subarray, so casa_flagdata can account for their shadows.
                    addantennafile = '{0:s}/mk64.txt'.format(pipeline.input)
                    subarray = msdict['ANT']['NAME']
                    idleants = open(addantennafile, 'r').readlines()
                    for aa in subarray:
                        for kk in range(len(idleants)):
                            if aa in idleants[kk]:
                                # Each antenna entry spans 3 consecutive lines in mk64.txt.
                                del (idleants[kk:kk + 3])
                                break
                    addantennafile = 'idleants.txt'
                    with open('{0:s}/{1:s}'.format(pipeline.input, addantennafile), 'w') as ia:
                        for aa in idleants:
                            ia.write(aa)
                    addantennafile += ':input'
                else:
                    addantennafile = None
                step = '{0:s}-shadow-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/casa_flagdata', step,
                           {
                               "vis": msname,
                               "mode": 'shadow',
                               "tolerance": config['flag_shadow']['tol'],
                               "addantenna": addantennafile,
                               "flagbackup": False,
                               "field": fields,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Flag shadowed antennas ms={1:s}'.format(step, msname))

            # ---- channel-range flagging -----------------------------------
            if pipeline.enable_task(config, 'flag_spw'):
                step = '{0:s}-spw-ms{1:d}'.format(wname, msiter)
                flagspwselection = config['flag_spw']['chans']
                firsts = [min(ff) for ff in msdict['SPW']['CHAN_FREQ']]
                lasts = [max(ff) for ff in msdict['SPW']['CHAN_FREQ']]
                nrs = msdict['SPW']['NUM_CHAN']
                nspws = len(nrs)
                found_valid_data = 0
                if config['flag_spw']['ensure_valid']:
                    # Parse the CASA-style selection (e.g. "*:1300~1400MHz" or
                    # "0:100~200") to verify it intersects the data, otherwise
                    # CASA/FLAGDATA would fail with a FATAL error.
                    scalefactor, scalefactor_dict = 1, {
                        'GHz': 1e+9, 'MHz': 1e+6, 'kHz': 1e+3}
                    for ff in flagspwselection.split(','):
                        found_units = False
                        for dd in scalefactor_dict:
                            if dd.lower() in ff.lower():
                                ff, scalefactor = ff.lower().replace(
                                    dd.lower(), ''), scalefactor_dict[dd]
                                found_units = True
                        if 'hz' in ff.lower():
                            ff = ff.lower().replace('hz', '')
                            found_units = True
                        ff = ff.split(':')
                        if len(ff) > 1:
                            spws = ff[0]
                        else:
                            spws = '*'
                        edges = [
                            ii * scalefactor for ii in map(float, ff[-1].split('~'))]
                        if '*' in spws:
                            spws = list(range(nspws))
                        elif '~' in spws:
                            spws = list(
                                range(int(spws.split('~')[0]), int(spws.split('~')[1]) + 1))
                        else:
                            spws = [int(spws), ]
                        edges = [edges for uu in range(len(spws))]
                        # NOTE(review): edges has len(spws) entries but is indexed by the
                        # spw id ss below; for an explicit selection like "2:..." this
                        # looks like it could raise IndexError — confirm.
                        for ss in spws:
                            if found_units and ss < nspws and min(edges[ss][1], lasts[ss]) - max(edges[ss][0], firsts[ss]) > 0:
                                found_valid_data = 1
                            elif not found_units and ss < nspws and edges[ss][0] >= 0 and edges[ss][1] < nrs[ss]:
                                found_valid_data = 1
                if not found_valid_data:
                    caracal.log.warn(
                        'The following channel selection has been made in the flag_spw module of the flagging worker: "{1:s}". This selection would result in no valid data in {0:s}. This would lead to the FATAL error "No valid SPW & Chan combination found" in CASA/FLAGDATA. To avoid this error the corresponding cab {2:s} will not be added to the Stimela recipe of the flagging worker.'.format(msname, flagspwselection, step))
                if found_valid_data or not config['flag_spw']['ensure_valid']:
                    recipe.add('cab/casa_flagdata', step,
                               {
                                   "vis": msname,
                                   "mode": 'manual',
                                   "spw": flagspwselection,
                                   "field": fields,
                                   "flagbackup": False,
                               },
                               input=pipeline.input,
                               output=pipeline.output,
                               label='{0:s}::Flag out channels ms={1:s}'.format(step, msname))

            # ---- time-range flagging --------------------------------------
            if pipeline.enable_task(config, 'flag_time'):
                step = '{0:s}-time-ms{1:d}'.format(wname, msiter)
                found_valid_data = 0
                if config['flag_time']['ensure_valid']:
                    if pipeline.startdate[i]:
                        start_flagrange, end_flagrange = config['flag_time']['timerange'].split('~')
                        # Collapse "d/h:m:s"-style timestamps into sortable numbers.
                        flag_start = float(''.join(re.split('/|:', start_flagrange)))
                        flag_end = float(''.join(re.split('/|:', end_flagrange)))
                        if (flag_start <= pipeline.enddate[i]) and (pipeline.startdate[i] <= flag_end):
                            found_valid_data = 1
                    else:
                        raise ValueError("You wanted to ensure a valid time range but we could not find a start and end time")
                if not found_valid_data:
                    caracal.log.warn(
                        'The following time selection has been made in the flag_time module of the flagging worker: "{1:s}". This selection would result in no valid data in {0:s}. This would lead to the FATAL error " The selected table has zero rows" in CASA/FLAGDATA. To avoid this error the corresponding cab {2:s} will not be added to the Stimela recipe of the flagging worker.'.format(msname, config['flag_time']['timerange'], step))
                if found_valid_data or not config['flag_time']['ensure_valid']:
                    recipe.add('cab/casa_flagdata', step,
                               {
                                   "vis": msname,
                                   "mode": 'manual',
                                   "timerange": config['flag_time']['timerange'],
                                   "flagbackup": False,
                                   "field": fields,
                               },
                               input=pipeline.input,
                               output=pipeline.output,
                               label='{0:s}::Flag out channels ms={1:s}'.format(step, msname))

            # ---- scan flagging --------------------------------------------
            if pipeline.enable_task(config, 'flag_scan'):
                step = '{0:s}-scan-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/casa_flagdata', step,
                           {
                               "vis": msname,
                               "mode": 'manual',
                               "scan": config['flag_scan']['scans'],
                               "flagbackup": False,
                               "field": fields,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}::Flag out channels ms={1:s}'.format(step, msname))

            # ---- per-antenna (optionally per-timerange) flagging ----------
            if pipeline.enable_task(config, 'flag_antennas'):
                # step = '{0:s}-antennas-ms{1:d}'.format(wname, msiter)
                antennas = [config['flag_antennas']['antennas']]
                times = [config['flag_antennas']['timerange']]
                found_valid_data = [0]
                ensure = config['flag_antennas']['ensure_valid']
                if times[0] == '':
                    # No timerange given: nothing to validate against the obs dates.
                    ensure = False
                if ensure:
                    if pipeline.startdate[i]:
                        antennas = config['flag_antennas']['antennas'].split(',')
                        times = config['flag_antennas']['timerange'].split(',')
                        # Pad the timerange/validity lists so each antenna has one entry
                        # (the last timerange is reused for the remaining antennas).
                        while len(times) < len(antennas):
                            times.append(times[-1])
                        while len(found_valid_data) < len(antennas):
                            found_valid_data.append(0)
                        for nn, time_range in enumerate(times):
                            start_flagrange, end_flagrange = time_range.split('~')
                            flag_start = float(''.join(re.split('/|:', start_flagrange)))
                            flag_end = float(''.join(re.split('/|:', end_flagrange)))
                            if (flag_start <= pipeline.enddate[i]) and (pipeline.startdate[i] <= flag_end):
                                found_valid_data[nn] = 1
                    else:
                        raise ValueError("You wanted to ensure a valid time range but we could not find a start and end time")
                for nn, antenna in enumerate(antennas):
                    antstep = 'ant-{0:s}-ms{1:d}-antsel{2:d}'.format(wname, i, nn)
                    if found_valid_data[nn] or not ensure:
                        recipe.add('cab/casa_flagdata', antstep,
                                   {
                                       "vis": msname,
                                       "mode": 'manual',
                                       "antenna": antenna,
                                       "timerange": times[nn],
                                       "field": fields,
                                       "flagbackup": False,
                                   },
                                   input=pipeline.input,
                                   output=pipeline.output,
                                   label='{0:s}:: Flagging bad antenna {2:s} ms={1:s}'.format(antstep, msname, antenna))
                    elif ensure and not found_valid_data[nn]:
                        caracal.log.warn(
                            'The following time selection has been made in the flag_antennas module of the flagging worker: "{1:s}". This selection would result in no valid data in {0:s}. This would lead to the FATAL error " The selected table has zero rows" in CASA/FLAGDATA. To avoid this error the corresponding cab {2:s} will not be added to the Stimela recipe of the flagging worker.'.format(msname, times[nn], antstep))

            # ---- static RFI mask ------------------------------------------
            if pipeline.enable_task(config, 'flag_mask'):
                step = '{0:s}-mask-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/rfimasker', step,
                           {
                               "msname": msname,
                               "mask": config['flag_mask']['mask'],
                               "accumulation_mode": 'or',
                               "uvrange": sdm.dismissable(config['flag_mask']['uvrange'] or None),
                               "memory": 4096,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Apply flag mask ms={1:s}'.format(step, msname))

            # ---- user-supplied manual flagging rules ----------------------
            if pipeline.enable_task(config, 'flag_manual'):
                rules = config['flag_manual']['rules']
                for irule, rule in enumerate(rules):
                    # a manual flagging rule has a pattern to match the MS name, followed by key:value pairs
                    rule_elements = rule.split()
                    if len(rule_elements) < 2 or not all(':' in el for el in rule_elements[1:]):
                        raise ValueError(f"invalid flag_manual rule '{rule}'")
                    pattern = rule_elements[0]
                    # Set of (key, value) pairs; fed to dict.update below.
                    keywords = {tuple(elem.split(":", 1)) for elem in rule_elements[1:]}
                    # end of parsing block. Replace this with file if you like
                    if not fnmatch.fnmatch(msname, pattern):
                        continue
                    caracal.log.info(f"adding manual flagging rule for {pattern}")
                    step = f'{wname}-manual-ms{msiter}-{irule}'
                    args = {
                        "vis": msname,
                        "mode": 'manual',
                        "flagbackup": False,
                        "field": fields,
                    }
                    args.update(keywords)
                    recipe.add('cab/casa_flagdata', step, args,
                               input=pipeline.input,
                               output=pipeline.output,
                               label=f'{step}::Flag ms={msname} using {rule}')

            # ---- automatic RFI flagging (aoflagger/tricolour/tfcrop) ------
            if pipeline.enable_task(config, 'flag_rfi'):
                step = '{0:s}-rfi-ms{1:d}'.format(wname, msiter)
                if config['flag_rfi']["flagger"] == "aoflagger":
                    if config['flag_rfi']['aoflagger']['ensure_valid']:
                        # Cross-check the correlations the strategy flags on against
                        # the correlations actually present in the MS.
                        ms_corr = msdict['CORR']['CORR_TYPE']
                        flag_corr = []
                        with open('{0:s}/{1:s}'.format(pipeline.input, config['flag_rfi']['aoflagger']['strategy'])) as stdr:
                            for ss in stdr.readlines():
                                for pp in 'xx,xy,yx,yy,stokes-i,stokes-q,stokes-u,stokes-v'.split(','):
                                    if '<on-{0:s}>1</on-{0:s}>'.format(pp) in ss:
                                        flag_corr.append(pp)
                        if ('stokes-u' in flag_corr and (('XY' not in ms_corr and 'RL' not in ms_corr) or ('YX' not in ms_corr and 'LR' not in ms_corr))) or\
                                ('stokes-v' in flag_corr and (('XY' not in ms_corr and 'RR' not in ms_corr) or ('YX' not in ms_corr and 'LL' not in ms_corr))) or\
                                ('stokes-i' in flag_corr and (('XX' not in ms_corr and 'RR' not in ms_corr) or ('YY' not in ms_corr and 'LL' not in ms_corr))) or\
                                ('stokes-q' in flag_corr and (('XX' not in ms_corr and 'RL' not in ms_corr) or ('YY' not in ms_corr and 'LR' not in ms_corr))) or\
                                ('xy' in flag_corr and ('XY' not in ms_corr and 'RL' not in ms_corr)) or\
                                ('yx' in flag_corr and ('YX' not in ms_corr and 'LR' not in ms_corr)) or\
                                ('xx' in flag_corr and ('XX' not in ms_corr and 'RR' not in ms_corr)) or\
                                ('yy' in flag_corr and ('YY' not in ms_corr and 'LL' not in ms_corr)):
                            raise ValueError("The selected flagging strategy {0:s}/{1:s} will attempt to flag on {2:} but this is"
                                             " not compatible with the {3:} correlations available in {4:s}. To proceed you can edit the flagging"
                                             " strategy or, if you know what you are doing, disable aoflagger: ensure_valid.".format(
                                                 pipeline.input, config['flag_rfi']['aoflagger']['strategy'], flag_corr, ms_corr, msname))
                    recipe.add('cab/autoflagger', step,
                               {
                                   "msname": msname,
                                   "column": config['flag_rfi']['col'],
                                   "fields": ",".join(map(str, field_ids)),
                                   "strategy": config['flag_rfi']['aoflagger']['strategy'],
                                   "indirect-read": True if config['flag_rfi']['aoflagger']['readmode'] == 'indirect' else False,
                                   "memory-read": True if config['flag_rfi']['aoflagger']['readmode'] == 'memory' else False,
                                   "auto-read-mode": True if config['flag_rfi']['aoflagger']['readmode'] == 'auto' else False,
                               },
                               input=pipeline.input,
                               output=pipeline.output,
                               label='{0:s}:: AOFlagger auto-flagging flagging pass ms={1:s} fields={2:s}'.format(step, msname, fields))
                elif config['flag_rfi']["flagger"] == "tricolour":
                    tricolour_strat = config['flag_rfi']['tricolour']['strategy']
                    if config['flag_rfi']['tricolour']['mode'] == 'auto':
                        # Auto-select the narrowband strategy for bandwidths <= 20 MHz.
                        bandwidth = msdict['SPW']['TOTAL_BANDWIDTH'][0] / 10.0**6
                        caracal.log.info("Total Bandwidth = {0:} MHz".format(bandwidth))
                        if bandwidth <= 20.0:
                            caracal.log.info("Narrowband data detected, selecting appropriate flagging strategy")
                            tricolour_strat = config['flag_rfi']['tricolour']['strat_narrow']
                    caracal.log.info("Flagging strategy in use: {0:}".format(tricolour_strat))
                    recipe.add('cab/tricolour', step,
                               {
                                   "ms": msname,
                                   "data-column": config['flag_rfi']['col'],
                                   "window-backend": config['flag_rfi']['tricolour']['backend'],
                                   "field-names": fields,
                                   "flagging-strategy": 'polarisation',
                                   "config": tricolour_strat,
                               },
                               input=pipeline.input,
                               output=pipeline.output,
                               label='{0:s}:: Tricolour auto-flagging flagging pass ms={1:s} fields={2:s}'.format(step, msname, fields))
                elif config['flag_rfi']["flagger"] == "tfcrop":
                    # e.g. "CORRECTED_DATA" -> "corrected" for CASA's datacolumn argument.
                    col = config['flag_rfi']['col'].split("_DATA")[0].lower()
                    recipe.add('cab/casa_flagdata', step,
                               {
                                   "vis": msname,
                                   "datacolumn": col,
                                   "mode": "tfcrop",
                                   "field": fields,
                                   "usewindowstats": config["flag_rfi"]["tfcrop"]["usewindowstats"],
                                   "combinescans": config["flag_rfi"]["tfcrop"]["combinescans"],
                                   "flagdimension": config["flag_rfi"]["tfcrop"]["flagdimension"],
                                   "flagbackup": False,
                                   "timecutoff": config["flag_rfi"]["tfcrop"]["timecutoff"],
                                   "freqcutoff": config["flag_rfi"]["tfcrop"]["freqcutoff"],
                                   "correlation": config["flag_rfi"]["tfcrop"]["correlation"],
                               },
                               input=pipeline.input,
                               output=pipeline.output,
                               label='{0:s}:: Tfcrop auto-flagging flagging pass ms={1:s} fields={2:s}'.format(step, msname, fields))
                else:
                    # NOTE(review): the message contains a {0:s} placeholder but no
                    # .format(...) call — the flagger name is never substituted.
                    raise RuntimeError(
                        "Flagger, {0:s} is not available. Options are 'aoflagger, tricolour, tfcrop'.")

            # ---- RFI inspection plots (RFInder) ---------------------------
            if pipeline.enable_task(config, 'inspect'):
                step = '{0:s}-inspect-ms{1:d}'.format(wname, msiter)
                if config['field'] == 'target':
                    field = '0'
                else:
                    field = ",".join(map(str, utils.get_field_id(msdict, manfields.get_field(
                        pipeline, i, config['inspect']['field']).split(","))))
                for f in field.split(','):
                    outlabel = '_{0:d}'.format(i) if len(field.split(',')) == 1 else '_{0:d}_{1:s}'.format(i, f)
                    recipe.add('cab/rfinder', step,
                               {
                                   "msname": msname,
                                   "field": int(f),
                                   "plot_noise": "noise",
                                   "RFInder_mode": "use_flags",
                                   "outlabel": outlabel,  # The output will be rfi_<pol>_<outlabel>
                                   "polarization": config['inspect']['polarization'],
                                   "spw_width": config['inspect']['spw_width'],
                                   "time_step": config['inspect']['time_step'],
                                   "time_enable": config['inspect']['time_enable'],
                                   "spw_enable": config['inspect']['spw_enable'],
                                   "1d_gif": config['inspect']['time_enable'],
                                   "2d_gif": config['inspect']['time_enable'],
                                   "altaz_gif": config['inspect']['spw_enable'],
                                   # NOTE(review): config['spw_enable'] below looks like it should be
                                   # config['inspect']['spw_enable'] — likely KeyError; confirm.
                                   "movies_in_report": config['inspect']['time_enable'] or config['spw_enable']
                               },
                               input=pipeline.input,
                               output=pipeline.output,
                               label='{0:s}:: Investigate presence of rfi in ms={1:s}'.format(step, msname))

            # ---- flagging-statistics summary ------------------------------
            if pipeline.enable_task(config, 'summary'):
                # NOTE(review): __label is assigned but never used below.
                __label = config['label_in']
                step = '{0:s}-summary-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/flagstats', step, {
                    "msname": msname,
                    "plot": True,
                    "outfile": ('{0:s}-{1:s}-'
                                'flagging-summary-{2:d}.json').format(
                        prefix, wname, i),
                    "htmlfile": ('{0:s}-{1:s}-'
                                 'flagging-summary-plots-{2:d}.html').format(
                        prefix, wname, i)
                },
                    input=pipeline.input,
                    output=pipeline.diagnostic_plots,
                    label='{0:s}:: Flagging summary ms={1:s}'.format(step, msname))

            recipe.run()
            # Empty job que after execution
            recipe.jobs = []

            # Checkpoint the flags produced by this worker.
            substep = 'save-{0:s}-ms{1:d}'.format(flags_after_worker, msiter)
            manflags.add_cflags(pipeline, recipe, flags_after_worker, msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
            msiter += 1
| 32,681 | 60.317073 | 441 | py |
caracal | caracal-master/caracal/workers/transform_worker.py | # -*- coding: future_fstrings -*-
import os
import sys
import caracal
import stimela.dismissable as sdm
import stimela.recipe
import json
import caracal
from caracal.workers.utils import manage_flagsets as manflags
from caracal import log
from caracal.workers.utils import remove_output_products
from caracal.workers.utils.callibs import resolve_calibration_library
# Worker registration metadata: human-readable name and the config-section label.
NAME = 'Transform Data by Splitting/Average/Applying calibration'
LABEL = 'transform'

# Rules for interpolation mode to use when applying calibration solutions
applycal_interp_rules = {
    'target': {
        'delay_cal': 'linear',
        'bp_cal': 'linear',
        'transfer_fluxscale': 'linear',
        'gain_cal_gain': 'linear',
    },
}
def get_dir_path(string, pipeline):
    """Return *string* expressed relative to the pipeline output directory.

    Splits *string* on ``pipeline.output`` and returns the segment that
    follows it, with the leading path separator stripped.
    """
    segments = string.split(pipeline.output)
    return segments[1][1:]
# Caltable-type suffix for each calibration step's gain table.
table_suffix = {
    "delay_cal": 'K0',
    "bp_cal": 'B0',
    "gain_cal_gain": 'G0',
    "gain_cal_flux": 'G0',
    "transfer_fluxscale": 'F0',
}

# Valid field designators for the 'field' config entry: the science target,
# and the recognised calibrator roles.
_target_fields = {'target'}
_cal_fields = set("fcal bpcal gcal xcal".split())
def get_fields_to_split(config, name):
    """Resolve the ``field`` config entry of worker *name* into field designators.

    Returns ``_target_fields``, ``_cal_fields``, or the validated subset of
    calibrator designators requested via a comma-separated string. Raises
    :class:`caracal.ConfigurationError` on an empty or unrecognised selection.
    """
    selection = config['field']
    if not selection:
        raise caracal.ConfigurationError(f"'{name}: field' cannot be empty")
    if selection == 'calibrators':
        return _cal_fields
    if selection == 'target':
        return _target_fields
    # Otherwise it must be a combination of calibrator designators.
    requested = set(selection.split(','))
    unknown = requested.difference(_cal_fields)
    if unknown:
        valid = ', '.join([f"'{f}'" for f in _cal_fields])
        raise caracal.ConfigurationError("'{}: field: expected 'target', "
                                         "'calibrators', or one or more of {}. Got '{}'"
                                         "".format(name, valid, ','.join(unknown)))
    return requested
def check_config(config, name):
    """Validate this worker's config; raises on a bad 'field' selection."""
    # Parsing is the validation: result is discarded here.
    get_fields_to_split(config, name)
def worker(pipeline, recipe, config):
    """Transform worker: split/average MSs, optionally applying calibration on the fly.

    Depending on the configuration this worker, per observation and per output
    field, (1) optionally rewinds flag versions on the input MS, (2) runs
    CASA mstransform to split/average into a new MS — with optional on-the-fly
    cross-calibration via a callib and optional polarization calibration via a
    separate applycal pass — (3) optionally changes the phase centre, (4)
    optionally concatenates per-label MSs into one, and (5) writes
    listobs/summary observation-info products.

    Parameters
    ----------
    pipeline : worker administrator object holding global pipeline state
    recipe : Stimela recipe that cabs are added to and run from
    config : dict-like configuration section for this worker
    """
    wname = pipeline.CURRENT_WORKER
    flags_before_worker = '{0:s}_{1:s}_before'.format(pipeline.prefix, wname)
    flags_after_worker = '{0:s}_{1:s}_after'.format(pipeline.prefix, wname)
    label_in = config['label_in']
    label_out = config['label_out']
    # Splitting *from* per-target MSs only makes sense with a label and field=target.
    from_target = True if label_in and config['field'] == 'target' else False

    field_to_split = get_fields_to_split(config, wname)
    # are we splitting calibrators
    splitting_cals = field_to_split.intersection(_cal_fields)

    # split_field/changecentre require a single label_in; concat requires several.
    if (pipeline.enable_task(config, 'split_field') or
            pipeline.enable_task(config, 'changecentre')) and pipeline.enable_task(config, 'concat'):
        raise ValueError(
            "split_field/changecentre and concat cannot be enabled in the same run of the transform worker. "
            "The former need a single-valued label_in, the latter multiple comma-separated values.")

    if ',' in label_in:
        if pipeline.enable_task(config, 'split_field'):
            raise ValueError("split_field cannot be enabled with multiple (i.e., comma-separated) entries in label_in")
        if pipeline.enable_task(config, 'changecentre'):
            raise ValueError("changecentre cannot be enabled with multiple (i.e., comma-separated) entries in label_in")
        else:
            transform_mode = 'concat'  # in this mode all .MS files from the same input .MS and with the same target, and with label inside the list label_in, are concatenated
    else:
        if pipeline.enable_task(config, 'concat'):
            raise ValueError("concat cannot be enabled with a single entry in label_in")
        else:
            transform_mode = 'split'

    for i, (msbase, prefix_msbase) in enumerate(zip(pipeline.msbasenames, pipeline.prefix_msbases)):
        # if splitting from target, we have multiple MSs to iterate over
        if transform_mode == 'split':
            from_mslist = pipeline.get_mslist(i, label_in, target=from_target)
        elif transform_mode == 'concat':
            from_mslist = pipeline.get_mslist(i, '', target=from_target)
        to_mslist = pipeline.get_mslist(i, label_out, target=not splitting_cals)

        # if splitting cals, we'll split one (combined) target to one output MS
        if splitting_cals:
            calfields = set()
            for fd in field_to_split:
                for elem in getattr(pipeline, fd)[i]:
                    calfields.add(elem)
            output_fields = calfields
            target_ls = [','.join(calfields)]
        # else splitting target -- we'll split a list of targets to a list of output MSs
        else:
            target_ls = pipeline.target[i]
            output_fields = [x.strip() for x in target_ls]

        # repeat the from-ms once per target, if not splitting from the target MS
        if not from_target:
            from_mslist = from_mslist * len(target_ls)

        dcol = config['split_field']['col']
        # if these are set to not None below, this means OTF is enabled and a valid library is to be applied
        polcal_lib = crosscal_lib = None
        pcaltablelist = pgainfieldlist = pinterplist = pcalwtlist = papplyfield = []

        if pipeline.enable_task(config['split_field'], 'otfcal'):
            if dcol != 'corrected':
                # OTF calibration writes CORRECTED_DATA; force the split column.
                caracal.log.warning(
                    f"split_field: col set to '{dcol}' but OTF calibration is enabled. Forcing to 'corrected'")
                dcol = 'corrected'
            crosscal_lib, (caltablelist, gainfieldlist, interplist, calwtlist, applyfield) = \
                resolve_calibration_library(pipeline, prefix_msbase,
                                            config['split_field']['otfcal']['callib'],
                                            config['split_field']['otfcal']['label_cal'],
                                            output_fields=output_fields,
                                            default_interpolation_types=config['split_field']['otfcal'][
                                                'interpolation'])
            if crosscal_lib:
                caracal.log.info(f"applying OTF cross-cal from {os.path.basename(crosscal_lib)}")
            else:
                caracal.log.info(f"no cross-cal lib specified for OTF, ignoring")
            # load/export if specified -- otherwise will be empty lists. Also converts to full filename.
            polcal_lib, (pcaltablelist, pgainfieldlist, pinterplist, pcalwtlist, papplyfield) = \
                resolve_calibration_library(pipeline, prefix_msbase,
                                            config['split_field']['otfcal']['pol_callib'],
                                            config['split_field']['otfcal']['label_pcal'],
                                            output_fields=output_fields,
                                            default_interpolation_types=config['split_field']['otfcal'][
                                                'interpolation'])
            if polcal_lib:
                caracal.log.info(f"applying OTF polcal from {os.path.basename(polcal_lib)}")
            else:
                caracal.log.info(f"no polcal lib specified for OTF, ignoring")

        for target_iter, (target, from_ms, to_ms) in enumerate(zip(target_ls, from_mslist, to_mslist)):
            # Rewind flags
            available_flagversions = manflags.get_flags(pipeline, from_ms)
            if config['rewind_flags']['enable'] and label_in:
                version = config['rewind_flags']['version']
                if version in available_flagversions:
                    substep = 'rewind-{0:s}-ms{1:d}'.format(version, target_iter)
                    manflags.restore_cflags(pipeline, recipe, version, from_ms, cab_name=substep)
                    if available_flagversions[-1] != version:
                        # Drop every flag version newer than the restored one.
                        substep = 'delete-flag_versions-after-{0:s}-ms{1:d}'.format(version, target_iter)
                        manflags.delete_cflags(pipeline, recipe,
                                               available_flagversions[available_flagversions.index(version) + 1],
                                               from_ms, cab_name=substep)
                else:
                    manflags.conflict('rewind_to_non_existing', pipeline, wname, from_ms,
                                      config, flags_before_worker, flags_after_worker)

            # Names of the output MS, its flag-version store and the temporary MS
            # used as an intermediate product for the polcal applycal path.
            flagv = to_ms + '.flagversions'
            tmp_ms = 'tmp_' + to_ms
            tmpflagv = tmp_ms + '.flagversions'

            if pipeline.enable_task(config, 'split_field'):
                msbase = os.path.splitext(to_ms)[0]
            else:
                msbase = pipeline.msbasenames[i]
            summary_file = f'{msbase}-summary.json'
            obsinfo_file = f'{msbase}-obsinfo.txt'

            if pipeline.enable_task(config, 'split_field'):
                step = 'split_field-ms{0:d}-{1:d}'.format(i, target_iter)
                # If the output of this run of mstransform exists, delete it first
                remove_output_products((to_ms, tmp_ms, flagv, tmpflagv, summary_file, obsinfo_file),
                                       directory=pipeline.msdir, log=log)

                if not polcal_lib:
                    # Single-pass split: mstransform can apply the cross-cal
                    # callib directly via docallib.
                    recipe.add('cab/casa_mstransform', step, {
                        "vis": from_ms if label_in else from_ms + ":input",
                        "outputvis": to_ms,
                        "timeaverage": config['split_field']['time_avg'] not in ('', '0s'),
                        "timebin": config['split_field']['time_avg'],
                        "chanaverage": config['split_field']['chan_avg'] > 1,
                        "chanbin": config['split_field']['chan_avg'],
                        "spw": config['split_field']['spw'],
                        "antenna": config['split_field']['antennas'],
                        "datacolumn": dcol,
                        "correlation": config['split_field']['correlation'],
                        "scan": config['split_field']['scan'],
                        "usewtspectrum": config['split_field']['create_specweights'],
                        "field": target,
                        "keepflags": True,
                        "docallib": bool(crosscal_lib),
                        "callib": sdm.dismissable(crosscal_lib and crosscal_lib + ':output'),
                        "nthreads": config['split_field']['nthreads'],
                    },
                        input=pipeline.input if label_in else pipeline.rawdatadir,
                        output=pipeline.output,
                        label=f'{step}:: Split and average data ms={"".join(from_ms)}')

                # workaround because mstransform does not accept the polcal gaintypes such as Xfparang
                else:
                    output_pcal_ms = config['split_field']['otfcal']['output_pcal_ms']
                    # in intermediate-output mode, do transform directly to the output MS
                    if output_pcal_ms == 'intermediate':
                        tmp_ms = to_ms
                        tmpflagv = flagv
                        log.warning(
                            "otfcal: output_pcal_ms is 'intermediate', output will be an intermediate MS only with DATA and CORRECTED_DATA columns. This is experimenatal.")

                    # Pass 1: split with the cross-cal solutions applied.
                    recipe.add('cab/casa_mstransform', step + '_tmp_split_crosscal_corrected', {
                        "vis": from_ms if label_in else from_ms + ":input",
                        "outputvis": tmp_ms,
                        "timeaverage": config['split_field']['time_avg'] not in ('', '0s'),
                        "timebin": config['split_field']['time_avg'],
                        "chanaverage": config['split_field']['chan_avg'] > 1,
                        "chanbin": config['split_field']['chan_avg'],
                        "spw": config['split_field']['spw'],
                        "datacolumn": sdm.dismissable('corrected' if crosscal_lib is not None else 'data'),
                        "correlation": config['split_field']['correlation'],
                        "scan": config['split_field']['scan'],
                        "antenna": config['split_field']['antennas'],
                        "usewtspectrum": config['split_field']['create_specweights'],
                        "field": target,
                        "keepflags": True,
                        "docallib": bool(crosscal_lib),
                        "callib": sdm.dismissable(crosscal_lib and crosscal_lib + ':output'),
                        "nthreads": config['split_field']['nthreads'],
                    },
                        input=pipeline.input if label_in else pipeline.rawdatadir,
                        output=pipeline.output,
                        label=f'{step}:: Split and average data ms={"".join(from_ms)}')

                    # Pass 2: apply the polcal tables with applycal.
                    if any(papplyfield):
                        recipe.add('cab/casa_applycal', step + '_apply_polcal', {
                            "vis": tmp_ms,
                            "field": target,
                            "docallib": False,
                            "calwt": pcalwtlist,
                            "gaintable": [f"{ct}:output" for ct in pcaltablelist],
                            "gainfield": pgainfieldlist,
                            "interp": pinterplist,
                            "parang": config['split_field']['otfcal']['derotate_pa'],
                        },
                            input=pipeline.input,
                            output=pipeline.caltables,
                            label=f'{step}:: Apply pol callib ms={"".join(to_ms)}')
                    else:
                        # Per-field applycal: select the tables relevant to each field
                        # (an empty applyfield entry means "applies to all fields").
                        trgt = [x.strip() for x in target.split(',')]
                        for ii, fld in enumerate(trgt):
                            pcal = []
                            pgain = []
                            pinter = []
                            pcalwt = []
                            for idx, f in enumerate(papplyfield):
                                if f == '' or f == fld:
                                    if pcaltablelist[idx] not in pcal:
                                        pcal.append(pcaltablelist[idx])
                                        pgain.append(pgainfieldlist[idx])
                                        pinter.append(pinterplist[idx])
                                        pcalwt.append(pcalwtlist[idx])
                            recipe.add('cab/casa_applycal', step + '_apply_polcal_' + str(ii), {
                                "vis": tmp_ms,
                                "field": fld,
                                "docallib": False,
                                "calwt": pcalwt,
                                "gaintable": ["%s:output" % ct for ct in pcal],
                                "gainfield": pgain,
                                "interp": pinter,
                                "parang": config['split_field']['otfcal']['derotate_pa'],
                            },
                                input=pipeline.input,
                                output=pipeline.caltables,
                                label=f'{step}:: Apply pol callib ms={"".join(to_ms)}, field={ii}')
                    recipe.run()
                    recipe.jobs = []

                    # generate final MS, unless we're only asked to produce the intermediate one
                    if tmp_ms != to_ms:
                        recipe.add('cab/casa_mstransform', step + '_split_polcal_corrected', {
                            "vis": tmp_ms,
                            "outputvis": to_ms,
                            "datacolumn": 'corrected',
                            "timeaverage": False,
                            "chanaverage": False,
                            "spw": '',
                            "correlation": '',
                            "usewtspectrum": config['split_field']['create_specweights'],
                            "field": '',
                            "keepflags": True,
                            "docallib": False,
                        },
                            input=pipeline.input if label_in else pipeline.rawdatadir,
                            output=pipeline.output,
                            label=f'{step}:: Split polcal corrected ms={"".join(to_ms)}')
                        recipe.run()
                        recipe.jobs = []
                        # Delete intermediate ms
                        if output_pcal_ms == 'final':
                            remove_output_products((tmp_ms, tmpflagv), directory=pipeline.msdir, log=log)

                # Seed the new MS's flag-version store.
                substep = 'save-{0:s}-ms{1:d}'.format('caracal_legacy', target_iter)
                manflags.add_cflags(pipeline, recipe, 'caracal_legacy', to_ms,
                                    cab_name=substep, overwrite=False)

            obsinfo_msname = to_ms if pipeline.enable_task(config, 'split_field') else from_ms

            if pipeline.enable_task(config, 'changecentre'):
                if config['changecentre']['ra'] == '' or config['changecentre']['dec'] == '':
                    caracal.log.error(
                        'Wrong format for RA and/or Dec you want to change to. '
                        'Check your settings of split_target:changecentre:ra and split_target:changecentre:dec')
                    caracal.log.error('Current settings for ra,dec are {0:s},{1:s}'.format(
                        config['changecentre']['ra'], config['changecentre']['dec']))
                    sys.exit(1)
                step = 'changecentre-ms{0:d}-{1:d}'.format(i, target_iter)
                recipe.add('cab/casa_fixvis', step,
                           {
                               "msname": to_ms,
                               "outputvis": to_ms,
                               "phasecenter": 'J2000 {0:s} {1:s}'.format(config['changecentre']['ra'],
                                                                         config['changecentre']['dec']),
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Change phase centre ms={1:s}'.format(step, to_ms))

            if pipeline.enable_task(config, 'concat'):
                concat_labels = label_in.split(',')
                step = 'concat-ms{0:d}-{1:d}'.format(i, target_iter)
                # One input MS per label; concat into a temporary multi-SPW MS.
                concat_ms = [from_ms.replace('.ms', '-{0:s}.ms'.format(cl)) for cl in concat_labels]
                recipe.add('cab/casa_concat', step,
                           {
                               "vis": concat_ms,
                               "concatvis": 'tobedeleted-' + to_ms,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Concatenate {1:}'.format(step, concat_ms))
                # If the output of this run of mstransform exists, delete it first
                if os.path.exists('{0:s}/{1:s}'.format(pipeline.msdir, to_ms)) or \
                        os.path.exists('{0:s}/{1:s}'.format(pipeline.msdir, flagv)):
                    os.system(
                        'rm -rf {0:s}/{1:s} {0:s}/{2:s}'.format(pipeline.msdir, to_ms, flagv))
                # Collapse the concatenated SPWs into a single SPW for the final MS.
                step = 'singlespw-ms{0:d}-{1:d}'.format(i, target_iter)
                recipe.add('cab/casa_mstransform', step,
                           {
                               "vis": 'tobedeleted-' + to_ms,
                               "outputvis": to_ms,
                               "datacolumn": config['concat']['col'],
                               "combinespws": True,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Single SPW {1:}'.format(step, concat_ms))
                substep = 'save-{0:s}-ms{1:d}'.format('caracal_legacy', target_iter)
                manflags.add_cflags(pipeline, recipe, 'caracal_legacy', to_ms,
                                    cab_name=substep, overwrite=False)
                # Delete the tobedeleted file, but first we need to have created it, thus...
                recipe.run()
                # Empty job que after execution
                recipe.jobs = []
                os.system(
                    'rm -rf {0:s}/tobedeleted-{1:s}'.format(pipeline.msdir, to_ms))
                obsinfo_msname = to_ms

            if pipeline.enable_task(config, 'obsinfo'):
                if config['obsinfo']['listobs']:
                    if pipeline.enable_task(config, 'split_field') or transform_mode == 'concat':
                        listfile = '{0:s}-obsinfo.txt'.format(os.path.splitext(to_ms)[0])
                    else:
                        listfile = '{0:s}-obsinfo.txt'.format(pipeline.msbasenames[i])
                    step = 'listobs-ms{0:d}-{1:d}'.format(i, target_iter)
                    recipe.add('cab/casa_listobs', step,
                               {
                                   "vis": obsinfo_msname,
                                   "listfile": listfile + ":msfile",
                                   "overwrite": True,
                               },
                               input=pipeline.input,
                               output=pipeline.obsinfo,
                               label='{0:s}:: Get observation information ms={1:s}'.format(step, obsinfo_msname))
                if config['obsinfo']['summary_json']:
                    if pipeline.enable_task(config, 'split_field') or transform_mode == 'concat':
                        listfile = '{0:s}-summary.json'.format(os.path.splitext(to_ms)[0])
                    else:
                        listfile = '{0:s}-summary.json'.format(pipeline.msbasenames[i])
                    step = 'summary_json-ms{0:d}-{1:d}'.format(i, target_iter)
                    recipe.add('cab/msutils', step,
                               {
                                   "msname": obsinfo_msname,
                                   "command": 'summary',
                                   "display": False,
                                   "outfile": listfile + ":msfile"
                               },
                               input=pipeline.input,
                               output=pipeline.obsinfo,
                               label='{0:s}:: Get observation information as a json file ms={1:s}'.format(step,
                                                                                                          obsinfo_msname))
| 22,609 | 52.705463 | 175 | py |
caracal | caracal-master/caracal/workers/worker_administrator.py | # -*- coding: future_fstrings -*-
from caracal.dispatch_crew import utils
from collections import OrderedDict
import caracal
from caracal import log, pckgdir, notebooks
import sys
import os
from datetime import datetime
import stimela
import glob
import shutil
import traceback
import itertools
import ruamel.yaml
assert ruamel.yaml.version_info >= (0, 12, 14)
REPORTS = True
class WorkerAdministrator(object):
    """Top-level driver of a CARACal pipeline run.

    Takes the parsed configuration, sets up the workspace directory tree,
    determines which workers to run (honouring start/end worker bounds),
    and executes each worker's Stimela recipe in sequence via
    :meth:`run_workers`.
    """

    def __init__(self, config, workers_directory,
                 prefix=None, configFileName=None,
                 add_all_first=False, singularity_image_dir=None,
                 start_worker=None, end_worker=None,
                 container_tech='docker', generate_reports=True):
        """
        Args:
            config: full pipeline configuration dict (must contain 'general',
                'getdata' and 'obsconf' sections).
            workers_directory: directory holding the ``*_worker.py`` modules.
            prefix: output filename prefix; falls back to 'general: prefix'.
            configFileName: path of the user's config file (archived into the
                output cfgFiles folder).
            add_all_first: currently unused; kept for interface stability.
            singularity_image_dir: where Stimela finds singularity images.
            start_worker / end_worker: optional worker names bounding the
                subset of workers to run; mandatory workers always run.
            container_tech: Stimela backend ('docker', 'singularity', ...).
            generate_reports: whether to (re)generate report notebooks.

        Raises:
            caracal.ConfigurationError: if mandatory 'general' keys are missing.
            RuntimeError: if start/end worker names are unknown.
            ImportError: if a selected worker module cannot be imported.
        """
        self.config = config
        self.config_file = configFileName
        self.singularity_image_dir = singularity_image_dir
        self.container_tech = container_tech
        # these three directories are mandatory
        for key in "msdir input output".split():
            if not self.config['general'].get(key):
                raise caracal.ConfigurationError(f"'general: {key}' must be specified")
        self.msdir = self.config['general']['msdir']
        self.input = self.config['general']['input']
        self.output = self.config['general']['output']
        # standard output subdirectories
        self.obsinfo = f'{self.output}/obsinfo'
        self.reports = f'{self.output}/reports'
        self.diagnostic_plots = f'{self.output}/diagnostic_plots'
        self.configFolder = f'{self.output}/cfgFiles'
        self.caltables = f'{self.output}/caltables'
        self.masking = f'{self.output}/masking'
        self.continuum = f'{self.output}/continuum'
        self.crosscal_continuum = f'{self.output}/continuum/crosscal'
        self.cubes = f'{self.output}/cubes'
        self.mosaics = f'{self.output}/mosaics'
        self.generate_reports = generate_reports
        # timestamp used to label logs and archived configs for this run
        self.timeNow = '{:%Y%m%d-%H%M%S}'.format(datetime.now())
        self.ms_extension = self.config["getdata"]["extension"]
        self.ignore_missing = self.config["getdata"]["ignore_missing"]
        self._msinfo_cache = {}
        self.logs_symlink = f'{self.output}/logs'
        self.logs = "{}-{}".format(self.logs_symlink, self.timeNow)
        self.rawdatadir = self.config['general']['rawdatadir']
        # raw data defaults to msdir if not explicitly configured
        if not self.rawdatadir:
            self.rawdatadir = self.config['general']['rawdatadir'] = self.msdir
        self.virtconcat = False
        self.workers_directory = workers_directory
        # Add workers to packages
        if workers_directory:
            sys.path.append(self.workers_directory)
        self.workers = []
        last_mandatory = 2  # index of last mandatory worker
        # general, getdata and obsconf are all mandatory.
        # That's why the lowest starting index is 2 (third element)
        start_idx = last_mandatory
        end_idx = len(self.config.keys())
        workers = []
        if start_worker and start_worker not in self.config.keys():
            raise RuntimeError("Requested --start-worker '{0:s}' is unknown. Please check your options".format(start_worker))
        if end_worker and end_worker not in self.config.keys():
            raise RuntimeError("Requested --end-worker '{0:s}' is unknown. Please check your options".format(end_worker))
        for i, (name, opts) in enumerate(self.config.items()):
            if name.find('general') >= 0 or name == "schema_version":
                continue
            # a 'worker__label' section maps onto the base worker module
            if name.find('__') >= 0:
                worker = name.split('__')[0] + '_worker'
            else:
                worker = name + '_worker'
            if name == start_worker and name == end_worker:
                start_idx = len(workers)
                end_idx = len(workers)
            elif name == start_worker:
                start_idx = len(workers)
            elif name == end_worker:
                end_idx = len(workers)
            workers.append((name, worker, i))
        if end_worker in list(self.config.keys())[:last_mandatory + 1]:
            # no need for +1 this time since 'general' was removed from
            # this list
            self.workers = workers[:last_mandatory]
        else:
            start_idx = max(start_idx, last_mandatory)
            end_idx = max(end_idx, last_mandatory)
            self.workers = workers[:last_mandatory] + workers[start_idx:end_idx + 1]
        self.prefix = prefix or self.config['general']['prefix']
        # Get possible flagsets for reduction
        self.flags = {"legacy": ["legacy"]}
        for _name, _worker, i in self.workers:
            try:
                wkr = __import__(_worker)
            except ImportError:
                traceback.print_exc()
                raise ImportError('Worker "{0:s}" could not be found at {1:s}'.format(
                    _worker, self.workers_directory))
            if hasattr(wkr, "FLAG_NAMES"):
                self.flags[_name] = ["_".join(
                    [_name, suffix]) if suffix else _name for suffix in wkr.FLAG_NAMES]
        self.recipes = {}
        # Workers to skip
        self.skip = []
        # Initialize empty lists for ddids, leave this up to getdata worker to define
        self.dataid = []
        # names of all MSs
        self.msnames = []
        # basenames of all MSs (sans extension)
        self.msbasenames = []
        # filename prefixes for outputs (formed up as prefix-msbase)
        self.prefix_msbases = []
        # OMS skipping this here, leave it to the getdata
        # self.init_names([], allow_empty=True)
        self.init_pipeline(prep_input=config["general"]["prep_workspace"])
        # save configuration files
        config_base = os.path.splitext(os.path.basename(configFileName))[0]
        outConfigOrigName = f'{self.configFolder}/{config_base}-{self.timeNow}.orig.yml'
        outConfigName = f'{self.configFolder}/{config_base}-{self.timeNow}.yml'
        log.info(f"Saving original configuration file as {outConfigOrigName}")
        shutil.copyfile(configFileName, outConfigOrigName)  # original config
        log.info(f"Saving full configuration as {outConfigName}")
        with open(outConfigName, 'w') as outfile:  # config+command line
            ruamel.yaml.dump(self.config, outfile, Dumper=ruamel.yaml.RoundTripDumper)

    def init_names(self, dataids):
        """initialize names to be used throughout the pipeline and associated
        general fields that must be propagated
        """
        self.dataid = list(filter(bool, dataids))
        if not self.dataid:
            raise caracal.ConfigurationError(f"Empty 'getdata: dataid' entry")
        # each dataid becomes a glob pattern over the raw data directory
        patterns = [f"{dataid}.{self.ms_extension}" for dataid in self.dataid]
        for pattern in patterns:
            msnames = [os.path.basename(ms) for ms in glob.glob(os.path.join(self.rawdatadir, pattern))]
            if not msnames:
                if self.ignore_missing:
                    log.warning(f"'{pattern}' did not match any files, but getdata: ignore_missing is set, proceeding anyway")
                else:
                    raise caracal.ConfigurationError(f"'{pattern}' did not match any files under {self.rawdatadir}. Check your "
                                                     "'general: msdir/rawdatadir' and/or 'getdata: dataid/extension' settings, or "
                                                     "set 'getdata: ignore_missing: true'")
            msbases = [os.path.splitext(ms)[0] for ms in msnames]
            self.msnames += msnames
            self.msbasenames += msbases
            self.prefix_msbases += [f"{self.prefix}-{x}" for x in msbases]
        self.nobs = len(self.msnames)
        if not self.nobs:
            raise caracal.ConfigurationError(f"No matching input data found in {self.rawdatadir} for {','.join(patterns)}. Check your "
                                             " 'general: msdir/rawdatadir' and/or 'getdata: dataid/extension' settings.")
        # replicate single-valued per-observation settings across all MSs
        for item in 'refant fcal bpcal gcal target xcal'.split():
            value = getattr(self, item, None)
            if value and len(value) == 1:
                value = value * self.nobs
                setattr(self, item, value)

    def get_msinfo(self, msname):
        """Returns info dict corresponding to an MS. Caches and reloads as needed"""
        msinfo_file = os.path.splitext(msname)[0] + "-summary.json"
        msinfo_path = os.path.join(self.msdir, msinfo_file)
        msdict, mtime_cache = self._msinfo_cache.get(msname, (None, 0))
        if not os.path.exists(msinfo_path):
            raise RuntimeError(f"MS summary file {msinfo_file} not found at expected location. This is a bug or "
                               "a misconfiguration. Was the MS transformed properly?")
        # reload cached dict if file on disk is newer
        mtime = os.path.getmtime(msinfo_path)
        if msdict is None or mtime > mtime_cache:
            with open(msinfo_path, 'r') as f:
                msdict = ruamel.yaml.load(f, ruamel.yaml.RoundTripLoader)
            self._msinfo_cache[msname] = msdict, mtime
        return msdict

    # The following three methods provide MS naming services for workers

    def form_msname(self, msbase, label=None, field=None):
        """
        Given a base MS name, an optional label, and an optional field name, return the full MS name
        """
        label = '' if not label else '-' + label
        field = '' if not field else '-' + utils.filter_name(field)
        return f'{msbase}{field}{label}.{self.ms_extension}'

    def get_mslist(self, iobs, label="", target=False):
        """
        Given an MS number (0...nobs-1), and an optional label, returns list of corresponding MSs.
        If target is True, this will be one MS per each (split-out) target.
        If target is False, the list will contain just the single MS.
        Applies label in both cases.
        """
        msbase = self.msbasenames[iobs]
        if target:
            return [self.form_msname(msbase, label, targ) for targ in self.target[iobs]]
        else:
            return [self.form_msname(msbase, label)]

    def get_target_mss(self, label=None):
        """
        Given an MS label, returns a tuple of unique_targets, all_mss, mss_per_target
        Where all_mss is a list of all MSs to be processed for all targets, and mss_per_target maps target field
        to associated list of MSs
        """
        target_msfiles = OrderedDict()
        # self.target is a list of lists of targets, per each MS
        for msbase, targets in zip(self.msbasenames, self.target):
            for targ in targets:
                target_msfiles.setdefault(targ, []).append(self.form_msname(msbase, label, targ))
        # collect into flat list of MSs
        target_ms_ls = list(itertools.chain(*target_msfiles.values()))
        return list(target_msfiles.keys()), target_ms_ls, target_msfiles

    def get_callib_name(self, name, ext="yml", extra_label=None):
        """Makes a callib name with the given extension. Replaces extension if needed. Adds callib- if needed."""
        name, _ = os.path.splitext(name)
        if not name.startswith("callib-"):
            name = f"callib-{name}"
        if extra_label:
            name = f"{name}-{extra_label}"
        return os.path.join(self.caltables, f"{name}.{ext}")

    def load_callib(self, name):
        """Loads calibration library specified by name"""
        filename = self.get_callib_name(name)
        if not os.path.exists(filename):
            # report the actual path (the previous message said "(unknown)")
            raise IOError(f"Calibration library {filename} doesn't exist")
        with open(filename, 'r') as f:
            return ruamel.yaml.load(f, ruamel.yaml.RoundTripLoader)

    def save_callib(self, callib, name):
        """Dumps caldict to calibration library specified by name"""
        with open(self.get_callib_name(name), 'w') as f:
            ruamel.yaml.dump(callib, f, ruamel.yaml.RoundTripDumper)

    def parse_cabspec_dict(self, cabspec_seq):
        """Turns sequence of cabspecs into a Stimela cabspec dict"""
        cabspecs = OrderedDict()
        speclists = OrderedDict()
        # collect all specs encountered, sort them by cab
        for spec in cabspec_seq:
            name, version, tag = spec["name"], spec.get("version") or None, spec.get("tag") or None
            if not version and not tag:
                log.warning(f"Neither version nor tag specified for cabspec {name}, ignoring")
                continue
            speclists.setdefault(name, []).append((version, tag))
        # now process each cab's list of specs.
        for name, speclist in speclists.items():
            if len(speclist) == 1:
                version, tag = speclist[0]
                if version is None:
                    log.info(f"  {name}: forcing tag {tag} for all invocations")
                    cabspecs[name] = dict(tag=tag, force=True)
                    continue
                elif tag is None:
                    log.info(f"  {name}: forcing version {version} for all invocations")
                    cabspecs[name] = dict(version=version)
                    continue
            # else make dict of version: tag pairs
            cabspecs[name] = dict(version={version: tag for version, tag in speclist}, force=True)
            for version, tag in speclist:
                log.info(f"  {name}: using tag {tag} for version {version}")
        return cabspecs

    def init_pipeline(self, prep_input=True):
        """Create the workspace directory tree, set up logging, and
        (optionally) copy standard MeerKAT input files and notebooks into place."""
        def make_symlink(link, target):
            # (re)point `link` at `target`; refuses to clobber a real file/dir
            if os.path.lexists(link):
                if os.path.islink(link):
                    os.unlink(link)  # old symlink can go
                else:
                    log.warning("{} already exists and is not a symlink, can't relink".format(link))
                    return False
            if not os.path.lexists(link):
                os.symlink(target, link)
                log.info("{} links to {}".format(link, target))
        # First create input folders if they don't exist
        if not os.path.exists(self.input):
            os.mkdir(self.input)
        if not os.path.exists(self.output):
            os.mkdir(self.output)
        if not os.path.exists(self.rawdatadir):
            raise caracal.ConfigurationError(f"{self.rawdatadir} does not exist, check your general config section")
        if not os.path.exists(self.obsinfo):
            os.mkdir(self.obsinfo)
        if not os.path.exists(self.logs):
            os.mkdir(self.logs)
        log.info("output directory for logs is {}".format(self.logs))
        make_symlink(self.logs_symlink, os.path.basename(self.logs))
        if not os.path.exists(self.reports):
            os.mkdir(self.reports)
        if not os.path.exists(self.diagnostic_plots):
            os.mkdir(self.diagnostic_plots)
        if not os.path.exists(self.configFolder):
            os.mkdir(self.configFolder)
        if not os.path.exists(self.caltables):
            os.mkdir(self.caltables)
        if not os.path.exists(self.masking):
            os.mkdir(self.masking)
        if not os.path.exists(self.continuum):
            os.mkdir(self.continuum)
        if not os.path.exists(self.cubes):
            os.mkdir(self.cubes)
        # create proper logfile and start flushing
        # NB (Oleg): placing this into output rather than output/logs to make the reporting notebooks easier
        CARACAL_LOG_BASENAME = 'log-caracal.txt'
        caracal.CARACAL_LOG = os.path.join(self.logs, CARACAL_LOG_BASENAME)
        caracal.log_filehandler.setFilename(caracal.CARACAL_LOG, delay=False)
        # placing a symlink into logs to appease Josh
        make_symlink(os.path.join(self.output, CARACAL_LOG_BASENAME),
                     os.path.join(os.path.basename(self.logs), CARACAL_LOG_BASENAME))
        # Copy input data files into pipeline input folder
        if prep_input:
            log.info("Copying MeerKAT input files into input folder")
            datadir = "{0:s}/data/meerkat_files".format(pckgdir)
            for filename in os.listdir(datadir):
                src = os.path.join(datadir, filename)
                dest = os.path.join(self.input, filename)
                if not os.path.exists(dest):
                    if os.path.isdir(src):
                        shutil.copytree(src, dest)
                    else:
                        shutil.copy2(src, dest, follow_symlinks=False)
        # Copy standard notebooks
        self._init_notebooks = self.config['general']['init_notebooks']
        self._report_notebooks = self.config['general']['report_notebooks']
        all_nbs = set(self._init_notebooks) | set(self._report_notebooks)
        if all_nbs:
            notebooks.setup_default_notebooks(all_nbs, output_dir=self.output, prefix=self.prefix, config=self.config)

    def enable_task(self, config, task):
        """Truthy if `task` is present in `config` and its 'enable' flag is set."""
        return task in config and config[task].get("enable")

    def run_workers(self):
        """ Runs the workers """
        report_updated = False
        # pre-flight: verify that every selected worker module imports
        for _name, _worker, i in self.workers:
            try:
                worker = __import__(_worker)
            except ImportError:
                traceback.print_exc()
                raise ImportError('Worker "{0:s}" could not be found at {1:s}'.format(
                    _worker, self.workers_directory))
        if self.config["general"]["cabs"]:
            log.info("Configuring cab specification overrides")
            cabspecs_general = self.parse_cabspec_dict(self.config["general"]["cabs"])
        else:
            cabspecs_general = {}
        active_workers = []
        # first, check that workers import, and check their configs
        for _name, _worker, i in self.workers:
            config = self.config[_name]
            if 'enable' in config and not config['enable']:
                self.skip.append(_worker)
                continue
            log.info("Configuring worker {}".format(_name))
            try:
                worker = __import__(_worker)
            except ImportError:
                log.error('Error importing worker "{0:s}" from {1:s}'.format(_worker, self.workers_directory))
                raise
            if hasattr(worker, 'check_config'):
                worker.check_config(config, name=_name)
            # check for cab specs (worker-level specs override general ones)
            cabspecs = cabspecs_general
            if config["cabs"]:
                cabspecs = cabspecs.copy()
                cabspecs.update(self.parse_cabspec_dict(config["cabs"]))
            active_workers.append((_name, worker, config, cabspecs))
        # now run the actual pipeline
        for _name, worker, config, cabspecs in active_workers:
            # Define stimela recipe instance for worker
            # Also change logger name to avoid duplication of logging info
            label = getattr(worker, 'LABEL', None)
            if label is None:
                # if label is not set, take filename, and split off _worker.py
                label = os.path.basename(worker.__file__).rsplit("_", 1)[0]
                # if worker name has a __suffix, add that to label
                if "__" in _name:
                    label += "__" + _name.split("__", 1)[1]
            recipe = stimela.Recipe(label,
                                    ms_dir=self.msdir,
                                    singularity_image_dir=self.singularity_image_dir,
                                    log_dir=self.logs,
                                    cabspecs=cabspecs,
                                    logfile=False,  # no logfiles for recipes
                                    logfile_task=f'{self.logs}/log-{label}-{{task}}-{self.timeNow}.txt')
            recipe.JOB_TYPE = self.container_tech
            self.CURRENT_WORKER = _name
            # Don't allow pipeline-wide resume
            # functionality
            os.system('rm -f {}'.format(recipe.resume_file))
            # Get recipe steps
            # 1st get correct section of config file
            log_label = "" if _name == label or _name.startswith(label + "__") else f" ({label})"
            log.info(f"{_name}{log_label}: initializing", extra=dict(color="GREEN"))
            worker.worker(self, recipe, config)
            log.info(f"{_name}{log_label}: running")
            recipe.run()
            log.info(f"{_name}{log_label}: finished")
            # this should be in the cab cleanup code, no?
            casa_last = glob.glob(self.output + '/*.last')
            for file_ in casa_last:
                os.remove(file_)
            # update report at end of worker if so configured
            if self.generate_reports and config["report"]:
                self.regenerate_reports()
                report_updated = True
            else:
                report_updated = False
        # generate final report
        if self.config["general"]["final_report"] and self.generate_reports and not report_updated:
            self.regenerate_reports()
        log.info("pipeline run complete")

    def regenerate_reports(self):
        """Regenerate the standard report notebooks in the output folder."""
        notebooks.generate_report_notebooks(self._report_notebooks, self.output, self.prefix, self.container_tech)
| 21,367 | 45.251082 | 135 | py |
caracal | caracal-master/caracal/workers/inspect_worker.py | # -*- coding: future_fstrings -*-
import itertools
import json
import os
import sys
from collections import OrderedDict, namedtuple
# Mapping/Sequence aliases were removed from `collections` in Python 3.10
from collections.abc import Mapping, Sequence

import numpy as np
import yaml
from stimela.dismissable import dismissable as sdm

import caracal.dispatch_crew.utils as utils
from caracal import log, ConfigurationError
def check_config(config, name):
    """Validate the inspect worker's 'shadems' configuration section.

    Dummy-processes every plot list with empty substitutions so that malformed
    entries raise ConfigurationError at startup rather than mid-run.

    Args:
        config: the worker's configuration dict.
        name: worker name (unused here; part of the check_config contract).
    """
    shadems_cfg = config["shadems"]
    # dummy-process each plots sequence to catch any config errors
    basesubst = dict(msbase="", all_fields="", all_corrs="", bpcal="", gcal="", fcal="", xcal="")
    # NOTE: the middle category was previously misspelled 'plot_by_field',
    # which silently skipped validation of that section (the real key, as
    # consumed by direct_shadems(), is 'plots_by_field').
    for plot_cat in "plots", "plots_by_field", "plots_by_corr":
        _process_shadems_plot_list([], basesubst, shadems_cfg.get(plot_cat, []), {}, plot_cat)
# Miscellaneous functions
def l2d(ins):
    """
    Convert a list (or whitespace-separated string) of command-line tokens
    into a dict mapping each option flag to its space-joined value string.

    Parameters
    ----------
    ins: :obj:`list` or :obj:`str`
        Command-line tokens; a string is split on whitespace first.
    """
    if not isinstance(ins, list):
        ins = ins.split()
    # an option flag starts with '-' but is not a negative number
    flags = [(pos, tok) for pos, tok in enumerate(ins)
             if tok.startswith("-") and not tok.lstrip("-").isdigit()]
    # each flag's value runs up to the next flag (or the end of the tokens)
    stops = [pos for pos, _ in flags[1:]] + [None]
    return {tok: " ".join(ins[pos + 1:stop])
            for (pos, tok), stop in zip(flags, stops)}
def ms_exists(msdir, ms):
    """Return True if measurement set `ms` exists inside directory `msdir`."""
    full_path = os.path.join(msdir, ms)
    return os.path.exists(full_path)
# Input parameter related functions
def check_params(params):
    """
    Drop entries whose value is None, "" or " ", and join list values into
    comma-separated strings.

    Parameters
    ----------
    params: :obj:`dict`
        Raw plotter parameters.

    Returns
    -------
    params: :obj:`dict`
        Cleaned copy of the input dictionary.
    """
    cleaned = {}
    for key, value in params.items():
        if value in (None, "", " "):
            continue
        cleaned[key] = ",".join(value) if isinstance(value, list) else value
    return cleaned
def create_param_group(subgrp_name, subgrp_items, large_grp):
    """
    Extract a named subgroup of parameters from a larger parameter dict.

    Parameters
    ----------
    subgrp_name: :obj:`str`
        Name given to the resulting namespace.
    subgrp_items: :obj:`list`
        Keys of `large_grp` to copy into the subgroup.
    large_grp: :obj:`dict`
        The full parameter dictionary being subdivided.

    Returns
    -------
    Named tuple holding the selected items.
    """
    selection = {key: large_grp[key] for key in subgrp_items}
    return make_namespace(subgrp_name, selection)
def group_configs(configs):
    """
    Split the worker's configuration into three easy-access groups.

    Parameters
    ----------
    configs: :obj:`OrderedDict`
        Dictionary containing inputs from the configuration file.

    Returns
    -------
    Tuple of named tuples:
        general: general pipeline configuration settings
        plot_type: available plot types and their settings
        plot_params: plotting-tool parameters
    """
    grouping = {
        "general": ["enable", "label_in", "label_plot", "dirname",
                    "standard_plotter"],
        "plot_type": ["amp_ant", "amp_chan", "amp_phase", "amp_scan",
                      "amp_uvwave", "phase_chan", "phase_uvwave", "real_imag"],
        "plot_params": ["field", "correlation", "mem_limit", "num_cores",
                        "uvrange"],
    }
    general, plot_type, plot_params = (
        create_param_group(grp_name, grp_items, configs)
        for grp_name, grp_items in grouping.items())
    return (general, plot_type, plot_params)
def make_namespace(name, items):
    """
    Build a named-tuple instance from a dict.

    Parameters
    ----------
    name: :obj:`str`
        Name of the namespace type.
    items: :obj:`dict`
        Mapping of desired field names to their values.

    Returns
    -------
    Named tuple populated with `items`.
    """
    cls = namedtuple(name, " ".join(items))
    return cls(**items)
# Axes, fields and correlation matching functions
def check_data(col):
    """Map a pseudo data-column name to its real MS column name
    (identity for names that need no translation)."""
    translation = {
        "corrected": "CORRECTED_DATA",
        "data": "DATA",
        "model": "MODEL_DATA",
        "scan": "SCAN_NUMBER",
        "antenna1": "ANTENNA1",
    }
    return translation.get(col, col)
def get_xy(plot_name):
    """Return the x/y (and optional colour) axis names for a standard plot
    such as 'amp_scan'."""
    # (xaxis, yaxis, colour); a colour of None means "no colour axis"
    axes = {
        "amp_ant": ("antenna1", "amp", None),
        "amp_chan": ("chan", "amp", None),
        "amp_phase": ("phase", "amp", None),
        "amp_scan": ("scan", "amp", None),
        "amp_uvwave": ("uvwave", "amp", "scan"),
        "phase_chan": ("chan", "phase", None),
        "phase_uvwave": ("uvwave", "phase", "scan"),
        "real_imag": ("imag", "real", "scan"),
    }
    xaxis, yaxis, colour = axes[plot_name]
    selected = {"xaxis": xaxis, "yaxis": yaxis}
    if colour is not None:
        selected["colour"] = colour
    return selected
def get_cfg_fields(pipeline, iobs, cfg_field, label_in):
    """
    Resolve representative field names (e.g. bpcal) into actual field names.

    Parameters
    ----------
    pipeline:
        caracal pipeline object
    iobs: :obj:`int`
        Item number in observation list
    cfg_field: :obj:`str`
        Field selection from the configuration file: 'calibrators', 'target',
        or a whitespace-separated list of calibrator types
    label_in: str
        Label associated with input MS (currently unused here)

    Returns
    -------
    fields: obj:`dict`
        A dictionary of form field_name: [repr_type, ...] or None if the
        selected fields were invalid/not available
    """
    known = {
        "calibrators": ['bpcal', 'gcal', 'fcal', 'xcal'],
        "target": ["target"]
    }
    f_types = known.get(cfg_field)
    if f_types is None:
        # not a shorthand: treat as an explicit list of calibrator types.
        # NOTE(review): split() is on whitespace, though the original
        # docstring claimed comma-separated -- confirm intended separator
        requested = set(cfg_field.split())
        f_types = requested if requested.issubset(known["calibrators"]) else []
    # map each field name to the representative type(s) that selected it
    fields = {}
    for f_type in f_types:
        for fname in getattr(pipeline, f_type)[iobs]:
            fields.setdefault(fname, []).append(f_type)
    # empty dict means nothing matched: signal with None
    return fields or None
def get_cfg_corrs(cfg_corr, ms_corrs):
    """Translate a correlation-selection keyword into explicit corr labels."""
    if cfg_corr in ('auto', 'all'):
        # every correlation present in the MS
        return ','.join(ms_corrs)
    if cfg_corr in ('diag', 'parallel'):
        # parallel-hand products: first and last of XX,XY,YX,YY (or RR,...,LL),
        # which also works when the list only has two entries
        return ",".join([ms_corrs[0], ms_corrs[-1]])
    # already an explicit selection
    return cfg_corr
# Recipe functions
def plotms(pipeline, recipe, basic, extras=None):
    """
    Add a CASA plotms step to the stimela recipe.

    Parameters
    ----------
    pipeline:
        caracal pipeline object containing the general pipeline details.
    recipe:
        Stimela recipe object to which the step is added.
    basic: :obj:`dict`
        Basic plotter parameters. 'step', 'label' and 'output_dir' are popped
        out for recipe bookkeeping; the rest map onto plotms cab arguments.
        Note: this dict is mutated in place (pops plus the 'data' rewrite).
    extras: :obj:`dict` (optional)
        Extra keyword arguments passed verbatim to the plotms cab; these
        override the defaults built here.
    """
    step = basic.pop("step")
    label = basic.pop("label")
    output_dir = basic.pop("output_dir")
    # plotms wants the bare lowercase column keyword (e.g. 'corrected')
    basic["data"] = basic["data"].split("_")[0].lower()
    cab_args = {
        "xaxis": basic["xaxis"],
        "yaxis": basic["yaxis"],
        "vis": basic["ms"],
        "xdatacolumn": basic["data"],
        "ydatacolumn": basic["data"],
        "correlation": basic["corr"],
        "field": basic["field"],
        "iteraxis": basic["iterate"],
        "coloraxis": basic.get("colour", None),
        "plotfile": basic["output"],
        "expformat": 'png',
        "exprange": 'all',
        "overwrite": True,
        "showgui": False,
    }
    if extras:
        cab_args.update(extras)
    # strip empty/None values and flatten lists
    cab_args = check_params(cab_args)
    recipe.add("cab/casa_plotms", step, cab_args,
               input=pipeline.input, output=output_dir,
               label=label, memory_limit=None, cpus=None)
def _process_shadems_plot_list(plot_args, basesubst, plotlist, defaults, description, extras=None):
    """Processes a list of plots, recursing into dicts.

    Each ``plotlist`` entry is either a string of shade-ms command-line
    arguments, or a mapping that supplies new defaults plus a nested 'plots'
    list. Fully-resolved argument strings are appended to ``plot_args``
    (mutated in place).

    Args:
        plot_args: output list; one shade-ms command-line string appended
            per resolved plot.
        basesubst: substitution dict applied via str.format to string values
            (msbase, all_fields, all_corrs, bpcal, ...).
        plotlist: sequence of plot specs (strings and/or mappings).
        defaults: dict of default '--option': value pairs inherited from the
            enclosing category/section.
        description: human-readable path used in error messages.
        extras: optional dict of arguments overriding everything else.

    Raises:
        ConfigurationError: on a mapping without a 'plots' sequence, or an
            entry of unexpected type.
    """
    for entry in plotlist:
        if not entry:
            continue
        # if plot is specified as a dict, its keys will override category defaults
        if isinstance(entry, Mapping):
            # copy so the pops below don't mutate the caller's config
            entry = entry.copy()
            desc = entry.pop("desc", "")
            comment = entry.pop("comment", "")
            enable = entry.pop("enable", True)
            plots = entry.pop("plots", None)
            if not isinstance(plots, Sequence):
                raise ConfigurationError(f"{description}: expecting a 'plots' sequence")
            # skip enable=False entries
            if not enable:
                log.info(f"shadems plot section '{desc}' is explicitly disabled in the config file")
                continue
            # all other keys go into new defaults (with substitutions done);
            # config keys use '_' while shade-ms options use '-'
            new_defaults = defaults.copy()
            new_defaults.update(**{"--" + key.replace("_", "-"): val.format(**basesubst) if isinstance(val, str) else val
                                   for key, val in entry.items()})
            # and recurse into new plot list
            _process_shadems_plot_list(plot_args, basesubst, plots, new_defaults, f"{description}: {desc or comment}", extras=extras)
        elif isinstance(entry, str):
            # add user-defined substitutions
            plot = entry.format(**basesubst)
            # convert argument list to dictionary for easy update
            args = l2d(plot)
            # add in defaults
            for key, value in defaults.items():
                args.setdefault(key, value)
            # add in extras, if any
            if extras:
                args.update(extras)
            # convert to list of arguments and add to plotlist
            # arg values of None and True and "" represent command-line arguments without a value
            cmdline_args = []
            for option, value in args.items():
                cmdline_args.append(option)
                if value not in (None, True, ""):
                    cmdline_args.append(str(value))
            plot_args.append(" ".join(cmdline_args))
        else:
            raise ConfigurationError(f"{description}: unexpected 'plots' entry of type {type(entry)}")
def direct_shadems(pipeline, recipe, shade_cfg, extras=None):
    """
    Create a recipe step for the (newer) shade-ms plots.

    Parameters
    ----------
    pipeline:
        caracal pipeline object.
    recipe:
        Stimela recipe to which the plotting step is added.
    shade_cfg: :obj:`dict`
        The worker's 'shadems' config section, augmented by the caller with
        ms/iobs/label/corrs/fields/ms_base/output_dir keys. Mutated in place
        ('iobs', 'ms_base' and 'label' are popped).
    extras: :obj:`dict` (optional)
        Unused at present; kept for signature parity with the other plotters.
    """
    iobs = shade_cfg.pop("iobs")
    step = f"plot-shadems-ms{iobs}"
    msbase = shade_cfg.pop("ms_base")
    label = shade_cfg.pop("label")
    fields = shade_cfg["fields"]
    # some user facing substitutions for fields, corrs, and base MS name
    basesubst = dict(
        msbase=msbase,
        all_fields=",".join(fields.keys()),
        all_corrs=shade_cfg["corrs"]
    )
    # also map each representative type (bpcal, target, ...) to its field name
    for _f in fields.keys():
        for _ft in fields[_f]:
            basesubst[_ft] = _f
    # groups of plots available, with the implied extra shade-ms arguments
    plot_cats = {
        "plots_by_field": {"--iter-field": "",
                           "--field": basesubst["all_fields"]},
        "plots_by_corr": {"--iter-corr": ""},
        "plots": {}
    }
    # keep only the plot-list keys (drops enable, ignore_errors, etc.)
    bares = {k: v for k, v in shade_cfg.items()
             if k in ("plots_by_field", "plots_by_corr", "plots")}
    plot_args = []
    # for each plot category i.e. plots, plots-by-field, plots-by-corr
    for plot_cat, plotlist in bares.items():
        # make dict of default arguments for this plot type
        category_defaults = {
            "--title": "'{ms} {_field}{_Spw}{_Scan}{_Ant}{_title}{_Alphatitle}{_Colortitle}'",
            "--col": shade_cfg["default_column"],
            "--png": f"{label}-{msbase}-{{field}}{{_Spw}}{{_Scan}}{{_Ant}}-{{label}}{{_alphalabel}}{{_colorlabel}}{{_suffix}}.png",
            "--corr": shade_cfg["corrs"],
            ** plot_cats[plot_cat]
        }
        _process_shadems_plot_list(plot_args, basesubst, plotlist, category_defaults, plot_cat)
    if len(plot_args) == 0:
        # message previously referred to 'plot_by_field'/'plot_by_corr', which
        # are not the actual config keys; corrected to match the schema
        log.warning(
            "The shadems section doesn't contain any enabled 'plots_by_field' or 'plots_by_corr' or 'plots' entries.")
    else:
        recipe.add("cab/shadems_direct", step,
                   dict(ms=shade_cfg["ms"],
                        args=plot_args,
                        ignore_errors=shade_cfg["ignore_errors"]),
                   input=pipeline.input, output=shade_cfg["output_dir"],
                   label=f"{step}:: Plotting", memory_limit=None, cpus=None)
def shadems(pipeline, recipe, basic, extras=None):
    """
    Add a shadems plotting step (one per correlation) to the stimela recipe.

    See docstring of :func:`plotms` for parameter descriptions.

    Fixes over the previous revision: a plot without a 'colour' axis no
    longer crashes (shade_cols was called with None, eagerly evaluating
    None.upper()), and the 'imag' axis name no longer carries a stray
    leading space.
    """
    step = basic.pop("step")
    label = basic.pop("label")
    output_dir = basic.pop("output_dir")
    # contains the var names to be used as the suffix in case of iteration
    iter_axes = {"field": "{_field}",
                 "spw": "{_Spw}",
                 "scan": "{_Scan}",
                 "baseline": "{_Baseline}",
                 "ant": "{_Ant}"}
    # translate caracal axis names into ones shadems understands
    col_names = {
        "antenna1": "ANTENNA1", "scan": "SCAN_NUMBER",
        "chan": "CHAN", "freq": "FREQ",
        "amp": "amp", "phase": "phase",
        "real": "real", "imag": "imag",  # was " imag": stray leading space
        "uvwave": "UV", "baseline": "UV"
    }

    # get a name conforming to those allowed in shadems
    def shade_cols(_c, names): return names.get(_c, _c.upper())

    # get the correlation names for the args
    corrs = basic["corr"].split(",")
    # iterate over correlation because of shadems naming issues
    for _corr in corrs:
        # colour axis is optional; guard so shade_cols never sees None
        colour = basic.get("colour", None)
        shadems_keys = {
            "col": basic["data"],
            "xaxis": shade_cols(basic["xaxis"], col_names),
            "yaxis": shade_cols(basic["yaxis"], col_names),
            "ms": basic["ms"],
            "corr": _corr,
            "field": basic["field"],
            "colour-by": shade_cols(colour, col_names) if colour else None,
            "png": f"{basic['output']}-corr-{_corr}.png",
            # "mem_limit": basic["mem_limit"],
            "num-parallel": basic["num_cores"]
        }
        iterate = basic["iterate"]
        if iterate and (iterate in iter_axes):
            shadems_keys.update(
                {f"iter-{iterate}": True,
                 "png": f"{basic['output']}-corr-{_corr}{iter_axes[iterate]}.png"})
        if shadems_keys["colour-by"] == "baseline":
            shadems_keys["colour-by"] = "UV"
        if extras:
            shadems_keys.update(extras)
        # remove any empties or none (also drops colour-by when unset)
        shadems_keys = check_params(shadems_keys)
        recipe.add("cab/shadems", step, shadems_keys,
                   input=pipeline.input, output=output_dir,
                   label=label, memory_limit=None, cpus=None)
def ragavi_vis(pipeline, recipe, basic, extras=None):
    """
    Add a ragavi-vis plotting step to the stimela recipe.

    See docstring of :func:`plotms` for parameter descriptions.
    """
    step = basic.pop("step")
    label = basic.pop("label")
    output_dir = basic.pop("output_dir")
    cab_args = {
        "data-column": basic["data"],
        "xaxis": basic["xaxis"],
        "yaxis": basic["yaxis"],
        "ms": basic["ms"],
        "corr": basic["corr"],
        "field": basic["field"],
        "iter-axis": basic["iterate"],
        "colour-axis": basic.get("colour", None),
        "htmlname": f"{basic['output']}.html",
        # "cbin": basic["avgchan"],
        # "tbin": basic["avgtime"],
        "canvas-width": 1080,
        "canvas-height": 720,
        "mem-limit": basic["mem_limit"],
        "num-cores": basic["num_cores"]
    }
    if extras:
        cab_args.update(extras)
    # drop empty/None values and flatten any lists
    cab_args = check_params(cab_args)
    recipe.add("cab/ragavi_vis", step, cab_args,
               input=pipeline.input, output=output_dir,
               label=label, memory_limit=None, cpus=None)
# main function
def worker(pipeline, recipe, config):
    """
    Inspect worker driver function
    1. Parses inputs from the worker's configuration file
    2. Iterate over observations
        - Iterate over mss for this observation
        - Iterate over the plots available
        - Iterate over the required fields
        - Form the plotter's arguments
        - Call plotter's function to add to stimela recipe
    """
    gen_params, plot_axes, plotter_params = group_configs(config)
    plot_axes = plot_axes._asdict()
    # general pipeline setup
    nobs = pipeline.nobs
    subdir = gen_params.dirname
    label_in = gen_params.label_in
    label = gen_params.label_plot
    plotter = gen_params.standard_plotter
    # use default output dir if no explict output dir was specified
    if subdir:
        output_dir = os.path.join(pipeline.diagnostic_plots, subdir)
    else:
        output_dir = pipeline.diagnostic_plots
    for iobs in range(nobs):
        mslist = pipeline.get_mslist(iobs, label_in,
                                     target=(config['field'] == 'target'))
        for ms in mslist:
            if not ms_exists(pipeline.msdir, ms):
                raise IOError(f"MS {ms} does not exist. Please check that is where it should be.")
            log.info(f"Plotting MS: {ms}")
            ms_base = os.path.splitext(ms)[0]
            ms_info_dict = pipeline.get_msinfo(ms)
            # get corr types for MS
            ms_corrs = ms_info_dict["CORR"]["CORR_TYPE"]
            corrs = get_cfg_corrs(plotter_params.correlation, ms_corrs)
            fields = get_cfg_fields(pipeline, iobs, plotter_params.field,
                                    label_in)
            if fields is None:
                # BUGFIX: 'fields' is None in this branch, so the previous
                # ",".join(fields) raised a TypeError that masked this
                # ValueError. Report the user's actual selection instead.
                raise ValueError(f"""
                Eligible values for 'field': 'target', \
                'calibrators', 'fcal', 'bpcal', 'xcal' or 'gcal'. \
                User selected {plotter_params.field}""")
            # for the newer plots to shadems
            if pipeline.enable_task(config, "shadems"):
                shade_cfg = config["shadems"]
                shade_cfg.update({
                    "ms": ms,
                    "iobs": iobs,
                    "label": label,
                    "corrs": corrs,
                    "fields": fields,
                    "ms_base": ms_base,
                    "output_dir": output_dir})
                direct_shadems(pipeline, recipe, shade_cfg)
            # the older plots (plotms / shadems / ragavi_vis dispatch by name)
            if plotter and plotter != "none":
                for axes in plot_axes:
                    # only plot axes the user enabled; drop the 'enable' key
                    # so it is not forwarded as a plotter argument
                    if pipeline.enable_task(config, axes):
                        del plot_axes[axes]["enable"]
                    else:
                        continue
                    plot_args = get_xy(axes)
                    for fname, ftype in fields.items():
                        plot_args.update({
                            "ms": ms,
                            "data": check_data(plot_axes[axes].get("col")),
                            "corr": corrs,
                            "iterate": "corr",
                            # "colour": "scan",
                            "num_cores": plotter_params.num_cores,
                            "mem_limit": plotter_params.mem_limit,
                            "uvrange": plotter_params.uvrange,
                            "field": fname,
                            "output": f"{label}-{ms_base}-{ftype[0]}-{fname}-{axes}",
                            "output_dir": output_dir,
                            "step": f"plot-{axes}-{iobs}-{ftype[0]}",
                            "label": label,
                            **plot_axes[axes]})
                        # dispatch to the module-level plotter function
                        # named by the config (e.g. 'ragavi_vis')
                        globals()[plotter](pipeline, recipe, plot_args, extras=None)
| 21,274 | 32.398744 | 133 | py |
caracal | caracal-master/caracal/workers/crosscal_worker.py | # -*- coding: future_fstrings -*-
from collections import OrderedDict
import sys
import os
import caracal.dispatch_crew.utils as utils
import caracal
import stimela.dismissable as sdm
from caracal.workers.utils import manage_flagsets as manflags
from caracal.workers.utils import manage_caltabs as manGtabs
from caracal.workers.utils import manage_antennas as manants
from caracal.workers.utils import callibs
import copy
import re
import json
import glob
import shutil
import numpy as np
from casacore.tables import table
from caracal.utils.requires import extras
NAME = "Cross-calibration"
LABEL = 'crosscal'
def check_config(config, name):
    """
    Validate the 'primary' and 'secondary' sections of the worker config:
    every step in 'order' must be a known RULES key, and 'calmode',
    'solint' and 'combine' must each have one entry per step.
    Raises caracal.ConfigurationError on any violation.
    """
    for primsec in ("primary", "secondary"):
        section = config[primsec]
        order = section["order"]
        # reject any step letter that RULES does not know about
        invalid = [step for step in order if step not in RULES]
        if invalid:
            raise caracal.ConfigurationError(
                f"{name}: {primsec}: order: invalid steps {','.join(invalid)}")
        # the per-step option lists must line up with the order string
        for other in ("calmode", "solint", "combine"):
            if len(section[other]) != len(order):
                raise caracal.ConfigurationError(
                    f"{name}: {primsec}: {other}: expected {len(order)} elements, found {len(section[other])}")
# E.g. to split out continuum/<dir> from output/continuum/dir
def get_dir_path(string, pipeline):
    """Return *string* relative to the pipeline output directory,
    e.g. output/continuum/<dir> -> continuum/<dir>."""
    pieces = string.split(pipeline.output)
    # pieces[1] starts with the path separator; strip it off
    return pieces[1][1:]
FLAG_NAMES = [""]
def exists(outdir, path):
    """Return True if *path* exists inside directory *outdir*."""
    return os.path.exists(os.path.join(outdir, path))
# Rules for interpolation mode to use when applying calibration solutions
# Each single-letter key below is a step that may appear in the user's
# "order" string (e.g. "KGB"). For solve terms (K/G/F/B) the entry names the
# stimela cab, the CASA gain type, and the default field/interp used when
# solving and applying; "A" (auto-flagging) and "I" (imaging) are apply-side
# steps handled by do_IA in solve().
RULES = {
    "K": {
        "name": "delay_cal",
        "interp": "linear",
        "cab": "cab/casa_gaincal",
        "gaintype": "K",
        "field": "bpcal",
    },
    "G": {
        "name": "gain_cal",
        "interp": "linear",
        "cab": "cab/casa_gaincal",
        "gaintype": "G",
        "mode": "ap",
        "field": "gcal",
    },
    "F": {
        "name": "gaincal_for_Ftable",
        "interp": "linear",
        "cab": "cab/casa_gaincal",
        "gaintype": "F",
        "mode": "ap",
        "field": "gcal",
    },
    "B": {
        "name": "bp_cal",
        "interp": "linear",
        "cab": "cab/casa_bandpass",
        "field": "bpcal",
    },
    "A": {
        "name": "auto_flagging",
        "cab": "cab/casa_flagdata",
        "mode": "tfcrop",
    },
    "I": {
        "name": "image",
        "cab": "cab/wsclean",
    },
    "S": {
        "name": "slope_freq_delay",
        "cab": "cab/casa_fringefit",
    },
}
# Maps a config section name ("primary"/"secondary"/"bandpass_cal") to the
# pipeline attribute that lists the corresponding calibrator field(s).
CALS = {
    "primary": "fcal",
    "secondary": "gcal",
    "bandpass_cal": "bpcal",
}
def first_if_single(items, i):
    """Return items[i]; if *i* is out of range (e.g. the user supplied a
    single value for several solve steps), fall back to items[0]."""
    try:
        value = items[i]
    except IndexError:
        value = items[0]
    return value
def get_last_gain(gaintables, my_term="dummy"):
    """
    Return the indices of the last occurrence of each gain type in
    *gaintables*, excluding the types listed in *my_term*.

    Table names follow the "<prefix>_<ftype>.<TERM><iter>" convention
    (e.g. "prefix_primary.G0"), so the gain type is the first character
    after the final dot.

    Fixes over the previous version:
    - iterating ``set(gtype)`` made the output order depend on string-hash
      randomization (nondeterministic across runs); indices are now
      returned sorted ascending, i.e. in table-creation order.
    - the type was read with ``tab[-2]``, which breaks once the iteration
      counter reaches 10 (e.g. ".G10"); it is now parsed after the final dot.

    Parameters:
        gaintables: list of gain-table names (may be empty).
        my_term: a type letter or list of type letters to exclude.
    Returns:
        Sorted list of integer indices into *gaintables*.
    """
    if isinstance(my_term, str):
        my_term = [my_term]
    if not gaintables:
        return []
    # last write wins: maps each gain type to its last index of occurrence
    last_index = {}
    for idx, tab in enumerate(gaintables):
        gain_type = tab.rsplit(".", 1)[-1][0]
        last_index[gain_type] = idx
    return sorted(idx for term, idx in last_index.items() if term not in my_term)
def solve(msname, msinfo, recipe, config, pipeline, iobs, prefix, label, ftype,
          append_last_secondary=None, prev=None, prev_name=None, smodel=False):
    """
    Add gain-solving (and optional imaging/auto-flagging) steps for one
    calibrator type ("primary" or "secondary") to the stimela recipe.

    The user's ``order`` string (e.g. "KGB") is split into alternating
    solve groups (letters K/G/B/F) and apply groups (letters I/A); each
    letter is dispatched to the nested do_KGBF (solve) or do_IA (apply)
    closures below, which read the loop variables (term, itern, params,
    step, ...) from this enclosing scope.

    When *prev*/*prev_name* are given (secondary after primary), the
    previous solve's tables listed in config[ftype]['apply'] are seeded
    as on-the-fly apply tables.

    NOTE(review): *append_last_secondary* is accepted but not used in this
    function body -- confirm whether it is still needed.

    Returns a dict with the accumulated 'gaintables', 'interps',
    per-term iteration counters 'iters', and 'gainfield' entries, for
    use by the later applycal steps.
    """
    gaintables = []
    interps = []
    fields = []
    iters = {}
    # seed on-the-fly apply tables from the previous (primary) solve
    if prev and prev_name:
        for item in config[ftype]['apply']:
            gaintables.append("%s_%s.%s%d" % (prefix, prev_name, item, prev["iters"][item]))
            ft = RULES[item]["field"]
            fields.append(",".join(getattr(pipeline, ft)[iobs]))
            interps.append(RULES[item]["interp"])
    field = getattr(pipeline, CALS[ftype])[iobs]
    order = config[ftype]["order"]
    field_id = utils.get_field_id(msinfo, field)

    def do_KGBF(i):
        """Add one solve step (K/G/B/F) for position *i* in the order string.

        Reads term/itern/params/step/name from the enclosing loop.
        """
        # NOTE(review): gtable_/ftable_ are never used below -- confirm vestigial.
        gtable_ = None
        ftable_ = None
        interp = RULES[term]["interp"]
        # reference antenna: auto-select or take the configured one
        if pipeline.refant[iobs] in ['auto']:
            params["refant"] = manants.get_refant(pipeline, recipe,
                                                  prefix, msname, fields,
                                                  pipeline.minbase[iobs],
                                                  pipeline.maxdist[iobs], i)
            if params["refant"]:
                caracal.log.info(f"Auto selected ref antenna(s): {params['refant']}")
            else:
                caracal.log.error("Cannot auto-select ref antenna(s). Set it manually.")
        else:
            params["refant"] = pipeline.refant[iobs]
        params["solint"] = first_if_single(config[ftype]["solint"], i)
        params["combine"] = first_if_single(config[ftype]["combine"], i).strip("'")
        params["field"] = ",".join(field)
        caltable = "%s_%s.%s%d" % (prefix, ftype, term, itern)
        params["caltable"] = caltable + ":output"
        my_term = term
        # use a unit point-source model only before any imaging step has run
        did_I = 'I' in order[:i + 1]
        if not did_I and smodel and term in "KGF":
            params["smodel"] = ["1", "0", "0", "0"]
        # allow selection of band subset(s) for gaincal see #1204 on github issue tracker
        if term in "GF":
            params["spw"] = config[ftype]["spw_g"]
            params["scan"] = config[ftype]["scanselection"]
        elif term == "K":
            params["spw"] = config[ftype]["spw_k"]
            params["scan"] = config[ftype]["scanselection"]
        # per-term solver options
        if term == "B":
            params["bandtype"] = term
            params["solnorm"] = config[ftype]["b_solnorm"]
            params["fillgaps"] = config[ftype]["b_fillgaps"]
            params["uvrange"] = config["uvrange"]
            params["scan"] = config[ftype]["scanselection"]
        elif term == "K":
            params["gaintype"] = term
            params["scan"] = config[ftype]["scanselection"]
        elif term in "FG":
            # both F and G are solved as CASA gaintype "G"; F additionally
            # appends to a copy of the primary's G table so that fluxscale
            # can bootstrap the secondary's flux scale afterwards
            my_term = ["F", "G"]
            if term == "F":
                # Never append to the original. Make a copy for each F that is needed
                caltable_original = "%s_%s.G%d" % (prefix, prev_name, prev["iters"]["G"])
                primary_G = "%s_%s_append-%d.G%d" % (prefix, prev_name, itern, prev["iters"]["G"])
                caltable_path_original = os.path.join(pipeline.caltables, caltable_original)
                caltable_path = os.path.join(pipeline.caltables, primary_G)
                params["append"] = True
                caltable = "%s_%s.F%d" % (prefix, ftype, itern)
                params["caltable"] = primary_G + ":output"
            else:
                params["scan"] = config[ftype]["scanselection"]
            params["gaintype"] = "G"
            params["uvrange"] = config["uvrange"]
            params["calmode"] = first_if_single(config[ftype]["calmode"], i).strip("'")
        # apply the latest table of every other type on the fly while solving
        otf_apply = get_last_gain(gaintables, my_term=my_term)
        if otf_apply:
            params["gaintable"] = [gaintables[count] + ":output" for count in otf_apply]
            params["interp"] = [interps[count] for count in otf_apply]
            params["gainfield"] = [fields[count] for count in otf_apply]
        # optionally reuse an existing gain table instead of re-solving
        can_reuse = False
        if config[ftype]["reuse_existing_gains"] and exists(pipeline.caltables, caltable):
            # check if field is in gain table
            fields_in_tab = set(table(os.path.join(pipeline.caltables, caltable), ack=False).getcol("FIELD_ID"))
            if fields_in_tab.issubset(field_id):
                can_reuse = True
        if can_reuse:
            caracal.log.info("Reusing existing gain table '%s' as requested" % caltable)
        else:
            if term == "F":
                # refresh the appendable copy of the primary G table
                if os.path.exists(caltable_path):
                    shutil.rmtree(caltable_path)
                cpstep = "copy_primary_gains_%s-%s-%d-%d-%s" % (name, label, itern, iobs, ftype)
                recipe.add(shutil.copytree, cpstep, {
                    "src": caltable_path_original,
                    "dst": caltable_path,
                }, label="{0}:: Copy parimary gains".format(step))
            recipe.add(RULES[term]["cab"], step,
                       copy.deepcopy(params),
                       input=pipeline.input, output=pipeline.caltables,
                       label="%s:: %s calibration" % (step, term))
            if term == "F":
                # bootstrap the secondary flux scale from the appended G table
                transfer_fluxscale(msname, recipe, primary_G + ":output", caltable + ":output", pipeline,
                                   iobs, reference=pipeline.fluxscale_reference, label=label)
            elif term == "B" and config[ftype]["b_smoothwindow"] > 1:
                recipe.add(smooth_bandpass, 'smooth_bandpass', {
                    "bptable": '{0:s}/{1:s}'.format(pipeline.caltables, caltable),
                    "window": config[ftype]["b_smoothwindow"],
                },
                    input=pipeline.input,
                    output=pipeline.output,
                    label='smooth bandpass')
        # Assume gains were plotted when they were created
        if config[ftype]["plotgains"] and not can_reuse:
            plotgains(recipe, pipeline, field_id if term != "F" else None, caltable, iobs, term=term)
        fields.append(",".join(field))
        interps.append(interp)
        gaintables.append(caltable)

    def do_IA(i):
        """Add one apply-side step (I=image, A=auto-flag) for position *i*.

        Applies the accumulated gains first (once per apply group).
        """
        if i == 0:
            raise RuntimeError("Have encountered an imaging/flagging request before any gains have been computed."
                               "an I only makes sense after a G or K (usually both)."
                               "Please review your 'order' option in the self_cal:secondary section")
        if not applied:
            applycal(latest_KGBF_group, msname, recipe, gaintables,
                     interps, fields, CALS[ftype], pipeline, iobs,
                     calmode="calflag")
        else:
            caracal.log.info(
                "Gains have already been applied using this exact set of gain tables and fields. Skipping unnecessary applycal step")
        if term == "A":
            if not set("KGBF").intersection(order[:i]):
                raise RuntimeError(
                    "Have encountered a request to flag the secondary calibrator without any gain, bandpass or delay tables to apply first.")
            step = "%s-%s-%d-%d-%s" % (name, label, itern, iobs, ftype)
            params["mode"] = RULES[term]["mode"]
            params["field"] = ",".join(field)
            params["datacolumn"] = config[ftype]["flag"]["col"]
            params["usewindowstats"] = config[ftype]["flag"]["usewindowstats"]
            params["combinescans"] = config[ftype]["flag"]["combinescans"]
            params["flagdimension"] = config[ftype]["flag"]["flagdimension"]
            params["flagbackup"] = False
            params["timecutoff"] = config[ftype]["flag"]["timecutoff"]
            params["freqcutoff"] = config[ftype]["flag"]["freqcutoff"]
            params["correlation"] = config[ftype]["flag"]["correlation"]
            recipe.add(RULES[term]["cab"], step,
                       copy.deepcopy(params),
                       input=pipeline.input, output=pipeline.output,
                       label="%s::" % step)
        else:
            # term == "I": image each calibrator field with wsclean
            for fid in field_id:
                step = "%s-%s-%d-%d-%s-field%d" % (name, label, itern, iobs, ftype, fid)
                calimage = "%s-%s-I%d-%d-field%d:output" % (prefix, ftype, itern, iobs, fid)
                cab_params = {
                    "msname": msname,
                    "name": calimage,
                    "size": config[ftype]["image"]['npix'],
                    "scale": config[ftype]["image"]['cell'],
                    "join-channels": False if config[ftype]["image"]["nchans"] == 1 else True,
                    "fit-spectral-pol": config[ftype]["image"]["fit_spectral_pol"],
                    "channels-out": config[ftype]["image"]['nchans'],
                    "auto-threshold": config[ftype]["image"]['auto_threshold'],
                    "local-rms-window": config[ftype]["image"]['rms_window'],
                    "local-rms": config[ftype]["image"]['local_rms'],
                    "padding": config[ftype]["image"]['padding'],
                    "niter": config[ftype]["image"]['niter'],
                    "weight": config[ftype]["image"]["weight"],
                    "mgain": config[ftype]["image"]['mgain'],
                    "field": fid}
                # use an external FITS mask matching this field id, else auto-mask
                if config[ftype]["image"]['external_fits_masks']:
                    mask_file = ''
                    for mask in config[ftype]["image"]['external_fits_masks']:
                        if str(fid) in [mask.split('-')[-1]]:
                            mask_file = f"{mask}.fits"
                    if mask_file:
                        cab_params.update({"fits-mask": mask_file})
                    else:
                        cab_params.update({"auto-mask": config[ftype]["image"]['auto_mask']})
                else:
                    cab_params.update({"auto-mask": config[ftype]["image"]['auto_mask']})
                recipe.add(RULES[term]["cab"], step,
                           cab_params,
                           input=pipeline.input, output=pipeline.crosscal_continuum,
                           label="%s:: Image %s field" % (step, ftype))

    nterms = len(order)
    # terms that need an apply
    groups_apply = list(filter(lambda g: g, re.findall("([AI]+)?", order)))
    # terms that need a solve
    groups_solve = list(filter(lambda g: g, re.findall("([KGBF]+)?", order)))
    # Order has to start with solve group.
    # TODO(sphe) in the philosophy of giving user enough roap to hang themselves
    # Release II will allow both starting with I/A in case
    # someone wants to apply primary gains to the secondary
    n_apply = len(groups_apply)
    n_solve = len(groups_solve)
    # interleave: solve groups at even slots, apply groups at odd slots
    groups = [None] * (n_apply + n_solve)
    groups[::2] = groups_solve  # even indices
    groups[1::2] = groups_apply  # odd indices
    # no need to apply gains multiple when encountering consecutive terms that need to apply
    applied = False
    i = -1  # global position in the order string
    for jj, group in enumerate(groups):
        for g, term in enumerate(group):
            i += 1
            # if this is not the case, then something has gone horribly wrong
            assert term == order[i]
            if (jj % 2) == 0:  # even counter is solve group
                even = True
                latest_KGBF_group = group
            else:
                latest_IA_group = group
                even = False
            # only the first term of an apply group triggers an applycal
            if g == 0:
                applied = False
            else:
                applied = True
            name = RULES[term]["name"]
            # per-term iteration counter (K0, K1, ... across the order string)
            if term in iters:
                iters[term] += 1
            else:
                iters[term] = 0
            itern = iters[term]
            params = {}
            params["vis"] = msname
            step = "%s-%s-%d-%d-%s" % (name, label, itern, iobs, ftype)
            if even:
                do_KGBF(i)
            else:
                do_IA(i)
    return {
        "gaintables": gaintables,
        "interps": interps,
        "iters": iters,
        "gainfield": fields,
    }
def plotgains(recipe, pipeline, field_id, gtab, i, term):
    """
    Add a ragavi step to the recipe that plots the gain table *gtab*.

    When *field_id* is None (e.g. for F tables) all fields are plotted;
    otherwise the plot is restricted to the listed field ids.
    """
    field_tag = "".join(str(f) for f in (field_id or []))
    step = "plotgains-%s-%d-%s" % (term, i, field_tag)
    params = {
        "table": f"{gtab}:msfile",
        "corr": '',
        "htmlname": gtab,
        "plotname": "{}.png".format(gtab),
    }
    if field_id is not None:
        params["field"] = ",".join(str(f) for f in field_id)
    recipe.add('cab/ragavi', step, params,
               input=pipeline.input,
               msdir=pipeline.caltables,
               output=os.path.join(pipeline.diagnostic_plots, "crosscal"),
               label='{0:s}:: Plot gaincal phase'.format(step))
def transfer_fluxscale(msname, recipe, gaintable, fluxtable, pipeline, i, reference, label=""):
    """
    Add a CASA fluxscale step that bootstraps the flux scale from
    *gaintable* into *fluxtable*, using *reference* as the flux reference
    field.
    """
    step = "transfer_fluxscale-%s-%d" % (label, i)
    fluxscale_opts = {
        "vis": msname,
        "caltable": gaintable,
        "fluxtable": fluxtable,
        "reference": reference,
        "transfer": "",
    }
    recipe.add("cab/casa_fluxscale", step, fluxscale_opts,
               input=pipeline.input, output=pipeline.caltables,
               label="Transfer fluxscale")
def get_caltab_final(order, gaintable, interp, gainfield, field):
    """
    Select the final set of tables to apply: the last table of each gain
    type, dropping either the F or the G tables depending on which was
    solved later in *order* (they are alternative amplitude/phase
    solutions and must not both be applied).

    Returns (gaintables, interps, fields) lists of equal length.
    """
    rorder = list(reversed(order))

    def last_pos(term):
        # distance of the last occurrence of *term* from the end of the order
        return rorder.index(term) if term in rorder else np.inf

    gi = last_pos("G")
    fi = last_pos("F")
    if fi == gi:
        # neither G nor F present (both inf): exclude nothing
        lidx = get_last_gain(gaintable)
    elif gi < fi:
        # G solved after F: keep G, drop F tables
        lidx = get_last_gain(gaintable, my_term="F")
    else:
        # F solved after G: keep F, drop G tables
        lidx = get_last_gain(gaintable, my_term="G")
    gaintables = []
    interps = []
    fields = []
    for idx in lidx:
        gaintables.append(gaintable[idx])
        interps.append(interp[idx])
        # a string gainfield (e.g. "nearest") applies to every table
        if isinstance(gainfield, str):
            fields.append(gainfield)
        else:
            fields.append(gainfield[idx])
    return gaintables, interps, fields
def applycal(order, msname, recipe, gaintable, interp, gainfield, field, pipeline, i,
             calmode="calflag", label=""):
    """
    Add a CASA applycal step applying the final calibration tables to the
    fields of type *field* (e.g. "bpcal", "gcal", "target").

    Parameters:
        order: the solve order string, used to pick which tables to apply
        gaintable/interp/gainfield: accumulated tables and their settings
        calmode: CASA applymode (default "calflag")
    """
    gaintables, interps, fields = get_caltab_final(order, gaintable, interp,
                                                   gainfield, field)
    step = "apply_gains-%s-%s-%d" % (field, label, i)
    apply_opts = {
        "vis": msname,
        "field": ",".join(getattr(pipeline, field)[i]),
        "applymode": calmode,
        "gaintable": [tab + ":output" for tab in gaintables],
        "interp": interps,
        "calwt": [False],
        "gainfield": fields,
        "parang": False,
        "flagbackup": False,
    }
    recipe.add("cab/casa_applycal", step, apply_opts,
               input=pipeline.input, output=pipeline.caltables,
               label="%s::Apply gain tables" % step)
@extras("scipy")
def smooth_bandpass(bptable, window, filter_type='mean'):
    """
    Smooth the complex bandpass solutions in *bptable* along the channel
    axis (axis 1 of CPARAM) with a running window, writing the result
    back in place.

    Parameters:
        bptable: path to the CASA bandpass calibration table
        window: window width in channels
        filter_type: 'mean' (uniform) or 'median'; any other value leaves
            the data unchanged but still rewrites the column
    """
    from scipy import ndimage
    caracal.log.info('Smoothing {0:s} with {2:s} window of width {1:d} channels'.format(bptable, window, filter_type))
    # NOTE(review): the casacore table objects opened below are never
    # explicitly closed; this relies on garbage collection -- confirm.
    bp = table(bptable, ack=False).getcol('CPARAM')
    # filter real and imaginary parts separately, only along the channel axis
    bp = [np.real(bp), np.imag(bp)]
    if filter_type == 'median':
        bp = [ndimage.median_filter(bb, size=(1, window, 1)) for bb in bp]
    elif filter_type == 'mean':
        bp = [ndimage.uniform_filter(bb, size=(1, window, 1)) for bb in bp]
    table(bptable, ack=False, readonly=False).putcol('CPARAM', bp[0] + 1j * bp[1])
def worker(pipeline, recipe, config):
    """
    Cross-calibration driver.

    For every measurement set: manage flag versions (save/rewind), pick
    the flux-scale reference field, optionally set the calibrator model
    (set_model), solve for primary (and, when distinct, secondary)
    gains via :func:`solve`, apply them to the requested fields, save a
    calibration library for later workers, and optionally add a
    flagging-summary step. The recipe is run once per MS.
    """
    wname = pipeline.CURRENT_WORKER
    flags_before_worker = '{0:s}_{1:s}_before'.format(pipeline.prefix, wname)
    flags_after_worker = '{0:s}_{1:s}_after'.format(pipeline.prefix, wname)
    label = config["label_cal"]
    label_in = config["label_in"]
    # loop over all MSs for this label
    for i, msbase in enumerate(pipeline.msbasenames):
        msname = pipeline.form_msname(msbase, label_in)
        msinfo = pipeline.get_msinfo(msname)
        prefix_msbase = f"{pipeline.prefix_msbases[i]}-{label}"
        # flag versions only matter if we will actually modify flags below
        if {"gcal", "fcal", "target"}.intersection(config["apply_cal"]["applyto"]):
            # Write/rewind flag versions
            available_flagversions = manflags.get_flags(pipeline, msname)
            if config['rewind_flags']['enable']:
                if config['rewind_flags']['mode'] == 'reset_worker':
                    version = flags_before_worker
                    stop_if_missing = False
                elif config['rewind_flags']['mode'] == 'rewind_to_version':
                    version = config['rewind_flags']['version']
                    if version == 'auto':
                        version = flags_before_worker
                    stop_if_missing = True
                if version in available_flagversions:
                    # refuse to rewind past this worker's own "before" snapshot
                    # unless overwriting is allowed
                    if flags_before_worker in available_flagversions and available_flagversions.index(
                            flags_before_worker) < available_flagversions.index(version) and not config[
                            'overwrite_flagvers']:
                        manflags.conflict('rewind_too_little', pipeline, wname, msname, config, flags_before_worker,
                                          flags_after_worker)
                    substep = 'version-{0:s}-ms{1:d}'.format(version, i)
                    manflags.restore_cflags(pipeline, recipe, version, msname, cab_name=substep)
                    # drop any flag versions newer than the restored one
                    if version != available_flagversions[-1]:
                        substep = 'delete-flag_versions-after-{0:s}-ms{1:d}'.format(version, i)
                        manflags.delete_cflags(pipeline, recipe,
                                               available_flagversions[available_flagversions.index(version) + 1],
                                               msname, cab_name=substep)
                    if version != flags_before_worker:
                        substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
                        manflags.add_cflags(pipeline, recipe, flags_before_worker,
                                            msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
                elif stop_if_missing:
                    manflags.conflict('rewind_to_non_existing', pipeline, wname, msname, config, flags_before_worker,
                                      flags_after_worker)
                else:
                    substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
                    manflags.add_cflags(pipeline, recipe, flags_before_worker,
                                        msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
            else:
                if flags_before_worker in available_flagversions and not config['overwrite_flagvers']:
                    manflags.conflict('would_overwrite_bw', pipeline, wname, msname, config, flags_before_worker,
                                      flags_after_worker)
                else:
                    substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
                    manflags.add_cflags(pipeline, recipe, flags_before_worker,
                                        msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
        # choose the flux-scale reference: the longest-observed flux calibrator
        if len(pipeline.fcal[i]) > 1:
            fluxscale_field = utils.observed_longest(msinfo, pipeline.fcal[i])
            fluxscale_field_id = utils.get_field_id(msinfo, fluxscale_field)[0]
            caracal.log.info("Found more than one flux calibrator."
                             f"Will use the one observed the longest {fluxscale_field}.")
        else:
            fluxscale_field = pipeline.fcal[i][0]
            fluxscale_field_id = utils.get_field_id(msinfo, fluxscale_field)[0]
        pipeline.fluxscale_reference = fluxscale_field
        if pipeline.enable_task(config, 'set_model'):
            if config['set_model']['no_verify']:
                opts = {
                    "vis": msname,
                    "field": fluxscale_field,
                    "scalebychan": True,
                    "usescratch": True,
                }
            else:
                # look up the calibrator in the local and CASA databases
                modelsky = utils.find_in_native_calibrators(msinfo, fluxscale_field, mode='sky')
                modelcrystal = utils.find_in_native_calibrators(msinfo, fluxscale_field, mode='crystal')
                modelpoint = utils.find_in_native_calibrators(msinfo, fluxscale_field, mode='mod')
                standard = utils.find_in_casa_calibrators(msinfo, fluxscale_field)
                if config['set_model']['meerkat_skymodel'] and modelsky:
                    # use local sky model of calibrator field if exists
                    opts = {
                        "skymodel": modelsky,
                        "msname": msname,
                        "field-id": utils.get_field_id(msinfo, fluxscale_field)[0],
                        "threads": config["set_model"]['threads'],
                        "mode": "simulate",
                        "tile-size": config["set_model"]["tile_size"],
                        "column": "MODEL_DATA",
                    }
                elif config['set_model']['meerkat_crystalball_skymodel'] and modelcrystal:  # Use Ben's crystalball models
                    opts = {
                        "ms": msname,
                        "sky-model": modelcrystal,
                        "field": fluxscale_field,
                        "memory-fraction": sdm.dismissable(config['set_model']["meerkat_crystalball_memory_fraction"]),
                        "num-workers": sdm.dismissable(config['set_model']['meerkat_crystalball_ncpu']),
                        "row-chunks": sdm.dismissable(config['set_model']["meerkat_crystalball_row_chunks"]),
                        "model-chunks": sdm.dismissable(config['set_model']["meerkat_crystalball_model_chunks"]),
                        "num-sources": sdm.dismissable(config['set_model']['meerkat_crystalball_num_sources']),
                    }
                elif modelpoint:  # spectral model if specified in our standard
                    opts = {
                        "vis": msname,
                        "field": fluxscale_field,
                        "standard": "manual",
                        "fluxdensity": modelpoint['I'],
                        "reffreq": '{0:f}GHz'.format(modelpoint['ref'] / 1e9),
                        "spix": [modelpoint[a] for a in 'abcd'],
                        "scalebychan": True,
                        "usescratch": True,
                    }
                elif standard:  # NRAO model otherwise
                    opts = {
                        "vis": msname,
                        "field": fluxscale_field,
                        "standard": standard,
                        "usescratch": True,
                        "scalebychan": True,
                    }
                else:
                    raise RuntimeError('The flux calibrator field "{}" could not be '
                                       'found in our database or in the CASA NRAO database'.format(fluxscale_field))
            step = 'set_model_cal-{0:d}'.format(i)
            # the chosen opts dict shape determines which cab must run it
            if "skymodel" in opts:
                cabtouse = 'cab/simulator'
            elif "sky-model" in opts:
                cabtouse = 'cab/crystalball'
            else:
                cabtouse = 'cab/casa_setjy'
            recipe.add(cabtouse, step,
                       opts,
                       input=pipeline.input,
                       output=pipeline.output,
                       label='{0:s}:: Set jansky ms={1:s}'.format(step, msname))
        gcal_set = set(pipeline.gcal[i])
        fcal_set = set(pipeline.fcal[i])
        calmode = config["apply_cal"]["calmode"]
        primary_order = config["primary"]["order"]
        secondary_order = config["secondary"]["order"]
        # "no secondary" when the gain calibrators are absent or identical
        # to the flux calibrators -- then fluxscale is unnecessary
        no_secondary = gcal_set == set() or len(gcal_set - fcal_set) == 0
        if no_secondary:
            primary_order = config["primary"]["order"]
            primary = solve(msname, msinfo, recipe, config, pipeline, i,
                            prefix_msbase, label=label, ftype="primary")
            caracal.log.info("Secondary calibrator is the same as the primary. Skipping fluxscale")
            interps = primary["interps"]
            gainfields = primary["gainfield"]
            gaintables = primary["gaintables"]
            if "bpcal" in config["apply_cal"]["applyto"] or "gcal" in config["apply_cal"]["applyto"]:
                applycal(primary_order, msname, recipe, copy.deepcopy(gaintables), copy.deepcopy(interps),
                         "nearest", "bpcal", pipeline, i, calmode=calmode, label=label)
            if "xcal" in config["apply_cal"]["applyto"]:
                applycal(primary_order, msname, recipe, copy.deepcopy(gaintables), copy.deepcopy(interps),
                         "nearest", "xcal", pipeline, i, calmode=calmode, label=label)
            if "target" in config["apply_cal"]["applyto"]:
                applycal(primary_order, msname, recipe, copy.deepcopy(gaintables), copy.deepcopy(interps),
                         "nearest", "target", pipeline, i, calmode=calmode, label=label)
        else:
            # solve primary first, then secondary with the primary's tables
            # applied on the fly and a 1 Jy point-source starting model
            primary = solve(msname, msinfo, recipe, config, pipeline, i,
                            prefix_msbase, label=label, ftype="primary")
            secondary = solve(msname, msinfo, recipe, config, pipeline, i,
                              prefix_msbase, label=label, ftype="secondary",
                              prev=primary, prev_name="primary", smodel=True)
            interps = primary["interps"]
            gaintables = primary["gaintables"]
            if "bpcal" in config["apply_cal"]["applyto"]:
                applycal(primary_order, msname, recipe, copy.deepcopy(gaintables), copy.deepcopy(interps),
                         "nearest", "bpcal", pipeline, i, calmode=calmode, label=label)
            interps = secondary["interps"]
            gainfields = secondary["gainfield"]
            gaintables = secondary["gaintables"]
            if "gcal" in config["apply_cal"]["applyto"]:
                applycal(secondary_order, msname, recipe, copy.deepcopy(gaintables), interps,
                         gainfields, "gcal", pipeline, i, calmode=calmode, label=label)
            if "xcal" in config["apply_cal"]["applyto"]:
                applycal(secondary_order, msname, recipe, copy.deepcopy(gaintables), interps,
                         "nearest", "xcal", pipeline, i, calmode=calmode, label=label)
            if "target" in config["apply_cal"]["applyto"]:
                applycal(secondary_order, msname, recipe, copy.deepcopy(gaintables), interps,
                         "nearest", "target", pipeline, i, calmode=calmode, label=label)
        if {"gcal", "fcal", "target"}.intersection(config["apply_cal"]["applyto"]):
            substep = 'save-{0:s}-ms{1:d}'.format(flags_after_worker, i)
            manflags.add_cflags(pipeline, recipe, flags_after_worker, msname, cab_name=substep,
                                overwrite=config['overwrite_flagvers'])
        # build and save a calibration library for downstream workers
        applycal_recipes = callibs.new_callib()
        # the fluxscale_field has already been chosen, so using "nearest" here does not make sense to FROM(Sphe)
        # see issue #1474
        primary_tables = get_caltab_final(primary_order, primary["gaintables"], primary["interps"], fluxscale_field, "target")
        if no_secondary:
            for gt, itp, fd in zip(*primary_tables):
                callibs.add_callib_recipe(applycal_recipes, gt, itp, fd)
        else:
            # default recipes from secondary
            for gt, itp, fd in zip(*get_caltab_final(secondary_order, secondary["gaintables"], secondary["interps"],
                                                     "nearest", "target")):
                # if the table is already applied with the primary in it, re-add it with an "all" (empty) field
                # add_callib_recipe(applycal_recipes, gt, itp, fd, '' if gt in applycal_recipes else targets)
                callibs.add_callib_recipe(applycal_recipes, gt, itp, fd)
            # make list of primary recipes that apply specifically to primary
            for gt, itp, fd in zip(*primary_tables):
                callibs.add_callib_recipe(applycal_recipes, gt, itp, fd, field=fluxscale_field)
        pipeline.save_callib(applycal_recipes, prefix_msbase)
        if pipeline.enable_task(config, 'summary'):
            step = 'summary-{0:s}-{1:d}'.format(label, i)
            recipe.add('cab/flagstats', step,
                       {
                           "msname": msname,
                           "plot": True,
                           "outfile": ('{0:s}-{1:s}-'
                                       'crosscal-summary-{2:d}.json').format(
                               prefix_msbase, wname, i),
                           "htmlfile": ('{0:s}-{1:s}-'
                                        'crosscal-summary-plots-{2:d}.html').format(
                               prefix_msbase, wname, i)
                       },
                       input=pipeline.input,
                       output=pipeline.diagnostic_plots,
                       label='{0:s}:: Flagging summary ms={1:s}'.format(step, msname))
        recipe.run()
        # Empty job que after execution
        recipe.jobs = []
| 32,636 | 43.586066 | 141 | py |
caracal | caracal-master/caracal/workers/mosaic_worker.py | # -*- coding: future_fstrings -*-
import os
import glob
import sys
import caracal
import numpy as np
from caracal.dispatch_crew import utils
from caracal.utils.requires import extras
NAME = "Mosaic 2D-images or cubes"
LABEL = 'mosaic'
@extras(packages="astropy")
def worker(pipeline, recipe, config):
from astropy import units as u
import astropy.coordinates as coord
from astropy.io import fits
from astropy import wcs
wname = pipeline.CURRENT_WORKER
##########################################
# Defining functions for the worker
##########################################
# Not using anymore, but might need later
def identify_last_selfcal_image(directory_to_check, prefix, field, mfsprefix):
# Doing this because convergence may have been reached before the user-specified number of iterations
matching_files = glob.glob(directory_to_check + '/{0:s}_{1:s}_*{2:s}-image.fits'.format(
prefix, field, mfsprefix)) # '*' to pick up the number
max_num = 0 # Initialisation
for filename in matching_files:
split_filename = filename.split('_')
number = split_filename[-1].split('-')[0]
num = int(number)
if num > max_num:
max_num = num
filename_of_last_selfcal_image = '{0:s}_{1:s}_{2:s}{3:s}-image.fits'.format(
prefix, field, str(max_num), mfsprefix)
return filename_of_last_selfcal_image
def identify_last_subdirectory(mosaictype):
max_num = 0 # Initialisation
# Subdirectory prefix depends on whether we are looking in pipeline.continuum or pipeline.cubes
if mosaictype == 'continuum':
directory_to_check = pipeline.continuum
subdirectory_prefix = 'image_'
else: # i.e. mosaictype == 'spectral'
directory_to_check = pipeline.cubes
subdirectory_prefix = 'cube_'
matching_subdirectories = glob.glob(
directory_to_check + '/' + subdirectory_prefix + '*') # '*' to pick up the number
for subdirectory in matching_subdirectories:
split_subdirectory = subdirectory.split('_')
# In case there is one or more '_' in the directory name, want to get the last portion
number = split_subdirectory[-1]
num = int(number)
if num > max_num:
max_num = num
last_subdirectory = subdirectory_prefix + str(max_num)
return max_num, last_subdirectory
# Copied from masking_worker.py and edited. This is to get a Gaussian beam.
    def build_beam(obs_freq, centre, cell, imsize, out_beam):
        """
        Write a Gaussian primary-beam model as a FITS image to *out_beam*.

        Parameters:
            obs_freq: observing frequency in Hz
            centre: (ra, dec) of the pointing centre, in deg
            cell: pixel size in deg (taken from the image's CDELT2)
            imsize: image size in pixels (square image assumed)
            out_beam: output FITS filename (overwritten if present)

        The beam FWHM is 1.02 * lambda / D with D = config['dish_diameter'],
        evaluated on a SIN-projected grid centred on the pointing.
        """
        # if copy_head == True:
        #     hdrfile = fits.open(headfile)
        #     hdr = hdrfile[0].header
        # elif copy_head == False:
        w = wcs.WCS(naxis=2)
        # Using u.deg for both due to using 'CRVAL1' and 'CRVAL2' to set the centre
        centre = coord.SkyCoord(
            centre[0], centre[1], unit=(u.deg, u.deg), frame='icrs')
        # cell /= 3600.0 # Am assuming that cell was passed to the function in units of arcsec, so this is converting it to units of deg.
        # Commenting the above out as 'CDELT2' from the corresponding image will be passed to the function, and this is already in deg.
        # The '+ 1's are needed to avoid a shape mismatch later on
        w.wcs.crpix = [(imsize / 2) + 1, (imsize / 2) + 1]
        w.wcs.cdelt = np.array([-cell, cell])
        w.wcs.crval = [centre.ra.deg, centre.dec.deg]
        w.wcs.ctype = ["RA---SIN", "DEC--SIN"]
        hdr = w.to_header()
        hdr['SIMPLE'] = 'T'
        hdr['BITPIX'] = -32
        hdr['NAXIS'] = 2
        # keyword order matters: NAXIS1/2 must directly follow NAXIS
        hdr.set('NAXIS1', imsize, after='NAXIS')
        hdr.set('NAXIS2', imsize, after='NAXIS1')
        if 'CUNIT1' in hdr:
            del hdr['CUNIT1']
        if 'CUNIT2' in hdr:
            del hdr['CUNIT2']
        # Units of m. The default assumes that MeerKAT data is being processed
        dish_diameter = config['dish_diameter']
        pb_fwhm_radians = 1.02 * (2.99792458E8 / obs_freq) / dish_diameter
        pb_fwhm = 180.0 * pb_fwhm_radians / np.pi  # Now in units of deg
        pb_fwhm_pix = pb_fwhm / hdr['CDELT2']
        # radial pixel-offset grid from the image centre
        x, y = np.meshgrid(np.linspace(-hdr['NAXIS2'] / 2.0, hdr['NAXIS2'] / 2.0, hdr['NAXIS2']),
                           np.linspace(-hdr['NAXIS1'] / 2.0, hdr['NAXIS1'] / 2.0, hdr['NAXIS1']))
        d = np.sqrt(x * x + y * y)
        sigma, mu = pb_fwhm_pix / 2.35482, 0.0  # sigma = FWHM/sqrt(8ln2)
        gaussian = np.exp(-((d - mu)**2 / (2.0 * sigma**2)))
        fits.writeto(out_beam, gaussian, hdr, overwrite=True)
# Copied from line_worker.py and edited. This is to get a Mauchian beam.
# The original version makes the build_beam function above redundant but I do not want to change too many things at once.
    def make_mauchian_pb(filename, freq):  # pbtype):
        """
        Write a Mauchian primary-beam FITS image alongside *filename*.

        Evaluates Eqns 3-4 of Mauch et al. (2020) on the pixel grid of the
        input 2D image and writes the result to the same path with
        'image.fits' replaced by 'pb.fits'.

        Parameters:
            filename: input FITS image (must end in 'image.fits')
            freq: observing frequency in Hz (a single float for the 2D case)
        """
        with fits.open(filename) as image:
            headimage = image[0].header
        # angular offset of every pixel from the pointing centre
        ang_offset = np.indices(
            (headimage['naxis2'], headimage['naxis1']), dtype=np.float32)
        ang_offset[0] -= (headimage['crpix2'] - 1)
        ang_offset[1] -= (headimage['crpix1'] - 1)
        ang_offset = np.sqrt((ang_offset**2).sum(axis=0))  # Using offset in x and y direction to calculate the total offset from the pointing centre
        ang_offset = ang_offset * np.abs(headimage['cdelt1'])  # Now offset is in units of deg
        # if pbtype == 'gaussian':
        #     sigma_pb = 17.52 / (freq / 1e+9) / dish_size / 2.355
        #     sigma_pb.resize((sigma_pb.shape[0], 1, 1))
        #     datacube = np.exp(-datacube**2 / 2 / sigma_pb**2)
        # elif pbtype == 'mauchian':
        FWHM_pb = (57.5 / 60) * (freq / 1.5e9)**-1  # Eqn 4 of Mauch et al. (2020), but in deg # freq is just a float for the 2D case
        pb_image = (np.cos(1.189 * np.pi * (ang_offset / FWHM_pb)) / (
            1 - 4 * (1.189 * ang_offset / FWHM_pb)**2))**2  # Eqn 3 of Mauch et al. (2020)
        fits.writeto(filename.replace('image.fits', 'pb.fits'),
                     pb_image, header=headimage, overwrite=True)
        caracal.log.info('Created Mauchian primary-beam FITS {0:s}'.format(
            filename.replace('image.fits', 'pb.fits')))
def consistent_cdelt3(image_filenames, input_directory, nrdecimals):
    """Check that all input cubes agree on CDELT3, optionally rounding.

    When the values differ and *nrdecimals* is set, round them to that many
    decimals; if they then agree, write the common value back to every
    image.fits and pb.fits cube.  Raises BadDataError when the values cannot
    be reconciled.  (*input_directory* is unused here but kept because the
    recipe passes it.)
    """
    # Unique CDELT3 values, in order of first appearance.
    cdelt3s = list(dict.fromkeys(fits.getval(ff, 'cdelt3') for ff in image_filenames))
    if len(cdelt3s) <= 1:
        return
    if not nrdecimals:
        caracal.log.error('Not all input cubes have the same CDELT3. Values found:')
        caracal.log.error('    {0:}'.format(cdelt3s))
        caracal.log.error('To proceed Please set mosaic:round_cdelt3 to round the CDELT3 values to an adequate number of decimals.')
        caracal.log.error('This will overwrite CDELT3 in the input cubes.')
        raise caracal.BadDataError('Inconsistent CDELT3 values in input cubes.')
    caracal.log.warn('Not all input cubes have the same CDELT3. Values found:')
    caracal.log.warn('    {0:}'.format(cdelt3s))
    caracal.log.warn('Rounding up the CDELT3 values to {0:d} decimals:'.format(nrdecimals))
    # Unique rounded values, again preserving first-appearance order.
    cdelt3s_r = list(dict.fromkeys(round(cc, nrdecimals) for cc in cdelt3s))
    caracal.log.warn('    {0:}'.format(cdelt3s_r))
    if len(cdelt3s_r) > 1:
        caracal.log.error('Rounding was insufficient, cannot proceed.')
        raise caracal.BadDataError('Inconsistent CDELT3 values in input cubes.')
    caracal.log.warn('Changing CDELT3 of all input image.fits and pb.fits cubes to {0:}'.format(cdelt3s_r[0]))
    for ff in image_filenames:
        fits.setval(ff, 'cdelt3', value=cdelt3s_r[0])
        fits.setval(ff.replace('image.fits', 'pb.fits'), 'cdelt3', value=cdelt3s_r[0])
##########################################
# Main part of the worker
##########################################
# Prioritise parameters specified in the config file, under the 'mosaic' worker
# i.e. 'continuum' or 'spectral'
specified_mosaictype = config['mosaic_type']
use_mfs_images = config['use_mfs']
specified_images = config['target_images']
label = config['label_in']
line_name = config['line_name']
pb_type = config['pb_type']
# Parameters that depend on the mosaictype
if specified_mosaictype == 'spectral':
    pb_origin = 'generated by the line_worker'
else:
    pb_origin = 'that are already in place (generated by the selfcal_worker, or during a previous run of the mosaic_worker)'
# To ease finding the appropriate files, and to keep this worker self-contained
if use_mfs_images:
    mfsprefix = '-MFS'
else:
    mfsprefix = ''
# please forget pipeline.dataid: it is now pipeline.msbasenames
# pipeline.prefixes = ['{0:s}-{1:s}-{2:s}'.format(pipeline.prefix,did,config['label_in']) for did in pipeline.dataid]
# In case there are different pipeline prefixes
# for i in range(len(pipeline.prefixes)): ### I may need to put this loop back in later
prefix = pipeline.prefix
# Delete empty strings from list of specified images (as in default list = [''])
while '' in specified_images:
    del (specified_images[specified_images.index('')])
# If nothing is passed via the config file, then specified_images[0] adopts this via the schema
# NOTE(review): the automatic name selection below is assumed to run only when
# no images were specified — confirm this block nesting against upstream.
if not len(specified_images):
    caracal.log.info(
        "No image names were specified via the config file, so they are going to be selected automatically.")
    caracal.log.info(
        "It is assumed that they are all in the highest-numbered subdirectory of 'general:output/continuum' and 'general:output/cubes'.")
    caracal.log.info(
        "You should check the selected image names. If unhappy with the selection, please specify the correct ones to use with mosaic:target_images.")
    # Needed for working out the field names for the targets, so that the correct files can be selected
    all_targets, all_msfile, ms_dict = pipeline.get_target_mss(label)
    n_targets = len(all_targets)
    caracal.log.info(
        'The number of targets to be mosaicked is {0:d}'.format(n_targets))
    # Where the targets are in the output directory
    max_num, last_subdirectory = identify_last_subdirectory(specified_mosaictype)
    # Empty list to add filenames to
    pathnames = []
    # Expecting the same prefix and mfsprefix to apply for all fields to be mosaicked together
    for target in all_targets:
        field = utils.filter_name(target)
        # Use the mosaictype to infer the filenames of the images
        if specified_mosaictype == 'continuum':  # Add name of 2D image output by selfcal_worker
            image_name = '{5:s}/{0:s}/{1:s}_{2:s}_{3:s}{4:s}-image.fits'.format(
                last_subdirectory, prefix, field, str(max_num), mfsprefix, pipeline.continuum)
            specified_images.append(image_name)
        else:  # i.e. mosaictype = 'spectral', so add name of cube output by line_worker
            image_name = '{5:s}/{0:s}/{1:s}_{2:s}_{3:s}{4:s}-image.fits'.format(
                last_subdirectory, prefix, field, line_name, mfsprefix, pipeline.cubes)
            if mfsprefix == '':
                # Following the naming in line_worker
                image_name = image_name.replace('-image', '.image')
            specified_images.append(image_name)
caracal.log.info('PLEASE CHECK -- Images to be mosaicked are:')
caracal.log.info(specified_images)
# Although montage_mosaic checks whether pb.fits files are present, we need to do this earlier in the worker,
# so that we can create simple Gaussian (or Mauchian) primary beams if need be.
# (An unused 'index_to_use' lookup was removed from this loop.)
for image_name in specified_images:
    pb_name = image_name.replace('image.fits', 'pb.fits')
    if os.path.exists(pb_name):
        caracal.log.info(
            '{0:s} is already in place, and will be used by montage_mosaic.'.format(pb_name))
    else:
        if specified_mosaictype == 'spectral':
            # Spectral pb cubes must come from the line_worker; we cannot build them here.
            caracal.log.error(
                '{0:s} does not exist. Please make sure that it is in place before proceeding.'.format(pb_name))
            caracal.log.error(
                'You may need to re-run the line_worker with pb_cube enabled. EXITING.')
            raise caracal.ConfigurationError("missing primary beam file {}".format(pb_name))
        else:  # i.e. mosaictype == 'continuum'
            caracal.log.info(
                '{0:s} does not exist, so going to create a pb.fits file instead.'.format(pb_name))
            if pb_type == 'gaussian':
                # Create rudimentary primary-beam, which is assumed to be a Gaussian with FWMH = 1.02*lambda/D
                # BUGFIX: use a context manager so the FITS file is closed again
                # (fits.open() was previously left open).
                with fits.open(image_name) as image_hdu:
                    image_header = image_hdu[0].header
                    # i.e. [ RA, Dec ]. Assuming that these are in units of deg.
                    image_centre = [image_header['CRVAL1'], image_header['CRVAL2']]
                    # Again assuming that these are in units of deg.
                    image_cell = image_header['CDELT2']
                    image_imsize = image_header['NAXIS1']
                recipe.add(build_beam, 'build_gaussian_pb',
                           {
                               # Units of Hz. The default assumes that MeerKAT data is being processed
                               'obs_freq': config['ref_frequency'],
                               'centre': image_centre,
                               'cell': image_cell,
                               'imsize': image_imsize,
                               'out_beam': pb_name,
                           },
                           input=pipeline.input,
                           # Was pipeline=pipeline.output before the restructure of the output directory
                           output=pipeline.output,
                           label='build_gaussian_pb:: Generating {0:s}'.format(pb_name))
                # Confirming freq and dish_diameter values being used for the primary beam
                caracal.log.info('Observing frequency = {0:f} Hz, dish diameter = {1:f} m'.format(
                    config['ref_frequency'], config['dish_diameter']))
                caracal.log.info('If these are not the values that you were expecting to be used for primary-beam creation, then '
                                 'please delete the newly-created beams and re-run the mosaic worker with ref_frequency and dish_diameter '
                                 'set in the config file.')
            else:  # i.e. pb_type == 'mauchian'
                filename = image_name
                freq = config['ref_frequency']  # Units of Hz. The default assumes that MeerKAT data is being processed
                make_mauchian_pb(filename, freq)
                # Confirming freq value being used for the primary beam
                caracal.log.info('Observing frequency = {0:f} Hz'.format(freq))
                if freq == 1383685546.875:  # i.e. if the default value was used
                    caracal.log.info('If you did not want this value (i.e. the default) to be used for primary-beam creation, then '
                                     'please delete the newly-created beams and re-run the mosaic worker with ref_frequency set in the config file.')
                else:
                    caracal.log.info('as set via ref_frequency in the config file, and used for primary-beam creation.')
            pb_origin = 'generated by the mosaic_worker'
caracal.log.info('Checking for *pb.fits files now complete.')
# Will need it later, unless Sphe has a more elegant method
original_working_directory = os.getcwd()
caracal.log.info(
    'Now creating symlinks to images and beams, in case they are distributed across multiple subdirectories')
# To get the symlinks created in the correct directory
input_directory = pipeline.continuum if specified_mosaictype == 'continuum' else pipeline.cubes
os.chdir(input_directory)
# Empty list to add filenames to, as we are not to pass 'image_1', etc, to the recipe
image_filenames = []
# Start by assuming that 'image' is of the form 'image_1/image_filename'
for specified_image in specified_images:
    split_imagename = specified_image.split('/')
    subdirectory = '/'.join(split_imagename[:-1])
    image_filename = split_imagename[-1]
    image_filenames.append(image_filename)
    # If the path starts with input_directory, strip it off; otherwise build a
    # relative path that climbs back out of input_directory.
    if not specified_image.split(input_directory)[0]:
        specified_image = specified_image.replace(input_directory, '')
    else:
        specified_image = '{0:s}/{1:s}'.format('/'.join(['..' for ss in input_directory.split('/')]), specified_image)
    if specified_image[0] == '/':
        specified_image = specified_image[1:]
    # NOTE(review): os.system with a shell-interpolated filename — assumes
    # image names contain no shell metacharacters; confirm upstream sanitising.
    symlink_for_image_command = 'ln -sf {0:s} {1:s}'.format(specified_image, image_filename)
    os.system(symlink_for_image_command)
    specified_beam = specified_image.replace('image.fits', 'pb.fits')
    beam_filename = image_filename.replace('image.fits', 'pb.fits')
    symlink_for_beam_command = 'ln -sf {0:s} {1:s}'.format(specified_beam, beam_filename)
    os.system(symlink_for_beam_command)
# To get back to where we were before symlink creation
os.chdir(original_working_directory)
# Prefix of the output files should be either the default (pipeline.prefix) or that specified by the user via the config file
mosaic_prefix = config['name']
if mosaic_prefix == '':  # i.e. this has been set via the schema
    mosaic_prefix = pipeline.prefix
# List of images in place, and have ensured that there are corresponding pb.fits files,
# so now ready to add montage_mosaic to the caracal recipe
image_filenames = ['{0:s}/{1:s}'.format(input_directory, ff) for ff in image_filenames]
input_directory = '.'
# For spectral mosaics, first verify (and possibly round) CDELT3 across inputs
# before any mosaicking is attempted.
if specified_mosaictype == 'spectral':
    recipe.add(consistent_cdelt3, 'cdelt3_check',
               {
                   "image_filenames": image_filenames,
                   "input_directory": input_directory,
                   "nrdecimals": config['round_cdelt3'],
               },
               input=input_directory,
               output=pipeline.mosaics,
               label='cdelt3_check')
    recipe.run()
    recipe.jobs = []
# The two branches differ only in the 'domontage' flag and the label text.
if pipeline.enable_task(config, 'domontage'):
    recipe.add('cab/mosaicsteward', 'mosaic-steward',
               {
                   "mosaic-type": specified_mosaictype,
                   "domontage": True,
                   "cutoff": config['cutoff'],
                   "name": mosaic_prefix,
                   "target-images": image_filenames,
               },
               input=input_directory,
               output=pipeline.mosaics,
               label='MosaicSteward:: Re-gridding {0:s} images before mosaicking them. For this mode, the mosaic_worker is using *pb.fits files {1:s}.'.format(specified_mosaictype, pb_origin))
else:  # Written out for clarity as to what difference the 'domontage' setting makes
    recipe.add('cab/mosaicsteward', 'mosaic-steward',
               {
                   "mosaic-type": specified_mosaictype,
                   "domontage": False,
                   "cutoff": config['cutoff'],
                   "name": mosaic_prefix,
                   "target-images": image_filenames,
               },
               input=input_directory,
               output=pipeline.mosaics,
               label='MosaicSteward:: Re-gridding of images and beams is assumed to be already done, so straight to mosaicking {0:s} images. For this mode, the mosaic_worker is using *pb.fits files {1:s}.'.format(specified_mosaictype, pb_origin))
recipe.run()
recipe.jobs = []
# Set mosaic bunit, bmaj, bmin, bpa from the input cubes.
bunits, bmajs, bmins, bpas = [], [], [], []
for ff in image_filenames:
    bunits.append(fits.getval(ff, 'bunit'))
    bmajs.append(fits.getval(ff, 'bmaj'))
    bmins.append(fits.getval(ff, 'bmin'))
    bpas.append(fits.getval(ff, 'bpa'))
# All inputs must agree on BUNIT; the beam parameters are medianed.
if np.unique(np.array(bunits)).shape[0] == 1:
    mosbunit = bunits[0]
else:
    raise caracal.BadDataError('Inconsistent BUNIT values in input cubes. Cannot proceed')
mosbmaj = np.median(np.array(bmajs))
mosbmin = np.median(np.array(bmins))
mosbpa = np.median(np.array(bpas))
caracal.log.info('Setting BUNIT = {0:}, BMAJ = {1:}, BMIN = {2:}, BPA = {3:} in mosaic FITS headers'.format(mosbunit, mosbmaj, mosbmin, mosbpa))
# Add missing keys and convert some keys from string to float in the mosaic FITS headers
for ff in ['.fits', '_noise.fits', '_weights.fits']:
    fitsfile = '{0:s}/{1:s}{2:s}'.format(pipeline.mosaics, mosaic_prefix, ff)
    fits.setval(fitsfile, 'bunit', value=mosbunit)
    fits.setval(fitsfile, 'bmaj', value=mosbmaj)
    fits.setval(fitsfile, 'bmin', value=mosbmin)
    fits.setval(fitsfile, 'bpa', value=mosbpa)
    for hh in 'crval3,crval4,crpix3,crpix4,cdelt3,cdelt4,crota2'.split(','):
        try:
            fits.setval(fitsfile, hh, value=float(fits.getval(fitsfile, hh)))
            caracal.log.info('Header key {0:s} found and converted to float in file {1:s}'.format(hh, fitsfile))
        except Exception:
            # BUGFIX: was 'except BaseException', which would also swallow
            # KeyboardInterrupt/SystemExit. A missing key raises KeyError and
            # a non-numeric value raises ValueError/TypeError.
            caracal.log.info('Header key {0:s} not found in file {1:s}'.format(hh, fitsfile))
| 22,412 | 48.476821 | 250 | py |
caracal | caracal-master/caracal/workers/selfcal_worker.py | # -*- coding: future_fstrings -*-
import os
import shutil
import glob
import sys
import yaml
import json
import re
import copy
import caracal
import numpy as np
import stimela.dismissable as sdm
from caracal.dispatch_crew import utils
from caracal.utils.requires import extras
from stimela.pathformatter import pathformatter as spf
from typing import Any
from caracal.workers.utils import manage_flagsets as manflags
import psutil
# Worker display name and the label used for this section in the config/logs.
NAME = 'Continuum Imaging and Self-calibration Loop'
LABEL = 'selfcal'
# self_cal_iter_counter is used as a global variable.
# To split out continuum/<dir> from output/continuum/dir
def get_dir_path(string, pipeline):
    """Return *string* relative to the pipeline output directory.

    E.g. turns 'output/continuum/<dir>' into 'continuum/<dir>' by dropping
    the leading output path and the separator that follows it.
    """
    tail = string.split(pipeline.output)[1]
    return tail[1:]
# Map the requested output data column to CubiCal's output-type code.
CUBICAL_OUT = {
    "CORRECTED_DATA": 'sc',
    "CORR_DATA": 'sc',
    "CORR_RES": 'sr',
}
# Map MeqTrees-style gain matrix names to CubiCal Jones-matrix type strings.
CUBICAL_MT = {
    "Gain2x2": 'complex-2x2',
    "GainDiag": 'complex-2x2',  # TODO:: Change this. Ask cubical to support this mode
    "GainDiagAmp": 'complex-2x2',
    "GainDiagPhase": 'phase-diag',
    "ComplexDiag": 'complex-diag',
    "Fslope": 'f-slope',
}
# Position of each Jones term within CubiCal's sol-term list.
SOL_TERMS_INDEX = {
    "G": 0,
    "B": 1,
    "DD": 2,
}
def check_config(config, name):
    """
    Optional function to check consistency of config, invoked before the pipeline runs.
    its purpose is to log warnings, or raise exceptions on bad errors.
    """
    # First let's check that we are not using transfer gains with MeqTrees,
    # and that we start at the beginning with MeqTrees.
    if config['calibrate_with'].lower() == 'meqtrees':
        if config['transfer_apply_gains']['enable']:
            raise caracal.ConfigurationError(
                'Gains cannot be interpolated with MeqTrees, please switch to CubiCal. Exiting.')
        if int(config['start_iter']) != 1:
            raise caracal.ConfigurationError(
                "We cannot reapply MeqTrees calibration at a given step. Hence you will need to do a full selfcal loop.")
        if int(config['cal_cubical']['chan_chunk']) != -1:
            caracal.log.info("The channel chunk has no effect on MeqTrees.")
        if 'Fslope' in config['calibrate']['gain_matrix_type']:
            # BUGFIX: this message promises to exit but the code used to log at
            # info level and carry on; now it actually aborts.
            caracal.log.error("Delay selfcal does not work with MeqTrees, please switch to Cubical. Exiting.")
            raise caracal.ConfigurationError("Delay (Fslope) selfcal is not supported with MeqTrees.")
    else:
        if int(config['start_iter']) != 1:
            raise caracal.ConfigurationError(
                "We cannot reapply Cubical calibration at a given step. Hence you will need to do a full selfcal loop.")
    # First check we are actually running a calibrate
    if config['calibrate']['enable']:
        cal_niter = int(config['cal_niter'])
        # Running with a model shorter than the output type is dangerous with 'CORR_RES'
        if 'CORR_RES' in config['calibrate']['output_data']:
            if len(config['calibrate']['model']) < cal_niter:
                raise caracal.ConfigurationError(
                    "You did not set a model to use for every iteration while using residuals. This is too dangerous for CARACal to execute.")
        # Make sure we are not using two_step with CubiCal
        if config['calibrate_with'].lower() == 'cubical' and config['cal_meqtrees']['two_step']:
            raise caracal.ConfigurationError(
                "Two_Step calibration is an experimental mode only available for meqtrees at the moment.")
        # Then let's check that the solutions are reasonable and fit in our chunks.
        # NOTE: a slice [:cal_niter] already returns the whole list when it is
        # shorter than cal_niter, so no explicit length comparison is needed.
        #!!!!!! Remainder solutions are not checked to be a full solution block!!!!!!!!
        # Collect all timeslot solution intervals that will be applied.
        solutions = config['calibrate']['gsols_timeslots'][:cal_niter]
        if config['cal_bjones']:
            # BUGFIX: this used to .append() the list itself, nesting a list
            # inside 'solutions' and breaking the min/max and membership
            # checks below (np.min fails on a ragged list); extend instead.
            solutions.extend(config['calibrate']['bsols_timeslots'][:cal_niter])
        # Same for GA solutions (only applied for GainDiag/Gain2x2 terms).
        gain_matrix_types = config['calibrate']['gain_matrix_type'][:cal_niter]
        if 'GainDiag' in gain_matrix_types or 'Gain2x2' in gain_matrix_types:
            solutions.extend(
                val for val in config['calibrate']['gasols_timeslots'][:cal_niter] if val >= 0)
        # Then we assign the timechunk: with -1 derive it from the intervals.
        if config['cal_timeslots_chunk'] == -1:
            if np.min(solutions) != 0.:
                time_chunk = np.max(solutions)
            else:
                time_chunk = 0
        else:
            time_chunk = config['cal_timeslots_chunk']
        # If time_chunk is not 0, all solutions should fit in it an integer
        # number of times; with 0 remainder intervals are not checked.
        if time_chunk != 0:
            if 0. in solutions:
                caracal.log.error("You are using all timeslots in your solutions (i.e. 0) but have set cal_timeslots_chunk, please set it to 0 for using all timeslots.")
                caracal.log.error("Your timeslots chunk = {}".format(time_chunk))
                caracal.log.error("Your timeslots solutions to be applied are {}".format(', '.join([str(x) for x in solutions])))
                raise caracal.ConfigurationError("Inconsistent selfcal chunking")
            sol_int_array = float(time_chunk) / np.array(solutions, dtype=float)
            for val in sol_int_array:
                if val != int(val):
                    caracal.log.error("Not all applied time solutions fit in the timeslot_chunk.")
                    caracal.log.error("Your timeslot chunk = {}".format(time_chunk))
                    caracal.log.error("Your time solutions to be applied are {}".format(', '.join([str(x) for x in solutions])))
                    raise caracal.ConfigurationError("Inconsistent selfcal chunking")
        # Then we repeat for the channels; these arrays do not have to be the
        # same length as the timeslot ones. This is not an option for MeqTrees.
        if config['calibrate_with'].lower() == 'cubical':
            solutions = config['calibrate']['gsols_chan'][:cal_niter]
            if config['cal_bjones']:
                # BUGFIX: extend instead of append (see the timeslot case above).
                solutions.extend(config['calibrate']['bsols_chan'][:cal_niter])
            if 'GainDiag' in gain_matrix_types or 'Gain2x2' in gain_matrix_types:
                solutions.extend(
                    val for val in config['calibrate']['gasols_chan'][:cal_niter] if val >= 0)
            # Then we assign the channel chunk
            if config['cal_cubical']['chan_chunk'] == -1:
                if np.min(solutions) != 0.:
                    chan_chunk = max(solutions)
                else:
                    chan_chunk = 0
            else:
                chan_chunk = config['cal_cubical']['chan_chunk']
            # If chan_chunk is not 0 all solutions should fit in there;
            # if it is 0 we are not checking remainder intervals.
            if chan_chunk != 0:
                if 0. in solutions:
                    caracal.log.error("You are using all channels in your solutions (i.e. 0) but have set chan_chunk, please set it to 0 for using all channels.")
                    caracal.log.error("Your channel chunk = {} \n".format(chan_chunk))
                    caracal.log.error("Your channel solutions to be applied are {}".format(', '.join([str(x) for x in solutions])))
                    raise caracal.ConfigurationError("Inconsistent selfcal chunking")
                sol_int_array = float(chan_chunk) / np.array(solutions, dtype=float)
                for val in sol_int_array:
                    if val != int(val):
                        caracal.log.error("Not all applied channel solutions fit in the chan_chunk.")
                        caracal.log.error("Your channel chunk = {} \n".format(chan_chunk))
                        caracal.log.error("Your channel solutions to be applied are {}".format(', '.join([str(x) for x in solutions])))
                        raise caracal.ConfigurationError("Inconsistent selfcal chunking")
    # Check some imaging stuff
    if config['image']['enable']:
        if config['img_maxuv_l'] > 0. and config['img_taper'] > 0.:
            # BUGFIX: the exception was constructed but never raised.
            raise caracal.UserInputError(
                "You are trying to image with a Gaussian taper as well as a Tukey taper. Please remove one. ")
def worker(pipeline, recipe, config):
wname = pipeline.CURRENT_WORKER
flags_before_worker = '{0:s}_{1:s}_before'.format(pipeline.prefix, wname)
flags_after_worker = '{0:s}_{1:s}_after'.format(pipeline.prefix, wname)
flag_main_ms = pipeline.enable_task(config, 'calibrate') and config['cal_niter'] >= config['start_iter']
rewind_main_ms = config['rewind_flags']["enable"] and (config['rewind_flags']['mode'] == 'reset_worker' or config['rewind_flags']["version"] != 'null')
rewind_transf_ms = config['rewind_flags']["enable"] and (config['rewind_flags']['mode'] == 'reset_worker' or config['rewind_flags']["transfer_apply_gains_version"] != 'null')
spwid = str(config['spwid'])
niter = config['img_niter']
imgweight = config['img_weight']
robust = config['img_robust']
taper = config['img_taper']
maxuvl = config['img_maxuv_l']
transuvl = maxuvl * config['img_transuv_l'] / 100.
multiscale = config['img_multiscale']
multiscale_scales = config['img_multiscale_scales']
if taper == '':
taper = None
label = config['label_in']
cal_niter = config['cal_niter']
time_chunk = config['cal_timeslots_chunk']
# If user sets value that is not -1 use that
if len(config['calibrate']['gain_matrix_type']) < int(cal_niter):
amount_matrix = len(config['calibrate']['gain_matrix_type'])
else:
amount_matrix = int(cal_niter)
if int(time_chunk) < 0 and pipeline.enable_task(config, 'calibrate'):
# We're always doing gains
if len(config['calibrate']['gsols_timeslots']) < cal_niter:
g_amount_sols = len(config['calibrate']['gsols_timeslots'])
else:
g_amount_sols = cal_niter
all_time_solution = config['calibrate']['gsols_timeslots'][:g_amount_sols]
# add the various sections
if config['cal_bjones']:
if len(config['calibrate']['bsols_timeslots']) < cal_niter:
b_amount_sols = len(config['calibrate']['bsols_timeslots'])
else:
b_amount_sols = cal_niter
all_time_solution.append(config['calibrate']['bsols_timeslots'][:b_amount_sols])
if 'GainDiag' in config['calibrate']['gain_matrix_type'][:amount_matrix] or \
'Gain2x2' in config['calibrate']['gain_matrix_type'][:amount_matrix]:
if len(config['calibrate']['gasols_timeslots']) < cal_niter:
amount_sols = len(config['calibrate']['gasols_timeslots'])
else:
amount_sols = int(cal_niter)
for val in config['calibrate']['gasols_timeslots'][:amount_sols]:
if int(val) >= 0:
all_time_solution.append(val)
if min(all_time_solution) == 0:
time_chunk = 0
else:
time_chunk = max(all_time_solution)
# And for the frequencies
freq_chunk = config['cal_cubical']['chan_chunk']
# If user sets value that is not -1 then use that
if int(freq_chunk) < 0 and pipeline.enable_task(config, 'calibrate'):
# We're always doing gains
if len(config['calibrate']['gsols_chan']) < cal_niter:
g_amount_sols = len(config['calibrate']['gsols_chan'])
else:
g_amount_sols = cal_niter
all_freq_solution = config['calibrate']['gsols_chan'][:g_amount_sols]
# add the various sections
if config['cal_bjones']:
if len(config['calibrate']['bsols_chan']) < cal_niter:
b_amount_sols = len(config['calibrate']['bsols_chan'])
else:
b_amount_sols = cal_niter
all_freq_solution.append(config['calibrate']['bsols_chan'][:b_amount_sols])
if 'GainDiag' in config['calibrate']['gain_matrix_type'][:amount_matrix] or \
'Gain2x2' in config['calibrate']['gain_matrix_type'][:amount_matrix]:
if len(config['calibrate']['gasols_chan']) < cal_niter:
amount_sols = len(config['calibrate']['gasols_chan'])
else:
amount_sols = int(cal_niter)
for val in config['calibrate']['gasols_chan'][:amount_sols]:
if int(val) >= 0:
all_freq_solution.append(val)
if min(all_freq_solution) == 0:
freq_chunk = 0
else:
freq_chunk = int(max(all_freq_solution))
min_uvw = config['minuvw_m']
ncpu = config['ncpu']
if ncpu == 0:
ncpu = psutil.cpu_count()
else:
ncpu = min(ncpu, psutil.cpu_count())
nwlayers_factor = config['img_nwlayers_factor']
nrdeconvsubimg = ncpu if config['img_nrdeconvsubimg'] == 0 else config['img_nrdeconvsubimg']
if nrdeconvsubimg == 1:
wscl_parallel_deconv = None
else:
wscl_parallel_deconv = int(np.ceil(config['img_npix'] / np.sqrt(nrdeconvsubimg)))
mfsprefix = ["", '-MFS'][int(config['img_nchans'] > 1)]
# label of MS where we transform selfcal gaintables
label_tgain = config['transfer_apply_gains']['transfer_to_label']
# label of MS where we interpolate and transform model column
label_tmodel = config['transfer_model']['transfer_to_label']
all_targets, all_msfile, ms_dict = pipeline.get_target_mss(label)
i = 0
for i, m in enumerate(all_msfile):
# check whether all ms files to be used exist
if not os.path.exists(os.path.join(pipeline.msdir, m)):
raise IOError(
"MS file {0:s} does not exist. Please check that it is where it should be.".format(m))
# Write/rewind flag versions only if flagging tasks are being
# executed on these .MS files, or if the user asks to rewind flags
if flag_main_ms or rewind_main_ms:
available_flagversions = manflags.get_flags(pipeline, m)
if rewind_main_ms:
if config['rewind_flags']['mode'] == 'reset_worker':
version = flags_before_worker
stop_if_missing = False
elif config['rewind_flags']['mode'] == 'rewind_to_version':
version = config['rewind_flags']['version']
if version == 'auto':
version = flags_before_worker
stop_if_missing = True
if version in available_flagversions:
if flags_before_worker in available_flagversions and available_flagversions.index(flags_before_worker) < available_flagversions.index(version) and not config['overwrite_flagvers']:
manflags.conflict('rewind_too_little', pipeline, wname, m, config, flags_before_worker, flags_after_worker)
substep = 'version-{0:s}-ms{1:d}'.format(version, i)
manflags.restore_cflags(pipeline, recipe, version, m, cab_name=substep)
if version != available_flagversions[-1]:
substep = 'delete-flag_versions-after-{0:s}-ms{1:d}'.format(version, i)
manflags.delete_cflags(pipeline, recipe,
available_flagversions[available_flagversions.index(version) + 1],
m, cab_name=substep)
if version != flags_before_worker:
substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
manflags.add_cflags(pipeline, recipe, flags_before_worker,
m, cab_name=substep, overwrite=config['overwrite_flagvers'])
elif stop_if_missing:
manflags.conflict('rewind_to_non_existing', pipeline, wname, m, config, flags_before_worker, flags_after_worker)
elif flag_main_ms:
substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
manflags.add_cflags(pipeline, recipe, flags_before_worker,
m, cab_name=substep, overwrite=config['overwrite_flagvers'])
else:
if flags_before_worker in available_flagversions and not config['overwrite_flagvers']:
manflags.conflict('would_overwrite_bw', pipeline, wname, m, config, flags_before_worker, flags_after_worker)
else:
substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i)
manflags.add_cflags(pipeline, recipe, flags_before_worker,
m, cab_name=substep, overwrite=config['overwrite_flagvers'])
i += 1
if pipeline.enable_task(config, 'transfer_apply_gains'):
t, all_msfile_tgain, ms_dict_tgain = pipeline.get_target_mss(label_tgain)
for j, m in enumerate(all_msfile_tgain):
# check whether all ms files to be used exist
if not os.path.exists(os.path.join(pipeline.msdir, m)):
raise IOError(
"MS file {0:s}, to transfer gains to, does not exist. Please check that it is where it should be.".format(m))
# Write/rewind flag versions
available_flagversions = manflags.get_flags(pipeline, m)
if rewind_transf_ms:
if config['rewind_flags']['mode'] == 'reset_worker':
version = flags_before_worker
stop_if_missing = False
elif config['rewind_flags']['mode'] == 'rewind_to_version':
version = config['rewind_flags']['transfer_apply_gains_version']
if version == 'auto':
version = flags_before_worker
stop_if_missing = True
if version in available_flagversions:
if flags_before_worker in available_flagversions and available_flagversions.index(flags_before_worker) < available_flagversions.index(version) and not config['overwrite_flagvers']:
manflags.conflict('rewind_too_little', pipeline, wname, m, config, flags_before_worker, flags_after_worker, read_version='transfer_apply_gains_version')
substep = 'version_{0:s}_ms{1:d}'.format(version, i)
manflags.restore_cflags(pipeline, recipe, version, m, cab_name=substep)
if version != available_flagversions[-1]:
substep = 'delete-flag_versions-after-{0:s}-ms{1:d}'.format(version, i)
manflags.delete_cflags(pipeline, recipe,
available_flagversions[available_flagversions.index(version) + 1],
m, cab_name=substep)
if version != flags_before_worker:
substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i + j)
manflags.add_cflags(pipeline, recipe, flags_before_worker,
m, cab_name=substep, overwrite=config['overwrite_flagvers'])
elif stop_if_missing:
manflags.conflict('rewind_to_non_existing', pipeline, wname, m, config, flags_before_worker, flags_after_worker, read_version='transfer_apply_gains_version')
else:
substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i + j)
manflags.add_cflags(pipeline, recipe, flags_before_worker,
m, cab_name=substep, overwrite=config['overwrite_flagvers'])
else:
if flags_before_worker in available_flagversions and not config['overwrite_flagvers']:
manflags.conflict('would_overwrite_bw', pipeline, wname, m, config, flags_before_worker, flags_after_worker, read_version='transfer_apply_gains_version')
else:
substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, i + j)
manflags.add_cflags(pipeline, recipe, flags_before_worker,
m, cab_name=substep, overwrite=config['overwrite_flagvers'])
if pipeline.enable_task(config, 'transfer_model'):
t, all_msfile_tmodel, ms_dict_tmodel = pipeline.get_target_mss(label_tmodel)
for m in all_msfile_tmodel: # check whether all ms files to be used exist
if not os.path.exists(os.path.join(pipeline.msdir, m)):
raise IOError(
"MS file {0:s}, to transfer model to, does not exist. Please check that it is where it should be.".format(m))
prefix = pipeline.prefix
def cleanup_files(mask_name):
    """Move *mask_name* from the output directory into output/masking and
    delete all CASA ``*.image`` directories left in the output directory.

    NOTE: this helper is currently unused.
    """
    src = os.path.join(pipeline.output, mask_name)
    if os.path.exists(src):
        shutil.move(src, os.path.join(pipeline.output, 'masking', mask_name))
    # Iterate directly instead of indexing with range(len(...)).
    for casafile in glob.glob(os.path.join(pipeline.output, '*.image')):
        shutil.rmtree(casafile)
@extras("astropy")
def change_header_and_type(filename, headfile, copy_head):
    """Rewrite *filename* in place as int32 data with a cleaned-up header.

    The header comes from *headfile* when *copy_head* is true, otherwise from
    *filename* itself; ORIGIN/CUNIT1/CUNIT2 are removed and CRVAL3 is copied
    over from *headfile* when present there.
    """
    import astropy.io.fits as fits
    with fits.open(filename) as pblist:
        dat = pblist[0].data
    head = fits.getheader(headfile if copy_head else filename, 0)
    # delete ORIGIN, CUNIT1, CUNIT2
    for key in ('ORIGIN', 'CUNIT1', 'CUNIT2'):
        if key in head:
            del head[key]
    # copy CRVAL3 from headfile to filename
    template_head = fits.getheader(headfile, 0)
    if 'crval3' in template_head:
        head['crval3'] = template_head['crval3']
    fits.writeto(filename, dat.astype('int32'), head, overwrite=True)
def fake_image(trg, num, img_dir, mslist, field):
key = 'image'
key_mt = 'calibrate'
ncpu_img = config[key]['ncpu_img'] if config[key]['ncpu_img'] else ncpu
absmem = config[key]['absmem']
step = 'image-field{0:d}-iter{1:d}'.format(trg, num)
fake_image_opts = {
"msname": mslist,
"column": config[key]['col'][0],
"weight": imgweight if not imgweight == 'briggs' else 'briggs {}'.format(robust),
"nmiter": sdm.dismissable(config['img_nmiter']),
"npix": config['img_npix'],
"padding": config['img_padding'],
"scale": config['img_cell'],
"prefix": '{0:s}/{1:s}_{2:s}_{3:d}'.format(img_dir, prefix, field, num),
"niter": config['img_niter'],
"gain": config["img_gain"],
"mgain": config['img_mgain'],
"pol": config['img_stokes'],
"channelsout": config['img_nchans'],
"joinchannels": config['img_joinchans'],
"local-rms": False,
"auto-mask": 6,
"auto-threshold": config[key]['clean_cutoff'][0],
"fitbeam": False,
"parallel-deconvolution": sdm.dismissable(wscl_parallel_deconv),
"nwlayers-factor": nwlayers_factor,
"threads": ncpu_img,
"absmem": absmem,
}
if config['img_specfit_nrcoeff'] > 0:
fake_image_opts["fit-spectral-pol"] = config['img_specfit_nrcoeff']
if not config['img_mfs_weighting']:
fake_image_opts["nomfsweighting"] = True
if maxuvl > 0.:
fake_image_opts.update({
"maxuv-l": maxuvl,
"taper-tukey": transuvl,
})
if float(taper) > 0.:
fake_image_opts.update({
"taper-gaussian": taper,
})
if min_uvw > 0:
fake_image_opts.update({"minuvw-m": min_uvw})
if multiscale:
fake_image_opts.update({"multiscale": multiscale})
if multiscale_scales:
fake_image_opts.update({"multiscale-scales": list(map(int, multiscale_scales.split(',')))})
recipe.add('cab/wsclean', step,
fake_image_opts,
input=pipeline.input,
output=pipeline.output,
label='{:s}:: Make image after first round of calibration'.format(step))
    def image(trg, num, img_dir, mslist, field):
        """Queue and immediately run the WSClean imaging step for selfcal
        iteration *num* of target field index *trg*.

        The input data column is chosen from the calibration mode (in
        MeqTrees two_step mode it depends on the last gain matrix type in
        the global trace_matrix), the clean mask comes from the configured
        masking method (wsclean auto-masking, SoFiA, Breizorro or a
        pre-made mask file), and the recipe is executed right away so
        subsequent steps can inspect the resulting images.

        Raises:
            RuntimeError: if the two_step trace ends in an unknown matrix type.
            caracal.ConfigurationError: if the expected clean mask file is missing.
        """
        key = 'image'
        key_mt = 'calibrate'
        # fall back to the global ncpu when no imaging-specific value is set
        ncpu_img = config[key]['ncpu_img'] if config[key]['ncpu_img'] else ncpu
        absmem = config[key]['absmem']
        caracal.log.info("Number of threads used by WSClean for gridding:")
        caracal.log.info(ncpu_img)
        # Gain matrix type of the *previous* calibration round; per-iteration
        # config lists fall back to their last entry when too short.
        if num > 1:
            matrix_type = config[key_mt]['gain_matrix_type'][
                num - 2 if len(config[key_mt]['gain_matrix_type']) >= num else -1]
        else:
            matrix_type = 'null'
        # If we have a two_step selfcal and Gaindiag we want to use CORRECTED_DATA
        if config['calibrate_with'].lower() == 'meqtrees' and config['cal_meqtrees']['two_step'] and num > 1:
            if trace_matrix[-1] == 'GainDiag':
                imcolumn = "CORRECTED_DATA"
            # If we do not have gaindiag but do have two step selfcal check against stupidity and that we are actually ending with ampphase cal and written to a special phase column
            elif trace_matrix[-1] == 'GainDiagPhase':
                imcolumn = 'CORRECTED_DATA_PHASE'
            # If none of these apply then do our normal selfcal
            else:
                raise RuntimeError("Something has gone wrong in the two step processing")
        else:
            imcolumn = config[key][
                'col'][num - 1 if len(config[key]['col']) >= num else -1]
        step = 'image-field{0:d}-iter{1:d}'.format(trg, num)
        image_opts = {
            "msname": mslist,
            "column": imcolumn,
            "weight": imgweight if not imgweight == 'briggs' else 'briggs {}'.format(robust),
            "nmiter": sdm.dismissable(config['img_nmiter']),
            "npix": config['img_npix'],
            "padding": config['img_padding'],
            "scale": config['img_cell'],
            "prefix": '{0:s}/{1:s}_{2:s}_{3:d}'.format(img_dir, prefix, field, num),
            "niter": config['img_niter'],
            "gain": config["img_gain"],
            "mgain": config['img_mgain'],
            "pol": config['img_stokes'],
            "channelsout": config['img_nchans'],
            "joinchannels": config['img_joinchans'],
            "auto-threshold": config[key]['clean_cutoff'][num - 1 if len(config[key]['clean_cutoff']) >= num else -1],
            "parallel-deconvolution": sdm.dismissable(wscl_parallel_deconv),
            "nwlayers-factor": nwlayers_factor,
            "threads": ncpu_img,
            "absmem": absmem,
        }
        if config['img_specfit_nrcoeff'] > 0:
            image_opts["fit-spectral-pol"] = config['img_specfit_nrcoeff']
        if config['img_niter'] > 0:
            # needed downstream when the clean-component list is used as a model
            image_opts["savesourcelist"] = True
        if not config['img_mfs_weighting']:
            image_opts["nomfsweighting"] = True
        if maxuvl > 0.:
            image_opts.update({
                "maxuv-l": maxuvl,
                "taper-tukey": transuvl,
            })
        if float(taper) > 0.:
            image_opts.update({
                "taper-gaussian": taper,
            })
        if min_uvw > 0:
            image_opts.update({"minuvw-m": min_uvw})
        if multiscale:
            image_opts.update({"multiscale": multiscale})
        if multiscale_scales:
            image_opts.update({"multiscale-scales": list(map(int, multiscale_scales.split(',')))})
        # Attach the clean mask; the masking method list also falls back to
        # its last entry when shorter than the iteration count.
        mask_key = config[key]['cleanmask_method'][num - 1 if len(config[key]['cleanmask_method']) >= num else -1]
        if mask_key == 'wsclean':
            # WSClean internal auto-masking, optionally with a local RMS map
            image_opts.update({
                "auto-mask": config[key]['cleanmask_thr'][num - 1 if len(config[key]['cleanmask_thr']) >= num else -1],
                "local-rms": config[key]['cleanmask_localrms'][num - 1 if len(config[key]['cleanmask_localrms']) >= num else -1],
            })
            if config[key]['cleanmask_localrms'][num - 1 if len(config[key]['cleanmask_localrms']) >= num else -1]:
                image_opts.update({
                    "local-rms-window": config[key]['cleanmask_localrms_window'][num - 1 if len(config[key]['cleanmask_localrms_window']) >= num else -1],
                })
        elif mask_key == 'sofia':
            # mask produced earlier by sofia_mask() for this iteration
            fits_mask = 'masking/{0:s}_{1:s}_{2:d}_clean_mask.fits'.format(
                prefix, field, num)
            if not os.path.isfile('{0:s}/{1:s}'.format(pipeline.output, fits_mask)):
                raise caracal.ConfigurationError("SoFiA clean mask {0:s}/{1:s} not found. Something must have gone wrong with the SoFiA run"
                                                 " (maybe the detection threshold was too high?). Please check the logs.".format(pipeline.output, fits_mask))
            image_opts.update({
                "fitsmask": '{0:s}:output'.format(fits_mask),
                "local-rms": False,
            })
        elif mask_key == 'breizorro':
            # mask produced earlier by breizorro_mask() for this iteration
            fits_mask = 'masking/{0:s}_{1:s}_{2:d}_clean_mask.fits'.format(
                prefix, field, num)
            if not os.path.isfile('{0:s}/{1:s}'.format(pipeline.output, fits_mask)):
                raise caracal.ConfigurationError("Breizorro clean mask {0:s}/{1:s} not found. Something must have gone wrong with the Breizorro run"
                                                 " (maybe the detection threshold was too high?). Please check the logs.".format(pipeline.output, fits_mask))
            image_opts.update({
                "fitsmask": '{0:s}:output'.format(fits_mask),
                "local-rms": False,
            })
        else:
            # any other value is taken as the label of a pre-made mask file
            fits_mask = 'masking/{0:s}_{1:s}.fits'.format(
                mask_key, field)
            if not os.path.isfile('{0:s}/{1:s}'.format(pipeline.output, fits_mask)):
                raise caracal.ConfigurationError("Clean mask {0:s}/{1:s} not found. Please make sure that you have given the correct mask label"
                                                 " in cleanmask_method, and that the mask exists.".format(pipeline.output, fits_mask))
            image_opts.update({
                "fitsmask": '{0:s}:output'.format(fits_mask),
                "local-rms": False,
            })
        recipe.add('cab/wsclean', step,
                   image_opts,
                   input=pipeline.input,
                   output=pipeline.output,
                   label='{:s}:: Make wsclean image (selfcal iter {})'.format(step, num))
        # run now so later steps (source finding, masking) see the images
        recipe.run()
        # Empty job que after execution
        recipe.jobs = []
def sofia_mask(trg, num, img_dir, field):
step = 'make-sofia_mask-field{0:d}-iter{1:d}'.format(trg, num)
key = 'img_sofia_settings'
if config['img_joinchans']:
imagename = '{0:s}/{1:s}_{2:s}_{3:d}-MFS-image.fits'.format(
img_dir, prefix, field, num)
else:
imagename = '{0:s}/{1:s}_{2:s}_{3:d}-image.fits'.format(
img_dir, prefix, field, num)
if config[key]['fornax_special'] and config[key]['fornax_sofia']:
forn_kernels = [[80, 80, 0, 'b']]
forn_thresh = config[key]['fornax_thr'][
num if len(config[key]['fornax_thr']) >= num + 1 else -1]
sofia_opts_forn = {
"import.inFile": imagename,
"steps.doFlag": True,
"steps.doScaleNoise": False,
"steps.doSCfind": True,
"steps.doMerge": True,
"steps.doReliability": False,
"steps.doParameterise": False,
"steps.doWriteMask": True,
"steps.doMom0": False,
"steps.doMom1": False,
"steps.doWriteCat": False,
"parameters.dilateMask": False,
"parameters.fitBusyFunction": False,
"parameters.optimiseMask": False,
"SCfind.kernelUnit": 'pixel',
"SCfind.kernels": forn_kernels,
"SCfind.threshold": forn_thresh,
"SCfind.rmsMode": 'mad',
"SCfind.edgeMode": 'constant',
"SCfind.fluxRange": 'all',
"scaleNoise.method": 'local',
"scaleNoise.windowSpatial": 51,
"scaleNoise.windowSpectral": 1,
"writeCat.basename": 'FornaxA_sofia',
"merge.radiusX": 3,
"merge.radiusY": 3,
"merge.radiusZ": 1,
"merge.minSizeX": 100,
"merge.minSizeY": 100,
"merge.minSizeZ": 1,
}
outmask = pipeline.prefix + '_' + field + '_' + str(num + 1) + '_clean'
outmaskName = outmask + '_mask.fits'
sofia_opts = {
"import.inFile": imagename,
"steps.doFlag": True,
"steps.doScaleNoise": config['image']['cleanmask_localrms'][num if len(config['image']['cleanmask_localrms']) >= num + 1 else -1],
"steps.doSCfind": True,
"steps.doMerge": True,
"steps.doReliability": False,
"steps.doParameterise": False,
"steps.doWriteMask": True,
"steps.doMom0": False,
"steps.doMom1": False,
"steps.doWriteCat": True,
"writeCat.writeASCII": False,
"writeCat.basename": outmask,
"writeCat.writeSQL": False,
"writeCat.writeXML": False,
"parameters.dilateMask": False,
"parameters.fitBusyFunction": False,
"parameters.optimiseMask": False,
"SCfind.kernelUnit": 'pixel',
"SCfind.kernels": [[kk, kk, 0, 'b'] for kk in config[key]['kernels']],
"SCfind.threshold": config['image']['cleanmask_thr'][num if len(config['image']['cleanmask_thr']) >= num + 1 else -1],
"SCfind.rmsMode": 'mad',
"SCfind.edgeMode": 'constant',
"SCfind.fluxRange": 'all',
"scaleNoise.statistic": 'mad',
"scaleNoise.method": 'local',
"scaleNoise.interpolation": 'linear',
"scaleNoise.windowSpatial": config['image']['cleanmask_localrms_window'][num if len(config['image']['cleanmask_localrms_window']) >= num + 1 else -1],
"scaleNoise.windowSpectral": 1,
"scaleNoise.scaleX": True,
"scaleNoise.scaleY": True,
"scaleNoise.scaleZ": False,
"scaleNoise.perSCkernel": config['image']['cleanmask_localrms'][num if len(config['image']['cleanmask_localrms']) >= num + 1 else -1], # work-around for https://github.com/SoFiA-Admin/SoFiA/issues/172, to be replaced by "True" once the next SoFiA version is in Stimela
"merge.radiusX": 3,
"merge.radiusY": 3,
"merge.radiusZ": 1,
"merge.minSizeX": 3,
"merge.minSizeY": 3,
"merge.minSizeZ": 1,
"merge.positivity": config[key]['pospix'],
}
if config[key]['flag']:
flags_sof = config[key]['flagregion']
sofia_opts.update({"flag.regions": flags_sof})
if config[key]['inputmask']:
mask_fits = 'masking/' + config[key]['inputmask']
mask_casa = mask_fits.replace('.fits', '.image')
mask_regrid_casa = mask_fits.replace('.fits', '_regrid.image')
mask_regrid_fits = mask_fits.replace('.fits', '_regrid.fits')
imagename_casa = imagename.split('/')[-1].replace('.fits', '.image')
recipe.add('cab/casa_importfits', step + "-import-image",
{
"fitsimage": imagename,
"imagename": imagename_casa,
"overwrite": True,
},
input=pipeline.output,
output=pipeline.output,
label='Import image in casa format')
recipe.add('cab/casa_importfits', step + "-import-mask",
{
"fitsimage": mask_fits + ':output',
"imagename": mask_casa,
"overwrite": True,
},
input=pipeline.input,
output=pipeline.output,
label='Import mask in casa format')
recipe.add('cab/casa_imregrid', step + "-regrid-mask",
{
"template": imagename_casa + ':output',
"imagename": mask_casa + ':output',
"output": mask_regrid_casa,
"overwrite": True,
},
input=pipeline.input,
output=pipeline.output,
label='Regrid mask to image')
recipe.add('cab/casa_exportfits', step + "-export-mask",
{
"fitsimage": mask_regrid_fits + ':output',
"imagename": mask_regrid_casa + ':output',
"overwrite": True,
},
input=pipeline.input,
output=pipeline.output,
label='Export regridded mask to fits')
recipe.add(change_header_and_type, step + "-copy-header",
{
"filename": pipeline.output + '/' + mask_regrid_fits,
"headfile": pipeline.output + '/' + imagename,
"copy_head": True,
},
input=pipeline.input,
output=pipeline.output,
label='Copy image header to mask')
sofia_opts.update({"import.maskFile": mask_regrid_fits})
sofia_opts.update({"import.inFile": imagename})
if config[key]['fornax_special'] and config[key]['fornax_sofia']:
recipe.add('cab/sofia', step + "-fornax_special",
sofia_opts_forn,
input=pipeline.output,
output=pipeline.output + '/masking/',
label='{0:s}:: Make SoFiA mask'.format(step))
fornax_namemask = 'masking/FornaxA_sofia_mask.fits'
sofia_opts.update({"import.maskFile": fornax_namemask})
elif config[key]['fornax_special'] and config[key]['fornax_sofia'] == False:
# this mask should be regridded to correct f.o.v.
fornax_namemask = 'masking/Fornaxa_vla_mask_doped.fits'
fornax_namemask_regr = 'masking/Fornaxa_vla_mask_doped_regr.fits'
mask_casa = fornax_namemask.split('.fits')[0]
mask_casa = fornax_namemask + '.image'
mask_regrid_casa = fornax_namemask + '_regrid.image'
imagename_casa = '{0:s}_{1:d}{2:s}-image.image'.format(
prefix, num, mfsprefix)
recipe.add('cab/casa_importfits', step + "-fornax_special-import-image",
{
"fitsimage": imagename,
"imagename": imagename_casa,
"overwrite": True,
},
input=pipeline.output,
output=pipeline.output,
label='Image in casa format')
recipe.add('cab/casa_importfits', step + "-fornax_special-import-image",
{
"fitsimage": fornax_namemask + ':output',
"imagename": mask_casa,
"overwrite": True,
},
input=pipeline.input,
output=pipeline.output,
label='Mask in casa format')
recipe.add('cab/casa_imregrid', step + "-fornax_special-regrid",
{
"template": imagename_casa + ':output',
"imagename": mask_casa + ':output',
"output": mask_regrid_casa,
"overwrite": True,
},
input=pipeline.input,
output=pipeline.output,
label='Regridding mosaic to size and projection of dirty image')
recipe.add('cab/casa_exportfits', step + "-fornax_special-export-mosaic",
{
"fitsimage": fornax_namemask_regr + ':output',
"imagename": mask_regrid_casa + ':output',
"overwrite": True,
},
input=pipeline.input,
output=pipeline.output,
label='Extracted regridded mosaic')
recipe.add(change_header_and_type, step + "-fornax_special-change_header",
{
"filename": pipeline.output + '/' + fornax_namemask_regr,
"headfile": pipeline.output + '/' + imagename,
"copy_head": True,
},
input=pipeline.input,
output=pipeline.output,
label='Extracted regridded mosaic')
sofia_opts.update({"import.maskFile": fornax_namemask_regr})
recipe.add('cab/sofia', step,
sofia_opts,
input=pipeline.output,
output=pipeline.output + '/masking/',
label='{0:s}:: Make SoFiA mask'.format(step))
def breizorro_mask(trg, num, img_dir, field):
step = 'make-breizorro_mask-field{0:d}-iter{1:d}'.format(trg, num)
key = 'img_breizorro_settings'
if config['img_joinchans']:
imagename = '{0:s}/{1:s}_{2:s}_{3:d}-MFS-image.fits'.format(
img_dir, prefix, field, num)
else:
imagename = '{0:s}/{1:s}_{2:s}_{3:d}-image.fits'.format(
img_dir, prefix, field, num)
outmask = pipeline.prefix + '_' + field + '_' + str(num + 1) + '_clean'
outmaskName = outmask + '_mask.fits'
breizorro_opts = {
"restored-image": imagename,
"outfile": outmaskName,
"threshold": config['image']['cleanmask_thr'][num if len(config['image']['cleanmask_thr']) >= num + 1 else -1],
"boxsize": config[key]['boxsize'],
"dilate": config[key]['dilate'],
"fill-holes": config[key]['fill_holes']
}
recipe.add('cab/breizorro', step,
breizorro_opts,
input=pipeline.output,
output=pipeline.output + '/masking/',
label='{0:s}:: Make Breizorro'.format(step))
def make_cube(num, img_dir, field, imtype='model'):
im = '{0:s}/{1:s}_{2:s}_{3}-cube.fits:output'.format(
img_dir, prefix, field, num)
step = 'makecube-{}'.format(num)
images = ['{0:s}/{1:s}_{2:s}_{3}-{4:04d}-{5:s}.fits:output'.format(
img_dir, prefix, field, num, i, imtype) for i in range(config['img_nchans'])]
recipe.add('cab/fitstool', step,
{
"image": images,
"output": im,
"stack": True,
"fits-axis": 'FREQ',
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Make convolved model'.format(step))
return im
def extract_sources(trg, num, img_dir, field):
key = 'extract_sources'
if config[key]['detection_image']:
step = 'detection_image-field{0:d}-iter{1:d}'.format(trg, num)
detection_image = '{0:s}/{1:s}-detection_image_{0:s}_{1:d}.fits:output'.format(
img_dir, prefix, field, num)
recipe.add('cab/fitstool', step,
{
"image": ['{0:s}/{1:s}_{2:s}_{3:d}{4:s}-{5:s}.fits:output'.format(img_dir, prefix, field, num, im, mfsprefix) for im in ('image', 'residual')],
"output": detection_image,
"diff": True,
"force": True,
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Make convolved model'.format(step))
else:
detection_image = None
sourcefinder = config[key]['sourcefinder']
if (sourcefinder == 'pybdsm' or sourcefinder == 'pybdsf'):
spi_do = config[key]['spi']
if spi_do:
im = make_cube(num, get_dir_path(
pipeline.continuum, pipeline) + '/' + img_dir.split("/")[-1], field, 'image')
im = im.split("/")[-1]
else:
im = '{0:s}_{1:s}_{2:d}{3:s}-image.fits:output'.format(
prefix, field, num, mfsprefix)
if config[key]['breizorro_image']['enable']:
step = "Breizorro_masked_image"
outmask_image = im.replace('image.fits:output', 'breiz-image.fits')
recipe.add('cab/breizorro', step,
{
"restored-image": im,
"outfile": outmask_image,
"threshold": config[key]['thr_pix'][num - 1 if len(config[key]['thr_pix']) >= num else -1],
"sum-peak": config[key]['breizorro_image']['sum_to_peak'],
"fill-holes": True
},
input=pipeline.input,
output=pipeline.output + '/' + img_dir,
label='{0:s}:: Make Breizorro'.format(step))
im = '{}:{}'.format(outmask_image, 'output')
step = 'extract-field{0:d}-iter{1:d}'.format(trg, num)
calmodel = '{0:s}_{1:s}_{2:d}-pybdsm'.format(prefix, field, num)
if detection_image:
blank_limit = 1e-9
else:
blank_limit = None
try:
os.remove(
'{0:s}/{1:s}/{2:s}.fits'.format(pipeline.output, img_dir, calmodel))
except BaseException:
caracal.log.info('No Previous fits log found.')
try:
os.remove(
'{0:s}/{1:s}/{2:s}.lsm.html'.format(pipeline.output, img_dir, calmodel))
except BaseException:
caracal.log.info('No Previous lsm.html found.')
recipe.add('cab/pybdsm', step,
{
"image": im,
"thresh_pix": config[key]['thr_pix'][num - 1 if len(config[key]['thr_pix']) >= num else -1],
"thresh_isl": config[key]['thr_isl'][num - 1 if len(config[key]['thr_isl']) >= num else -1],
"outfile": '{:s}.gaul:output'.format(calmodel),
"blank_limit": sdm.dismissable(blank_limit),
"adaptive_rms_box": config[key]['local_rms'],
"port2tigger": False,
"format": 'ascii',
"multi_chan_beam": spi_do,
"spectralindex_do": spi_do,
"detection_image": sdm.dismissable(detection_image),
"ncores": ncpu,
},
input=pipeline.input,
# Unfortuntaly need to do it this way for pybdsm
output=pipeline.output + '/' + img_dir,
label='{0:s}:: Extract sources'.format(step))
# In order to make sure that we actually find stuff in the images we execute the rec ipe here
recipe.run()
# Empty job que after execution
recipe.jobs = []
# and then check the proper file is produced
if not os.path.isfile('{0:s}/{1:s}/{2:s}.gaul'.format(pipeline.output, img_dir, calmodel)):
caracal.log.error(
"No model file is found after the PYBDSM run. This probably means no sources were found either due to a bad calibration or to stringent values. ")
raise caracal.BadDataError("No model file found after the PyBDSM run")
step = 'convert-field{0:d}-iter{1:d}'.format(trg, num)
recipe.add('cab/tigger_convert', step,
{
"input-skymodel": '{0:s}/{1:s}.gaul:output'.format(img_dir, calmodel),
"output-skymodel": '{0:s}/{1:s}.lsm.html:output'.format(img_dir, calmodel),
"type": 'Gaul',
"output-type": 'Tigger',
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Convert extracted sources to tigger model'.format(step))
def predict_from_fits(num, model, index, img_dir, mslist, field):
if isinstance(model, str) and len(model.split('+')) == 2:
combine = True
mm = model.split('+')
# Combine FITS models if more than one is given
step = 'combine_models-' + '_'.join(map(str, mm))
calmodel = '{0:s}/{1:s}_{2:s}_{3:d}-FITS-combined.fits:output'.format(
img_dir, prefix, field, num)
cubes = [make_cube(n, img_dir, field, 'model') for n in mm]
recipe.add('cab/fitstool', step,
{
"image": cubes,
"output": calmodel,
"sum": True,
"force": True,
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Add clean components'.format(step))
else:
calmodel = make_cube(num, img_dir, field)
step = 'predict_from_fits-{}'.format(num)
recipe.add('cab/lwimager', 'predict', {
"msname": mslist[index],
"simulate_fits": calmodel,
"column": 'MODEL_DATA',
"img_nchan": config['img_nchans'],
"img_chanstep": 1,
# TODO: This should consider SPW IDs
"nchan": pipeline.nchans[index],
"cellsize": config['img_cell'],
"chanstep": 1,
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Predict from FITS ms={1:s}'.format(step, mslist[index]))
def combine_models(models, num, img_dir, field, enable=True):
model_names = ['{0:s}/{1:s}_{2:s}_{3:s}-pybdsm.lsm.html:output'.format(get_dir_path("{0:s}/image_{1:d}".format(pipeline.continuum, int(m)), pipeline), prefix, field, m) for m in models]
model_names_fits = ['{0:s}/{1:s}_{2:s}_{3:s}-pybdsm.fits'.format(get_dir_path("{0:s}/image_{1:d}".format(pipeline.continuum, int(m)), pipeline), prefix, field, m) for m in models]
calmodel = '{0:s}/{1:s}_{2:d}-pybdsm-combined.lsm.html:output'.format(
img_dir, prefix, num)
if enable:
step = 'combine_models-' + '_'.join(map(str, models))
recipe.add('cab/tigger_convert', step,
{
"input-skymodel": model_names[0],
"append": model_names[1],
"output-skymodel": calmodel,
"rename": True,
"force": True,
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Combined models'.format(step))
return calmodel, model_names_fits
def calibrate_meqtrees(trg, num, prod_path, img_dir, mslist, field):
key = 'calibrate'
global reset_cal, trace_SN, trace_matrix
# force to calibrate with model data column if specified by user
# If the mode is pybdsm_vis then we want to add the clean component model only at the last step,
# which is anyway achieved by the **above** statement; no need to further specify vismodel.
if config['cal_model_mode'] == 'pybdsm_vis':
if num == cal_niter:
vismodel = True
else:
vismodel = False
if len(config[key]['model']) >= num:
model = config[key]['model'][num - 1]
else:
model = str(num)
modelcolumn = 'MODEL_DATA'
if isinstance(model, str) and len(model.split('+')) > 1:
mm = model.split('+')
calmodel, fits_model = combine_models(mm, num, img_dir, field,
enable=False if pipeline.enable_task(
config, 'aimfast') else True)
else:
model = int(model)
calmodel = '{0:s}/{1:s}_{2:s}_{3:d}-pybdsm.lsm.html:output'.format(
img_dir, prefix, field, model)
fits_model = '{0:s}/{1:s}_{2:s}_{3:d}-pybdsm.fits'.format(
img_dir, prefix, field, model)
# If the mode is pybdsm_only, don't use any clean components. So, the same as above, but with
# vismodel =False
elif config['cal_model_mode'] == 'pybdsm_only':
vismodel = False
if len(config[key]['model']) >= num:
model = config[key]['model'][num - 1]
else:
model = str(num)
if isinstance(model, str) and len(model.split('+')) > 1:
mm = model.split('+')
calmodel, fits_model = combine_models(mm, num, img_dir, field,
enable=False if pipeline.enable_task(
config, 'aimfast') else True)
else:
model = int(model)
calmodel = '{0:s}/{1:s}_{2:s}_{3:d}-pybdsm.lsm.html:output'.format(
img_dir, prefix, field, model)
fits_model = '{0:s}/{1:s}_{2:s}_{2:s}-pybdsm.fits'.format(
img_dir, prefix, field, model)
modelcolumn = ''
# If the mode is vis_only, then there is need for an empty sky model (since meqtrees needs one).
# In this case, vis_model is always true, the model_column is always MODEL_DATA.
elif config['cal_model_mode'] == 'vis_only':
vismodel = True
modelcolumn = 'MODEL_DATA'
calmodel = '{0:s}_{1:d}-nullmodel.txt'.format(prefix, num)
with open(os.path.join(pipeline.output, img_dir, calmodel), 'w') as stdw:
stdw.write('#format: ra_d dec_d i\n')
stdw.write('0.0 -30.0 1e-99')
# Let's see the matrix type we are dealing with
if not config['cal_meqtrees']['two_step']:
matrix_type = config[key]['gain_matrix_type'][
num - 1 if len(config[key]['gain_matrix_type']) >= num else -1]
# If we have a two_step selfcal and Gaindiag we want to use CORRECTED_DATA_PHASE as input and write to CORRECTED_DATA
outcolumn = "CORRECTED_DATA"
incolumn = "DATA"
for i, msname in enumerate(mslist):
# Let's see the matrix type we are dealing with
gsols_ = [config[key]['gsols_timeslots'][num - 1 if num <= len(config[key]['gsols_timeslots']) else -1],
config[key]['gsols_chan'][num - 1 if num <= len(config[key]['gsols_chan']) else -1]]
# If we have a two_step selfcal we will calculate the intervals
matrix_type = config[key]['gain_matrix_type'][
num - 1 if len(config[key]['gain_matrix_type']) >= num else -1]
if config['cal_meqtrees']['two_step'] and pipeline.enable_task(config, 'aimfast'):
if num == 1:
matrix_type = 'GainDiagPhase'
SN = 3
else:
matrix_type = trace_matrix[num - 2]
SN = trace_SN[num - 2]
fidelity_data = get_aimfast_data()
obs_data = get_obs_data(msname)
int_time = obs_data['EXPOSURE']
tot_time = 0.0
for scan_key in obs_data['SCAN']['0']:
tot_time += obs_data['SCAN']['0'][scan_key]
no_ant = len(obs_data['ANT']['DISH_DIAMETER'])
DR = fidelity_data['{0}_{2}_{1}-residual'.format(
prefix, num, field)]['{0}_{2}_{1}-model'.format(prefix, num, field)]['DR']
Noise = fidelity_data['{0}_{2}_{1}-residual'.format(
prefix, num, field)]['STDDev']
flux = DR * Noise
solvetime = int(Noise**2 * SN**2 * tot_time *
no_ant / (flux**2 * 2.) / int_time)
if num > 1:
DR = fidelity_data['{0}_{2}_{1}-residual'.format(
prefix, num - 1, field)]['{0}_{2}_{1}-model'.format(prefix, num - 1, field)]['DR']
flux = DR * Noise
prev_solvetime = int(
Noise**2 * SN**2 * tot_time * no_ant / (flux**2 * 2.) / int_time)
else:
prev_solvetime = solvetime + 1
if (solvetime >= prev_solvetime or reset_cal == 1) and matrix_type == 'GainDiagPhase':
matrix_type = 'GainDiag'
SN = 8
solvetime = int(Noise**2 * SN**2 * tot_time *
no_ant / (flux**2 * 2.) / int_time)
gsols_[0] = int(solvetime / num)
elif solvetime >= prev_solvetime and matrix_type == 'GainDiag':
gsols_[0] = int(prev_solvetime / num - 1)
reset_cal = 2
else:
gsols_[0] = int(solvetime / num)
if matrix_type == 'GainDiagPhase':
minsolvetime = int(30. / int_time)
else:
minsolvetime = int(30. * 60. / int_time)
if minsolvetime > gsols_[0]:
gsols_[0] = minsolvetime
if matrix_type == 'GainDiag':
reset_cal = 2
trace_SN.append(SN)
trace_matrix.append(matrix_type)
if matrix_type == 'GainDiagPhase' and config['cal_meqtrees']['two_step']:
outcolumn = "CORRECTED_DATA_PHASE"
incolumn = "DATA"
elif config['cal_meqtrees']['two_step']:
outcolumn = "CORRECTED_DATA"
incolumn = "CORRECTED_DATA_PHASE"
elif config['cal_meqtrees']['two_step']:
# This mode is actually not accesible for now as aimfast is swithed on automatically
gasols_ = [config[key]['gasols_timeslots'][
num - 1 if num <= len(config[key]['gasols_timeslots']) else -1],
config[key]['gasols_chan'][
num - 1 if num <= len(config[key]['gasols_chan']) else -1]]
if gasols_[0] == -1:
outcolumn = "CORRECTED_DATA_PHASE"
incolumn = "DATA"
else:
outcolumn = "CORRECTED_DATA"
incolumn = "CORRECTED_DATA_PHASE"
matrix_type = 'GainDiag'
gsols_ = gasols_
bsols_ = [config[key]['bsols_timeslots'][num - 1 if num <= len(config[key]['bsols_timeslots']) else -1],
config[key]['bsols_chan'][num - 1 if num <= len(config[key]['bsols_chan']) else -1]]
step = 'calibrate-field{0:d}-iter{1:d}-ms{2:d}'.format(trg, num, i)
outdata = config[key]['output_data'][num - 1 if len(config[key]['output_data']) >= num else -1]
if outdata == 'CORRECTED_DATA':
outdata = 'CORR_DATA'
model_cal = calmodel.split("/")[-1]
model_cal = model_cal.split(":output")[0]
inp_dir = pipeline.output + "/" + img_dir + "/"
op_dir = pipeline.continuum + "/selfcal_products/"
msbase = os.path.splitext(msname)[0]
recipe.add('cab/calibrator', step,
{
"skymodel": model_cal,
"add-vis-model": vismodel,
"model-column": modelcolumn,
"msname": msname,
"threads": ncpu,
"column": incolumn,
"output-data": outdata,
"output-column": outcolumn,
"prefix": '{0:s}_{1:s}_{2:d}_meqtrees'.format(prefix, msbase, num),
"label": 'cal{0:d}'.format(num),
"read-flags-from-ms": True,
"read-flagsets": "-stefcal",
"write-flagset": "stefcal",
"write-flagset-policy": "replace",
"Gjones": True,
"Gjones-solution-intervals": sdm.dismissable(gsols_ or None),
"Gjones-matrix-type": matrix_type,
"Gjones-ampl-clipping": True,
"Gjones-ampl-clipping-low": config['cal_gain_cliplow'],
"Gjones-ampl-clipping-high": config['cal_gain_cliphigh'],
"Bjones": config['cal_bjones'],
"Bjones-solution-intervals": sdm.dismissable(bsols_ or None),
"Bjones-ampl-clipping": config['cal_bjones'],
"Bjones-ampl-clipping-low": config['cal_gain_cliplow'],
"Bjones-ampl-clipping-high": config['cal_gain_cliphigh'],
"make-plots": False,
"tile-size": time_chunk,
},
input=inp_dir,
output=op_dir,
label="{0:s}:: Calibrate step {1:d} ms={2:s}".format(step, num, msname))
def calibrate_cubical(trg, num, prod_path, img_dir, mslist, field):
    """Queue one 'cab/cubical' self-calibration step per MS in *mslist*.

    For self-cal iteration *num* this builds the sky-model list (pybdsf LSM,
    MODEL_DATA visibilities, or a '+'-combined model, depending on
    ``cal_model_mode``), derives the Jones chain ('G', optionally ',B' and
    ',DD') and per-term solution intervals from the 'calibrate' config
    section, and adds the resulting CubiCal run to the recipe.

    Args:
        trg (int): target-field index, used only in the recipe step name.
        num (int): self-cal iteration number (1-based).
        prod_path (str): directory where gain tables (parmdb) are written.
        img_dir (str): image directory holding the pybdsf model files.
        mslist (list): measurement sets to calibrate.
        field (str): field name used to build model file names.
    """
    key = 'calibrate'
    modellist = []
    # if model is unset for this iteration then just run with the model
    if len(config[key]['model']) >= num:
        model = config[key]['model'][num - 1]
    else:
        model = str(num)
    # Defines the pybdsf models (and fitsmodels for some weird reasons)
    # If the model string contains a +, then combine the appropriate models
    if isinstance(model, str) and len(model.split('+')) > 1:
        mm = model.split('+')
        calmodel, fits_model = combine_models(mm, num, img_dir, field)
    # If it doesn't then don't combine.
    else:
        model = int(model)
        calmodel = '{0:s}/{1:s}_{2:s}_{3:d}-pybdsm.lsm.html:output'.format(
            img_dir, prefix, field, model)
        fits_model = '{0:s}/{1:s}_{2:s}_{3:d}-pybdsm.fits'.format(
            img_dir, prefix, field, model)
    # In pybdsm_vis mode, add the calmodel (pybdsf) and the MODEL_DATA.
    if config['cal_model_mode'] == 'pybdsm_vis':
        if (num == cal_niter):
            cmodel = calmodel.split(":output")[0]
            modellist = spf("MODEL_DATA+" + '{}/' + cmodel, "output")
        # modellist = [calmodel, 'MODEL_DATA']
        # otherwise, just calmodel (pybdsf)
        else:
            # modellist = [calmodel]
            cmodel = calmodel.split(":output")[0]
            modellist = spf("{}/" + cmodel, "output")
        # This is incorrect and will result in the lsm being used in the first direction
        # and the model_data in the others. They need to be added as + however
        # that messes up the output identifier structure
    if config['cal_model_mode'] == 'pybdsm_only':
        cmodel = calmodel.split(":output")[0]
        modellist = spf('{}/' + cmodel, "output")
    if config['cal_model_mode'] == 'vis_only':
        modellist = spf("MODEL_DATA")
    # Fall back to the last configured entry when fewer entries than iterations.
    matrix_type = config[key]['gain_matrix_type'][
        num - 1 if len(config[key]['gain_matrix_type']) >= num else -1]
    if matrix_type == 'Gain2x2':
        take_diag_terms = False
    else:
        take_diag_terms = True
    # set the update type correctly
    if matrix_type == 'GainDiagPhase':
        gupdate = 'phase-diag'
    elif matrix_type == 'GainDiagAmp':
        gupdate = 'amp-diag'
    elif matrix_type == 'GainDiag':
        gupdate = 'diag'
    elif matrix_type == 'Gain2x2':
        gupdate = 'full'
    elif matrix_type == 'Fslope':
        gupdate = 'phase-diag'
    else:
        raise ValueError('{} is not a viable matrix_type'.format(matrix_type))
    jones_chain = 'G'
    # [timeslot interval, channel interval] per Jones term; -1 entries mean
    # "inherit" and are resolved below for the DD term.
    gsols_ = [config[key]['gsols_timeslots'][num - 1 if num <= len(config[key]['gsols_timeslots']) else -1],
              config[key]['gsols_chan'][
        num - 1 if num <= len(config[key]['gsols_chan']) else -1]]
    bsols_ = [config[key]['bsols_timeslots'][num - 1 if num <= len(config[key]['bsols_timeslots']) else -1],
              config[key]['bsols_chan'][num - 1 if num <= len(config[key]['bsols_chan']) else -1]]
    gasols_ = [
        config[key]['gasols_timeslots'][num - 1 if num <=
                                        len(config[key]['gasols_timeslots']) else -1],
        config[key]['gasols_chan'][num - 1 if num <=
                                   len(config[key]['gasols_chan']) else -1]]
    if config['cal_bjones']:
        jones_chain += ',B'
        bupdate = gupdate
    second_matrix_invoked = False
    # If we are doing a calibration of phases and amplitudes on different timescale G is always phase
    # This cannot be combined with the earlier statement as bupdate needs to be equal to the original matrix
    # first check if we are doing amplitude and phase
    if (matrix_type == 'GainDiag' or matrix_type == 'Gain2x2'):
        # Then check whether the scales different
        if (gasols_[0] != -1 and gasols_[0] != gsols_[0]) or (gasols_[1] != -1 and gasols_[1] != gsols_[1]):
            gupdate = 'phase-diag'
            jones_chain += ',DD'
            second_matrix_invoked = True
            if gasols_[0] == -1:
                gasols_[0] = gsols_[0]
            if gasols_[1] == -1:
                gasols_[1] = gsols_[1]
    # If we are using more than one matrix we need to set the matrix type to Gain2x2
    if len(jones_chain.split(",")) > 1:
        matrix_type = 'Gain2x2'
    # Need to ad the solution term iterations
    solterm_niter = config['cal_cubical']['solterm_niter']
    sol_terms_add = []
    for term in jones_chain.split(","):
        sol_terms_add.append(str(solterm_niter[SOL_TERMS_INDEX[term]]))
    flags = "-cubical"
    for i, msname in enumerate(mslist):
        # Due to a bug in cubical full polarization datasets are not compliant with sel-diag: True
        # Hence this temporary fix.
        corrs = pipeline.get_msinfo(msname)['CORR']['CORR_TYPE']
        if len(corrs) > 2:
            take_diag_terms = False
        # End temp fix
        # NOTE(review): the ms index 'i' is passed to format() but not used in the
        # step name (the meqtrees variant appends '-ms{2:d}'), so with multiple MSs
        # the recipe steps share a name — confirm this is intended.
        step = 'calibrate-cubical-field{0:d}-iter{1:d}'.format(trg, num, i)
        # Choose the gain-table file name to match the g-update-type being solved.
        if gupdate == 'phase-diag' and matrix_type == 'Fslope':
            g_table_name = "{0:s}/{3:s}-g-delay-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                     pipeline), num, os.path.splitext(msname)[0], prefix)
        elif gupdate == 'phase-diag':
            g_table_name = "{0:s}/{3:s}-g-phase-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                     pipeline), num, os.path.splitext(msname)[0], prefix)
        elif gupdate == 'amp-diag':
            g_table_name = "{0:s}/{3:s}-g-amp-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                   pipeline), num, os.path.splitext(msname)[0], prefix)
        elif gupdate == 'diag':
            g_table_name = "{0:s}/{3:s}-g-amp-phase-diag-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                              pipeline), num, os.path.splitext(msname)[0], prefix)
        elif gupdate == 'full':
            g_table_name = "{0:s}/{3:s}-g-amp-phase-full-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                              pipeline), num, os.path.splitext(msname)[0], prefix)
        else:
            raise RuntimeError("Something has corrupted the selfcal run")
        msbase = os.path.splitext(msname)[0]
        cubical_opts = {
            "data-ms": msname,
            "data-column": 'DATA',
            "model-list": modellist,
            "sel-ddid": sdm.dismissable(spwid),
            "dist-ncpu": ncpu,
            "log-memory": True,
            "sol-jones": jones_chain,
            "sol-term-iters": ",".join(sol_terms_add),
            "sel-diag": take_diag_terms,
            "out-name": '{0:s}/{1:s}_{2:s}_{3:d}_cubical'.format(get_dir_path(prod_path,
                                                                              pipeline), prefix, msbase, num),
            "out-mode": CUBICAL_OUT[config[key]['output_data'][num - 1 if len(config[key]['output_data']) >= num else -1]],
            "out-plots": True,
            "dist-max-chunks": config['cal_cubical']['dist_max_chunks'],
            "out-casa-gaintables": True,
            "weight-column": config['cal_cubical']['weight_col'],
            "montblanc-dtype": 'float',
            "bbc-save-to": "{0:s}/bbc-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                           pipeline), num, msbase),
            "g-solvable": True,
            "g-type": CUBICAL_MT[matrix_type],
            "g-update-type": gupdate,
            "g-time-int": int(gsols_[0]),
            "g-freq-int": int(gsols_[1]),
            "out-overwrite": config['cal_cubical']['overwrite'],
            "g-save-to": g_table_name,
            "g-clip-low": config['cal_gain_cliplow'],
            "g-clip-high": config['cal_gain_cliphigh'],
            "g-max-prior-error": config['cal_cubical']['max_prior_error'],
            "g-max-post-error": config['cal_cubical']['max_post_error'],
            "madmax-enable": config['cal_cubical']['flag_madmax'],
            "madmax-plot": True if (config['cal_cubical']['flag_madmax']) else False,
            "madmax-threshold": config['cal_cubical']['madmax_flag_thr'],
            "madmax-estimate": 'corr',
            "log-boring": True,
            "dd-dd-term": False,
            "model-ddes": 'never',
        }
        if min_uvw > 0:
            cubical_opts.update({"sol-min-bl": min_uvw})
        if flags != "":
            cubical_opts.update({
                "flags-apply": flags,
            })
        # Second (DD) matrix solves amplitudes on their own interval.
        if second_matrix_invoked:
            cubical_opts.update({
                "dd-update-type": 'amp-diag',
                "dd-solvable": True,
                "dd-type": CUBICAL_MT[matrix_type],
                "dd-time-int": int(gasols_[0]),
                "dd-freq-int": int(gasols_[1]),
                "dd-save-to": "{0:s}/{3:s}-g-amp-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                      pipeline), num, os.path.splitext(msname)[0], prefix),
                "dd-clip-low": config['cal_gain_cliplow'],
                "dd-clip-high": config['cal_gain_cliphigh'],
                "dd-max-prior-error": config['cal_cubical']['max_prior_error'],
                "dd-max-post-error": config['cal_cubical']['max_post_error'],
            })
        if config['cal_bjones']:
            if bupdate == 'phase-diag':
                b_table_name = "{0:s}/{3:s}-b-phase-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                         pipeline), num, os.path.splitext(msname)[0], prefix)
            elif bupdate == 'amp-diag':
                b_table_name = "{0:s}/{3:s}-b-amp-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                       pipeline), num, os.path.splitext(msname)[0], prefix)
            elif bupdate == 'diag':
                b_table_name = "{0:s}/{3:s}-b-amp-phase-diag-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                                  pipeline), num, os.path.splitext(msname)[0], prefix)
            elif bupdate == 'full':
                b_table_name = "{0:s}/{3:s}-b-amp-phase-full-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                                  pipeline), num, os.path.splitext(msname)[0], prefix)
            else:
                raise RuntimeError("Something has corrupted the selfcal run")
            cubical_opts.update({
                "b-update-type": bupdate,
                "b-solvable": True,
                "b-time-int": int(bsols_[0]),
                "b-freq-int": int(bsols_[1]),
                "b-type": CUBICAL_MT[matrix_type],
                "b-clip-low": config['cal_gain_cliplow'],
                "b-save-to": b_table_name,
                "b-clip-high": config['cal_gain_cliphigh'],
                "b-max-prior-error": config['cal_cubical']['max_prior_error'],
                "b-max-post-error": config['cal_cubical']['max_post_error'], }
            )
        # Time chunk and freq chunk have been checked and approved before so they are what they are
        cubical_opts.update({
            "data-time-chunk": time_chunk,
            "data-freq-chunk": freq_chunk, }
        )
        recipe.add('cab/cubical', step, cubical_opts,
                   input=pipeline.input,
                   output=pipeline.output,
                   shared_memory=config['cal_cubical']['shared_mem'],
                   label="{0:s}:: Calibrate step {1:d} ms={2:s}".format(step, num, msname))
def restore(num, prod_path, mslist_out, enable_inter=True):
    """Apply the gain tables of self-cal iteration *num* to *mslist_out*.

    Re-derives the Jones chain and solution intervals exactly as they were
    when the tables were solved, then queues one apply-only 'cab/cubical'
    step per MS. With ``enable_inter=True`` the gains are transferred
    (optionally interpolated) from the averaged calibration MSs to the
    full-resolution MSs, scaling the solution intervals by the
    integration-time and channel-size ratios of the two datasets; with
    ``enable_inter=False`` the gains are simply re-applied (restored).

    Bug fixes relative to the previous revision:
    * In the interpolation branches the *channel* interval was scaled by
      ``ratio_timeslot`` instead of ``ratio_channelsize`` (the
      non-interpolating branches already used the channel ratio).
    * The ``second_matrix_invoked`` interval scaling used the ratios
      unconditionally, raising NameError in restore mode
      (``enable_inter=False``) where the ratios are never computed; it is
      now guarded by ``enable_inter`` like the B-Jones block.

    Args:
        num (int): self-cal iteration whose tables are applied.
        prod_path (str): directory holding the parmdb gain tables.
        mslist_out (list): measurement sets to apply the gains to.
        enable_inter (bool): transfer/interpolate mode (True) or restore mode.
    """
    key = 'calibrate'
    # to achieve accurate restauration we need to reset all parameters properly
    matrix_type = config[key]['gain_matrix_type'][
        num - 1 if len(config[key]['gain_matrix_type']) >= num else -1]
    # Decide if take diagonal terms into account
    if matrix_type == 'Gain2x2':
        take_diag_terms = False
    else:
        take_diag_terms = True
    # set the update type correctly
    if matrix_type == 'GainDiagPhase':
        gupdate = 'phase-diag'
    elif matrix_type == 'GainDiagAmp':
        gupdate = 'amp-diag'
    elif matrix_type == 'GainDiag':
        gupdate = 'diag'
    elif matrix_type == 'Gain2x2':
        gupdate = 'full'
    elif matrix_type == 'Fslope':
        gupdate = 'phase-diag'
    else:
        raise ValueError('{} is not a viable matrix_type'.format(matrix_type))
    jones_chain = 'G'
    # [timeslot interval, channel interval] per Jones term, falling back to the
    # last configured entry when fewer entries than iterations exist.
    gsols_ = [config[key]['gsols_timeslots'][num - 1 if num <= len(config[key]['gsols_timeslots']) else -1],
              config[key]['gsols_chan'][
        num - 1 if num <= len(config[key]['gsols_chan']) else -1]]
    bsols_ = [config[key]['bsols_timeslots'][num - 1 if num <= len(config[key]['bsols_timeslots']) else -1],
              config[key]['bsols_chan'][
        num - 1 if num <= len(config[key]['bsols_chan']) else -1]]
    gasols_ = [
        config[key]['gasols_timeslots'][num - 1 if num <=
                                        len(config[key]['gasols_timeslots']) else -1],
        config[key]['gasols_chan'][num - 1 if num <=
                                   len(config[key]['gasols_chan']) else -1]]
    # If we are doing a calibration of phases and amplitudes on different timescale G is always phase
    # This cannot be combined with the earlier statement as bupdate needs to be equal to the original matrix.
    second_matrix_invoked = False
    if (matrix_type == 'GainDiag' or matrix_type == 'Gain2x2'):
        # Then check whether the scales different
        if (gasols_[0] != -1 and gasols_[0] != gsols_[0]) or (gasols_[1] != -1 and gasols_[1] != gsols_[1]):
            gupdate = 'phase-diag'
            jones_chain += ',DD'
            second_matrix_invoked = True
            if gasols_[0] == -1:
                gasols_[0] = gsols_[0]
            if gasols_[1] == -1:
                gasols_[1] = gsols_[1]
    # If we want to interpolate our we get the interpolation interval
    if config['cal_bjones']:
        jones_chain += ',B'
        bupdate = gupdate
    # select the right datasets
    if enable_inter:
        apmode = 'ac'
    else:
        if CUBICAL_OUT[
                config[key]['output_data'][num - 1 if len(config[key]['output_data']) >= num else -1]] == 'sr':
            apmode = 'ar'
        else:
            apmode = 'ac'
    # if we have more than one matrix set the matrixtype correctly
    if len(jones_chain.split(",")) > 1:
        matrix_type = 'Gain2x2'
    # Cubical does not at the moment apply the gains when the matrix is not complex2x2 (https://github.com/ratt-ru/CubiCal/issues/324).
    # Hence the following fix. This should be removed once the fix makes it into stimela.
    # NOTE(review): forcing Gain2x2 here makes the Fslope branch of the table-name
    # selection below unreachable — confirm delay tables are still found.
    matrix_type = 'Gain2x2'
    # Does solterm_niter matter for applying?????
    solterm_niter = config['cal_cubical']['solterm_niter']
    sol_terms_add = []
    for term in jones_chain.split(","):
        sol_terms_add.append(str(solterm_niter[SOL_TERMS_INDEX[term]]))
    # loop through measurement sets
    for i, msname_out in enumerate(mslist_out):
        # Due to a bug in cubical full polarization datasets are not compliant with sel-diag: True
        # Hence this temporary fix.
        corrs = pipeline.get_msinfo(msname_out)['CORR']['CORR_TYPE']
        if len(corrs) > 2:
            take_diag_terms = False
        # End temp fix
        # Deep copies so the per-MS adjustments below never mutate the shared
        # interval lists used by subsequent iterations of this loop.
        gsols_apply = copy.deepcopy(gsols_)
        bsols_apply = copy.deepcopy(bsols_)
        gasols_apply = copy.deepcopy(gasols_)
        if enable_inter and config['transfer_apply_gains']['interpolate']['enable']:
            time_chunk_apply = config['transfer_apply_gains']['interpolate']['timeslots_chunk']
            freq_chunk_apply = config['transfer_apply_gains']['interpolate']['chan_chunk']
        else:
            time_chunk_apply = copy.deepcopy(time_chunk)
            freq_chunk_apply = copy.deepcopy(freq_chunk)
        if enable_inter:
            # Read the time and frequency channels of the 'fullres'
            fullres_data = get_obs_data(msname_out)
            int_time_fullres = fullres_data['EXPOSURE']
            channelsize_fullres = fullres_data['SPW']['TOTAL_BANDWIDTH'][0] / fullres_data['SPW']['NUM_CHAN'][0]
            caracal.log.info("Integration time of full-resolution data is: {}".format(int_time_fullres))
            caracal.log.info("Channel size of full-resolution data is: {}".format(channelsize_fullres))
            # Corresponding numbers for the self-cal -ed MS:
            avg_data = get_obs_data(mslist[i])
            int_time_avg = avg_data['EXPOSURE']
            channelsize_avg = avg_data['SPW']['TOTAL_BANDWIDTH'][0] / avg_data['SPW']['NUM_CHAN'][0]
            caracal.log.info("Integration time of averaged data is: {}".format(int_time_avg))
            caracal.log.info("Channel size of averaged data is:{}".format(channelsize_avg))
            # Compare the channel and timeslot ratios:
            ratio_timeslot = int_time_avg / int_time_fullres
            ratio_channelsize = channelsize_avg / channelsize_fullres
            fromname = msname_out.replace(label_tgain, label)
            if not config['transfer_apply_gains']['interpolate']['enable']:
                gsols_apply[0] = int(ratio_timeslot * gsols_[0])
                gsols_apply[1] = int(ratio_channelsize * gsols_[1])
                time_chunk_apply = int(max(int(ratio_timeslot * gsols_[0]), time_chunk_apply)) if not (
                    int(gsols_[0]) == 0 or time_chunk_apply == 0) else 0
                freq_chunk_apply = int(max(int(ratio_channelsize * gsols_[1]), freq_chunk_apply)) if not (
                    int(gsols_[1]) == 0 or freq_chunk_apply == 0) else 0
            else:
                if config['transfer_apply_gains']['interpolate']['timeslots_int'] < 0:
                    gsols_apply[0] = int(ratio_timeslot * gsols_[0])
                else:
                    gsols_apply[0] = config['transfer_apply_gains']['interpolate']['timeslots_int']
                if config['transfer_apply_gains']['interpolate']['chan_int'] < 0:
                    # Bugfix: channel interval must scale with the channel-size
                    # ratio (was erroneously ratio_timeslot).
                    gsols_apply[1] = int(ratio_channelsize * gsols_[1])
                else:
                    gsols_apply[1] = config['transfer_apply_gains']['interpolate']['chan_int']
                time_chunk_apply = int(max(int(ratio_timeslot * gsols_[0]), time_chunk_apply)) if not (
                    int(gsols_[0]) == 0 or time_chunk_apply == 0) else 0
                freq_chunk_apply = int(max(int(ratio_channelsize * gsols_[1]), freq_chunk_apply)) if not (
                    int(gsols_[1]) == 0 or freq_chunk_apply == 0) else 0
        else:
            fromname = msname_out
        # First remove the later flags
        counter = num + 1
        remainder_flags = "step_{0:d}_2gc_flags".format(counter)
        while counter < cal_niter:
            counter += 1
            remainder_flags += ",step_{0:d}_2gc_flags".format(counter)
        mspref = msname_out.split(".ms")[0].replace("-", "_")
        recipe.add("cab/flagms", "remove_2gc_flags-{0:s}".format(mspref),
                   {
                       "msname": msname_out,
                       "remove": remainder_flags,
                   },
                   input=pipeline.input,
                   output=pipeline.output,
                   label="remove-2gc_flags-{0:s}:: Remove 2GC flags".format(mspref))
        # build cubical commands
        msbase = os.path.splitext(msname_out)[0]
        cubical_gain_interp_opts = {
            "data-ms": msname_out,
            "data-column": 'DATA',
            "sel-ddid": sdm.dismissable(spwid),
            "sol-jones": jones_chain,
            "sol-term-iters": ",".join(sol_terms_add),
            "sel-diag": take_diag_terms,
            "dist-ncpu": ncpu,
            "dist-max-chunks": config['cal_cubical']['dist_max_chunks'],
            "log-memory": True,
            "out-name": '{0:s}/{1:s}-{2:s}_{3:d}_restored_cubical'.format(get_dir_path(prod_path,
                                                                                       pipeline), prefix,
                                                                          msbase, num),
            "out-mode": apmode,
            # "out-overwrite": config[key]['overwrite'],
            "out-overwrite": True,
            "weight-column": config['cal_cubical']['weight_col'],
            "montblanc-dtype": 'float',
            "g-solvable": True,
            "g-update-type": gupdate,
            "g-type": CUBICAL_MT[matrix_type],
            "g-time-int": int(gsols_apply[0]),
            "g-freq-int": int(gsols_apply[1]),
            "madmax-enable": config['cal_cubical']['flag_madmax'],
            "madmax-plot": False,
            "madmax-threshold": config['cal_cubical']['madmax_flag_thr'],
            "madmax-estimate": 'corr',
            "madmax-offdiag": False,
            "dd-dd-term": False,
            "model-ddes": 'never',
        }
        # Set the table name
        if gupdate == 'phase-diag' and matrix_type == 'Fslope':
            g_table_name = "{0:s}/{3:s}-g-delay-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                     pipeline), num, os.path.splitext(fromname)[0], prefix)
        elif gupdate == 'phase-diag':
            g_table_name = "{0:s}/{3:s}-g-phase-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                     pipeline), num, os.path.splitext(fromname)[0], prefix)
        elif gupdate == 'amp-diag':
            g_table_name = "{0:s}/{3:s}-g-amp-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                   pipeline), num, os.path.splitext(fromname)[0], prefix)
        elif gupdate == 'diag':
            g_table_name = "{0:s}/{3:s}-g-amp-phase-diag-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                              pipeline), num, os.path.splitext(fromname)[0], prefix)
        elif gupdate == 'full':
            g_table_name = "{0:s}/{3:s}-g-amp-phase-full-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                              pipeline), num, os.path.splitext(fromname)[0], prefix)
        else:
            raise RuntimeError("Something has corrupted the application of the tables")
        if config['transfer_apply_gains']['interpolate']['enable']:
            cubical_gain_interp_opts.update({
                "g-xfer-from": g_table_name
            })
        else:
            cubical_gain_interp_opts.update({
                "g-load-from": g_table_name
            })
        # expand
        if config['cal_bjones']:
            if enable_inter:
                if not config['transfer_apply_gains']['interpolate']['enable']:
                    bsols_apply[0] = int(ratio_timeslot * bsols_[0])
                    bsols_apply[1] = int(ratio_channelsize * bsols_[1])
                    time_chunk_apply = int(max(int(ratio_timeslot * bsols_[0]), time_chunk_apply)) if not (
                        int(bsols_[0]) == 0 or time_chunk_apply == 0) else 0
                    freq_chunk_apply = int(max(int(ratio_channelsize * bsols_[1]), freq_chunk_apply)) if not (
                        int(bsols_[1]) == 0 or freq_chunk_apply == 0) else 0
                else:
                    if config['transfer_apply_gains']['interpolate']['timeslots_int'] < 0:
                        bsols_apply[0] = int(ratio_timeslot * bsols_[0])
                    else:
                        bsols_apply[0] = config['transfer_apply_gains']['interpolate']['timeslots_int']
                    if config['transfer_apply_gains']['interpolate']['chan_int'] < 0:
                        # Bugfix: channel interval must scale with the channel-size
                        # ratio (was erroneously ratio_timeslot).
                        bsols_apply[1] = int(ratio_channelsize * bsols_[1])
                    else:
                        bsols_apply[1] = config['transfer_apply_gains']['interpolate']['chan_int']
                    time_chunk_apply = int(max(int(ratio_timeslot * bsols_[0]), time_chunk_apply)) if not (
                        int(bsols_[0]) == 0 or time_chunk_apply == 0) else 0
                    freq_chunk_apply = int(max(int(ratio_channelsize * bsols_[1]), freq_chunk_apply)) if not (
                        int(bsols_[1]) == 0 or freq_chunk_apply == 0) else 0
            cubical_gain_interp_opts.update({
                "b-update-type": bupdate,
                "b-type": CUBICAL_MT[matrix_type],
                "b-time-int": int(bsols_apply[0]),
                "b-freq-int": int(bsols_apply[1]),
                "b-solvable": False
            })
            # Set the table name
            if bupdate == 'phase-diag':
                b_table_name = "{0:s}/{3:s}-b-phase-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                         pipeline), num, os.path.splitext(fromname)[0], prefix)
            elif bupdate == 'amp-diag':
                b_table_name = "{0:s}/{3:s}-b-amp-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                       pipeline), num, os.path.splitext(fromname)[0], prefix)
            elif bupdate == 'diag':
                b_table_name = "{0:s}/{3:s}-b-amp-phase-diag-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                                  pipeline), num, os.path.splitext(fromname)[0], prefix)
            elif bupdate == 'full':
                b_table_name = "{0:s}/{3:s}-b-amp-phase-full-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                                  pipeline), num, os.path.splitext(fromname)[0], prefix)
            else:
                raise RuntimeError("Something has corrupted the application of the tables")
            if config['transfer_apply_gains']['interpolate']['enable']:
                cubical_gain_interp_opts.update({
                    "b-xfer-from": b_table_name
                })
            else:
                cubical_gain_interp_opts.update({
                    "b-load-from": b_table_name
                })
        if second_matrix_invoked:
            # Bugfix: the ratios only exist in transfer mode; guard like the
            # B-Jones block above so restore mode does not hit a NameError.
            if enable_inter:
                if not config['transfer_apply_gains']['interpolate']['enable']:
                    gasols_apply[0] = int(ratio_timeslot * gasols_[0])
                    gasols_apply[1] = int(ratio_channelsize * gasols_[1])
                    time_chunk_apply = int(max(int(ratio_timeslot * gasols_[0]), time_chunk_apply)) if not (
                        int(gasols_[0]) == 0 or time_chunk_apply == 0) else 0
                    freq_chunk_apply = int(max(int(ratio_channelsize * gasols_[1]), freq_chunk_apply)) if not (
                        int(gasols_[1]) == 0 or freq_chunk_apply == 0) else 0
                else:
                    if config['transfer_apply_gains']['interpolate']['timeslots_int'] < 0:
                        gasols_apply[0] = int(ratio_timeslot * gasols_[0])
                    else:
                        gasols_apply[0] = config['transfer_apply_gains']['interpolate']['timeslots_int']
                    if config['transfer_apply_gains']['interpolate']['chan_int'] < 0:
                        # Bugfix: channel interval must scale with the channel-size
                        # ratio (was erroneously ratio_timeslot).
                        gasols_apply[1] = int(ratio_channelsize * gasols_[1])
                    else:
                        gasols_apply[1] = config['transfer_apply_gains']['interpolate']['chan_int']
                    time_chunk_apply = int(max(int(ratio_timeslot * gasols_[0]), time_chunk_apply)) if not (
                        int(gasols_[0]) == 0 or time_chunk_apply == 0) else 0
                    freq_chunk_apply = int(max(int(ratio_channelsize * gasols_[1]), freq_chunk_apply)) if not (
                        int(gasols_[1]) == 0 or freq_chunk_apply == 0) else 0
            cubical_gain_interp_opts.update({
                "dd-update-type": 'amp-diag',
                "dd-type": CUBICAL_MT[matrix_type],
                "dd-time-int": int(gasols_apply[0]),
                "dd-freq-int": int(gasols_apply[1]),
                "dd-solvable": False
            })
            if config['transfer_apply_gains']['interpolate']['enable']:
                cubical_gain_interp_opts.update({
                    "dd-xfer-from": "{0:s}/{3:s}-g-amp-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                            pipeline), num, os.path.splitext(fromname)[0], prefix)
                })
            else:
                cubical_gain_interp_opts.update({
                    "dd-load-from": "{0:s}/{3:s}-g-amp-gains-{1:d}-{2:s}.parmdb:output".format(get_dir_path(prod_path,
                                                                                                            pipeline), num, os.path.splitext(fromname)[0], prefix)
                })
        cubical_gain_interp_opts.update({
            "data-time-chunk": time_chunk_apply,
            "data-freq-chunk": int(freq_chunk_apply)
        })
        # ensure proper logging for restore or interpolation
        if not enable_inter:
            step = 'restore_cubical_gains-{0:d}-{1:d}'.format(num, i)
            stim_label = "{0:s}:: restore cubical gains ms={1:s}".format(step, msname_out)
        else:
            step = 'apply_cubical_gains-{0:d}-{1:d}'.format(num, i)
            stim_label = "{0:s}:: Apply cubical gains ms={1:s}".format(step, msname_out)
        recipe.add('cab/cubical', step, cubical_gain_interp_opts,
                   input=pipeline.input,
                   output=pipeline.output,
                   shared_memory=config['cal_cubical']['shared_mem'],
                   label=stim_label)
        recipe.run()
        # Empty job que after execution
        recipe.jobs = []
def get_aimfast_data(filename='{0:s}/{1:s}_fidelity_results.json'.format(
        pipeline.output, prefix)):
    """Load the aimfast fidelity-results JSON file and return its contents."""
    with open(filename) as json_file:
        return json.load(json_file)
def get_obs_data(msname):
    """Return the pipeline's cached observation metadata for *msname*."""
    return pipeline.get_msinfo(msname)
def quality_check(n, field, enable=True):
    """Examine the aimfast results to decide whether self-cal should continue.

    Compares the aimfast statistics of iteration *n* with those of iteration
    ``n - 1`` through a weighted mean of the DR, skewness, kurtosis, mean and
    noise ratios, using the criteria listed in
    ``config['aimfast']['convergence_criteria']``.

    Args:
        n (int): current self-cal iteration (comparison starts at n >= 2).
        field (str): field name used to look up the fidelity results.
        enable (bool): when False the check is skipped and True is returned.

    Returns:
        bool: True to continue self-calibration, False to stop.

    Side effects: runs the queued recipe jobs, and on convergence decrements
    the module-level ``self_cal_iter_counter`` / increments ``reset_cal``.
    """
    # If total number of iterations is reached stop
    global reset_cal
    if enable:
        # The recipe has to be executed at this point to get the image fidelity results
        recipe.run()
        # Empty job que after execution
        recipe.jobs = []
        if reset_cal >= 2:
            return False
        key = 'aimfast'
        tol = config[key]['tol']
        conv_crit = config[key]['convergence_criteria']
        # Ensure atleast one iteration is ran to compare previous and subsequent images
        # And atleast one convergence criteria is specified
        if n >= 2 and not config['cal_meqtrees']['two_step'] and conv_crit:
            fidelity_data = get_aimfast_data()
            conv_crit = [cc.upper() for cc in conv_crit]
            # Ensure atleast one iteration is ran to compare previous and subsequent images
            residual0 = fidelity_data['{0}_{1}_{2}-residual'.format(
                prefix, field, n - 1)]
            residual1 = fidelity_data['{0}_{1}_{2}-residual'.format(
                prefix, field, n)]
            # Unlike the other ratios DR should grow hence n-1/n < 1.
            if not pipeline.enable_task(config, 'extract_sources'):
                drratio = fidelity_data['{0}_{1}_{2}-restored'.format(prefix, field, n - 1)]['DR'] / fidelity_data[
                    '{0}_{1}_{2}-restored'.format(prefix, field, n)]['DR']
            else:
                drratio = residual0['{0}_{1}_{2}-model'.format(prefix, field,
                                                               n - 1)]['DR'] / residual1['{0}_{1}_{2}-model'.format(prefix, field, n)]['DR']
            # Dynamic range is important,
            if any(cc == "DR" for cc in conv_crit):
                drweight = 0.8
            else:
                drweight = 0.
            # The other parameters should become smaller, hence n/n-1 < 1
            skewratio = residual1['SKEW'] / residual0['SKEW']
            # We care about the skewness when it is large. What is large?
            # NOTE(review): the divisor used is 0.01, i.e. weight 0.5 at SKEW=0.005
            # only if it were 0.005*2 — confirm the intended normalisation.
            # Let's go with 0.005 at that point it's weight is 0.5
            if any(cc == "SKEW" for cc in conv_crit):
                skewweight = residual1['SKEW'] / 0.01
            else:
                skewweight = 0.
            kurtratio = residual1['KURT'] / residual0['KURT']
            # Kurtosis goes to 3 so this way it counts for 0.5 when normal distribution
            if any(cc == "KURT" for cc in conv_crit):
                kurtweight = residual1['KURT'] / 6.
            else:
                kurtweight = 0.
            meanratio = residual1['MEAN'] / residual0['MEAN']
            # We only care about the mean when it is large compared to the noise
            # When it deviates from zero more than 20% of the noise this is a problem
            if any(cc == "MEAN" for cc in conv_crit):
                meanweight = residual1['MEAN'] / (residual1['STDDev'] * 0.2)
            else:
                meanweight = 0.
            noiseratio = residual1['STDDev'] / residual0['STDDev']
            # The noise should not change if the residuals are gaussian in n-1.
            # However, they should decline in case the residuals are non-gaussian.
            # We want a weight that goes to 0 in both cases
            if any(cc == "STDDEV" for cc in conv_crit):
                if residual0['KURT'] / 6. < 0.52 and residual0['SKEW'] < 0.01:
                    noiseweight = abs(1. - noiseratio)
                else:
                    # If declining then noiseratio is small and that's good, If rising it is a real bad thing.
                    # Hence we can just square the ratio
                    noiseweight = noiseratio
            else:
                noiseweight = 0.
            # A huge increase in DR can increase the skew and kurtosis significantly which can mess up the calculations
            if drratio < 0.6:
                skewweight = 0.
                kurtweight = 0.
            # These weights could be integrated with the ratios however while testing I
            # kept them separately such that the idea behind them is easy to interpret.
            # This combines to total weigth of 1.2+0.+0.5+0.+0. so our total should be LT 1.7*(1-tol)
            # it needs to be slightly lower to avoid keeping fitting without improvement
            # Ok that is the wrong philosophy. Their weighted mean should be less than 1-tol that means improvement.
            # And the weights control how important each parameter is.
            HolisticCheck = (drratio * drweight + skewratio * skewweight + kurtratio * kurtweight + meanratio * meanweight + noiseratio * noiseweight) \
                / (drweight + skewweight + kurtweight + meanweight + noiseweight)
            if (1 - tol) < HolisticCheck:
                caracal.log.info(
                    'Stopping criterion: ' + ' '.join([cc for cc in conv_crit]))
                caracal.log.info('The calculated ratios DR={:f}, Skew={:f}, Kurt={:f}, Mean={:f}, Noise={:f} '.format(
                    drratio, skewratio, kurtratio, meanratio, noiseratio))
                caracal.log.info('The weights used DR={:f}, Skew={:f}, Kurt={:f}, Mean={:f}, Noise={:f} '.format(
                    drweight, skewweight, kurtweight, meanweight, noiseweight))
                caracal.log.info('{:f} < {:f}'.format(
                    1 - tol, HolisticCheck))
                # If we stop we want change the final output model to the previous iteration
                global self_cal_iter_counter
                reset_cal += 1
                if reset_cal == 1:
                    self_cal_iter_counter -= 1
                else:
                    self_cal_iter_counter -= 2
                if self_cal_iter_counter < 1:
                    self_cal_iter_counter = 1
                return False
    # If we reach the number of iterations we want to stop.
    if n == cal_niter + 1:
        caracal.log.info(
            'Number of iterations to be done: {:d}'.format(cal_niter))
        return False
    # If no condition is met return true to continue
    return True
def image_quality_assessment(num, img_dir, field):
    """Queue an aimfast run assessing the image quality of iteration *num*.

    Selects the residual image, and either a tigger model (when
    extract_sources is enabled) or the restored image, and adds a
    'cab/aimfast' step that appends its statistics to the fidelity-results
    JSON read back by ``quality_check``/``get_aimfast_data``.

    Args:
        num (int): self-cal iteration being assessed.
        img_dir (str): directory containing the images/models of this iteration.
        field (str): field name used in the image file names.
    """
    # Check if more than two calibration iterations to combine successive models
    # Combine models <num-1> (or combined) to <num> creat <num+1>-pybdsm-combine
    # This was based on thres_pix but change to model as when extract_sources = True is will take the last settings
    if len(config['calibrate']['model']) >= num:
        model = config['calibrate']['model'][num - 1]
        if isinstance(model, str) and len(model.split('+')) == 2:
            mm = model.split('+')
            combine_models(mm, num, img_dir, field)
    else:
        model = str(num)
    # in case we are in the last round, imaging has made a model that is longer then the expected model column
    # Therefore we take this last model if model is not defined
    # NOTE(review): 'model' is assigned in both branches above, so this
    # NameError guard looks unreachable — confirm before relying on it.
    if num == cal_niter + 1:
        try:
            model.split()
        except NameError:
            model = str(num)
    step = 'aimfast'
    aimfast_settings = {
        "residual-image": '{0:s}/{1:s}_{2:s}_{3:d}{4:s}-residual.fits:output'.format(img_dir, prefix, field, num, mfsprefix),
        "normality-test": config[step]['normality_model'],
        "area-factor": config[step]['area_factor'],
        "label": "{0:s}_{1:s}_{2:d}".format(prefix, field, num),
        "outfile": "{0:s}_fidelity_results.json".format(prefix)
    }
    # if we run pybdsm we want to use the model as well. Otherwise we want to use the image.
    if pipeline.enable_task(config, 'extract_sources'):
        if config['calibrate'].get('output_data')[-1] == 'CORR_DATA':
            aimfast_settings.update(
                {"tigger-model": '{0:s}/{1:s}_{2:s}_{3:d}-pybdsm.lsm.html:output'.format(
                    img_dir, prefix, field, num)})
        else:
            # In the case of RES_DATA we need the combined models to compute the dynamic range.
            aimfast_settings.update(
                {"tigger-model": '{0:s}/{1:s}_{2:s}_{3:d}-pybdsm{4:s}.lsm.html:output'.format(
                    img_dir, prefix, field, num if num <= len(config['calibrate'].get('model'))
                    else len(config['calibrate'].get('model')),
                    '-combined' if len(model.split('+')) >= 2 else '')})
    else:
        # Use the image
        if config['calibrate']['output_data'][num - 1 if num <= len(config['calibrate']['output_data']) else -1] == "CORR_DATA" or \
                config['calibrate']['output_data'][num - 1 if num <= len(config['calibrate']['output_data']) else -1] == "CORRECTED_DATA":
            aimfast_settings.update({"restored-image": '{0:s}/{1:s}_{2:s}_{3:d}{4:s}-image.fits:output'.format(img_dir, prefix, field, num, mfsprefix)})
        else:
            try:
                im = config['calibrate']['output_data'].index("CORR_RES") + 1
            except ValueError:
                im = num
            aimfast_settings.update({"restored-image": '{0:s}/{1:s}_{2:s}_{3:d}{4:s}-image.fits:output'.format(img_dir,
                                                                                                               prefix, field, im, mfsprefix)})
    recipe.add('cab/aimfast', step,
               aimfast_settings,
               input=pipeline.output,
               output=pipeline.output,
               label="{0:s}_{1:d}:: Image fidelity assessment for {2:d}".format(step, num, num))
def aimfast_plotting(field):
    """Queue aimfast comparison plots of successive residuals and catalogs.

    Collects the per-iteration MFS residual FITS images and pybdsf sky
    models for *field*, pairs each iteration with the next, and adds up to
    three 'cab/aimfast' steps: model-vs-model, residual-vs-residual at
    random positions, and residual-vs-residual at source positions.

    Args:
        field (str): field name used in the image/model file names.
    """
    # (removed two unused locals from the previous revision: 'cont_dir'
    # and 'models' were assigned but never read.)
    # Get residuals to compare
    res_files = []
    residuals_compare = []
    for ii in range(1, cal_niter + 2):
        res_file = glob.glob("{0:s}/image_{1:d}/{2:s}_{3:s}_?-MFS-residual.fits".format(
            pipeline.continuum, ii, prefix, field))
        if res_file:
            res_files.append(res_file[0])
    res_files = sorted(res_files)
    # Pair each residual with its successor (flat list of consecutive pairs).
    for ii in range(0, len(res_files) - 1):
        residuals_compare.append('{0:s}:output'.format(
            res_files[ii].split(pipeline.output)[-1]))
        residuals_compare.append('{0:s}:output'.format(
            res_files[ii + 1].split(pipeline.output)[-1]))
    # Get models to compare
    model_files = []
    models_compare = []
    for ii in range(1, cal_niter + 2):
        model_file = glob.glob(
            "{0:s}/image_{1:d}/{2:s}_{3:s}_?-pybdsm.lsm.html".format(pipeline.continuum, ii, prefix, field))
        if model_file:
            model_files.append(model_file[0])
    model_files = sorted(model_files)
    for ii in range(0, len(model_files) - 1):
        models_compare.append('{0:s}:output'.format(
            model_files[ii].split(pipeline.output)[-1]))
        models_compare.append('{0:s}:output'.format(
            model_files[ii + 1].split(pipeline.output)[-1]))
    if len(model_files) > 1:
        step = "aimfast-compare-models"
        recipe.add('cab/aimfast', step,
                   {
                       "compare-models": models_compare,
                       "tolerance": config['aimfast']['radius']
                   },
                   input=pipeline.input,
                   output=pipeline.output,
                   label="Plotting model comparisons")
    if len(res_files) > 1:
        step = "aimfast-compare-random_residuals"
        recipe.add('cab/aimfast', step,
                   {
                       "compare-residuals": residuals_compare,
                       "area-factor": config['aimfast']['area_factor'],
                       "data-points": 100
                   },
                   input=pipeline.input,
                   output=pipeline.output,
                   label="Plotting random residuals comparisons")
    if len(res_files) > 1 and len(model_files) > 1:
        step = "aimfast-compare-source_residuals"
        recipe.add('cab/aimfast', step,
                   {
                       "compare-residuals": residuals_compare,
                       "area-factor": config['aimfast']['area_factor'],
                       "tigger-model": '{:s}:output'.format(model_files[-1].split(
                           pipeline.output)[-1])
                   },
                   input=pipeline.input,
                   output=pipeline.output,
                   label="Plotting source residuals comparisons")
    def aimfast_compare_online_catalog(field):
        """Queue an aimfast comparison of the local sky models against an
        online catalogue, then run the recipe immediately.

        Collects the pybdsm models produced in image_1 .. image_{cal_niter+1}
        for *field* and, if any are found, adds a single 'cab/aimfast'
        compare-online step and executes it.  Uses the enclosing worker's
        ``pipeline``, ``recipe``, ``config``, ``cal_niter`` and ``prefix``.
        """
        model_files = []
        # Get models to compare
        for ii in range(1, cal_niter + 2):
            # '?' matches the single-character iteration suffix in the filename.
            model_file = glob.glob(
                "{0:s}/image_{1:d}/{2:s}_{3:s}_?-pybdsm.lsm.html".format(
                    pipeline.continuum, ii, prefix, field))
            if model_file:
                # Strip the output prefix and tag the path for stimela.
                model_files.append(model_file[0].split(pipeline.output)[-1] + ':output')
        online_compare = sorted(model_files)
        if online_compare:
            step = "aimfast-compare-online_catalog"
            # NOTE(review): unlike the other aimfast steps in this worker,
            # this one passes pipeline.continuum (not pipeline.input) as the
            # cab input -- confirm that is intentional.
            recipe.add('cab/aimfast', step,
                       {
                           "compare-online": online_compare,
                           "online-catalog": config['aimfast']['online_catalog']['catalog_type'],
                       },
                       input=pipeline.continuum,
                       output=pipeline.output,
                       label="Plotting online source catalog comparisons")
            recipe.run()
            # Empty the job queue after execution.
            recipe.jobs = []
    def ragavi_plotting_cubical_tables():
        """Plot the self-cal CubiCal gain tables (B and D families) with ragavi.

        Globs the 'g-gains*B.casa' and 'g-gains*D.casa' tables under
        selfcal_products and queues one ragavi plotting step per family.
        Uses the enclosing worker's ``pipeline``, ``recipe``, ``config`` and
        ``prefix``.

        NOTE(review): both branches require len(...) > 1, so a single gain
        table is never plotted -- confirm '> 1' is intended rather than
        '>= 1'.  Also note the two branches strip the path prefix
        differently ('output/' vs pipeline.output) -- see inline comments.
        """
        B_tables = glob.glob('{0:s}/{1:s}/{2:s}/{3:s}'.format(pipeline.output,
                                                              get_dir_path(pipeline.continuum, pipeline), 'selfcal_products', 'g-gains*B.casa'))
        if len(B_tables) > 1:
            step = 'plot-btab'
            gain_table_name = [table.split('output/')[-1] for table in B_tables] # This probably needs changing?
            recipe.add('cab/ragavi', step,
                       {
                           "table": [tab + ":output" for tab in gain_table_name],
                           "gaintype": config['cal_cubical']['ragavi_plot']['gaintype'],
                           "field": config['cal_cubical']['ragavi_plot']['field'],
                           "htmlname": '{0:s}/{1:s}/{2:s}_self-cal_G_gain_plots'.format(get_dir_path(pipeline.diagnostic_plots,
                                                                                                     pipeline), 'selfcal', prefix)
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='{0:s}:: Plot gaincal phase : {1:s}'.format(step, ' '.join(B_tables)))
        D_tables = glob.glob('{0:s}/{1:s}/{2:s}/{3:s}'.format(pipeline.output,
                                                              get_dir_path(pipeline.continuum, pipeline), 'selfcal_products', 'g-gains*D.casa'))
        if len(D_tables) > 1:
            step = 'plot_dtab'
            # NOTE(review): here the prefix is stripped with pipeline.output,
            # while the B branch above uses the literal 'output/' -- these can
            # yield different relative paths; confirm which is correct.
            gain_table_name = [table.split(pipeline.output)[-1] for table in D_tables]
            recipe.add('cab/ragavi', step,
                       {
                           "table": [tab + ":output" for tab in gain_table_name],
                           "gaintype": config['cal_cubical']['ragavi_plot']['gaintype'],
                           "field": config['cal_cubical']['ragavi_plot']['field'],
                           "htmlname": '{0:s}/{1:s}/{2:s}_self-cal_D_gain_plots'.format(get_dir_path(pipeline.diagnostic_plots,
                                                                                                     pipeline), 'selfcal', prefix)
                       },
                       input=pipeline.input,
                       output=pipeline.output,
                       label='{0:s}:: Plot gain tables : {1:s}'.format(step, ' '.join(D_tables)))
# decide which tool to use for calibration
calwith = config['calibrate_with'].lower()
if calwith == 'meqtrees':
calibrate = calibrate_meqtrees
elif calwith == 'cubical':
calibrate = calibrate_cubical
# if we use the new two_step analysis aimfast has to be run
if config['cal_meqtrees']['two_step'] and calwith == 'meqtrees':
config['aimfast']['enable'] = True
# if we do not run pybdsm we always need to output the corrected data column
if not pipeline.enable_task(config, 'extract_sources'):
config['calibrate']['output_data'] = [k.replace(
'CORR_RES', 'CORR_DATA') for k in config['calibrate']['output_data']]
if pipeline.enable_task(config, 'aimfast'):
# If aimfast plotting is enabled run source finder
if config['aimfast']['plot']:
config['extract_sources']['enable'] = True
target_iter = 0
for target in all_targets:
mslist = ms_dict[target]
field = utils.filter_name(target)
global self_cal_iter_counter
self_cal_iter_counter = config['start_iter']
global reset_cal
reset_cal = 0
global trace_SN
trace_SN = []
global trace_matrix
trace_matrix = []
image_path = "{0:s}/image_{1:d}".format(
pipeline.continuum, self_cal_iter_counter)
# I think it is best to always define selfcal_products as it might be needed for transfer gains or restore
selfcal_products = "{0:s}/{1:s}".format(
pipeline.continuum, 'selfcal_products')
# When we do not start at iteration 1 we need to restore the data set
if self_cal_iter_counter != 1:
if not os.path.exists(image_path):
raise IOError(
"Trying to restore step {0:d} but the correct direcory ({1:s}) does not exist.".format(self_cal_iter_counter - 1, image_path))
restore(self_cal_iter_counter - 1, selfcal_products, mslist, enable_inter=False)
if not os.path.exists(image_path):
os.mkdir(image_path)
mask_key = config['image']['cleanmask_method'][self_cal_iter_counter - 1 if len(config['image']['cleanmask_method']) >= self_cal_iter_counter else -1]
if pipeline.enable_task(config, 'image'):
if mask_key == 'sofia':
image_path = "{0:s}/image_0".format(
pipeline.continuum, self_cal_iter_counter)
if not os.path.exists(image_path):
os.mkdir(image_path)
fake_image(target_iter, 0, get_dir_path(
image_path, pipeline), mslist, field)
sofia_mask(target_iter, 0, get_dir_path(
image_path, pipeline), field)
recipe.run()
recipe.jobs = []
config['image']['cleanmask_method'].insert(1, config['image']['cleanmask_method'][self_cal_iter_counter if len(config['image']['cleanmask_method']) > self_cal_iter_counter else -1])
image_path = "{0:s}/image_{1:d}".format(
pipeline.continuum, self_cal_iter_counter)
image(target_iter, self_cal_iter_counter, get_dir_path(
image_path, pipeline), mslist, field)
elif mask_key == 'breizorro':
image_path = "{0:s}/image_{1:d}".format(
pipeline.continuum, self_cal_iter_counter)
breizorro_mask(target_iter, self_cal_iter_counter, get_dir_path(
image_path, pipeline), field)
recipe.run()
recipe.jobs = []
image(target_iter, self_cal_iter_counter, get_dir_path(
image_path, pipeline), mslist, field)
else:
image(target_iter, self_cal_iter_counter, get_dir_path(
image_path, pipeline), mslist, field)
if pipeline.enable_task(config, 'extract_sources'):
extract_sources(target_iter, self_cal_iter_counter, get_dir_path(
image_path, pipeline), field)
if pipeline.enable_task(config, 'aimfast'):
image_quality_assessment(
self_cal_iter_counter, get_dir_path(image_path, pipeline), field)
while quality_check(self_cal_iter_counter, field, enable=pipeline.enable_task(config, 'aimfast')):
if pipeline.enable_task(config, 'calibrate'):
if not os.path.exists(selfcal_products):
os.mkdir(selfcal_products)
calibrate(target_iter, self_cal_iter_counter, selfcal_products,
get_dir_path(image_path, pipeline), mslist, field)
mask_key = config['image']['cleanmask_method'][self_cal_iter_counter if len(config['image']['cleanmask_method']) > self_cal_iter_counter else -1]
if mask_key == 'sofia' and self_cal_iter_counter != cal_niter + 1 and pipeline.enable_task(config, 'image'):
sofia_mask(target_iter, self_cal_iter_counter, get_dir_path(
image_path, pipeline), field)
recipe.run()
recipe.jobs = []
elif mask_key == 'breizorro' and self_cal_iter_counter != cal_niter + 1 and pipeline.enable_task(config, 'image'):
breizorro_mask(target_iter, self_cal_iter_counter,
get_dir_path(image_path, pipeline), field)
recipe.run()
recipe.jobs = []
self_cal_iter_counter += 1
image_path = "{0:s}/image_{1:d}".format(
pipeline.continuum, self_cal_iter_counter)
if not os.path.exists(image_path):
os.mkdir(image_path)
if pipeline.enable_task(config, 'image'):
image(target_iter, self_cal_iter_counter, get_dir_path(
image_path, pipeline), mslist, field)
if pipeline.enable_task(config, 'extract_sources'):
extract_sources(target_iter, self_cal_iter_counter, get_dir_path(
image_path, pipeline), field)
if pipeline.enable_task(config, 'aimfast'):
image_quality_assessment(
self_cal_iter_counter, get_dir_path(image_path, pipeline), field)
# Copy plots from the selfcal_products to the diagnotic plots IF calibrate OR transfer_gains is enabled
if pipeline.enable_task(config, 'calibrate') or pipeline.enable_task(config, 'transfer_apply_gains'):
selfcal_products = "{0:s}/{1:s}".format(
pipeline.continuum, 'selfcal_products')
plot_path = "{0:s}/{1:s}".format(
pipeline.diagnostic_plots, 'selfcal')
if not os.path.exists(plot_path):
os.mkdir(plot_path)
selfcal_plots = glob.glob(
"{0:s}/{1:s}*.png".format(selfcal_products, pipeline.prefix))
for plot in selfcal_plots:
shutil.copyfile(plot, '{0:s}/{1:s}'.format(plot_path, os.path.basename(plot)))
if pipeline.enable_task(config, 'transfer_apply_gains'):
mslist_out = ms_dict_tgain[target]
if (self_cal_iter_counter > cal_niter):
restore(
self_cal_iter_counter - 1, selfcal_products, mslist_out, enable_inter=True)
else:
restore(
self_cal_iter_counter, selfcal_products, mslist_out, enable_inter=True)
if pipeline.enable_task(config, 'aimfast'):
if config['aimfast']['plot']:
aimfast_plotting(field)
recipe.run()
# Empty job que after execution
recipe.jobs = []
if config['aimfast']['online_catalog']:
aimfast_compare_online_catalog(field)
recipe.run()
# Empty job que after execution
recipe.jobs = []
# Move the aimfast html plots
plot_path = "{0:s}/{1:s}".format(
pipeline.diagnostic_plots, 'selfcal')
if not os.path.exists(plot_path):
os.mkdir(plot_path)
aimfast_plots = glob.glob(
"{0:s}/{1:s}".format(pipeline.output, '*.html'))
for plot in aimfast_plots:
shutil.copyfile(plot, '{0:s}/{1:s}'.format(plot_path, os.path.basename(plot)))
os.remove(plot)
if pipeline.enable_task(config, 'calibrate'):
if config['cal_cubical']['ragavi_plot']['enable']:
ragavi_plotting_cubical_tables()
if pipeline.enable_task(config, 'restore_model'):
if config['restore_model']['model']:
num = config['restore_model']['model']
if isinstance(num, str) and len(num.split('+')) == 2:
mm = num.split('+')
if int(mm[-1]) > self_cal_iter_counter:
num = str(self_cal_iter_counter)
else:
extract_sources = len(config['extract_sources']['thr_isl'])
if extract_sources > 1:
num = '{:d}+{:d}'.format(self_cal_iter_counter -
1, self_cal_iter_counter)
else:
num = self_cal_iter_counter
if isinstance(num, str) and len(num.split('+')) == 2:
mm = num.split('+')
models = ['{0:s}/image_{1:s}/{2:s}_{3:s}_{4:s}-pybdsm.lsm.html\
:output'.format(get_dir_path(pipeline.continuum, pipeline),
m, prefix, field, m) for m in mm]
final = '{0:s}/image_{1:s}/{2:s}_{3:s}_final-pybdsm.lsm.html\
:output'.format(get_dir_path(pipeline.continuum, pipeline), mm[-1], prefix, field)
step = 'create-final_lsm-{0:s}-{1:s}'.format(*mm)
recipe.add('cab/tigger_convert', step,
{
"input-skymodel": models[0],
"append": models[1],
"output-skymodel": final,
"rename": True,
"force": True,
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Combined models'.format(step))
elif isinstance(num, str) and num.isdigit():
inputlsm = '{0:s}/image_{1:s}/{2:s}_{3:s}_{4:s}-pybdsm.lsm.html\
:output'.format(get_dir_path(pipeline.continuum, pipeline), num, prefix, field, num)
final = '{0:s}/image_{1:s}/{2:s}_{3:s}_final-pybdsm.lsm.html\
:output'.format(get_dir_path(pipeline.continuum, pipeline), num, prefix, field)
step = 'create-final_lsm-{0:s}'.format(num)
recipe.add('cab/tigger_convert', step,
{
"input-skymodel": inputlsm,
"output-skymodel": final,
"rename": True,
"force": True,
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Combined models'.format(step))
else:
raise ValueError(
"restore_model_model should be integer-valued string or indicate which models to be appended, eg. 2+3")
if config['restore_model']['clean_model']:
num = int(config['restore_model']['clean_model'])
if num > self_cal_iter_counter:
num = self_cal_iter_counter
conv_model = '{0:s}/image_{1:d}/{2:s}_{3:s}-convolved_model.fits\
:output'.format(get_dir_path(pipeline.continuum, pipeline), num, prefix, field)
recipe.add('cab/fitstool', 'subtract-model',
{
"image": ['{0:s}/image_{1:d}/{2:s}_{3:s}_{4:d}{5:s}-{6:s}.fits\
:output'.format(get_dir_path(pipeline.continuum, pipeline), num, prefix, target, num,
mfsprefix, im) for im in ('image', 'residual')],
"output": conv_model,
"diff": True,
"force": True,
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Make convolved model'.format(step))
with_cc = '{0:s}/image_{1:d}/{2:s}_{3:s}-with_cc.fits:output'.format(get_dir_path(pipeline.continuum,
pipeline), num, prefix, field)
recipe.add('cab/fitstool', 'add-cc',
{
"image": ['{0:s}/image_{1:d}/{2:s}_{3:s}_{4:d}{5:s}-image.fits:output'.format(get_dir_path(pipeline.continuum,
pipeline), num, prefix, field, num, mfsprefix), conv_model],
"output": with_cc,
"sum": True,
"force": True,
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Add clean components'.format(step))
recipe.add('cab/tigger_restore', 'tigger-restore',
{
"input-image": with_cc,
"input-skymodel": final,
"output-image": '{0:s}/image_{1:d}/{2:s}_{3:s}.fullrest.fits'.format(get_dir_path(pipeline.continuum,
pipeline), num, prefix, field),
"force": True,
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Add extracted skymodel'.format(step))
for i, msname in enumerate(mslist):
if pipeline.enable_task(config, 'flagging_summary'):
step = 'flagging_summary-selfcal-ms{0:d}'.format(i)
recipe.add('cab/flagstats', step,
{
"msname": msname,
"plot": True,
"outfile": ('{0:s}-{1:s}-'
'selfcal-summary-{2:d}.json').format(
prefix, wname, i),
"htmlfile": ('{0:s}-{1:s}-'
'selfcal-summary-plots-{2:d}.html').format(
prefix, wname, i)
},
input=pipeline.input,
output=pipeline.diagnostic_plots,
label='{0:s}:: Flagging summary ms={1:s}'.format(step, msname))
if pipeline.enable_task(config, 'transfer_model'):
image_path = "{0:s}/image_{1:d}".format(pipeline.continuum,
self_cal_iter_counter)
crystalball_model = config['transfer_model']['model']
mslist_out = ms_dict_tmodel[target]
if crystalball_model == 'auto':
crystalball_model = '{0:s}/{1:s}_{2:s}_{3:d}-sources.txt'.format(get_dir_path(image_path,
pipeline), prefix, field, self_cal_iter_counter)
for i, msname in enumerate(mslist_out):
step = 'transfer_model-field{0:d}-ms{1:d}'.format(target_iter, i)
recipe.add('cab/crystalball', step,
{
"ms": msname,
"sky-model": crystalball_model + ':output',
"row-chunks": config['transfer_model']['row_chunks'],
"model-chunks": config['transfer_model']['model_chunks'],
"within": sdm.dismissable(config['transfer_model']['within'] or None),
"points-only": config['transfer_model']['points_only'],
"num-sources": sdm.dismissable(config['transfer_model']['num_sources']),
"num-workers": sdm.dismissable(config['transfer_model']['num_workers']),
"memory-fraction": config['transfer_model']['mem_frac'],
},
input=pipeline.input,
output=pipeline.output,
label='{0:s}:: Transfer model {2:s} to ms={1:s}'.format(step, msname, crystalball_model))
target_iter += 1
i = 0
# Write and manage flag versions only if flagging tasks are being
# executed on these .MS files
if flag_main_ms:
for i, m in enumerate(all_msfile):
substep = 'save-{0:s}-ms{1:d}'.format(flags_after_worker, i)
manflags.add_cflags(pipeline, recipe, flags_after_worker, m,
cab_name=substep, overwrite=config['overwrite_flagvers'])
i += 1
if pipeline.enable_task(config, 'transfer_apply_gains'):
for j, m in enumerate(all_msfile_tgain):
substep = 'save-{0:s}-ms{1:d}'.format(flags_after_worker, i + j)
manflags.add_cflags(pipeline, recipe, flags_after_worker, m,
cab_name=substep, overwrite=config['overwrite_flagvers'])
| 133,533 | 52.692803 | 281 | py |
caracal | caracal-master/caracal/workers/utils/manage_flagsets.py | # -*- coding: future_fstrings -*-
import os
import sys
import json
import math
from caracal import log
def conflict(conflict_type, pipeline, wname, ms, config, flags_bw, flags_aw, read_version='version'):
    """Explain a flag-version conflict for *ms* in the log, then abort.

    Logs a detailed, human-readable description of how the requested
    rewind/overwrite interacts with the flag versions already saved for the
    MS, and always raises RuntimeError at the end.

    Args:
        conflict_type (str): one of 'would_overwrite_bw',
            'rewind_too_little' or 'rewind_to_non_existing'.
        pipeline: pipeline object (used by get_flags to find .flagversions).
        wname (str): name of the worker that hit the conflict.
        ms (str): measurement set name.
        config (dict): worker configuration; reads config['rewind_flags'].
        flags_bw (str): flag version saved before this worker ran.
        flags_aw (str): flag version saved after this worker ran.
        read_version (str): key in config['rewind_flags'] holding the
            requested version name.

    Raises:
        RuntimeError: always.
    """
    av_flagversions = get_flags(pipeline, ms)
    req_version = config['rewind_flags'][read_version]
    # 'auto' means: rewind to the version saved before this worker.
    if req_version == 'auto':
        req_version = flags_bw
    if conflict_type == 'would_overwrite_bw' or conflict_type == 'rewind_too_little':
        log.error('Flag version conflicts for {0:s} . If you are running Caracal on multiple targets'.format(ms))
        log.error('and/or .MS files please read the warning at the end of this message.')
        log.error('---------------------------------------------------------------------------------------------------')
        log.error('A worker named "{0:s}" was already run on the .MS file {1:s} with pipeline prefix "{2:s}".'.format(wname, ms, pipeline.prefix))
        if conflict_type == 'rewind_too_little':
            log.error('and you are rewinding to a later flag version: {0:s} .'.format(req_version))
        log.error('Running "{0:s}" again will attempt to overwrite existing flag versions, it might get messy.'.format(wname))
        log.error('Caracal will not overwrite the "{0:s}" flag versions unless you explicitely request that.'.format(wname))
        log.error('The current flag versions of this MS are (from the oldest to the most recent):')
        # List all saved versions, marking the ones relevant to this worker.
        for vv in av_flagversions:
            if vv == flags_bw:
                log.error('    {0:s} <-- (this worker)'.format(vv))
            elif vv == flags_aw:
                log.error('    {0:s} <-- (this worker)'.format(vv))
            elif config['rewind_flags']["enable"] and vv == req_version:
                log.error('    {0:s} <-- (rewinding to this version)'.format(vv))
            else:
                log.error('    {0:s}'.format(vv))
        log.error('You have the following options:')
        log.error('  1) If you are happy with the flags currently stored in the FLAG column of this MS and')
        log.error('     want to append new flags to them, change the name of this worker in the configuration')
        log.error('     file by appending "__n" to it (where n is an integer not already taken in the list')
        log.error('     above). The new flags will be appended to the FLAG column, and new flag versions will')
        log.error('     be added to the list above.')
        log.error('  2) If you want to discard the flags obtained during the previous run of "{0:s}" (and,'.format(wname))
        log.error('     necessarily, all flags obtained thereafter; see list above) reset the "{0:s}" worker'.format(wname))
        # NOTE(review): the .format(flags_bw) below is a no-op -- the string
        # contains no placeholder.
        log.error('     to its starting flag version by setting in the configuration file:'.format(flags_bw))
        log.error('       {0:s}:'.format(wname))
        log.error('         rewind_flags:')
        log.error('           enable: true')
        log.error('           mode: reset_worker')
        log.error('     This will rewind to the flag version {0:s}. You will loose all flags'.format(flags_bw))
        log.error('     appended to the FLAG column after that version, and take it from there.')
        log.error('  3) If you want to discard the flags obtained during the previous run of "{0:s}" and'.format(wname))
        log.error('     rewind to an even earlier flag version from the list above set:')
        log.error('       {0:s}:'.format(wname))
        log.error('         rewind_flags:')
        log.error('           enable: true')
        log.error('           mode: rewind_to_version')
        log.error('           {0:s}: <version_name>'.format(read_version))
        log.error('     This will rewind to the requested flag version. You will loose all flags appended')
        log.error('     to the FLAG column after that version, and take it from there.')
        log.error('  4) If you really know what you are doing, allow Caracal to overwrite flag versions by setting:')
        log.error('       {0:s}:'.format(wname))
        log.error('         overwrite_flagvers: true')
        log.error('     The worker "{0:s}" will be run again; the new flags will be appended to the current'.format(wname))
        log.error('     FLAG column (or to whatever flag version you are rewinding to); the flag version')
        log.error('     "{0:s}" will be overwritten and appended to the list above (or to'.format(flags_bw))
        log.error('     that list truncated to the flag version you are rewinding to).')
        log.error('---------------------------------------------------------------------------------------------------')
        log.error('Warning - Your choice will be applied to all .MS files being processed by the worker "{0:s}".'.format(wname))
        log.error('If using the rewind_flags mode "rewind_to_version", make sure to rewind to a flag version that')
        log.error('exists for all .MS files. If using the rewind_flags mode "reset_worker" each .MS file is taken')
        log.error('care of automatically and you do not need to worry about it.')
    elif conflict_type == 'rewind_to_non_existing':
        log.error('You have asked to rewind the flags of {0:s} to the version "{1:s}" but this version'.format(ms, req_version))
        log.error('does not exist. The available flag versions for this .MS file are:')
        for vv in av_flagversions:
            log.error('    {0:s}'.format(vv))
        log.error('Note that if you are running Caracal on multiple targets and/or .MS files you should rewind to a flag')
        log.error('version that exists for all of them.')
    raise RuntimeError('Flag version conflicts.')
def get_flags(pipeline, ms):
    """Return the names of the CASA flag versions saved for *ms*.

    Parses {pipeline.msdir}/{ms}.flagversions/FLAG_VERSION_LIST, whose lines
    start with the flag version name (further whitespace-separated fields are
    ignored).

    Args:
        pipeline: object exposing the ``msdir`` directory path.
        ms (str): measurement set name.

    Returns:
        list[str]: flag version names in file order (oldest first); empty
        list when the FLAG_VERSION_LIST file does not exist.
    """
    flaglist_file = "{folder:s}/{ms:s}.flagversions/FLAG_VERSION_LIST".format(folder=pipeline.msdir, ms=ms)
    if not os.path.exists(flaglist_file):
        return []
    flaglist = []
    with open(flaglist_file) as stdr:
        for line in stdr:  # iterate lazily instead of readlines()
            fields = line.split()
            # Guard: a blank/whitespace-only line used to raise IndexError.
            if fields:
                flaglist.append(fields[0])
    return flaglist
def delete_cflags(pipeline, recipe, flagname, ms, cab_name="rando_cab", label=""):
    """Queue flagmanager 'delete' jobs for *flagname* and every later version.

    With flagname == "all" every saved version is removed; with a known
    version name, that version and all versions saved after it are removed;
    with an unknown name nothing is queued.
    """
    versions = get_flags(pipeline, ms)
    if flagname == "all":
        doomed = versions
    elif flagname in versions:
        doomed = versions[versions.index(flagname):]
    else:
        # Unknown version: nothing to delete.
        return
    for num, version in enumerate(doomed):
        job_params = {
            "vis": ms,
            "mode": "delete",
            "versionname": version,
        }
        recipe.add("cab/casa_flagmanager", '{0:s}_{1:d}'.format(cab_name, num),
                   job_params,
                   input=pipeline.input,
                   output=pipeline.output,
                   label="{0:s}:: Delete flags (step {1:d})".format(label or cab_name, num))
def restore_cflags(pipeline, recipe, flagname, ms, cab_name="rando_cab", label="", merge=False):
    """Queue a flagmanager 'restore' of flag version *flagname*, if it exists.

    Logs a warning and queues nothing when the version is unknown.

    NOTE(review): the *merge* parameter is accepted but never used -- the
    restore always runs with merge mode "replace"; confirm whether callers
    expect *merge* to be honoured.
    """
    if flagname not in get_flags(pipeline, ms):
        log.warn("Flag version [{0:s}] could not be found".format(flagname))
        return
    restore_opts = {
        "vis": ms,
        "mode": "restore",
        "versionname": flagname,
        "merge": "replace",
    }
    recipe.add("cab/casa_flagmanager", cab_name, restore_opts,
               input=pipeline.input,
               output=pipeline.output,
               label="{0:s}:: Restoring flags to flag version [{1:s}]".format(label or cab_name, flagname))
def add_cflags(pipeline, recipe, flagname, ms, cab_name="rando_cab", label="", overwrite=False):
    """Queue saving of flag version *flagname* for *ms*.

    When the version already exists and *overwrite* is True, a delete job is
    queued first so the save does not collide with the old version.
    """
    already_saved = flagname in get_flags(pipeline, ms)
    if already_saved and overwrite:
        delete_cab = cab_name.replace('save', 'delete')
        recipe.add("cab/casa_flagmanager", delete_cab,
                   {
                       "vis": ms,
                       "mode": "delete",
                       "versionname": flagname,
                   },
                   input=pipeline.input,
                   output=pipeline.output,
                   label="{0:s}:: Delete flag version".format(label or delete_cab))
    recipe.add("cab/casa_flagmanager", cab_name,
               {
                   "vis": ms,
                   "mode": "save",
                   "versionname": flagname,
               },
               input=pipeline.input,
               output=pipeline.output,
               label="{0:s}:: Save flag version".format(label or cab_name))
def delete_flagset(pipeline, recipe, flagset, ms, clear_existing=True, cab_name="rando_cab", label=""):
    """Queue a job that removes bitflag *flagset* (and every later flagset) from *ms*.

    The embedded pycasacore script lists the MS's Owlcat bitflag flagsets
    and, if *flagset* exists, calls ``flag-ms.py --remove`` on it and on all
    flagsets recorded after it.  If the flagset does not exist the job
    prints a notice and exits cleanly.

    NOTE(review): *clear_existing* is accepted but never used here --
    confirm the intended contract with callers.
    """
    recipe.add("cab/pycasacore", cab_name, {
        "msname": ms,
        "script": """
import Owlcat.Flagger
import os
import subprocess
Owlcat.Flagger.has_purr = False
MSDIR = os.environ["MSDIR"]
ms = os.path.join(MSDIR, "{ms:s}")
fms = Owlcat.Flagger.Flagger(ms)
fms.add_bitflags()
if hasattr(fms.flagsets, "names"):
    names = fms.flagsets.names()
else:
    names = []
fms.close()
flagset = "{flagset:s}"
if names and flagset in names:
    idx = names.index(flagset)
    remove_us = names[idx:]
    subprocess.check_call(["flag-ms.py", "--remove", ",".join(remove_us), ms])
else:
    print("INFO::: Flagset does not exist. Will exit gracefully (exit status 0).")
""".format(ms=ms, flagset=flagset),
    },
        input=pipeline.input,
        output=pipeline.output, label=label or cab_name)
def clear_flagset(pipeline, recipe, flagset, ms, clear_existing=True, cab_name="rando_cab", label=""):
    """Queue a job that clears (unflags) the bitflag *flagset* of *ms*, if it exists.

    The embedded pycasacore script lists the MS's Owlcat bitflag flagsets
    and, if *flagset* is among them, calls ``flag-ms.py --unflag`` on it.
    A missing flagset is silently ignored.

    NOTE(review): *clear_existing* is accepted but never used here --
    confirm the intended contract with callers.
    """
    recipe.add("cab/pycasacore", cab_name, {
        "msname": ms,
        "script": """
import Owlcat.Flagger
import os
import subprocess
Owlcat.Flagger.has_purr = False
MSDIR = os.environ["MSDIR"]
ms = os.path.join(MSDIR, "{ms:s}")
fms = Owlcat.Flagger.Flagger(ms)
fms.add_bitflags()
if hasattr(fms.flagsets, "names"):
    names = fms.flagsets.names()
else:
    names = []
fms.close()
flagset = "{flagset:s}"
if flagset in names:
    subprocess.check_call(["flag-ms.py", "--unflag", flagset, ms])
""".format(ms=ms, flagset=flagset),
    },
        input=pipeline.input,
        output=pipeline.output, label=label or cab_name)
def update_flagset(pipeline, recipe, flagset, ms, clear_existing=True, cab_name="rando_cab", label=""):
    """Queue a job that creates or updates the bitflag *flagset* of *ms*.

    The embedded pycasacore script lists the MS's Owlcat bitflag flagsets
    and runs ``flag-ms.py --flag <flagset> --flagged-any +L`` -- with
    ``--create`` added when the flagset does not exist yet.

    NOTE(review): '--flagged-any +L' presumably imports the legacy FLAG
    column into the named bitflag set -- confirm against the Owlcat
    flag-ms.py documentation.  *clear_existing* is accepted but never used.
    """
    recipe.add("cab/pycasacore", cab_name, {
        "msname": ms,
        "script": """
import Owlcat.Flagger
import os
import subprocess
Owlcat.Flagger.has_purr = False
MSDIR = os.environ["MSDIR"]
ms = os.path.join(MSDIR, "{ms:s}")
fms = Owlcat.Flagger.Flagger(ms)
fms.add_bitflags()
if hasattr(fms.flagsets, "names"):
    names = fms.flagsets.names()
else:
    names = []
fms.close()
flagset = "{flagset:s}"
if flagset not in names:
    subprocess.check_call(["flag-ms.py", "--flag", flagset, "--flagged-any", "+L", "--create", ms])
else:
    subprocess.check_call(["flag-ms.py", "--flag", flagset, "--flagged-any", "+L", ms])
""".format(ms=ms, flagset=flagset),
    },
        input=pipeline.input,
        output=pipeline.output,
        label=label or cab_name)
| 10,969 | 42.019608 | 146 | py |
caracal | caracal-master/caracal/workers/utils/flag_Uzeros.py | #!/bin/bash
import gc
from caracal.workers.utils import remove_output_products
from caracal import log
import caracal
import shutil
import argparse
import time
from collections import OrderedDict
import matplotlib.dates as mdat
from matplotlib import gridspec
from matplotlib import rc
from matplotlib import pyplot as plt
import datetime
from casacore.measures import dq
import casacore.measures as measures
import casacore.images as images
import casacore.tables as tables
import stimela.recipe
import sys
import os
import numpy as np
import yaml
from caracal.utils.requires import extras
dm = measures.measures()
timeInit = time.time()
class UzeroFlagger:
global u, SkyCoord, astviz, WCS, Table, Column, fits, astasc
global optimize, scconstants, stats
    @extras(packages=["astropy", "scipy"])
    def __init__(self, config):
        """Store the worker *config* and import the optional dependencies.

        The astropy/scipy imports are deferred to construction time (guarded
        by the ``@extras`` decorator) so the module can be imported even when
        those optional packages are absent.

        NOTE(review): these imports bind names in the *local* scope of
        __init__; the ``global`` statements sit in the class body, where
        they do not affect this method's scope -- verify that the other
        methods (which use e.g. ``fits``, ``WCS`` as globals) actually see
        these names at call time.
        """
        from astropy import units as u
        from astropy.coordinates import SkyCoord
        import astropy.visualization as astviz
        from astropy.wcs import WCS
        from astropy.table import Table, Column
        from astropy.io import fits
        import astropy.io.ascii as astasc
        import scipy.optimize as optimize
        import scipy.constants as scconstants
        from scipy import stats
        # Full worker configuration dict; directory paths are added later by setDirs().
        self.config = config
def setDirs(self, output):
self.config['flag_u_zeros']['stripeDir'] = output + '/stripeAnalysis/'
if not os.path.exists(self.config['flag_u_zeros']['stripeDir']):
os.mkdir(self.config['flag_u_zeros']['stripeDir'])
self.config['flag_u_zeros']['stripeLogDir'] = self.config['flag_u_zeros']['stripeDir'] + 'logs/'
if not os.path.exists(self.config['flag_u_zeros']['stripeLogDir']):
os.mkdir(self.config['flag_u_zeros']['stripeLogDir'])
self.config['flag_u_zeros']['stripeMSDir'] = self.config['flag_u_zeros']['stripeDir'] + 'msdir/'
if not os.path.exists(self.config['flag_u_zeros']['stripeMSDir']):
os.mkdir(self.config['flag_u_zeros']['stripeMSDir'])
self.config['flag_u_zeros']['stripeCubeDir'] = self.config['flag_u_zeros']['stripeDir'] + 'cubes/'
if not os.path.exists(self.config['flag_u_zeros']['stripeCubeDir']):
os.mkdir(self.config['flag_u_zeros']['stripeCubeDir'])
self.config['flag_u_zeros']['stripeFFTDir'] = self.config['flag_u_zeros']['stripeDir'] + 'fft/'
if not os.path.exists(self.config['flag_u_zeros']['stripeFFTDir']):
os.mkdir(self.config['flag_u_zeros']['stripeFFTDir'])
self.config['flag_u_zeros']['stripePlotDir'] = self.config['flag_u_zeros']['stripeDir'] + 'plots/'
if not os.path.exists(self.config['flag_u_zeros']['stripePlotDir']):
os.mkdir(self.config['flag_u_zeros']['stripePlotDir'])
self.config['flag_u_zeros']['stripeTableDir'] = self.config['flag_u_zeros']['stripeDir'] + 'tables/'
if not os.path.exists(self.config['flag_u_zeros']['stripeTableDir']):
os.mkdir(self.config['flag_u_zeros']['stripeTableDir'])
self.config['flag_u_zeros']['stripeSofiaDir'] = self.config['flag_u_zeros']['stripeDir'] + 'sofiaOut/'
if not os.path.exists(self.config['flag_u_zeros']['stripeSofiaDir']):
os.mkdir(self.config['flag_u_zeros']['stripeSofiaDir'])
return
def saveFlags(self, pipeline, inVis, msdir, flagname):
recipe = stimela.Recipe('saveFlagZeros',
ms_dir=msdir,
singularity_image_dir=pipeline.singularity_image_dir,
log_dir=self.config['flag_u_zeros']['stripeLogDir'],
logfile=False, # no logfiles for recipes
)
recipe.JOB_TYPE = pipeline.container_tech
step = 'saveFlag'
recipe.add("cab/casa_flagmanager", step, {
"vis": inVis,
"mode": "save",
"versionname": flagname,
},
input=pipeline.input,
output=pipeline.output,
label="{0:s}:: Save flag version")
recipe.run()
def deleteFlags(self, pipeline, inVis, msdir, flagname):
recipe = stimela.Recipe('saveFlagZeros',
ms_dir=msdir,
singularity_image_dir=pipeline.singularity_image_dir,
log_dir=self.config['flag_u_zeros']['stripeLogDir'],
logfile=False, # no logfiles for recipes
)
recipe.JOB_TYPE = pipeline.container_tech
step = 'deleteFlag'
recipe.add("cab/casa_flagmanager", step, {
"vis": inVis,
"mode": "delete",
"versionname": flagname,
},
input=pipeline.input,
output=pipeline.output,
label="Delete flag version")
recipe.run()
def restoreFlags(self, pipeline, inVis, msdir, flagname):
recipe = stimela.Recipe('saveFlagZeros',
ms_dir=msdir,
singularity_image_dir=pipeline.singularity_image_dir,
log_dir=self.config['flag_u_zeros']['stripeLogDir'],
logfile=False, # no logfiles for recipes
)
recipe.JOB_TYPE = pipeline.container_tech
step = 'restoreFlag'
recipe.add("cab/casa_flagmanager", step, {
"vis": inVis,
"mode": "restore",
"versionname": flagname,
},
input=pipeline.input,
output=pipeline.output,
label="Restore flag version")
recipe.run()
def splitScans(self, pipeline, msdir, inVis, scanNums):
scanVisList = []
scanVisNames = []
for scan in scanNums:
baseVis = os.path.basename(inVis)
outVis = baseVis.split('.ms')[0] + '_scn' + str(scan) + '.ms'
# if os.path.exists(self.config['flag_u_zeros']['stripeMSDir']+outVis):
# shutil.rmtree(self.config['flag_u_zeros']['stripeMSDir']+outVis)
# if os.path.exists(self.config['flag_u_zeros']['stripeMSDir']+outVis+'.flagversions'):
# shutil.rmtree(self.config['flag_u_zeros']['stripeMSDir']+outVis+'.flagversions')
remove_output_products((outVis, outVis + '.flagversions'), directory=self.config['flag_u_zeros']['stripeMSDir'])
recipe = stimela.Recipe('flagUzerosMST',
ms_dir=msdir,
singularity_image_dir=pipeline.singularity_image_dir,
log_dir=self.config['flag_u_zeros']['stripeLogDir'],
logfile=False, # no logfiles for recipes
)
recipe.JOB_TYPE = pipeline.container_tech
step = 'splitScans'
recipe.add('cab/casa_mstransform',
step,
{"msname": baseVis,
"outputvis": outVis + ":output",
"datacolumn": 'data',
"scan": str(scan),
},
input=msdir,
output=self.config['flag_u_zeros']['stripeMSDir'],
label='{0:s}:: Image Line'.format(step))
recipe.run()
scanVisList.append(self.config['flag_u_zeros']['stripeMSDir'] + outVis)
scanVisNames.append(outVis)
caracal.log.info("All Scans splitted")
return scanVisList, scanVisNames
def gaussian(self, x, cent, amp, sigma):
"""
Gaussian function
Input:
cent (float): centre
amp (float) : amplitude
sigma (float) : sigma
Return:
gaussian() Gaussian
"""
return amp * np.exp(-0.5 * np.power((x - cent) / sigma, 2))
def convToStokesI(self, data, flags):
stflags = np.logical_not(flags).astype(float)
# if polarisation is i, then take either average or single value, flag the rest
# Calculate stokes i, reduce the number of polarizations to one, flag if not at least one pol is available
with np.errstate(divide='ignore', invalid='ignore'):
data = (data * np.logical_not(flags)) / stflags
flags = stflags < 1.
data[flags] = np.nan
return data, flags
def makeCube(self, pipeline, msdir, inVis, outCubePrefix, kind='scan'):
robust = self.config['flag_u_zeros']['robust']
imsize = int(self.config['flag_u_zeros']['imsize'])
cell = self.config['flag_u_zeros']['cell']
chanMin = int(self.config['flag_u_zeros']['chans'][0])
chanMax = int(self.config['flag_u_zeros']['chans'][1])
recipe = stimela.Recipe('flagUzeros',
ms_dir=msdir,
singularity_image_dir=pipeline.singularity_image_dir,
log_dir=self.config['flag_u_zeros']['stripeLogDir'],
logfile=False, # no logfiles for recipes
)
recipe.JOB_TYPE = pipeline.container_tech
# print(inVis,outCubePrefix)
if kind == 'scan':
chMin = 0
chMax = chanMax - chanMin
else:
chMin = chanMin
chMax = chanMax
# imsize=400,scale=20.asec
line_image_opts = {
"msname": inVis,
"prefix": outCubePrefix,
"npix": imsize,
"scale": cell,
"weight": 'briggs {0:.3f}'.format(robust),
"channelsout": 1,
"channelrange": [chanMin, chanMax],
"niter": 0,
"gain": 0.2,
"mgain": 0.85,
"auto-threshold": 10.0,
"multiscale": False,
"multiscale-scale-bias": 0.6,
"no-update-model-required": True,
"auto-threshold": 0.5,
"auto-mask": 10.0,
"gain": 0.2,
}
if self.config['flag_u_zeros']['taper']:
line_image_opts.update({"taper-gaussian": str(self.config['flag_u_zeros']['taper'])})
step = 'makeCube'
recipe.add('cab/wsclean',
step, line_image_opts,
input=pipeline.input,
output=self.config['flag_u_zeros']['stripeCubeDir'],
label='{0:s}:: Image Line'.format(step))
recipe.run()
# cmd = """singularity exec /idia/software/containers/wsclean-v3.0.simg wsclean -name {outCubePrefix} -j 64 -mem 100 -no-update-model-required -weight briggs {robust} -taper-gaussian {taper} -size {imsize} {imsize} -scale {cell}asec -channels-out 1 -pol I -channel-range {chanmin} {chanmax} -niter 0 -auto-threshold 0.5 -auto-mask 10.0 -gain 0.2 -mgain 0.85 -multiscale-scale-bias 0.6 -padding 1.2 -quiet {inVis}""".format(
# outCubePrefix=outCubePrefix,robust=robust,
# taper=taper,imsize=imsize,cell=cell,chanmin=chMin,chanmax=chMax,inVis=inVis)
# # os.system("singularity exec /idia/software/containers/wsclean-v3.0-idg.simg wsclean -name {outCubePrefix} -j 64 -mem 100 -no-update-model-required -weight briggs {robust} -taper-gaussian {taper} -size {imsize} {imsize} -scale {cell}asec -channels-out 1 -pol I -channel-range {chanmin} {chanmax} -niter 0 -auto-threshold 0.5 -auto-mask 10.0 -gain 0.2 -mgain 0.85 -multiscale-scale-bias 0.6 -padding 1.2 -quiet {inVis}".format(outCubePrefix=outCubePrefix,robust=robust,taper=taper,imsize=imsize,cell=cell,chanmin=chMin,chanmax=chMax,inVis=inVis))
# caracal.log.info("\t-weight briggs {} -taper-gaussian {} -size {} {} -scale {}asec -channels-out 1 -channel-range {} {}".format(robust,taper,imsize,imsize,cell,chMin,chMax))
# os.system(cmd)
# else: #cell =2.asec imsize=3600
# cmd = """singularity exec /idia/software/containers/wsclean-v3.0.simg wsclean -name {outCubePrefix} -j 64 -mem 100 -no-update-model-required -weight briggs {robust} -size {imsize} {imsize} -scale {cell}asec -channels-out 1 -pol I -channel-range {chanmin} {chanmax} -niter 0 -auto-threshold 0.5 -auto-mask 10.0 -gain 0.2 -mgain 0.85 -multiscale-scale-bias 0.6 -padding 1.2 -quiet {inVis}""".format(
# outCubePrefix=outCubePrefix,robust=robust,taper=taper,imsize=imsize,cell=cell,chanmin=chMin,chanmax=chMax,inVis=inVis)
# caracal.log.info("\t-weight briggs {} -taper-gaussian {} -size {} {} -scale {}asec -channels-out 1 -channel-range {} {}".format(robust,taper,imsize,imsize,cell,chMin,chMax))
# os.system(cmd)
# os.system("singularity exec /idia/software/containers/wsclean-v3.0-idg.simg wsclean -name {outCubePrefix} -j 64 -mem 100 -no-update-model-required -weight briggs {robust} -size {imsize} {imsize} -scale {cell}asec -channels-out 1 -pol I -channel-range {chanmin} {chanmax} -niter 0 -auto-threshold 0.5 -auto-mask 10.0 -gain 0.2 -mgain 0.85 -multiscale-scale-bias 0.6 -padding 1.2 -quiet {inVis}".format(outCubePrefix=outCubePrefix,robust=robust,taper=taper,imsize=imsize,cell=cell,chanmin=chanMin,chanmax=chanMax,inVis=inVis))
caracal.log.info("Image Done")
# caracal.log.info("----------------------------------------------------")
return 0
# def makeFFT(self, inCube,outFFT):
def makeFFT(self, inCube):
    """Compute the 2-D amplitude FFT of a FITS image.

    Opens ``inCube``, takes ``abs(fftshift(fft2(...)))`` of the squeezed
    image plane, and builds a minimal UV-plane header whose cell size is
    the inverse of the image extent in radians.

    Returns
    -------
    (numpy.ndarray, astropy.io.fits.Header)
        FFT amplitude grid and the matching UV header (axis units: lambda).
    """
    with fits.open(inCube) as hdul:
        hdu = hdul[0]
        dFFT = np.abs(np.fft.fftshift(np.fft.fft2(np.squeeze(hdu.data))))
        hdr = fits.Header()
        # One UV axis per image axis; reference pixel at the grid centre.
        for ax, ctype in ((1, 'UU---SIN'), (2, 'VV---SIN')):
            npix = hdu.header['NAXIS%d' % ax]
            hdr['CTYPE%d' % ax] = ctype
            hdr['CDELT%d' % ax] = 1 / (np.deg2rad(npix * hdu.header['CDELT%d' % ax]))
            hdr['CRVAL%d' % ax] = 0
            hdr['CRPIX%d' % ax] = npix / 2
            hdr['CUNIT%d' % ax] = 'lambda'
        caracal.log.info('\tFFT cell size = {0:.2f}'.format(hdr['CDELT2']))
        caracal.log.info("FFT Done")
        gc.collect()
        return dFFT, hdr
def plotAll(self, fig, gs, NS, kk, outCubeName, inFFTData, inFFTHeader, galaxy, track, scan, percent, common_vmax, ctff, type=None):
    """Draw one row of the diagnostic grid: dirty image (left) + FFT zoom (right).

    Row ``kk`` of gridspec ``gs`` (``NS`` rows total) gets the dirty image of
    ``outCubeName`` with an rms annotation, and a +/-2000 lambda zoom of the
    FFT amplitude grid ``inFFTData``/``inFFTHeader``.  When ``type ==
    'postFlag'`` the flagged percentage ``percent`` is also annotated
    (``percent`` may be None otherwise -- it is only read in that branch).
    If ``common_vmax`` is 0 it is initialised from the 99th percentile of the
    FFT zoom, so subsequent rows share the same colour scale; a nonzero
    ``ctff`` is drawn as a red contour level.

    Returns the figure and the (possibly updated) ``common_vmax``.
    """
    # NOTE(review): fitsdata is never closed; consider a context manager.
    fitsdata = fits.open(outCubeName)
    fitsim = fitsdata[0].data[0, 0]
    fitshdr = fitsdata[0].header
    fitswcs = WCS(fitshdr).sub(2)
    rms1 = np.std(fitsim)
    ax = fig.add_subplot(gs[kk, 0], projection=fitswcs)
    ax.imshow(fitsim, cmap='Greys', vmin=-rms1, vmax=2 * rms1)
    # Scan label only for per-scan panels (scan 0 marks the full-MS panel).
    if scan != 0:
        ax.annotate("Scan: " + str(scan) + r" rms = " + str(np.round(rms1 * 1e6, 3)) + r" $\mu$Jyb$^{-1}$", xy=(0.05, 0.95), xycoords='axes fraction', horizontalalignment='left', verticalalignment='top', backgroundcolor='w', fontsize=12)
    else:
        ax.annotate("rms = " + str(np.round(rms1 * 1e6, 3)) + r" $\mu$Jyb$^{-1}$", xy=(0.05, 0.95), xycoords='axes fraction', horizontalalignment='left', verticalalignment='top', backgroundcolor='w', fontsize=12)
    if type == 'postFlag':
        ax.annotate(r"Flags {percent} $\%$".format(percent=str(np.round(percent, 2))), xy=(0.95, 0.05), xycoords='axes fraction', horizontalalignment='right', verticalalignment='bottom', backgroundcolor='w', fontsize=12)
    lon = ax.coords[0]
    lat = ax.coords[1]
    # Tick spacing: 2 min of RA (hourangle) used for both axes.
    c = SkyCoord('00:02:00.', '00:01:00.0', unit=(u.hourangle, u.deg))
    lon.set_ticks(spacing=c.ra.degree * u.degree)
    lat.set_ticks(spacing=c.ra.degree * u.degree)
    lon.set_auto_axislabel(False)
    lat.set_auto_axislabel(False)
    lon.set_ticklabel(exclude_overlapping=True)
    lat.set_ticklabel(exclude_overlapping=True)
    # NOTE(review): NS / 2 is a float in Python 3, so for odd NS these
    # equality tests never match -- possibly NS // 2 was intended.  Both
    # branches below also call set_ticklabel_visible(True), so only the
    # axis label actually differs.
    if kk == NS / 2 or kk == NS / 2 + 1 or (kk == 0 and NS == 1):
        lat.set_axislabel(r'Dec (J2000)')
        lat.set_ticklabel_visible(True)
    else:
        lat.set_ticklabel_visible(True)
    if kk == NS - 1:
        lon.set_axislabel(r'RA (J2000)')
        lon.set_ticklabel_visible(True)
    else:
        lon.set_ticklabel_visible(False)
    udelt = inFFTHeader['CDELT1']
    vdelt = inFFTHeader['CDELT2']
    ax.set_autoscale_on(False)
    # Right-hand panel: central +/-2000 lambda cut-out of the FFT amplitude.
    ax2 = fig.add_subplot(gs[kk, 1])
    w = int(2000. / vdelt)
    cx, cy = inFFTData.shape[0] // 2, inFFTData.shape[1] // 2
    extent = [-w * udelt, w * udelt, -w * vdelt, w * vdelt]
    if common_vmax == 0:
        common_vmax = np.nanpercentile(inFFTData[cx - w:cx + w + 1, cy - w:cy + w + 1], 99)
    fftim = ax2.imshow(inFFTData[cx - w:cx + w + 1, cy - w:cy + w + 1], vmin=0, vmax=common_vmax, extent=extent, origin='upper')
    if ctff:
        ax2.contour(inFFTData[cx - w:cx + w + 1, cy - w:cy + w + 1], levels=[ctff,], colors=['r'], linewidths=[1,], extent=extent, origin='upper')
    ax2.yaxis.set_label_position("right")
    ax2.yaxis.tick_right()
    ax2.yaxis.set_ticks_position('right')
    if kk == NS - 1:
        ax2.set_xlabel(r'u [$\lambda$]')
        ax2.set_xticks([-1500, 0, 1500])
    else:
        ax2.set_xticks([])
    if kk == NS / 2 or kk == NS / 2 + 1 or (kk == 0 and NS == 1):
        ax2.set_ylabel(r'v [$\lambda$]')
    ax2.set_xlim(-2000, 2000)
    ax2.set_ylim(-2000, 2000)
    ax2.set_yticks([-1500, 0, 1500])
    ax2.set_autoscale_on(False)
    fig.subplots_adjust(left=0.05, bottom=0.05, right=0.97, top=0.97, wspace=0, hspace=0)
    return fig, common_vmax
def baselineStats(self, galaxy, flags, uvw, avspecchan):
    """Plot the fraction of u=0 ('stripe') flags per baseline-length bin.

    Parameters
    ----------
    galaxy : str
        Target name, used in the output plot filename.
    flags : ndarray, bool, shape (nrow, nchan, ncorr)
        Flag cube; only ``flags[:, 0, 0]`` is used to select flagged rows.
    uvw : ndarray, shape (nrow, 3)
        UVW coordinates of each row.
    avspecchan : float
        Average channel frequency [Hz].

    Side effects: saves two .npy dumps of the baseline-length arrays and a
    PNG histogram plot in the configured stripePlotDir.  Returns None.
    """
    # NOTE(review): casacore UVW is normally already in metres; the extra
    # multiplication by the wavelength assumes uvw is in wavelengths --
    # confirm against the caller.
    lambdal = scconstants.c / avspecchan
    index = flags[:, 0, 0]
    flagCoords = uvw[index, :]
    baseFlags = np.sqrt(np.power(flagCoords[:, 0], 2) + np.power(flagCoords[:, 1], 2) + np.power(flagCoords[:, 2], 2)) * lambdal
    baseAll = np.sqrt(np.power(uvw[:, 0], 2) + np.power(uvw[:, 1], 2) + np.power(uvw[:, 2], 2)) * lambdal
    figBase = plt.figure(figsize=(7.24409, 7.24409), constrained_layout=False)
    gsBase = gridspec.GridSpec(nrows=1, ncols=1, figure=figBase, hspace=0, wspace=0.0)
    axBase = figBase.add_subplot(gsBase[0, 0])
    axBase.yaxis.set_label_position("left")
    axBase.yaxis.tick_right()
    axBase.yaxis.set_ticks_position('left')
    axBase.set_xlabel(r'Baseline Length [m]')  # BUGFIX: label typo 'Lenght'
    axBase.set_ylabel(r'Percentage of u=0 flags')
    bins = [0, 5, 25, 50, 100, 250, 500, 1000, 8000]
    nFlags, binEdgesFlags = np.histogram(baseFlags, bins)
    nAll, binEdgesAll = np.histogram(baseAll, bins)
    # NOTE(review): empty bins in nAll yield a divide-by-zero warning and
    # NaN percentages; matplotlib simply skips NaN points.
    nPerc = nFlags / nAll * 100.
    axBase.set_ylim(0, 100)
    np.save(self.config['flag_u_zeros']['stripePlotDir'] + 'baseflags.npy', baseFlags)
    np.save(self.config['flag_u_zeros']['stripePlotDir'] + 'baseAll.npy', baseAll)
    axBase.plot(bins[:-1], nPerc, 'k-', drawstyle='steps-pre')
    axBase.set_autoscale_on(False)
    outPlot = "{0}baselines_plot_{1}.png".format(self.config['flag_u_zeros']['stripePlotDir'], galaxy)
    # BUGFIX: matplotlib's savefig() has no 'overwrite' keyword (recent
    # versions raise on unknown kwargs), so it is dropped here.
    figBase.savefig(outPlot, bbox_inches='tight', dpi=200)  # save the figure to file
    plt.close(figBase)
def cleanUp(self, galaxy):
    """Delete the scratch products of the stripe-flagging run.

    Removes the image-cube, FFT and per-scan MS scratch directories
    configured under ``flag_u_zeros`` (if they exist) and logs each step.
    Returns 0.
    """
    caracal.log.info("====================================================")
    caracal.log.info("Cleanup")
    scratch_dirs = (
        ("Deleting images", 'stripeCubeDir'),
        ("Deleting FFTs", 'stripeFFTDir'),
        ("Deleting .ms scans", 'stripeMSDir'),
    )
    for message, cfg_key in scratch_dirs:
        caracal.log.info(message)
        scratch = self.config['flag_u_zeros'][cfg_key]
        if os.path.exists(scratch):
            shutil.rmtree(scratch)
    caracal.log.info("Cleanup done")
    return 0
def saveFFTTable(self, inFFT, inFFTHeader, visName, U, V, galaxy, msid, track, scan, el, az, method, threshold, dilateU, dilateV, makePlots):
    """Tabulate the FFT amplitudes, pick a cutoff, and flag the scan MS.

    Builds a flat (BIN_ID, U, V, Amp) record array from the FFT grid,
    derives an amplitude cutoff (MAD statistics when ``method ==
    'madThreshold'``, otherwise a high percentile that depends on the
    'taper' config flag), selects all UV cells at or above the cutoff and
    hands them to flagQuartile() to flag the matching MS rows.

    Returns
    -------
    (list, ndarray, float, float)
        statsArray [galaxy, track, scan, low-u fraction, cutoff, el, az],
        the boolean stripe-flag cube for this scan, the flagged percentage,
        and the cutoff used.
    """
    # NOTE(review): xCol/yCol/tabGen are only used to size tabArr; the
    # structured tabGen built below is never read again.
    xCol = np.zeros([len(U) * len(V)])
    yCol = np.zeros([len(U) * len(V)])
    BIN_ID = np.arange(0, len(U) * len(V), 1)
    tabGen = np.column_stack([BIN_ID, xCol, yCol])
    dt = np.dtype([('BIN_ID', np.int32), ('U', np.int32), ('V', np.int32)])
    tabGen = np.array(list(map(tuple, tabGen)), dtype=dt)
    namBins = tuple(['BIN_ID', 'U', 'V', 'Amp'])
    tabArr = np.zeros([len(tabGen)], dtype={'names': namBins, 'formats': ('i4', 'f8', 'f8', 'f8')})
    # Flatten the FFT grid: row index j maps to V, column index i to U.
    indexBin = 0
    for i in range(0, len(U)):
        for j in range(0, len(V)):
            tabArr['BIN_ID'][indexBin] = indexBin
            tabArr['U'][indexBin] = U[i]
            tabArr['V'][indexBin] = V[j]
            tabArr['Amp'][indexBin] = inFFT[j, i]
            indexBin += 1
    hdr = fits.Header()
    # NOTE(review): presumably astropy appends COMMENT cards rather than
    # overwriting, so both comments survive -- but this header (and the
    # empty_primary HDU below) is never written to disk.
    hdr['COMMENT'] = "This is the table of the FFT"
    hdr['COMMENT'] = "Ext 1 = FFT table"
    # Cutoff selection: robust MAD statistics or a fixed percentile.
    if method == 'madThreshold':
        cutoff = self.sunBlockStats(inFFT, galaxy, msid, track, scan, makePlots, 'mad', threshold, ax=None, title='', verb=True)
    else:
        if self.config['flag_u_zeros']['taper']:
            cutoff = np.nanpercentile(tabArr['Amp'], 99.99)
        else:
            cutoff = np.nanpercentile(tabArr['Amp'], 99.9999)
    empty_primary = fits.PrimaryHDU(header=hdr)
    # NOTE(review): inFFT1D is computed but never used.
    inFFT1D = np.nansum(inFFT, axis=0)
    if cutoff > np.nanmax(tabArr['Amp']):
        willflag = False
        caracal.log.warn("Cutoff is larger than max amplitude. Notihng will be flagged.")
        # cutoff = tabArr['Amp'].max()
    else:
        willflag = True
    # This is where we decide where to flag
    index = np.where(tabArr['Amp'] >= cutoff)[0]
    # And this is where we apply that flagging selection to the U,V,Amp arrays
    newtab = Table(names=['u', 'v', 'amp'], data=(tabArr['U'][index], tabArr['V'][index], tabArr['Amp'][index]))
    # Some stats ...
    # Fraction of selected cells at u <= 60 lambda (the stripe signature).
    if willflag:
        statsArray = [galaxy, track, scan, len(np.where(newtab['u'] <= 60.0)[0]) / len(newtab['u']), cutoff, el, az]
    else:
        statsArray = [galaxy, track, scan, 0., cutoff, el, az]
    caracal.log.info("FFT Table saved")
    caracal.log.info("Flagging scan".format(scanNumber=str(scan), galaxy=galaxy, track=track))
    # the following scanFlags are the stripe flags for this scan
    scanFlags, percent = self.flagQuartile(visName, newtab, inFFTHeader, method, dilateU, dilateV, qrtdebug=False)
    return statsArray, scanFlags, percent, cutoff
def plotSunblocker(self, bin_centers, bin_edges, npoints, widthes, average, stdev, med, mad, popt, hist, threshold, galaxy, msid, track, scan, cut):
    """Plot the FFT-amplitude histogram of sunBlockStats with its Gaussian models.

    Overlays three Gaussians on the histogram -- mean/std (green), fitted
    (red), and median/MAD (blue) -- plus a vertical line at the adopted
    cutoff ``cut``.  Saves the figure as <stripePlotDir><msid>_<scan>_fftstats.png
    and returns 0.

    NOTE(review): the ``threshold`` and ``track`` parameters are accepted
    but never used in this routine.
    """
    caracal.log.info("\tPlotting stats")
    figS = plt.figure(figsize=(7.24409, 7.24409), constrained_layout=False)
    figS.set_tight_layout(False)
    gsS = gridspec.GridSpec(nrows=1, ncols=1, figure=figS, hspace=0, wspace=0.0)
    # x-grid extended half a bin beyond the outermost bin centres.
    showgouse = np.linspace(1.5 * bin_centers[0] - 0.5 * bin_centers[1], 1.5 * bin_centers[-1] - 0.5 * bin_centers[-2], 200)
    # Gaussian from mean/std, normalised to the histogram area.
    calculated = self.gaussian(showgouse, average, widthes[0] * npoints / (np.sqrt(2 * np.pi) * stdev), stdev)
    # mad
    madded = self.gaussian(showgouse, med, widthes[0] * npoints / (np.sqrt(2 * np.pi) * mad), mad)
    # In case of using only stats, this is right on top
    fitted = self.gaussian(showgouse, popt[0], popt[1], popt[2])
    ax = figS.add_subplot(gsS[0, 0])
    ax.bar(bin_centers, hist, width=widthes, color='y', edgecolor='y')
    ax.plot(showgouse, calculated, 'g-')
    ax.plot(showgouse, fitted, 'r-')
    ax.plot(showgouse, madded, 'b-')
    # Adopted flagging cutoff.
    ax.axvline(x=cut, linewidth=2, color='k')
    ax.set_xlim(min(bin_edges), max(bin_edges))
    plt.legend(['avg,std: {0:.1e}, {1:.1e}'.format(average, stdev), 'fit: {0:.1e}, {1:.1e}'.format(popt[0], popt[2]), 'med,mad: {0:.1e}, {1:.1e}'.format(med, mad)], loc='upper right')
    ax.set_ylim(0.5,)
    plt.yscale('log')
    # NOTE(review): format slots {2}/{3} are msid/scan; 'galaxy' ({1}) is
    # passed but not used in the filename.
    outPlot = "{0}{2}_{3}_fftstats.png".format(self.config['flag_u_zeros']['stripePlotDir'], galaxy, msid, scan)
    figS.savefig(outPlot, bbox_inches='tight', dpi=200)  # save the figure to file
    plt.close(figS)
    caracal.log.info("\tPlot Done")
    return 0
def sunBlockStats(self, inFFTData, galaxy, msid, track, scan, makePlots, threshmode='mad', threshold=300., ax=None, title='', verb=True):
    """Derive a flagging cutoff from the pixel statistics of an FFT image.

    Builds a histogram of the finite FFT amplitudes, fits a Gaussian to it,
    and returns the scalar cutoff ``centre + threshold * spread`` where
    centre/spread depend on ``threshmode``:

    - 'abs': 0 / 1 (threshold used as an absolute value)
    - 'std': mean / standard deviation
    - 'mad': median / MAD (sigma-scaled); also recentres the diagnostic plot
    - 'fit': centre / sigma of the fitted Gaussian

    ``ax`` and ``title`` are accepted for interface compatibility but unused.

    Returns
    -------
    float
        The cutoff.  NOTE(review): when statistics cannot be computed an
        all-False boolean array (input shape) is returned instead, which
        callers treat as a scalar -- confirm this fallback is ever hit.
    """
    av = np.copy(inFFTData)
    # Average data, then look for shape
    # av = np.nanmean(ampar,axis=1)
    npoints = inFFTData[np.isfinite(inFFTData)].size
    if verb:
        caracal.log.info('\tFFT grid has {:d} nonzero points.'.format(npoints))
    if npoints < 3:
        caracal.log.info('\tThis is not sufficient for any statistics, returning no flags.')
        return np.zeros(av.shape, dtype=bool)
    # Find average and standard deviation
    average = np.nanmean(inFFTData)
    stdev = np.nanstd(inFFTData)
    # BUGFIX: the original tested 'x == np.nan', which is always False
    # (NaN never compares equal), so these guards could never trigger.
    if np.isnan(average):
        caracal.log.info('\tCannot calculate average, returing no flags.')
        return np.zeros(av.shape, dtype=bool)
    if np.isnan(stdev):
        caracal.log.info('\tCannot calculate standard deviation, returing no flags.')
        return np.zeros(av.shape, dtype=bool)
    med = np.nanmedian(inFFTData)
    mad = stats.median_abs_deviation(inFFTData, scale='normal', nan_policy='omit', axis=None)
    # Build a histogram
    hist, bin_edges = np.histogram(inFFTData[np.isfinite(inFFTData)], bins=int(np.sqrt(npoints)) + 1)
    bin_centers = bin_edges[:-1] + 0.5 * (bin_edges[1:] - bin_edges[:-1])
    widthes = bin_edges[1:] - bin_edges[:-1]
    # Find maximum in histogram
    maxhi = np.amax(hist)
    maxhiposval = bin_centers[np.argmax(hist)]
    # Fit a Gaussian; fall back to moment-based parameters if the fit fails.
    try:
        popt, pcov = optimize.curve_fit(self.gaussian, bin_centers, hist, p0=[maxhiposval, maxhi, stdev / 2.])
    except Exception:  # narrower than the original BaseException: lets KeyboardInterrupt through
        popt = np.array([average, widthes[0] * npoints / (np.sqrt(2 * np.pi) * stdev), stdev])
    if threshmode == 'abs':
        std = 1.
        ave = 0.
    if threshmode == 'std':
        std = stdev
        ave = average
    if threshmode == 'mad':
        std = mad
        ave = med
        # Recentre the diagnostic plot on the robust statistics.
        stdev = mad
        average = med
    if threshmode == 'fit':
        std = popt[2]
        ave = popt[0]
    # makePlots is a bound parameter, so the original try/except NameError
    # probe was dead code; a plain truthiness test is equivalent.
    if makePlots:
        self.plotSunblocker(bin_centers, bin_edges, npoints, widthes, average, stdev, med, mad, popt, hist, threshold, galaxy, msid, track, scan, ave + float(threshold) * std)
    else:
        caracal.log.warn('For some reasons I am not making the fftstats plots!')
    caracal.log.info("FFT image flagging cutoff = median + {threshold} * mad = {cutoff:.5f}".format(threshold=float(threshold), cutoff=ave + float(threshold) * std))
    return ave + float(threshold) * std
def flagQuartile(self, inVis, tableFlags, inFFTHeader, method, dilateU, dilateV, qrtdebug=False):
    """Flag MS rows whose (u, v) fall in the selected FFT cells.

    For each (u, v) cell in ``tableFlags`` (columns 'u', 'v', 'amp'),
    selects rows of the scan MS ``inVis`` whose uv coordinates (converted
    to wavelengths at the average channel frequency) fall within the cell
    extended by ``dilateU``/``dilateV`` cells, and flags them across all
    channels and correlations.  The new flags are OR-ed into the MS FLAG
    column.  ``method`` is accepted but not used here.

    Returns
    -------
    (ndarray, float)
        The boolean stripe-flag cube of this scan and the flagged row
        percentage.  NOTE(review): the percentage sums per-cell counts, so
        rows selected by more than one cell are counted more than once.
    """
    U = tableFlags['u']
    V = tableFlags['v']
    UV = np.array([U, V])
    t = tables.table(inVis, readonly=False, ack=False)
    # Take existing flags from MS of this scan to estimate flagged starting flagged fraction
    flags = t.getcol('FLAG')
    percTot = np.nansum(flags) / float(flags.shape[0] * flags.shape[1] * flags.shape[2]) * 100.
    # Reset to no flags and build up stripe flags
    flags = np.zeros(flags.shape, bool)
    caracal.log.info("Scan flags before stripe-flagging: {percent:.3f}%".format(percent=percTot))
    # uvw=np.array(t.getcol('UVW'),dtype=float) # This is never used
    # NOTE(review): spw is never closed.
    spw = tables.table(inVis + '/SPECTRAL_WINDOW', ack=False)
    avspecchan = np.average(spw.getcol('CHAN_FREQ'))
    # uv in wavelengths at the average channel frequency.
    uv = t.getcol('UVW')[:, :2] * avspecchan / scconstants.c
    caracal.log.info('{0:d} UV cells in the FFT image selected for flagging'.format(U.shape[0]))
    if qrtdebug and U.shape[0]:
        caracal.log.info('\tamplitude of selected cells in range {0:.3f} - {1:.3f}'.format(np.nanmin(tableFlags['amp']), np.nanmax(tableFlags['amp'])))
        caracal.log.info('\t{0} total rows in scan MS'.format(flags.shape))
    if U.shape[0]:
        caracal.log.info('Finding MS rows within flagged cells +/- {0:d} U cell(s) and +/- {1:d} V cell(s)'.format(dilateU, dilateV))
    percent = 0.
    for i in range(0, UV.shape[1]):
        # Half-open windows of (1/2 + dilate) FFT cells around the cell centre.
        # NOTE(review): CDELT2 is used for both the U and V window sizes --
        # confirm square FFT cells are guaranteed.
        indexU = np.where(np.logical_and(uv[:, 0] > UV[0, i] - (1 / 2 + dilateU) * inFFTHeader['CDELT2'], uv[:, 0] <= UV[0, i] + (1 / 2 + dilateU) * inFFTHeader['CDELT2']))[0]
        indexV = np.where(np.logical_and(uv[:, 1] > UV[1, i] - (1 / 2 + dilateV) * inFFTHeader['CDELT2'], uv[:, 1] <= UV[1, i] + (1 / 2 + dilateV) * inFFTHeader['CDELT2']))[0]
        if qrtdebug:
            caracal.log.info('\tcell {0:d}, [U,V] = {1}'.format(i, UV[:, i]))
            caracal.log.info('\t\tflagging u range = {0:.3f} - {1:.3f}'.format(UV[0, i] - (1 / 2 + dilateU) * inFFTHeader['CDELT2'], UV[0, i] + (1 / 2 + dilateU) * inFFTHeader['CDELT2']))
            caracal.log.info('\t\tflagging v range = {0:.3f} - {1:.3f}'.format(UV[1, i] - (1 / 2 + dilateV) * inFFTHeader['CDELT2'], UV[1, i] + (1 / 2 + dilateV) * inFFTHeader['CDELT2']))
        # Combine U and V selection into final selection
        indexTot = np.intersect1d(indexU, indexV)
        if qrtdebug:
            caracal.log.info('\t\t{0:d} rows found'.format(indexTot.shape[0]))
            if indexTot.shape[0]:
                caracal.log.info('\t\tSelected rows have uv in the following ranges')
                caracal.log.info('\t\tu: {0:.3f} - {1:.3f}'.format(np.nanmin(uv[indexTot, 0]), np.nanmax(uv[indexTot, 0])))
                caracal.log.info('\t\tv: {0:.3f} - {1:.3f}'.format(np.nanmin(uv[indexTot, 1]), np.nanmax(uv[indexTot, 1])))
        # Add to stripe flags of this scan
        flags[indexTot, :, :] = True
        percent += float(len(indexTot)) / float(flags.shape[0]) * 100.
    # Save modified flags to MS of this scan
    # ('+' on boolean numpy arrays is a logical OR here).
    t.putcol('FLAG', t.getcol('FLAG') + flags)
    t.close()
    caracal.log.info("Flag scan done")
    return flags, percent
def putFlags(self, pipeline, pf_inVis, pf_inVisName, pf_stripeFlags):
    """Merge the stripe flags into the FLAG column of the full MS.

    Logs the flagged percentage before and after, writes the merged flags
    back, and saves a 'stripe_flag_after' flag version.  Returns 0.

    Parameters
    ----------
    pf_inVis : str
        Path to the MS on disk.
    pf_inVisName : str
        MS name (used for logging and the flag-version bookkeeping).
    pf_stripeFlags : ndarray
        Stripe-flag cube with the same shape as the FLAG column.
    """
    # BUGFIX: the original message had no '{}' placeholder, so the MS name
    # was silently dropped from the log line.
    caracal.log.info("Opening full MS file {} to add stripe flags".format(pf_inVisName))
    t = tables.table(pf_inVis, readonly=False, ack=False)
    flagOld = t.getcol('FLAG')
    percTotBefore = np.nansum(flagOld) / float(flagOld.shape[0] * flagOld.shape[1] * flagOld.shape[2]) * 100.
    caracal.log.info("Total Flags Before: {percent:.3f} %".format(percent=percTotBefore))
    # BUGFIX: np.sum([...], axis=0) upcasts booleans to integers, so
    # visibilities flagged in both inputs counted twice in the percentage
    # and an integer array was written to the boolean FLAG column;
    # logical_or keeps dtype bool and counts each visibility once.
    flagNew = np.logical_or(pf_stripeFlags, flagOld)
    percTotAfter = np.nansum(flagNew) / float(flagNew.shape[0] * flagNew.shape[1] * flagNew.shape[2]) * 100.
    caracal.log.info("Total Flags After: {percent:.3f} %".format(percent=percTotAfter))
    t.putcol('FLAG', flagNew)
    # Free the large flag cubes before further processing.
    del flagOld
    del flagNew
    gc.collect()
    t.close()
    caracal.log.info("MS flagged")
    caracal.log.info("Before we close, save flag version 'stripe_flag_after'")
    self.saveFlags(pipeline, pf_inVisName, msdir=pipeline.msdir, flagname='stripe_flag_after')
    return 0
def run_flagUzeros(self, pipeline, targets, msname):
    """Main driver of the u=0 stripe-flagging segment.

    Images the full MS, splits it per scan, images and FFTs each scan,
    selects high-amplitude FFT cells (optionally scanning several
    thresholds and keeping the one that minimises the post-flag image
    noise), flags the matching visibilities, and propagates the resulting
    stripe flags to any 'transfer_flags' companion MSs.  Diagnostic plots
    and an ecsv stats table are written when configured.

    Returns the elapsed time in minutes.

    NOTE(review): indentation of this routine was reconstructed; nesting
    of some blocks (threshold rewind, post-flag imaging) inferred from the
    control flow.
    """
    # Configuration for this segment.
    method = self.config['flag_u_zeros']['method']
    makePlots = self.config['flag_u_zeros']['make_plots']
    doCleanUp = self.config['flag_u_zeros']['cleanup']
    thresholds = self.config['flag_u_zeros']['thresholds']
    dilateU = self.config['flag_u_zeros']['dilateU']
    dilateV = self.config['flag_u_zeros']['dilateV']
    flagCmd = True
    galaxies = targets
    datapath = pipeline.output
    mfsOb = msname
    self.setDirs(pipeline.output)
    # Global matplotlib styling for all diagnostic plots.
    if makePlots:
        font = 16
        params = {'figure.autolayout': True,
                  'font.family': 'serif',
                  'figure.facecolor': 'white',
                  'pdf.fonttype': 3,
                  'font.serif': 'times',
                  'font.style': 'normal',
                  'font.weight': 'book',
                  'font.size': font,
                  'axes.linewidth': 1.5,
                  'lines.linewidth': 1,
                  'xtick.labelsize': font,
                  'ytick.labelsize': font,
                  'legend.fontsize': font,
                  'xtick.direction': 'in',
                  'ytick.direction': 'in',
                  'xtick.major.size': 3,
                  'xtick.major.width': 1.5,
                  'xtick.minor.size': 2.5,
                  'xtick.minor.width': 1.,
                  'ytick.major.size': 3,
                  'ytick.major.width': 1.5,
                  'ytick.minor.size': 2.5,
                  'ytick.minor.width': 1.,
                  'text.usetex': False
                  }
        plt.rcParams.update(params)
    # MAIN MAIN MAIN
    superArr = np.empty((0, 7))
    galaxy = str.split(mfsOb, self.config['label_in'])[0]
    comvmax_tot, comvmax_scan = 0, 0
    runtime = time.strftime("%d-%m-%Y") + '_' + time.strftime("%H-%M")
    caracal.log.info("====================================================")
    caracal.log.info('Starting the flag_u_zeros segment')
    # obsIDs[0] is the MS that gets analysed; any 'transfer_flags' labels
    # produce companion MSs that only receive the resulting flags.
    obsIDs = []
    rootMS = str.split(mfsOb, self.config['label_in'])[0]
    obsIDs.append(mfsOb)
    lws = self.config['flag_u_zeros']['transfer_flags']
    if lws == ['']:
        lws = []
    if len(lws):
        for lw in lws:
            # obsIDs.append('{}{}.ms'.format(rootMS,lw))
            obsIDs.append(mfsOb.replace(self.config['label_in'], lw))
    lws = [self.config['label_in']] + lws
    stripeFlags = None
    for ii in range(0, len(obsIDs)):
        track = lws[ii]
        inVis = pipeline.msdir + '/' + obsIDs[ii]
        inVisName = obsIDs[ii]
        caracal.log.info("====================================================")
        caracal.log.info("\tWorking on {} ".format(inVisName))
        caracal.log.info("====================================================")
        # Rewind to (or create) the 'stripe_flag_before' flag version so
        # the run is repeatable.
        if os.path.exists(inVis + '.flagversions'):
            fvers = [ii.split(' :')[0] for ii in open(inVis + '.flagversions/FLAG_VERSION_LIST').readlines()]
            if 'stripe_flag_before' in fvers:
                caracal.log.info("Before we start, restore existing flag version 'stripe_flag_before'")
                self.restoreFlags(pipeline, inVisName, msdir=pipeline.msdir, flagname='stripe_flag_before')
                while fvers[-1] != 'stripe_flag_before':
                    self.deleteFlags(pipeline, inVisName, msdir=pipeline.msdir, flagname=fvers[-1])
                    fvers = fvers[:-1]
            else:
                caracal.log.info("Before we start, save flag version 'stripe_flag_before'")
                self.saveFlags(pipeline, inVisName, msdir=pipeline.msdir, flagname='stripe_flag_before')
        else:
            caracal.log.info("Before we start, save flag version 'stripe_flag_before'")
            self.saveFlags(pipeline, inVisName, msdir=pipeline.msdir, flagname='stripe_flag_before')
        # For lw's other than the first one, just copy the flags and skip the rest of the for loop
        if ii != 0 and stripeFlags is not None:
            self.putFlags(pipeline, inVis, inVisName, stripeFlags)
            continue
        # For the first lw, do all that follows
        # NOTE(review): message has no '{}' placeholder, so the MS name is
        # not actually logged.
        caracal.log.info("Opening full MS file".format(inVisName))
        t = tables.table(inVis, readonly=True, ack=False)
        scans = t.getcol('SCAN_NUMBER')
        FlagTot = t.getcol('FLAG')
        scanNums = np.unique(scans)
        timestamps = t.getcol("TIME")
        field_id = t.getcol("FIELD_ID")
        spw = tables.table(inVis + '/SPECTRAL_WINDOW', ack=False)
        avspecchan = np.average(spw.getcol('CHAN_FREQ'))
        uvw = t.getcol("UVW")
        spw.close()
        t.close()
        percTot = np.nansum(FlagTot) / float(FlagTot.shape[0] * FlagTot.shape[1] * FlagTot.shape[2]) * 100.
        caracal.log.info("Flagged visibilites so far: {percTot:.3f} %".format(percTot=percTot))
        anttab = tables.table(inVis + "::ANTENNA", ack=False)
        ant_xyz = anttab.getcol("POSITION", 0, 1)[0]
        anttab.close()
        caracal.log.info("----------------------------------------------------")
        caracal.log.info("Imaging full MS for stripe analysis")
        outCubePrefix = galaxy + track + '_tot'
        outCubeName = self.config['flag_u_zeros']['stripeCubeDir'] + outCubePrefix + '-dirty.fits'
        if os.path.exists(outCubeName):
            os.remove(outCubeName)
        self.makeCube(pipeline, pipeline.msdir, inVisName, outCubePrefix)
        caracal.log.info("Making FFT of image")
        inFFTData, inFFTHeader = self.makeFFT(outCubeName)
        if makePlots:
            # With flagCmd the pre/post comparison figure stays open until
            # after the flags are applied; otherwise it is saved now.
            if flagCmd:
                fig0 = plt.figure(figsize=(7.24409, 7.24409), constrained_layout=False)
                fig0.set_tight_layout(False)
                gs0 = gridspec.GridSpec(nrows=2, ncols=2, figure=fig0, hspace=0, wspace=0.0)
                fig0, comvmax_tot = self.plotAll(fig0, gs0, 2, 0, outCubeName, inFFTData, inFFTHeader, galaxy, track, 0, 0, comvmax_tot, 0, type=None)
            else:
                outPlot = "{0}{2}_tot.png".format(self.config['flag_u_zeros']['stripePlotDir'], galaxy, mfsOb)
                fig0 = plt.figure(figsize=(7.24409, 7.24409), constrained_layout=False)
                fig0.set_tight_layout(False)
                gs0 = gridspec.GridSpec(nrows=1, ncols=2, figure=fig0, hspace=0, wspace=0.0)
                fig0, comvmax_tot = self.plotAll(fig0, gs0, 1, 0, outCubeName, inFFTData, inFFTHeader, galaxy, track, 0, 0, comvmax_tot, 0, type=None)
                fig0.subplots_adjust(left=0.05, bottom=0.05, right=0.97, top=0.97, wspace=0, hspace=0)
                fig0.savefig(outPlot, bbox_inches='tight', dpi=200)  # save the figure to file
                plt.close(fig0)
        caracal.log.info("----------------------------------------------------")
        caracal.log.info("Splitting scans".format(galaxy=galaxy, track=track))
        scanVisList, scanVisNames = self.splitScans(pipeline, pipeline.msdir, inVis, scanNums)
        arr = np.empty((0, 7))
        NS = len(scanNums)
        if makePlots:
            fig1 = plt.figure(figsize=(8, 21.73227), constrained_layout=False)
            fig1.set_tight_layout(False)
            fig2 = plt.figure(figsize=(8, 21.73227), constrained_layout=False)
            fig2.set_tight_layout(False)
            gs1 = gridspec.GridSpec(nrows=NS, ncols=2, figure=fig1, hspace=0, wspace=0.0)
            gs2 = gridspec.GridSpec(nrows=NS, ncols=2, figure=fig2, hspace=0, wspace=0.0)
        # Initialising the stripeFlags array, to which scans will be added one by one
        stripeFlags = np.empty(((0), FlagTot.shape[1], FlagTot.shape[2]))
        percTotAv = []
        del FlagTot
        gc.collect()
        for kk in range(len(scanNums)):
            scan = scanNums[kk]
            caracal.log.info("----------------------------------------------------")
            caracal.log.info("\tWorking on scan {}".format(str(scan)))
            visName = scanVisNames[kk]
            visAddress = scanVisList[kk]
            caracal.log.info("----------------------------------------------------")
            # Save flag version before start iterating over all thresholds
            self.saveFlags(pipeline, visName, msdir=self.config['flag_u_zeros']['stripeMSDir'], flagname='scan_flags_start')
            caracal.log.info("Imaging scan for stripe analysis".format(scanNumber=str(scan), galaxy=galaxy, track=track))
            outCubePrefix_0 = galaxy + track + '_scan' + str(scan)
            outCubeName_0 = self.config['flag_u_zeros']['stripeCubeDir'] + outCubePrefix_0 + '-dirty.fits'
            if os.path.exists(outCubeName_0):
                os.remove(outCubeName_0)
            self.makeCube(pipeline, self.config['flag_u_zeros']['stripeMSDir'], visName, outCubePrefix_0)
            caracal.log.info("Making FFT of image")
            inFFTData, inFFTHeader = self.makeFFT(outCubeName_0)
            # World coordinates of the FFT grid columns (U) and rows (V).
            U = ((np.linspace(1, inFFTData.shape[1], inFFTData.shape[1]) - inFFTHeader['CRPIX1']) * inFFTHeader['CDELT1'] + inFFTHeader['CRVAL1'])
            V = ((np.linspace(1, inFFTData.shape[1], inFFTData.shape[1]) - inFFTHeader['CRPIX2'] - 1) * inFFTHeader['CDELT2'] + inFFTHeader['CRVAL2'])
            el = 0
            az = 0
            outCubePrefix = galaxy + track + '_scan' + str(scan) + '_stripeFlag'
            outCubeName = self.config['flag_u_zeros']['stripeCubeDir'] + outCubePrefix + '-dirty.fits'
            rms_thresh = []
            if len(thresholds) > 1:
                caracal.log.info('Start iterating over all requested thresholds {} to find the optimal one'.format(thresholds))
            # iterate over all thresholds
            for threshold in thresholds:
                if len(thresholds) > 1:
                    caracal.log.info('New iter')
                    # Rewind flags of this scan to their initial state
                    fvers = [ii.split(' :')[0] for ii in open(visAddress + '.flagversions/FLAG_VERSION_LIST').readlines()]
                    self.restoreFlags(pipeline, visName, msdir=self.config['flag_u_zeros']['stripeMSDir'], flagname='scan_flags_start')
                    while fvers[-1] != 'scan_flags_start':
                        self.deleteFlags(pipeline, visName, msdir=self.config['flag_u_zeros']['stripeMSDir'], flagname=fvers[-1])
                        fvers = fvers[:-1]
                caracal.log.info("Computing statistics on FFT and flagging scan for threshold {0}".format(threshold))
                # scanFlags below are the stripe flags for this scan
                statsArray, scanFlags, percent, cutoff_scan = self.saveFFTTable(inFFTData, inFFTHeader, visAddress, np.flip(U), V, galaxy, mfsOb, track, scan, el, az, method, threshold, dilateU, dilateV, makePlots)
                caracal.log.info("Scan flags from stripe-flagging: {percent:.3f}%".format(percent=percent))
                caracal.log.info("Making post-flagging image")
                if os.path.exists(outCubeName):
                    os.remove(outCubeName)
                self.makeCube(pipeline, self.config['flag_u_zeros']['stripeMSDir'], visName, outCubePrefix)
                fitsdata = fits.open(outCubeName)
                rms_thresh.append(np.std(fitsdata[0].data[0, 0]))
                caracal.log.info("Image noise = {0:.3e} Jy/beam".format(rms_thresh[-1]))
                fitsdata.close()
            # Select best threshold (minimum noise), re-flag and re-image
            if len(thresholds) > 1:
                caracal.log.info('Done iterating over all requested thresholds')
                threshold = thresholds[rms_thresh.index(min(rms_thresh))]
                caracal.log.info('\tThe threshold that minimises the image noise is {}'.format(threshold))
                caracal.log.info('Repeating flagging and imaging steps with the selected threshold (yes, the must be a better way...)')
                # Rewind flags of this scan to their initial state
                fvers = [ii.split(' :')[0] for ii in open(visAddress + '.flagversions/FLAG_VERSION_LIST').readlines()]
                self.restoreFlags(pipeline, visName, msdir=self.config['flag_u_zeros']['stripeMSDir'], flagname='scan_flags_start')
                while fvers[-1] != 'scan_flags_start':
                    self.deleteFlags(pipeline, visName, msdir=self.config['flag_u_zeros']['stripeMSDir'], flagname=fvers[-1])
                    fvers = fvers[:-1]
                # Re-flag with selected threshold
                caracal.log.info("Computing statistics on FFT and flagging scan for threshold {0}".format(threshold))
                statsArray, scanFlags, percent, cutoff_scan = self.saveFFTTable(inFFTData, inFFTHeader, visAddress, np.flip(U), V, galaxy, mfsOb, track, scan, el, az, method, threshold, dilateU, dilateV, makePlots)
                caracal.log.info("Scan flags from stripe-flagging: {percent:.3f}%".format(percent=percent))
                # Re-image
                caracal.log.info("Making post-flagging image")
                if os.path.exists(outCubeName):
                    os.remove(outCubeName)
                self.makeCube(pipeline, self.config['flag_u_zeros']['stripeMSDir'], visName, outCubePrefix)
            # Save stats for the selected threshold
            arr = np.vstack((arr, statsArray))
            percTotAv.append(percent)
            # Add the stripe flags of this scan to the stripe flags of all the scans done previously
            stripeFlags = np.concatenate([stripeFlags, scanFlags])
            if makePlots:
                fig1, comvmax_scan = self.plotAll(fig1, gs1, NS, kk, outCubeName_0, inFFTData, inFFTHeader, galaxy, track, scan, None, comvmax_scan, cutoff_scan, type=None)
            caracal.log.info("Making FFT of post-flagging image")
            inFFTData, inFFTHeader = self.makeFFT(outCubeName)
            if makePlots:
                fig2, comvmax_scan = self.plotAll(fig2, gs2, NS, kk, outCubeName, inFFTData, inFFTHeader, galaxy, track, scan, percent, comvmax_scan, 0, type='postFlag')
        if makePlots:
            caracal.log.info("----------------------------------------------------")
            caracal.log.info("Saving scans diagnostic plots")
            outPlot = "{0}{2}_perscan_preFlag.png".format(self.config['flag_u_zeros']['stripePlotDir'], galaxy, mfsOb)
            outPlotFlag = "{0}{2}_perscan_postFlag.png".format(self.config['flag_u_zeros']['stripePlotDir'], galaxy, mfsOb)
            fig1.subplots_adjust(left=0.05, bottom=0.05, right=0.97, top=0.97, wspace=0, hspace=0)
            fig1.savefig(outPlot, bbox_inches='tight', dpi=200)  # save the figure to file
            plt.close(fig1)
            fig2.subplots_adjust(left=0.05, bottom=0.05, right=0.97, top=0.97, wspace=0, hspace=0)
            fig2.savefig(outPlotFlag, bbox_inches='tight', dpi=200)  # save the figure to file
            plt.close(fig2)
        superArr = np.vstack((superArr, arr))
        caracal.log.info("Saving stats table")
        newtab = Table(names=['galaxy', 'track', 'scan', 'perc', 'cutoff', 'el', 'az'], data=(superArr))
        outTablePercent = "{tableDir}stats_{galaxy}{track}.ecsv".format(tableDir=self.config['flag_u_zeros']['stripeTableDir'], galaxy=galaxy, track=track)
        astasc.write(newtab, outTablePercent, overwrite=True, format='ecsv')
        if flagCmd:
            caracal.log.info("====================================================")
            caracal.log.info("\tWorking on {}".format(inVisName))
            caracal.log.info("====================================================")
            self.putFlags(pipeline, inVis, inVisName, stripeFlags)
        caracal.log.info("Making post-flagging image")
        outCubePrefix = galaxy + track + '_tot_stripeFlag'
        outCubeName = self.config['flag_u_zeros']['stripeCubeDir'] + outCubePrefix + '-dirty.fits'
        if os.path.exists(outCubeName):
            os.remove(outCubeName)
        self.makeCube(pipeline, pipeline.msdir, inVisName, outCubePrefix)
        caracal.log.info("Making FFT of post-flagging image")
        inFFTData, inFFTHeader = self.makeFFT(outCubeName)
        U = ((np.linspace(1, inFFTData.shape[1], inFFTData.shape[1]) - inFFTHeader['CRPIX1']) * inFFTHeader['CDELT1'] + inFFTHeader['CRVAL1'])
        V = ((np.linspace(1, inFFTData.shape[1], inFFTData.shape[1]) - inFFTHeader['CRPIX2'] - 1) * inFFTHeader['CDELT2'] + inFFTHeader['CRVAL2'])
        caracal.log.info("Saving total stripe flagging diagnostic plots".format(galaxy=galaxy, track=track))
        percTotAfter = np.nansum(stripeFlags) / float(stripeFlags.shape[0] * stripeFlags.shape[1] * stripeFlags.shape[2]) * 100.
        caracal.log.info("Total stripe flags: {percent:.3f} %".format(percent=percTotAfter))
        percRel = percTotAfter - percTot
        caracal.log.info("----------------------------------------------------")
        caracal.log.info("Mean stripe flagging per scan: {percent:.3f}%".format(percent=np.nanmean(percTotAv)))
        if makePlots:
            caracal.log.info("----------------------------------------------------")
            caracal.log.info("----------------------Plotting----------------------")
            outPlot = "{0}{2}_fullMS_prepostFlag.png".format(self.config['flag_u_zeros']['stripePlotDir'], galaxy, mfsOb)
            fig0, comvmax_tot = self.plotAll(fig0, gs0, 2, 1, outCubeName, inFFTData, inFFTHeader, galaxy, track, 0, np.nanmean(percTotAv), comvmax_tot, 0, type='postFlag')
            fig0.subplots_adjust(left=0.05, bottom=0.05, right=0.97, top=0.97, wspace=0, hspace=0)
            fig0.savefig(outPlot, bbox_inches='tight', dpi=200)  # save the figure to file
            plt.close(fig0)
    # NOTE(review): timeInit is not defined in this function -- presumably a
    # module-level timestamp set at import/start time; confirm, otherwise
    # this raises NameError.
    timeFlag = (time.time() - timeInit) / 60.
    if doCleanUp is True:
        self.cleanUp(galaxy)
    return timeFlag
| 50,992 | 47.611058 | 559 | py |
caracal | caracal-master/caracal/workers/utils/manage_fields.py | # -*- coding: future_fstrings -*-
def get_field(pipeline, i, field):
    """
    Resolve a field specification into a comma-separated list of field names.

    Parameters
    ----------
    pipeline : object
        Pipeline object exposing per-MS field lists as attributes
        (``bpcal``, ``gcal``, ``target``, ``fcal``, ``xcal``); each entry
        may be a comma-separated string or a list of names.
    i : int
        Index of the MS whose fields are looked up.
    field : str or list
        Comma-separated string (or list) of tokens; each token is either
        one of the calibrator keys above (expanded to the corresponding
        field name(s) of MS ``i``) or taken verbatim as a field name.

    Returns
    -------
    str
        Comma-separated field names, with empty entries dropped.
    """
    cal_keys = ('bpcal', 'gcal', 'target', 'fcal', 'xcal')
    tokens = field.split(',') if isinstance(field, str) else field
    resolved = []
    for tok in tokens:
        if tok in cal_keys:
            value = getattr(pipeline, tok)[i]
            # Per-MS entries may be a plain string ('f1,f2') or a list of
            # names; normalise both to one comma-separated string.
            resolved.append(value if isinstance(value, str) else ','.join(value))
        else:
            # BUGFIX: the original emitted tok.split(',') -- a *list* -- into
            # the outer ','.join(), which raises TypeError for literal field
            # names; keep the token itself (equivalent to re-joining its split).
            resolved.append(tok)
    return ','.join(s for s in resolved if s != "")
| 866 | 56.8 | 176 | py |
caracal | caracal-master/caracal/workers/utils/callibs.py | from os import pipe
import os.path
# BUGFIX: the abstract collection classes moved to collections.abc and the
# aliases were removed from 'collections' in Python 3.10, so
# 'from collections import Iterable' raises ImportError there.
from collections import OrderedDict
from collections.abc import Iterable

import stimela
# Map a gain-table suffix (file extension with trailing digits stripped,
# e.g. ".K0" -> "K") to the calibration-mode key used at the top level of
# a callib dict.
_MODES = {
    "K": "delay_cal",
    "B": "bp_cal",
    "F": "gain_cal",
    "G": "gain_cal",  # both F and G serve the same purpose, so same mode
    "Gpol": "gain_xcal",
    "Kcrs": "cross_delay",
    "Xref": "cross_phase_ref",
    "Xf": "cross_phase",
    "Dref": "leakage_ref",
    "Df": "leakage",
    "Gxyamp": "cross_gain",
    "Xfparang": "cross_phase",
    "Df0gen": "leakage",
}


def new_callib():
    """Return a fresh, empty calibration library (a plain dict keyed by mode)."""
    return {}


def add_callib_recipe(callib, gt, interp, fldmap, field=None, calwt=False):
    """Adds gaintable to a callib
    gt: gain table path
    interp: interpolation policy
    fldmap: field mapping policy
    field: if set, then this table must appy to a specific field. Otherwise set as default.
    """
    # The mode is derived from the table's extension with trailing digits
    # stripped, e.g. "table.K0" -> "K" -> "delay_cal".
    _, ext = os.path.splitext(gt)
    suffix = ext[1:].rstrip("0123456789") if ext else ext
    entries = callib.setdefault(_MODES.get(suffix, "unknown"), {})
    entry = dict(caltable=gt, fldmap=fldmap, interp=interp, calwt=bool(calwt))
    # Normalise 'field' to a list of targets: None -> default slot only.
    if field is None:
        targets = [None]
    elif isinstance(field, str):
        targets = [part.strip() for part in field.split(",")]
    elif isinstance(field, Iterable):
        targets = field
    else:
        raise TypeError(f"invalid 'field' argument of type {type(field)}")
    for fld in targets:
        if fld:
            # Skip per-field entries that duplicate the existing default.
            default = entries.get("default")
            if default and all(val == default.get(key) for key, val in entry.items()):
                return
            entries[fld] = entry
        else:
            entries["default"] = entry
def resolve_calibration_library(pipeline, msprefix, cal_lib, cal_label, output_fields=None, default_interpolation_types={}):
    """
    Reads callib specified by configuration. Figures out how to apply it to the given set of output fields.
    Writes a CASA-compatible callib.txt file describing same.
    Returns a tuple of:
        callib_filename, (gaintables, gainfields, interps, calwts, fields)
    where the latter are five lists suitable to the CASA applycal task:
        - gain tables
        - field mapping policies (gainfield)
        - interpolation type
        - calwt
        - field (to apply to)
    Arguments:
        pipeline: worker administrator object
        msprefix: filename prefix (based on MS name etc.)
        cal_lib: name of callib given in config, if supplied. Overrides label, if given.
        cal_label: label given in config, if supplied.
        output_fields: set of fields that the calibration is applied to. If None, assume target fields.
        default_interpolation_types:
            interpolation types for default fields, as a mapping e.g. {'delay_cal': 'nearest', 'gain_cal': 'linear'}, which
            will override the default specified in the yml library.
            (Read-only here, so the mutable default argument is safe.)
    """
    cal_lists = [], [], [], [], []  # init 5 empty lists for output values
    # get name from callib name and/or from prefix
    if not cal_lib:
        if cal_label:
            cal_lib = f"{msprefix}-{cal_label}"
        else:
            # neither a callib name nor a label: nothing to apply
            return None, cal_lists
    caldict = pipeline.load_callib(cal_lib)
    outfile = pipeline.get_callib_name(cal_lib, "txt", pipeline.CURRENT_WORKER)
    with open(outfile, 'w') as stdw:
        for gain_type, cal_entries in caldict.items():
            cal_fields = set(cal_entries.keys())
            # specific fields -- set of fields for which a separate caltable is defined
            # default_fields -- set of fields for which the default caltable is used
            if output_fields is None:
                specific_fields = {}
                default_fields = {""}  # this will turn into '' post-join below, which CASA recognizes as default
            else:
                specific_fields = set(output_fields).intersection(cal_fields)
                default_fields = set(output_fields).difference(cal_fields)
            # go through all tables, skip the ones that don't apply
            for field, entry in cal_entries.items():
                calwt = entry.get('calwt', False)
                # NOTE(review): interp falls back to False (not a string) when
                # missing from the library entry -- confirm downstream handling
                interp = entry.get('interp', False)
                if field == "default":
                    if not default_fields:
                        continue
                    field = ",".join(default_fields)
                    # override interpolation type, if supplied
                    override_interp = default_interpolation_types.get(gain_type, 'default')
                    if override_interp != 'default':
                        interp = override_interp
                elif field not in specific_fields:
                    continue
                cal_lists[0].append(entry['caltable'])
                cal_lists[1].append(entry['fldmap'])
                cal_lists[2].append(interp)
                cal_lists[3].append(calwt)
                cal_lists[4].append(field)
                # full container-side path to the caltable, written into the
                # CASA callib file (fix: this path was previously dropped from
                # the caltable="..." clause)
                filename = os.path.join(stimela.recipe.CONT_IO["output"], 'caltables', entry['caltable'])
                stdw.write(f"""caltable="{filename}" calwt={calwt} tinterp='{interp}' """
                           f"""finterp='linear' fldmap='{entry['fldmap']}' field='{field}' spwmap=0\n""")
    # return the callib filename relative to the pipeline output directory
    return outfile[len(pipeline.output) + 1:], cal_lists
| 5,641 | 42.068702 | 135 | py |
caracal | caracal-master/caracal/workers/utils/__init__.py | import os
def remove_output_products(files, directory=None, log=None):
    """
    Removes output products (given by a list of files), in a directory (if specified)

    Args:
        files: iterable of file/directory paths to remove
        directory: if given, each path is interpreted relative to it
        log: optional logger; removals are reported via log.info
    """
    import subprocess
    for fullpath in files:
        if directory:
            fullpath = os.path.join(directory, fullpath)
        if os.path.exists(fullpath):
            if log is not None:
                log.info(f'removing pre-existing {fullpath}')
            # use an argument list rather than a shell string so that paths
            # containing spaces or shell metacharacters are removed correctly
            subprocess.run(['rm', '-rf', fullpath], check=False)
| 456 | 29.466667 | 85 | py |
caracal | caracal-master/caracal/workers/utils/manage_caltabs.py | import json
import tempfile
import os
import codecs
| 52 | 9.6 | 15 | py |
caracal | caracal-master/caracal/workers/utils/image_contsub.py | #! /usr/bin/env python
import sys
from datetime import datetime
import numpy as np
import argparse
import textwrap
from caracal.utils.requires import extras
version = '1.0.2'  # script version string
def printime(string):
    """Print *string* prefixed with the current HH:MM:SS timestamp."""
    stamp = datetime.now().strftime("%H:%M:%S")
    print(f"{stamp} {string}")
@extras(packages=["scipy", "astropy"])
def imcontsub(
        incubus, outcubus=None, fitmode='median', length=0,
        polyorder=None, mask=None, sgiters=0, kertyp='gauss', kersiz=0,
        fitted=None, confit=None, clobber=False):
    """Continuum subtraction in a fits data cube
    Parameters:
        incubus (str): Input cube
        outcubus (str): Name of continuum-subtracted output data cube
        fitmode (str): Type of fit ('poly', 'median' or 'savgol')
        length (int): Length of the sliding window in channels (must be
                      odd)
        polyorder (int): Polynomial order
        mask (string): Mask cube indicating regions to be excluded;
                       excluded are voxels where the mask cube is not 0
        sgiters (int): Number of Savitzky-Golay filter iterations
        kertyp (str): Kernel type to convolve the polynomial fit with
                      ('gauss' 'tophat')
        kersiz (int): Kernel size to convolve the polynomial fit with (pixel)
        fitted (str): Name of fitted continuum cube (optional output)
        confit (str): Name of fitted and convolved continuum cube (optional
                      output)
        clobber (bool): Overwrite output if set
    Returns:
        None
    Takes input cube incubus, which is expected to have velocity or
    frequency along the third axis. A fourth axis is allowed but
    expected to have a length of 1 ("Stokes I"). The script fits a
    function or filter along the third axis, subtracts it from the
    original, and writes the result onto disk (outcubus). Possible is
    a polynomial fit (fitmode = 'poly') or a Savitzky-Golay filter. In
    case of the Savitzky-Golay filter the window length is given by the
    parameter length. The polynomial order of either polynomial or the
    filter is specified with the parameter polyorder. A Savitzky-Golay
    filter with polynomial order 0 is a median filter. Optionally a
    mask data cube with the same dimensions of the input data cube can
    be provided. Voxels for which the mask data cube is not equal to
    zero are ignored. For the polynomial fit the voxels are simply
    ignored. In case of the Savitzky Golay filter, an iterative
    process is started. All masked voxels are set to zero and a
    median filter is run along the frequency axis. After that the
    Savitzky-Golay filter is run sgiters times. If the parameter
    sgiters is set to 0, only one Savitzky-Golay filter is
    applied. After the fitting procedure, the fitted data cube can
    optionally be convolved in the spatial domain (axes 1 and 2)
    before it gets subtracted from the original cube. The type of the
    convolving kernel is given by the parameter kertyp ('gauss' or
    'tophat') and the size of the kernel in pixels is given by the
    parameter kersiz. With the parameter fitted the user can
    optionally supply the name of the output fitted data cube and with
    the parameter confit the user specifies the name of the fitted and
    convolved output data cube. The parameter clobber determines
    whether the output will be overwritten (if True).
    """
    import astropy.io.fits as astropy_io_fits
    import scipy
    # explicit submodule import: `import scipy` alone does not guarantee
    # that scipy.ndimage (used below) is loaded
    import scipy.ndimage
    import scipy.signal as scipy_signal
    # Read cube
    begin = datetime.now()
    print('')
    print('Welcome to image_contsub.py')
    # incubus may be either a filename or an already-open HDU list
    if isinstance(incubus, type('')):
        printime('Reading input cube {}'.format(incubus))
        hdul_incubus = astropy_io_fits.open(incubus)
    else:
        hdul_incubus = incubus
    incubus_data = hdul_incubus[0].data
    # Reduce to 3 dims if necessary
    stokes = False
    if len(incubus_data.shape) == 4:
        stokes = True
        incubus_data = incubus_data[0, :]
    # Read mask and apply
    if not isinstance(mask, type(None)):
        if isinstance(mask, type('')):
            printime('Reading and applying mask {}'.format(mask))
            hdul_mask = astropy_io_fits.open(mask)
        else:
            hdul_mask = mask
        mask_data = hdul_mask[0].data
        # Reduce to 3 dims if necessary
        if len(mask_data.shape) == 4:
            mask_data = mask_data[0, :]
        # Create a masked cube; NaNs in the input are masked too
        incubus_data_masked = np.ma.masked_array(
            incubus_data, (mask_data > 0) + np.isnan(incubus_data))
        hdul_mask.close()
    else:
        incubus_data_masked = np.ma.masked_array(
            incubus_data, np.isnan(incubus_data))
    if fitmode == 'poly':
        if isinstance(polyorder, type(None)):
            polyorder = length
        # Flatten and fit: one polynomial per spatial pixel along axis 0
        incubus_data_flat = incubus_data_masked.reshape(
            (incubus_data.shape[0], incubus_data.shape[1]
             * incubus_data.shape[2]))
        x = np.ma.masked_array(np.arange(incubus_data.shape[0]), False)
        printime('Fitting polynomial of order {}'.format(polyorder))
        fitpars = np.array(
            np.flip(np.ma.polyfit(x, incubus_data_flat, polyorder)))
        printime('Creating continuum cube')
        fit = np.flip(np.polynomial.polynomial.polyval(
            np.flip(np.array(x), 0), fitpars).transpose()).reshape(
                (incubus_data.shape[0], incubus_data.shape[1],
                 incubus_data.shape[2]))
        # Make sure that the fit cube can be convolved: clip runaway
        # polynomial values to 10x the input data range
        if np.nanmax(incubus_data) > 0.:
            maxincube = np.nanmax(incubus_data) * 10.
        else:
            maxincube = 0
        if np.nanmin(incubus_data) < 0.:
            minincube = np.nanmin(incubus_data) * 10.
        else:
            minincube = 0.
        fit[fit > maxincube] = maxincube
        fit[fit < minincube] = minincube
        # To be completely sure
        fit[np.logical_not(np.isfinite(fit))] = 0.
    elif fitmode == 'median':
        if length == 0:
            printime('Length is 0, no median-filtering.')
        else:
            printime('Median-filtering cube')
            fit = scipy.ndimage.median_filter(
                incubus_data, (length, 1, 1))
    elif fitmode == 'savgol':
        if isinstance(polyorder, type(None)):
            polyorder = 0
        if length == 0:
            printime('Length is 0, no Savitzky-Golay-filtering.')
        else:
            printime('Savitzky-Golay-filtering cube (order {})'.format(polyorder))
            sgmask = np.ma.getmask(incubus_data_masked)
            sgincubus = incubus_data.copy()
            sgincubus[sgmask] = 0.0
            # First stitch holes in the data
            if sgiters > 0:
                fit = scipy.ndimage.median_filter(
                    sgincubus, (length, 1, 1))
                # Then iterate n times with better guesses for the
                # stitched data
                for i in range(sgiters):
                    print('Iteration {}'.format(i))
                    sgincubus = fit
                    fit = scipy_signal.savgol_filter(
                        sgincubus, length, polyorder, axis=0, mode='interp')
            else:
                fit = scipy_signal.savgol_filter(
                    sgincubus, length, polyorder, axis=0, mode='interp')
    else:
        printime('No valid filter chosen, not filtering.')
        fit = incubus_data_masked * 0.
    if not isinstance(fitted, type(None)):
        printime('Writing continuum cube')
        if stokes:
            hdul_incubus[0].data = fit.astype('float32').reshape(
                (1, fit.shape[0], fit.shape[1], fit.shape[2]))
        else:
            hdul_incubus[0].data = fit.astype('float32')
        hdul_incubus[0].header['DATAMAX'] = np.nanmax(hdul_incubus[0].data)
        # bug fix: DATAMIN was previously set with np.nanmax
        hdul_incubus[0].header['DATAMIN'] = np.nanmin(hdul_incubus[0].data)
        hdul_incubus.writeto(fitted, overwrite=clobber)
    if kersiz > 0:
        printime('Spatially convolving continuum cube')
        if kertyp == 'gauss':
            # kersiz is interpreted as FWHM; convert to sigma via
            # sqrt(ln(256)) = 2*sqrt(2 ln 2)
            kernel = scipy_signal.gaussian(
                int(10. * kersiz / np.sqrt(np.log(256.))) // 2 * 2 + 1,
                kersiz / np.sqrt(np.log(256.)))
        else:
            klength = int(10. * kersiz) // 2 * 2 + 1
            coordinates = np.arange(klength, dtype=int) - int(klength) // 2
            kernel = (np.fabs(coordinates) < (kersiz // 2 + 1))
        kernel = np.outer(
            kernel, kernel).reshape((1, kernel.size, kernel.size))
        kernel = np.repeat(kernel, fit.shape[0], axis=0)
        fitmask = np.isnan(fit)
        fit[fitmask] = 0.
        convolved = scipy_signal.fftconvolve(
            fit, kernel, mode='same', axes=(1, 2)) / kernel[0].sum()
        convolved[fitmask] = np.nan
    else:
        convolved = fit
    if not isinstance(confit, type(None)):
        printime('Writing convolved continuum cube')
        if stokes:
            hdul_incubus[0].data = convolved.astype('float32').reshape(
                (1, convolved.shape[0], convolved.shape[1],
                 convolved.shape[2]))
        else:
            hdul_incubus[0].data = convolved.astype('float32')
        hdul_incubus[0].header['DATAMAX'] = np.nanmax(hdul_incubus[0].data)
        # bug fix: DATAMIN was previously set with np.nanmax
        hdul_incubus[0].header['DATAMIN'] = np.nanmin(hdul_incubus[0].data)
        hdul_incubus.writeto(confit, overwrite=clobber)
    printime('Subtracting continuum.')
    subtracted = incubus_data - convolved
    if stokes:
        hdul_incubus[0].data = subtracted.astype('float32').reshape(
            (1, subtracted.shape[0], subtracted.shape[1], subtracted.shape[2]))
    else:
        hdul_incubus[0].data = subtracted.astype('float32')
    printime('Writing subtracted cube.')
    hdul_incubus[0].header['DATAMAX'] = np.nanmax(hdul_incubus[0].data)
    # bug fix: DATAMIN was previously set with np.nanmax
    hdul_incubus[0].header['DATAMIN'] = np.nanmin(hdul_incubus[0].data)
    hdul_incubus.writeto(outcubus, overwrite=clobber)
    hdul_incubus.close()
    now = datetime.now()
    printime(
        'Time elapsed: {:.1f} minutes'.format((now - begin).total_seconds() / 60.))
    print('')
def description():
    """
    Verbose description of the module.

    Fix: each string fragment now ends with a space so that adjacent
    fragments do not fuse words when concatenated (previously e.g.
    "velocityor frequency").
    """
    return textwrap.fill(
        'Takes input cube incubus, which is expected to have velocity '
        'or frequency along the third axis. A fourth axis is allowed but '
        'expected to have a length of 1 ("Stokes I"). The script fits a '
        'function or filter along the third axis, subtracts it from the '
        'original, and writes the result onto disk (outcubus). Possible is '
        "a polynomial fit (fitmode = 'poly') or a Savitzky-Golay filter. In "
        'case of the Savitzky-Golay filter the window length is given by the '
        'parameter length. The polynomial order of either polynomial or the '
        'filter is specified with the parameter polyorder. A Savitzky-Golay '
        'filter with polynomial order 0 is a median filter. Optionally a '
        'mask data cube with the same dimensions of the input data cube can '
        'be provided. Voxels for which the mask data cube is not equal to '
        'zero are ignored. For the polynomial fit the voxels are simply '
        'ignored. In case of the Savitzky Golay filter, an iterative '
        'process is started. All masked voxels are set to zero and a '
        'median filter is run along the frequency axis. After that the '
        'Savitzky-Golay filter is run sgiters times. If the parameter '
        'sgiters is set to 0, only one Savitzky-Golay filter is '
        'applied. After the fitting procedure, the fitted data cube can '
        'optionally be convolved in the spatial domain (axes 1 and 2) '
        'before it gets subtracted from the original cube. The type of the '
        "convolving kernel is given by the parameter kertyp ('gauss' or "
        "'tophat') and the size of the kernel in pixels is given by the "
        'parameter kersiz. With the parameter fitted the user can '
        'optionally supply the name of the output fitted data cube and with '
        'the parameter confit the user specifies the name of the fitted and '
        'convolved output data cube. The parameter clobber determines '
        'whether the output will be overwritten (if True).')
def parsing():
    """Parse command-line options and return them as a kwargs dict for
    imcontsub().

    String values are converted to Python objects via eval() where
    possible (so e.g. "3" becomes 3 and "None" becomes None); values
    that fail to evaluate are kept as plain strings.
    """
    if '-v' in sys.argv or '--verb' in sys.argv:
        epilog = description()
    else:
        # fix: the hint previously referred to 'equolver', a different tool
        epilog = 'Use \'image_contsub.py -h -v\' for verbose description.'
    parser = argparse.ArgumentParser(
        description='Continuum subtraction of a fits data cube',
        formatter_class=argparse.RawTextHelpFormatter,
        prog='image_contsub.py',
        usage='%(prog)s [options]', epilog=epilog,
        fromfile_prefix_chars='@',
        argument_default=argparse.SUPPRESS)
    # Common
    parser.add_argument(
        '--incubus', '-i', help='Name of input data cube.', type=str)
    parser.add_argument(
        '--outcubus', '-o',
        help='Name of continuum-subtracted output data cube', type=str)
    parser.add_argument(
        '--fitmode', '-f',
        help='Type of fit (\'poly\' or \'savgol\')', type=str)
    parser.add_argument(
        '--length', '-l',
        help='Length of the sliding window in channels', type=str)
    parser.add_argument(
        '--polyorder', '-p', help='Polynomial order', type=str)
    parser.add_argument(
        '--mask', '-m',
        help='Mask cube indicating regions to be excluded; excluded are voxels'
        ' where the mask cube is not 0', type=str)
    parser.add_argument(
        '--sgiters', '-s',
        help='Number of Savitzky-Golay filter iterations', type=int)
    parser.add_argument(
        '--kertyp', '-t',
        help="Kernel type to convolve the polynomial fit with ('gauss',"
        " 'tophat\')", type=str)
    parser.add_argument(
        '--kersiz', '-k',
        help='Kernel size to convolve the polynomial fit with (pixel)',
        type=str)
    parser.add_argument(
        '--fitted', help='Name of fitted continuum cube (optional output)',
        type=str)
    parser.add_argument(
        '--confit', help='Name of fitted and convolved continuum cube '
        '(optional output)', type=str)
    parser.add_argument(
        '--clobber', '-c', help='overwrite output if set', default=False,
        action='store_true')
    whatnot = parser.parse_args()
    inpars = vars(whatnot)
    # NOTE(review): eval on CLI-provided strings executes arbitrary
    # expressions; acceptable for a local tool, unsafe for untrusted input
    for key in list(inpars.keys()):
        try:
            result = eval(inpars[key])
        except Exception:
            result = inpars[key]
        inpars[key] = result
    return inpars
if __name__ == "__main__":
    # collect CLI arguments as a kwargs dict for imcontsub()
    kwargs = parsing()
    # if 'help'/'version' survived parsing, exit without running
    for argument in ['help', 'version']:
        if argument in kwargs.keys():
            sys.exit()
    imcontsub(**kwargs)
| 15,030 | 38.976064 | 83 | py |
caracal | caracal-master/caracal/workers/utils/manage_antennas.py | import json
import numpy as np
def get_refant(pipeline, recipe, prefix, msname, fields, min_baseline, max_dist, index):
    """Get reference antenna based on max distances to the array centre,
    min baseline length and amount of flagged data.

    Runs the 'cab/flagstats' task synchronously to produce a per-antenna
    flagging summary JSON, then selects core antennas (close to the array
    centre, all baselines >= min_baseline) and returns up to the three
    least-flagged ones as a comma-separated string of antenna names
    (empty string if none qualify).

    NOTE(review): the 'fields' and 'index' arguments are not used in this
    body -- possibly kept for call-signature compatibility; confirm with
    callers.
    """
    step = "antenna_flag_summary"
    filename = "{0:s}-flag-{1:s}.json".format(prefix, step)
    recipe.add('cab/flagstats', step, {
        "msname": msname,
        "outfile": filename
    },
        input=pipeline.input,
        output=pipeline.msdir,
        label='{0:s}:: Flagging summary ms={1:s}'.format(step, msname))
    # run immediately and clear the job queue so the JSON exists below
    recipe.run()
    recipe.jobs = []
    flag_stats = get_antenna_data(pipeline.msdir, filename)
    core_ants = _get_core_antennas(flag_stats, min_baseline, max_dist)
    # Sort antenna by increasing flag data percentage
    sorted_ants = sorted(core_ants.items(), key=lambda x: x[1])
    ref_ants = _prioritised_antennas(sorted_ants)
    return ref_ants
def get_antenna_data(directory, filename):
    """Extract antenna data from the json summary file.

    Args:
        directory: directory containing the summary file
        filename: name of the JSON summary file
    Returns:
        the parsed JSON content as a dict
    """
    # fix: the filename argument was dropped from the path
    with open(f"{directory}/{filename}") as f:
        flag_stats = json.load(f)
    return flag_stats
def _prioritised_antennas(sorted_ants):
"""Get top 1,2 or 3 antennas with minimum flags"""
if len(sorted_ants) > 2:
ref_ants_info = sorted_ants[:3]
elif len(sorted_ants) > 1:
ref_ants_info = sorted_ants[:2]
elif len(sorted_ants) > 0:
ref_ants_info = sorted_ants[:1]
else:
return ''
ref_ants = [ref_ant[1][0] for ref_ant in ref_ants_info]
return ','.join(ref_ants)
def _get_core_antennas(flag_stats, min_baseline, max_dist):
    """Select antennas within max_dist of the array centre whose baselines
    to every other antenna are all at least min_baseline long.

    Returns a dict {antenna_id: (name, flagged_fraction, position,
    array_centre_dist)} for the antennas passing both cuts.
    """
    antenna_stats = flag_stats['Flag stats'][1]['antennas']
    # first cut: distance from the array centre
    near_centre = {}
    for ant_id, info in antenna_stats.items():
        if info['array_centre_dist'] <= max_dist:
            near_centre[ant_id] = (info['name'], info['frac'],
                                   info['position'], info['array_centre_dist'])
    # second cut: every baseline to any other antenna must be long enough
    selected = {}
    for ant_id, summary in near_centre.items():
        lengths = _baseline_calculator(antenna_stats, ant_id)
        if all(length >= min_baseline for length in lengths):
            selected[ant_id] = summary
    return selected
def _baseline_calculator(flag_stats, ant_id):
"""Get list of baseline lengths for ant_id"""
def distance(xyz1, xyz2):
"""Distance between two points in a three dimension coordinate system"""
x = xyz2[0] - xyz1[0]
y = xyz2[1] - xyz1[1]
z = xyz2[2] - xyz1[2]
d2 = (x * x) + (y * y) + (z * z)
d = np.sqrt(d2)
return d
baselines = []
ant1_pos = flag_stats[ant_id]['position']
for i, ant in flag_stats.items():
if i != ant_id:
baselines.append(distance(ant1_pos, ant['position']))
return baselines
| 2,984 | 33.310345 | 88 | py |
caracal | caracal-master/caracal/dispatch_crew/stream_director.py | import sys
import logging
from io import StringIO
class stream_director(object):
    """Context manager that redirects sys.stdout/sys.stderr into a logger.

    While active, stdout lines are logged at INFO and stderr lines at
    CRITICAL; lines that already carry a caracal log tag are skipped to
    avoid recursive re-logging.
    """
    def __init__(self, logger, log_level=logging.INFO):
        # NOTE(review): log_level is not used below -- the per-stream levels
        # are hard-wired to INFO (stdout) and CRITICAL (stderr)
        class stream_logger(StringIO):
            # StringIO stand-in for a text stream that mirrors every
            # written line into `logger` at `log_level`.
            def __init__(self, logger, log_level, fileno=0, *args, **kwargs):
                self.logger = logger
                self.log_level = log_level
                self.__fileno = fileno
                StringIO.__init__(self, *args, **kwargs)
            @property
            def fileno(self):
                # file-like consumers call stream.fileno(); a property
                # returning a callable makes that call work
                return lambda: self.__fileno
            def is_not_log(self, line):
                # avoid recursive writeout by checking for a tag
                return line.find("caracal -") < 0 and line.find("INFO -") < 0 and \
                    line.find("ERROR -") < 0 and line.find("WARNING -") < 0 and \
                    line.find("DEBUG -") < 0 and line.find("CRITICAL -") < 0 and \
                    line.find("INFO:caracal") < 0 and line.find("ERROR:caracal") < 0 and \
                    line.find("WARNING:caracal") < 0 and line.find("DEBUG:caracal") < 0 and \
                    line.find("CRITICAL:caracal") < 0
            def writelines(self, lines):
                # keep the buffered copy, then forward non-log lines
                StringIO.writelines(self, lines)
                for line in lines:
                    if self.is_not_log(line):
                        self.logger.log(self.log_level, line.rstrip())
            def write(self, buf):
                # keep the buffered copy, then forward non-log lines
                StringIO.write(self, buf)
                for line in buf.rstrip().splitlines():
                    if self.is_not_log(line):
                        self.logger.log(self.log_level, line.rstrip())
        self.__stdout_logger = stream_logger(
            logger, logging.INFO, fileno=sys.stdout.fileno())
        self.__stderr_logger = stream_logger(
            logger, logging.CRITICAL, fileno=sys.stderr.fileno())
    def __enter__(self):
        """Swap in the logging streams, flushing the originals first."""
        self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
        self.old_stdout.flush()
        self.old_stderr.flush()
        sys.stdout = self.__stdout_logger
        sys.stderr = self.__stderr_logger
    def __exit__(self, exc_type, exc_value, traceback):
        """Restore the original stdout/stderr streams."""
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr
| 2,210 | 37.789474 | 93 | py |
caracal | caracal-master/caracal/dispatch_crew/caltables.py | import caracal.dispatch_crew.catalog_parser as cp
import caracal
import os
# Path to the bundled Southern-standard calibrator catalogue
__DB_FILENAME = os.path.join(
    caracal.pckgdir, "data/southern_calibrators.txt")
# Path to the bundled CASA standard calibrator catalogue
__DB_CASA_FILENAME = os.path.join(
    caracal.pckgdir, "data/casa_calibrators.txt")
# Lazily-loaded catalogue singletons, populated on first use
__CALIBRATOR_DB = None
__CASA_CALIBRATOR_DB = None
def calibrator_database():
    """ Return the Southern standard calibrator database """
    global __CALIBRATOR_DB
    # lazy singleton: parse the catalogue file only on first request
    # (there isn't a Southern standard in CASA, so a local database is kept)
    if __CALIBRATOR_DB is None:
        caracal.log.info("Obtaining divine knowledge from %s" % __DB_FILENAME)
        __CALIBRATOR_DB = cp.catalog_parser(__DB_FILENAME)
    return __CALIBRATOR_DB
def casa_calibrator_database():
    """ Return the CASA standard calibrator database """
    global __CASA_CALIBRATOR_DB
    # lazy singleton, same pattern as calibrator_database()
    if __CASA_CALIBRATOR_DB is None:
        caracal.log.info("Obtaining divine knowledge from %s" % __DB_CASA_FILENAME)
        __CASA_CALIBRATOR_DB = cp.catalog_parser(__DB_CASA_FILENAME)
    return __CASA_CALIBRATOR_DB
| 1,272 | 27.931818 | 79 | py |
caracal | caracal-master/caracal/dispatch_crew/worker_help.py | import yaml
import sys
from argparse import ArgumentParser
class worker_options(object):
    """Builds and prints an argparse-based help screen for a worker from
    its (YAML-derived) schema dict."""
    def __init__(self, name, worker_dict, parser=None):
        """
        Prints out help for a worker
        """
        self.worker = name
        self.desc = worker_dict["desc"]
        # reuse a caller-supplied parser, else make one titled "worker: desc"
        self.parser = parser or ArgumentParser(
            "{0:s}: {1:s}".format(self.worker, self.desc))
        self.worker_dict = worker_dict
    def traverse_worker(self, section, lineage=None):
        """
        Recursively add options to worker help
        """
        if section["type"] == "map":
            for name in section["mapping"]:
                segment = section["mapping"][name]
                # Find segment lineage
                if lineage is None:
                    _lineage = name
                else:
                    _lineage = "{0:s}-{1:s}".format(lineage, name.replace("-", "_"))
                # send back if its a mapping
                if segment.get("type", False) == "map":
                    self.traverse_worker(segment, _lineage)
                    continue
                args = {}
                dtype = None
                if "seq" in segment:
                    # list-valued option: repeated flags accumulate values
                    args["action"] = "append"
                    dtype = segment["seq"][0]["type"]
                    ptype = "list:" + dtype
                elif segment["type"] not in ["bool"]:
                    # NOTE(review): eval turns a schema type name (e.g. "int")
                    # into the builtin; dtype is always None on this branch
                    args["type"] = eval(dtype or segment["type"])
                    if "enum" in segment:
                        args["choices"] = segment["enum"]
                    ptype = segment["type"]
                else:
                    args["action"] = "store_true"
                    ptype = "bool"
                # escape % so argparse's help formatter doesn't choke
                desc = segment.get("desc",
                                   "!!! option %s missing schema description. Please file this bug !!!" % name).replace("%", "%%")
                desc = desc + " [type: %s]" % ptype
                self.parser.add_argument(
                    "--{0:s}".format(_lineage), help=desc, **args)
        else:
            # non-map sections carry no options
            return
    def print_worker(self):
        """
        Print worker options
        """
        self.traverse_worker(section=self.worker_dict, lineage=self.worker)
        # parse_args(["--help"]) prints the help screen and exits
        self.parser.parse_args(["--help"])
| 2,276 | 34.578125 | 130 | py |
caracal | caracal-master/caracal/dispatch_crew/noisy.py | ######################
### IMPORT MODULES ###
######################
import numpy as np
import pyrap.tables as tables
import sys
import os
import caracal
########################
### DEFINE FUNCTIONS ###
########################
# Get Tsys/eff (possibly from file)
def GetTsyseff(tsyseff):
    """Resolve the Tsys/eff argument.

    If *tsyseff* names an existing file, load it as a table and return
    (filename, array); otherwise interpret it as a frequency-independent
    float and return (None, value). Exits the process if neither works.
    """
    if os.path.exists(tsyseff):
        caracal.log.info(' ( Tsys/eff from file {0:s} )'.format(tsyseff))
        return tsyseff, np.loadtxt(tsyseff)
    try:
        value = float(tsyseff)
    except ValueError:
        caracal.log.info('')
        caracal.log.info(' CATASTROPHE!')
        caracal.log.info(' You set Tsys/eff = {0:s}'.format(tsyseff))
        caracal.log.info(' This is either a file that cannot be found or a value that cannot be converted to float')
        caracal.log.info(' Correct any mistakes and try again')
        caracal.log.info(' Aborting ...')
        sys.exit()
    return None, value
# Interpolate input Tsys/eff table to observed frequencies
def InterpolateTsyseff(tsyseff, chans):
    """Interpolate a (frequency, Tsys/eff) table onto the observed channel
    frequencies; returns a flat array matching np.ravel(chans)."""
    caracal.log.info('Interpolating Tsys/eff table to observed frequencies ...')
    frequencies = np.ravel(chans)
    return np.interp(frequencies, tsyseff[:, 0], tsyseff[:, 1])
# Get single-MS flags, intervals, channel widths, channel frequencies and calculate natural rms (ignoring flags)
def ProcessSingleMS(ms, kB, tsyseff, tsyseffFile, Aant, selectFieldName, verbose=0):
    """Read one MS and compute its theoretical natural-weighting noise.

    Args:
        ms: path to the measurement set
        kB: Boltzmann constant in Jy m^2 / K
        tsyseff: Tsys/efficiency -- float or (freq, value) table
        tsyseffFile: table filename, or None if tsyseff is a float
        Aant: collecting area of one antenna (m^2)
        selectFieldName: field name to select, or falsy for all fields
        verbose: verbosity level (>1 prints diagnostics)
    Returns:
        (flag, interval, channelWidths, channelFreqs, rms) -- flags and
        intervals of the selected crosscorrelation rows, per-SPW channel
        widths/frequencies, and the per-channel natural rms ignoring flags.
    """
    if verbose > 1:
        caracal.log.info('  Processing MS file {0:s}'.format(ms))
    t = tables.table(ms, ack=False)
    fieldIDs = t.getcol('FIELD_ID')
    ant1 = t.getcol('ANTENNA1')
    ant2 = t.getcol('ANTENNA2')
    fieldNames = tables.table(ms + '/FIELD', ack=False).getcol('NAME')
    spw = tables.table(ms + '/SPECTRAL_WINDOW', ack=False)
    channelWidths = spw.getcol('CHAN_WIDTH')
    channelFreqs = spw.getcol('CHAN_FREQ')
    # CASA Stokes enumeration: index in this list == CORR_TYPE code
    stokesdef = 'Undefined,I,Q,U,V,RR,RL,LR,LL,XX,XY,YX,YY,RX,RY,LX,LY,XR,XL,YR,YL,PP,PQ,QP,QQ,RCircular,LCircular,Linear,Ptotal,Plinear,PFtotal,PFlinear,Pangle'.split(',')
    corrs = [stokesdef[cc] for cc in tables.table(ms + '/POLARIZATION', ack=False).getcol('CORR_TYPE')[0]]  # taking the correlations of the first SPW
    if selectFieldName:
        try:
            selectFieldID = fieldNames.index(selectFieldName)
        except ValueError:
            caracal.log.info(' CATASTROPHE!')
            caracal.log.info(' Cannot find the field you want to process, {0:s}'.format(selectFieldName))
            caracal.log.info(' Available fields are {0:}'.format(fieldNames))
            caracal.log.info(' Aborting ...')
            sys.exit()
        if verbose > 1:
            caracal.log.info(' Successfully selected Field with name {0:s} (Field ID = {1:d})'.format(selectFieldName, selectFieldID))
        selection = fieldIDs == selectFieldID
    else:
        if verbose > 1:
            caracal.log.info(' Will process all available fields: {0:}'.format(fieldNames))
        # trivially-true mask over all rows
        selection = fieldIDs >= fieldIDs.min()
    autoCorr = ant1 == ant2
    if verbose > 1:
        if autoCorr.sum():
            caracal.log.info(' Successfully selected crosscorrelations only')
        else:
            caracal.log.info(' Found crosscorrelations only')
    # drop autocorrelations from the selection
    selection *= ant1 != ant2
    nrAnt = np.unique(np.concatenate((ant1, ant2))).shape[0]
    nrBaseline = nrAnt * (nrAnt - 1) // 2
    if verbose > 1:
        caracal.log.info(' Number of antennas  = {0:d}'.format(nrAnt))
        caracal.log.info(' Number of baselines = {0:d}'.format(nrBaseline))
        caracal.log.info(' Frequency coverage = {0:.5e} Hz - {1:.5e} Hz'.format(channelFreqs.min(), channelFreqs.max()))
        if np.unique(channelWidths).shape[0] == 1:
            caracal.log.info(' Channel width = {0:.5e} Hz'.format(np.unique(channelWidths)[0]))
        else:
            caracal.log.info(' The channel width takes the following unique values: {0:} Hz'.format(np.unique(channelWidths)))
    if verbose > 1:
        caracal.log.info(' Loading flags and intervals ...')
    flag = t.getcol('FLAG')[selection]  # flagged data have flag = True
    # select Stokes I-related corrs
    cc = 0
    while cc < len(corrs):
        if corrs[cc] not in 'I,RR,LL,XX,YY,'.split(','):
            if verbose > 1:
                caracal.log.info(' Discarding correlation {0:s} for predicting the Stokes I noise'.format(corrs[cc]))
            flag = np.delete(flag, cc, axis=2)
            del (corrs[cc])
        else:
            cc += 1
    if verbose > 1:
        caracal.log.info(' Retained correlations {0:}'.format(corrs))
    interval = t.getcol('INTERVAL')[selection]
    if verbose > 1:
        if np.unique(interval).shape[0] == 1:
            caracal.log.info(' Interval = {0:.5e} sec'.format(np.unique(interval)[0]))
        else:
            caracal.log.info(' The interval takes the following unique values: {0:} sec'.format(np.unique(interval)))
    t.close()
    if verbose > 1:
        caracal.log.info(' The *flag* array has shape (Nr_integrations, Nr_channels, Nr_polarisations) = {0:}'.format(flag.shape))
        caracal.log.info(' The *interval* array has shape (Nr_integrations) = {0:}'.format(interval.shape))
        caracal.log.info(' The *channel* width array has shape (-, Nr_channels) = {0:}'.format(channelWidths.shape))
    if verbose > 1:
        caracal.log.info(' Total Integration on selected field(s) = {0:.2f} h ({1:d} polarisations)'.format(interval.sum() / nrBaseline / 3600, flag.shape[2]))
    # radiometer equation: rms = sqrt(2) kB Tsys/eff / A / sqrt(dnu * t * npol)
    if tsyseffFile is not None:
        rms = np.sqrt(2) * kB * InterpolateTsyseff(tsyseff, channelFreqs) / Aant / np.sqrt(channelWidths * interval.sum() * flag.shape[2])
    else:
        rms = np.sqrt(2) * kB * tsyseff / Aant / np.sqrt(channelWidths * interval.sum() * flag.shape[2])
    # collapse a single-SPW (1, nchan) result into a flat array
    if len(rms.shape) == 2 and rms.shape[0] == 1:
        rms = rms[0]
    if verbose > 1:
        caracal.log.info(' SINGLE MS median natural noise ignoring flags = {0:.3e} Jy/beam'.format(np.nanmedian(rms)))
    return flag, interval, channelWidths, channelFreqs, rms
# Predict natural rms for an arbitrary number of MS files (both ignoring and applying flags)
def PredictNoise(MS, tsyseff, diam, selectFieldName, verbose=0):
    """Predict the joint theoretical natural noise of one or more MS files.

    Args:
        MS: list of measurement-set paths (must share channelisation)
        tsyseff: Tsys/efficiency -- float, or filename of a (freq, value) table
        diam: antenna dish diameter (m)
        selectFieldName: field name to select, or falsy for all fields
        verbose: verbosity level (>=1 prints the resulting noise figures)
    """
    # Get Tsys/eff either from table (col1 = frequency, col2 = Tsys/eff) or as a float values (frequency independent Tsys/eff value)
    tsyseffFile, tsyseff = GetTsyseff(tsyseff)
    # Derive quantities
    kB = 1380.6  # Boltzmann constant (Jy m^2 / K)
    Aant = np.pi * (diam / 2)**2  # collecting area of 1 antenna (m^2)
    if tsyseffFile is None:
        SEFD = 2 * kB * tsyseff / Aant  # frequency independent system equivalent flux density (Jy)
    else:
        SEFD = 2 * kB * np.median(tsyseff[:, 1]) / Aant  # median system equivalent flux density (Jy)
    # NOTE(review): SEFD is computed but not used further in this body
    # Read MS files to get the flags and calculate single-MS natural rms values (ignoring flags)
    # Start with first file ...
    flag0, interval0, channelWidths0, channelFreqs0, rms0 = ProcessSingleMS(MS[0], kB, tsyseff, tsyseffFile, Aant, selectFieldName, verbose=verbose)
    rmsAll = [rms0]
    # ... and do the same for all other MS's appending to the flag array, checking that the channelisation is the same
    for ii in range(1, len(MS)):
        flagi, intervali, channelWidthsi, channelFreqsi, rmsi = ProcessSingleMS(MS[ii], kB, tsyseff, tsyseffFile, Aant, selectFieldName, verbose=verbose)
        if channelWidths0.shape != channelWidthsi.shape or (channelWidths0 != channelWidthsi).sum() or (channelFreqs0 != channelFreqsi).sum():
            caracal.log.info('')
            caracal.log.info(' CATASTROPHE!')
            # NOTE(review): the format string uses {1}/{2}; the first
            # argument (ii) is never substituted
            caracal.log.info(' The input .MS file {1:s} has different channelization than the first input .MS file {2:s}'.format(ii, MS[ii], MS[0]))
            caracal.log.info(' Cannot combine files to estimate their joint theoretical noise')
            caracal.log.info(' Aborting ...')
            sys.exit()
        else:
            flag0 = np.concatenate((flag0, flagi), axis=0)
            interval0 = np.concatenate((interval0, intervali), axis=0)
            rmsAll.append(rmsi)
    # Message concatenated files
    if verbose > 1 and len(MS) > 1:
        caracal.log.info(' Concatenating all {0:d} MS files ...'.format(len(MS)))
        caracal.log.info(' The concatenated *flag* array has shape (Nr_integrations, Nr_channels, Nr_polarisations) = {0:}'.format(flag0.shape))
        caracal.log.info(' The concatenated *interval* array has shape (Nr_integrations) = {0:}'.format(interval0.shape))
        caracal.log.info(' The concatenated *channel* width array has shape (-, Nr_channels) = {0:}'.format(channelWidths0.shape))
    # Reshape arrays
    if verbose > 1 and len(MS) > 1:
        caracal.log.info(' Reshaping arrays ...')
    # interval broadcastable against (nint, nchan, npol); widths/freqs flat
    interval0.resize((interval0.shape[0], 1, 1))
    channelWidths0.resize((channelWidths0.shape[1]))
    channelFreqs0.resize((channelFreqs0.shape[1]))
    # Interpolate Tsys
    if tsyseffFile is not None:
        tsyseff = InterpolateTsyseff(tsyseff, channelFreqs0)
    # Calculate theoretical natural rms: combine per-MS values in quadrature
    rmsAll = np.array(rmsAll)
    rmsAll = 1. / np.sqrt((1. / rmsAll**2).sum(axis=0))
    unflaggedIntegration = (interval0 * (1 - flag0.astype(int))).sum(axis=(0, 2))  # total integration per channel adding up all UNFLAGGED integrations and polarisations (sec)
    # fully-flagged channels become NaN rather than dividing by zero
    unflaggedIntegration[unflaggedIntegration == 0] = np.nan
    rmsUnflagged = np.sqrt(2) * kB * tsyseff / Aant / np.sqrt(channelWidths0 * unflaggedIntegration)
    if verbose >= 1:
        caracal.log.info(' Natural noise ignoring flags: median = {0:.3e} Jy/beam, range = ({1:.3e} - {2:.3e}) Jy/beam'.format(np.nanmedian(rmsAll), np.nanmin(rmsAll), np.nanmax(rmsAll)))
        if not (~np.isnan(unflaggedIntegration)).sum():
            caracal.log.info('')
            caracal.log.info(' Natural noise applying flags: N/A, all data are flagged!')
        else:
            caracal.log.info(' Natural noise applying flags: median = {0:.3e} Jy/beam, range = ({1:.3e} - {2:.3e}) Jy/beam'.format(np.nanmedian(rmsUnflagged), np.nanmin(rmsUnflagged), np.nanmax(rmsUnflagged)))
| 10,329 | 49.390244 | 212 | py |
caracal | caracal-master/caracal/dispatch_crew/utils.py | import ruamel.yaml
import numpy
import yaml
import caracal
import caracal.dispatch_crew.caltables as mkct
import re
import codecs
from caracal.utils.requires import extras
def angular_dist_pos_angle(ra1, dec1, ra2, dec2):
    """Return the angular separation between two sky positions and the
    position angle (North through East) of the direction from point 1 to
    point 2. All inputs and outputs are in radians."""
    # Adapted from ska-sa/tigger
    delta_ra = ra2 - ra1
    sin_d1, cos_d1 = numpy.sin(dec1), numpy.cos(dec1)
    sin_d2, cos_d2 = numpy.sin(dec2), numpy.cos(dec2)
    sin_a = numpy.sin(delta_ra) * cos_d2
    cos_a = numpy.cos(delta_ra) * cos_d2
    # Cartesian components of point 2 in a frame aligned with point 1
    x = cos_a * sin_d1 - sin_d2 * cos_d1
    y = sin_a
    z = cos_a * cos_d1 + sin_d2 * sin_d1
    return numpy.arccos(z), numpy.arctan2(y, -x)
def categorize_fields(info):
    """Group field names by observation intent.

    ``info`` is either an obsinfo dict or the path to an obsinfo YAML file.
    Returns a dict mapping category ('fcal', 'gcal', 'bpcal', 'target',
    'xcal') to a pair (recognized intent strings, matching field names).
    """
    if isinstance(info, str):
        with open(info, 'r') as f:
            info = ruamel.yaml.load(f, ruamel.yaml.RoundTripLoader)
    field_info = info['FIELD']
    names = field_info['NAME']
    ids = field_info['SOURCE_ID']
    intents = field_info['INTENTS']
    intent_ids = field_info['STATE_ID']
    mapping = {
        'fcal': (['CALIBRATE_FLUX'], []),
        'gcal': (['CALIBRATE_AMPL', 'CALIBRATE_PHASE'], []),
        'bpcal': (['CALIBRATE_BANDPASS'], []),
        'target': (['TARGET'], []),
        'xcal': (['CALIBRATE_POLARIZATION'], [])
    }
    if intents:
        for position, field in enumerate(names):
            for raw_intent in intents[intent_ids[position]].split(','):
                # Anything after '#' is not useful for categorization
                # (see Issue 1130), so only the prefix is matched.
                intent = raw_intent.split("#")[0]
                for category, (known, matched) in mapping.items():
                    if intent in known:
                        matched.append(field)
    return mapping
def get_field_id(info, field_name):
    """Map field name(s) to their positions in the obsinfo FIELD/NAME list.

    ``info`` is an obsinfo dict or a path to an obsinfo YAML file;
    ``field_name`` is a comma-separated string or a list of names.
    Returns a list of integer indices; raises KeyError for unknown names
    and ValueError for an unsupported ``field_name`` type.
    """
    if not isinstance(field_name, (str, list)):
        raise ValueError(
            "field_name argument must be comma-separated string or list")
    if isinstance(info, str):
        with open(info, 'r') as f:
            info = ruamel.yaml.load(f, ruamel.yaml.RoundTripLoader)
    names = info['FIELD']['NAME']
    ids = info['FIELD']['SOURCE_ID']
    requested = field_name.split(",") if isinstance(field_name, str) else field_name
    results = []
    for fn in requested:
        if fn not in names:
            raise KeyError("Could not find field '{0:s}' in the field list {1:}".format(fn, names))
        results.append(names.index(fn))
    return results
def select_gcal(info, targets, calibrators, mode='nearest'):
    """
    Automatically select gain calibrator

    Parameters:
        info: obsinfo dict, or path to an obsinfo YAML file
        targets: list of target field names (or integer source ids)
        calibrators: list of candidate calibrator names (or integer source ids)
        mode: 'most_scans' picks the candidate with the most scans;
              'nearest' (default) picks the candidate closest to the mean
              position of the targets.

    Returns the name of the selected calibrator (None if no candidate wins).
    """
    if isinstance(info, str):
        with open(info, 'r') as f:
            info = ruamel.yaml.load(f, ruamel.yaml.RoundTripLoader)
    names = info['FIELD']['NAME']
    ids = info['FIELD']['SOURCE_ID']
    dirs = info['FIELD']['REFERENCE_DIR']

    def index(field):
        # Accept a field name or an integer source id.
        if isinstance(field, str):
            idx = names.index(field)
        elif isinstance(field, int):
            idx = ids.index(field)
        return idx

    if mode == 'most_scans':
        most_scans = 0
        gcal = None
        for fid in calibrators:
            idx = index(fid)
            # Fix: `ids` is a list and must be indexed, not called
            # (previously `str(ids(idx))`, a TypeError at runtime).
            field = str(ids[idx])
            if most_scans < len(info['SCAN'][field]):
                most_scans = len(info['SCAN'][field])
                gcal = names[idx]
    elif mode == 'nearest':
        # Mean position of all targets...
        tras = []
        tdecs = []
        for target in targets:
            idx = index(target)
            tras.append(dirs[idx][0][0])
            tdecs.append(dirs[idx][0][1])
        mean_ra = numpy.mean(tras)
        mean_dec = numpy.mean(tdecs)
        # ...then the candidate with the smallest angular distance to it.
        nearest_dist = numpy.inf
        gcal = None
        for field in calibrators:
            idx = index(field)
            ra = dirs[idx][0][0]
            dec = dirs[idx][0][1]
            distance = angular_dist_pos_angle(mean_ra, mean_dec, ra, dec)[0]
            if nearest_dist > distance:
                nearest_dist = distance
                gcal = names[idx]
    return gcal
def observed_longest(info, bpcals):
    """Pick, from ``bpcals``, the calibrator with the longest total scan time.

    ``info`` is an obsinfo dict or a path to an obsinfo YAML file; the
    candidates may be given as field names or integer source ids. Returns
    the field name of the longest-observed candidate (None if none found).
    """
    if isinstance(info, str):
        with open(info, 'r') as f:
            info = ruamel.yaml.load(f, ruamel.yaml.RoundTripLoader)
    names = info['FIELD']['NAME']
    ids = info['FIELD']['SOURCE_ID']
    dirs = info['FIELD']['REFERENCE_DIR']

    def index(field):
        # Accept a field name or an integer source id.
        if isinstance(field, str):
            return names.index(field)
        elif isinstance(field, int):
            return ids.index(field)

    best_name = None
    best_time = 0
    for candidate in bpcals:
        idx = index(candidate)
        # Scans are keyed by the stringified source id.
        scans = info['SCAN'][str(ids[idx])]
        total_time = numpy.sum(list(scans.values()))
        if total_time > best_time:
            best_time = total_time
            best_name = names[idx]
    return best_name
def field_observation_length(info, field):
    """Return the total scan time (seconds) of ``field``, which may be a
    field name or an integer source id. ``info`` is an obsinfo dict or a
    path to an obsinfo YAML file."""
    if isinstance(info, str):
        with open(info, 'r') as f:
            info = ruamel.yaml.load(f, ruamel.yaml.RoundTripLoader)
    names = info['FIELD']['NAME']
    ids = info['FIELD']['SOURCE_ID']

    def index(fld):
        # Accept a field name or an integer source id.
        if isinstance(fld, str):
            return names.index(fld)
        elif isinstance(fld, int):
            return ids.index(fld)
        raise ValueError("Field cannot be a {0:s}".format(type(fld)))

    # Scans are keyed by the stringified source id.
    key = str(ids[index(field)])
    return numpy.sum(list(info['SCAN'][key].values()))
def closeby(radec_1, radec_2, tol=2.9E-3):
    """
    Rough estimate whether two points on celestial sphere are closeby

    Parameters:
        radec_1 (pair of float): Right ascension and Declination of point 1 in rad
        radec_2 (pair of float): Right ascension and Declination of point 2 in rad
        tol: Tolerance in rad (default: 10 arcmin)
    """
    # Small-angle approximation: the RA offset must be scaled by the cosine
    # of the (mean) declination to become a true angular offset. The previous
    # code scaled by cos(delta_RA / 2) instead, which is ~1 for nearby points
    # and therefore overestimated RA separations away from the equator.
    dra = (radec_1[0] - radec_2[0]) * numpy.cos((radec_1[1] + radec_2[1]) / 2)
    ddec = radec_1[1] - radec_2[1]
    return bool(dra ** 2 + ddec ** 2 < tol ** 2)
def hetfield(info, field, db, tol=2.9E-3):
    """
    Find match of fields in info

    Parameters:
        info (dict): dictionary of obsinfo as read by yaml
        field (str): field name
        db (dict): calibrator data base as returned by
                   calibrator_database()
        tol: positional match tolerance in rad

    Go through all calibrators in db and return the first whose position
    matches the coordinates of ``field`` in the obsinfo. Return False if
    no calibrator matches.
    """
    # Position of the requested field in the obsinfo
    idx = info['FIELD']['NAME'].index(field)
    field_radec = info['FIELD']['DELAY_DIR'][idx][0]
    # Normalise RA into [0, 2*pi) before comparing
    field_radec[0] = numpy.mod(field_radec[0], 2 * numpy.pi)
    for cal_name, cal in db.db.items():
        if closeby([cal['ra'], cal['decl']], field_radec, tol=tol):
            return cal_name
    return False
def find_in_native_calibrators(info, field, mode='both'):
    """Check if field is in the South Calibrators database.
    Return lsm if an lsm is available, a crystalball model if specified
    and available, or the spectral model otherwise.
    Otherwise, return False.

    Parameters:
        info: obsinfo dict or path to an obsinfo YAML file
        field (str): field name to look up
        mode: which kind of result may be returned: 'sky' (lsm file),
              'mod' (spectral model dict), 'crystal' (crystalball model)
              or 'both' (any of the above, in that order of preference)
    """
    if isinstance(info, str):
        with open(info, 'r') as f:
            info = ruamel.yaml.load(f, ruamel.yaml.RoundTripLoader)
    # Which result types may be returned for the requested mode
    returnsky = mode in ('both', 'sky')
    returnmod = mode in ('both', 'mod')
    returncrystal = mode in ('both', 'crystal')
    db = mkct.calibrator_database()
    fielddb = hetfield(info, field, db)
    if not fielddb:
        return False
    # NOTE: the previous version also read SPW REF_FREQUENCY/BANDWIDTH/NUM_CHAN
    # and the *_ghz coefficients into locals that were never used; that dead
    # code has been removed.
    src = db.db[fielddb]
    if "lsm" in src and returnsky:
        return src["lsm"]
    if "crystal" in src and returncrystal:
        return src["crystal"]
    elif returnmod:
        # CASA-convention spectral model about reference frequency v0
        return dict(I=src['S_v0'],
                    a=src['a_casa'],
                    b=src['b_casa'],
                    c=src['c_casa'],
                    d=src['d_casa'],
                    ref=src['v0'])
    else:
        return False
def find_in_casa_calibrators(info, field):
    """Check if field is in the CASA NRAO Calibrators database.
    Return the name of its first flux standard if it is. Else, return False.
    """
    if isinstance(info, str):
        with open(info, 'r') as f:
            info = ruamel.yaml.load(f, ruamel.yaml.RoundTripLoader)
    with open(caracal.pckgdir + '/data/casa_calibrators.yml') as stdrb:
        db = yaml.safe_load(stdrb)
    dbc = mkct.casa_calibrator_database()
    # Identify field with a standard name
    field_dbc = hetfield(info, field, dbc)
    if not field_dbc:
        return False
    standards = None
    for src in list(db['models'].values()):
        if field_dbc == src['3C']:
            standards = src['standards']
            break
    # Fix: previously `standards` was left unbound (NameError) when the
    # matched calibrator had no entry in the models table; treat that as
    # "not found" instead.
    if standards is None:
        return False
    standard = standards.split(',')[0]
    return db['standards'][int(standard)]
def meerkat_refant(obsinfo):
    """Get the reference antenna from an obsinfo YAML file. Only works for
    MeerKAT observations downloaded through CARACal."""
    with open(obsinfo) as stream:
        return yaml.safe_load(stream)['RefAntenna']
@extras("astropy")
def estimate_solints(msinfo, skymodel, Tsys_eta, dish_diameter, npol, gain_tol=0.05, j=3, save=False):
import astropy.io.fits as fitsio
if isinstance(skymodel, str):
skymodel = [skymodel]
flux = 0
for name in skymodel:
with fitsio.open(name) as hdu:
model = hdu[1].data
# Get total flux from model
flux += model['Total_flux'].sum()
# Get number of antennas
with open(msinfo, 'r') as f:
info = ruamel.yaml.load(f, ruamel.yaml.RoundTripLoader)
nant = len(info['ANT']['NAME'])
# Get time and frequency resoltion of data
dtime = info['EXPOSURE']
bw = sum(info['SPW']['TOTAL_BANDWIDTH'])
nchans = sum(info['SPW']['NUM_CHAN'])
dfreq = bw / nchans
k_b = 1.38e-23 # Boltzman's constant
Jy = 1e-26 # 1 Jansky
# estimate noise needed for a gain error of 'gain_tol' using Sandeep Sirothia's Equation (priv comm).
visnoise = flux * numpy.sqrt(nant - j) * gain_tol
# calculate dt*df (solution intervals) needed to get that noise
effective_area = numpy.pi * (dish_diameter / 2.0)**2
dt_dfreq = (2 * k_b * Tsys_eta / (Jy * numpy.sqrt(npol)
* effective_area * visnoise))**2
# return/save dt*df and the time, frequency resolution of the data
if save:
with codecs.open(msinfo, 'w', 'utf8') as yw:
info['DTDF'] = dt_dfreq
yaml.dump(info, yw, default_flow_style=False)
return dt_dfreq, dtime, dfreq
def imaging_params(info, spwid=0):
    """Return (max_res, FoV) in degrees for spectral window ``spwid``.

    max_res is the diffraction-limited resolution from the longest baseline
    and FoV the primary-beam field of view from the mean dish diameter.
    ``info`` is an obsinfo dict or a path to an obsinfo YAML file.
    """
    if isinstance(info, str):
        with open(info, 'r') as f:
            info = ruamel.yaml.load(f, ruamel.yaml.RoundTripLoader)
    wavelength = 2.998e8 / info['SPW']["REF_FREQUENCY"][spwid]
    dish_size = numpy.mean(info['ANTENNA']['DISH_DIAMETER'])
    # Primary beam FWHM ~ 1.22 lambda / D
    FoV = numpy.rad2deg(1.22 * wavelength / dish_size)
    # Angular resolution ~ lambda / B_max
    max_res = numpy.rad2deg(wavelength / info['MAXBL'])
    return max_res, FoV
def filter_name(string):
    """Sanitise a field name for use in output file names: '+' becomes
    '_p_' and any other non-alphanumeric character becomes '_'."""
    return re.sub('[^0-9a-zA-Z]', '_', string.replace('+', '_p_'))
| 11,775 | 29.746736 | 105 | py |
caracal | caracal-master/caracal/dispatch_crew/config_parser.py | # -*- coding: future_fstrings -*-
from pykwalify.core import Core
import argparse
import yaml
import caracal
import os
import copy
import ruamel.yaml
from collections import OrderedDict
# shut this guy up: pykwalify is very chatty during schema validation, so
# detach its logger from the root logger and raise it to CRITICAL.
import logging
pykwalify_logger = logging.getLogger('pykwalify.core')
pykwalify_logger.propagate = False
pykwalify_logger.setLevel(logging.CRITICAL)
# Default configuration file shipped with the caracal package
DEFAULT_CONFIG = caracal.DEFAULT_CONFIG
class ConfigErrors(RuntimeError):
    """Raised when a configuration file fails schema validation.

    Attributes:
        config_file: path of the offending configuration file
        errors: dict mapping section name to a list of error messages
    """

    def __init__(self, config_file, error_dict):
        message = "configuration file {} fails to validate".format(config_file)
        RuntimeError.__init__(self, message)
        self.config_file = config_file
        self.errors = error_dict
def basic_parser(add_help=True):
    """Returns ArgumentParser for basic command-line options.

    Worker-specific "--worker-option" arguments are added later by
    config_parser once the schemas have been read; this parser only carries
    the options that do not depend on any configuration file.
    """
    parser = argparse.ArgumentParser(description="""
Welcome to CARACal (https://github.com/caracal-pipeline), a containerized data reduction pipeline for radio
interferometry.""",
                                     usage="%(prog)s [-options] -c config_file",
                                     epilog="""
You can override configuration file settings using additional "--worker-option value" arguments. Use
"-wh worker" to get help on a particular worker.
To get started, run e.g. "%(prog)s -gdt meerkat -gd config.yml" to make yourself an initial configuration file,
then edit the file to suit your needs.
""",
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     add_help=add_help)
    # Shorthand: all options below are registered on the same parser.
    add = parser.add_argument
    add("-v", "--version", action='version',
        version='{0:s} version {1:s}'.format(parser.prog, caracal.__version__))
    # The type= callback validates that the file exists at parse time.
    add('-c', '--config',
        type=lambda a: is_valid_file(parser, a),
        default=DEFAULT_CONFIG,
        help='pipeline configuration file. This is a mandatory argument.')
    add('-b', '--boring',
        help='enable boring mode, i.e. suppress colours in console output',
        action='store_true')
    add('-sid', '--singularity-image-dir', metavar="DIR",
        help='directory where stimela singularity images are stored')
    add('-gdt', '--get-default-template',
        choices=caracal.SAMPLE_CONFIGS.keys(),
        default="minimal",
        help='init a configuration file from a default template')
    add('-gd', '--get-default', metavar="FILE",
        help='name of file where the template should be saved (use in conjunction with -gdt)')
    add('-sw', '--start-worker', metavar="WORKER",
        help='start pipeline at this worker')
    add('-ew', '--end-worker', metavar="WORKER",
        help='stop pipeline after this worker')
    add('-ct', '--container-tech', choices=["default", "docker", "udocker", "singularity", "podman"],
        default="default",
        help='Containerization backend to use. Default falls back on "general: backend" config setting, or docker if not set.')
    add('-wh', '--worker-help', metavar="WORKER",
        help='prints help for a particular worker, then exits')
    add('-pcs', '--print-calibrator-standard',
        help='prints list of auxiliary calibrator standards, then exits',
        action='store_true')
    add('-report',
        help='(re)generates a final HTML report, if configured, then exits',
        action='store_true')
    add('-debug',
        help='enable debugging mode',
        action='store_true')
    add('-nr', '--no-reports',
        help='disable generation of HTML reports throughout the pipeline',
        action='store_true')
    # Options below are currently disabled but kept for reference.
    # add('-rv', '--report-viewer', action='store_true',
    #     help='Start the interactive report viewer (requires X session with decent [ie. firefox] webbrowser installed).')
    #
    # add('--interactive-port', type=int, default=8888,
    #     help='Port on which to listen when an interactive mode is selected (e.g the configuration editor)')
    # add("-la", '--log-append', help="Append to existing log-caracal.txt file instead of replacing it",
    #     action='store_true')
    return parser
def is_valid_file(parser, arg):
    """argparse type-checker: complain via ``parser.error`` (which exits)
    when ``arg`` does not exist on disk, otherwise return the path."""
    if os.path.exists(arg):
        return arg
    parser.error("The file '%s' does not exist!" % arg)
    return arg
class config_parser(object):
    """Builds the CARACal command-line interface from per-worker schema
    files, validates user configuration files against those schemas, and
    merges command-line overrides back into the configuration."""

    def __init__(self):
        """ Configuration parser. Sets up command line interface for CARACal
        """
        # =========================================================
        # Handle the configuration file argument first,
        # if one is supplied use that for defaulting arguments
        # created further down the line, otherwise use the
        # default configuration file
        # =========================================================
        # Create parser object
        self._parser = basic_parser()
        # worker name -> (schema filename, schema dict); filled lazily
        self._schemas = {}

    def validate_config(self, config_file):
        """Validates configuration file.

        Returns tuple of content, version, where content is validated config dict.
        Else raises ConfigErrors.
        """
        with open(config_file, 'r') as file:
            try:
                config_content = ruamel.yaml.load(file, ruamel.yaml.RoundTripLoader, version=(1, 1))
            except BaseException as exc:
                raise ConfigErrors(config_file, {'at top level': [str(exc)]})
        version = None
        # Validate each worker section against the schema and
        # parse schema to extract types and set up cmd argument parser
        validated_content = OrderedDict()
        errors = OrderedDict()
        for worker, variables in config_content.items():
            # schema_version specifies config version
            if worker == "schema_version":
                version = variables
                continue
            # "flag__2"-style names reuse the base worker's ("flag") schema
            _worker = worker.split("__")[0]
            if worker in self._schemas:
                schema_fn, _ = self._schemas[worker]
            elif _worker in self._schemas:
                schema_fn, _ = self._schemas[worker] = self._schemas[_worker]
            else:
                schema_fn = os.path.join(caracal.pckgdir, "schema", "{0:s}_schema.yml".format(_worker))
                if _worker == "worker" or not os.path.exists(schema_fn):
                    errors[worker] = ["this is not a recognized worker name, or its schema file is missing"]
                    continue
                with open(schema_fn, 'r') as file:
                    full_schema = ruamel.yaml.load(file, ruamel.yaml.RoundTripLoader, version=(1, 1))
                schema = full_schema["mapping"][_worker]
                self._schemas[worker] = self._schemas[_worker] = schema_fn, schema
            # validate worker config
            core = Core(source_data={_worker: variables}, schema_files=[schema_fn])
            validated_content[worker] = core.validate(raise_exception=False)[_worker]
            # check for errors
            if core.validation_errors:
                errs = errors[worker] = []
                for message in core.validation_errors:
                    # crude hack: we're already fooling the schema by using "flag" for the worker name
                    # when the name is e.g. "flag__2", so the message is misleading. Substitute the hack back.
                    message = message.replace("'/{}'".format(_worker), "'/{}'".format(worker))
                    errs.append(message)
        if errors:
            raise ConfigErrors(config_file, errors)
        return validated_content, version

    def populate_parser(self, config_content):
        """Takes config file content (as returned by validate_config), and
        populates the parser with corresponding options"""
        for worker, variables in config_content.items():
            self._process_subparser_tree(variables, self._schemas[worker][1], base_section=worker)

    def update_config_from_args(self, config_content, args):
        """ Updates argument parser with values from config file """
        options, remainder = self._parser.parse_known_args(args)
        if len(remainder) > 0:
            # Fix: the '%' operator was missing here, so the remainder was
            # string-concatenated onto the format instead of substituted in.
            raise RuntimeError("The following arguments were not parsed: %s" % ",".join(remainder))
        config = OrderedDict()
        for worker, variables in config_content.items():
            config[worker] = self._process_subparser_tree(variables, self._schemas[worker][1],
                                                          base_section=worker,
                                                          options=options)
        return options, config

    def _process_subparser_tree(self,  # class for storage
                                cfgVars,  # config file variables
                                schema_section,  # section of the schema
                                base_section="",  # base of the tree-section of the schema
                                options=None):  # if supplied, values of arguments will be propagated out into config
        '''
        This function recursively goes through the schema file, loaded as a nested orderedDict: subVars.
        If the variable of the schema is a map, the function goes to the inner nest of the dictionary.
        If options is None, the default values to run the pipeline, stored in the schema as seq, bool, str/numbers,
        are mapped to arguments in self._parser.
        If options is set, it must be a namespace returned by ArgumentParser.
        The content of the config is overwritten by the specified options.
        '''
        def _empty(alist):
            "recursive function checks if the elements in the array are empty (needed for the variables of the config file)"
            if type(alist) not in (list, tuple, dict):
                return False
            for a in alist:
                if not _empty(a):
                    return False
            return True
        groups = OrderedDict()
        # make schema section loopable
        sec_defaults = {k.replace('-', '_'): v for k, v in schema_section["mapping"].items()}
        # loop over each key of the variables in the schema
        # the key may contain a set of subkeys, being the schema a nested dictionary
        for key, subVars in sec_defaults.items():
            # store the total name of the key given the workerName(base_section) and key (which may be nested)
            # This has '-' for separators
            option_name = base_section + "-" + key if base_section != "" else key
            # corresponding attribute name
            attr_name = option_name.replace("-", "_")
            # For subsection, recurse into the nested variable
            if "mapping" in subVars:
                if key in cfgVars:  # check if enabled in config file
                    sub_vars = cfgVars[key]
                else:
                    sub_vars = {key: {} for key in cfgVars.keys()}
                # recurse with the set of variables of the nest
                groups[key] = self._process_subparser_tree(sub_vars, subVars, base_section=option_name, options=options)
                continue
            # True if variable is a list
            is_list = "seq" in subVars
            # type of variable (of list element, if dealing with lists)
            dtype = None

            def typecast(val):
                """Helper function to cast value to expected type."""
                if is_list and isinstance(val, list):
                    return [typecast(x) for x in val]
                if dtype is any:  # "any" type is uncast
                    return val
                if dtype is bool and isinstance(val, str):
                    return val.lower() in {"true", "yes", "1"}
                return dtype(val)

            def value2str(val):
                """Converts value to string representation. Bools get special lowercase treatment."""
                return str(bool(val)).lower() if dtype is bool else str(val)

            def str2list(val):
                """Converts lists in string representation, e.g. "[a, b]" and "a,b", to lists of strings"""
                return list(val.lstrip("[").rstrip("]").replace(", ", ",").split(","))
            # update default if set in user config
            default_value = None
            # NB: not sure why the check for _empty() is needed, some archaic carryover
            if not _empty(list(cfgVars.values())):
                default_value = cfgVars.get(key)
            # for sequences, do some type fiddling
            if is_list:
                dtype = __builtins__[subVars['seq'][0]['type']]
                if dtype is map:
                    dtype = dict
                if default_value is None:
                    if dtype is dict:
                        default_value = []
                    else:
                        default_value = subVars["example"]
                        if isinstance(default_value, str):
                            default_value = str2list(default_value)
                if not isinstance(default_value, list):
                    raise TypeError(f"{option_name} default value is not configured correctly. This is a bug, please report!")
            else:
                # for int, float, bool, str
                dtype = __builtins__[subVars['type']]
                if default_value is None:
                    default_value = subVars["example"]
            # convert default value to expected type
            groups[key] = default_value = typecast(default_value)
            # if an options object is passed in, look if its value overrides our setting
            if options is not None:
                if hasattr(options, attr_name):
                    optval = getattr(options, attr_name)
                    # optval is always a string, so...
                    # ...parse lists or dicts as yaml objects
                    if isinstance(optval, str) and (is_list or dtype is dict):
                        optval = yaml.safe_load(optval)
                    # ...and typecast to expected type
                    option_value = typecast(optval)
                    if option_value != default_value:
                        caracal.log.info("  command line sets --{} = {}".format(option_name, option_value))
                        groups[key] = option_value
            # else populate parser with default value
            else:
                # lists and dicts expressed via yaml, except the any-type lists
                if (is_list and dtype is not any) or dtype is dict:
                    self._parser.add_argument("--" + option_name, help=argparse.SUPPRESS, type=str,
                                              default=yaml.safe_dump(default_value))
                # booleans have a choice
                elif dtype is bool:
                    self._parser.add_argument("--" + option_name, help=argparse.SUPPRESS,
                                              choices="true yes 1 false no 0".split(),
                                              default=value2str(bool(default_value)))
                # all others passed with native dtype
                else:
                    self._parser.add_argument("--" + option_name, help=argparse.SUPPRESS,
                                              type=dtype, default=default_value)
        return groups

    def save_options(self, config, filename):
        """ Save configuration options to yaml """
        dictovals = copy.deepcopy(config)
        with open(filename, 'w') as f:
            f.write(yaml.dump(dictovals, Dumper=ruamel.yaml.RoundTripDumper))

    def log_options(self, config):
        """ Prints argument tree to the logger for posterity to behold """
        indent0 = "  "

        def _tree_print(branch, indent=indent0):
            dicts = OrderedDict(
                [(k, v) for k, v in branch.items() if isinstance(v, dict)])
            other = OrderedDict(
                [(k, v) for k, v in branch.items() if not isinstance(v, dict)])

            def _printval(k, v):
                if isinstance(v, dict):
                    # disabled sections are omitted from the log
                    if not v.get("enable", True):
                        return
                    if indent == indent0:
                        caracal.log.info("")
                        extra = dict(color="GREEN")
                    else:
                        extra = {}
                    caracal.log.info(f"{indent}{k}:", extra=extra)
                    _tree_print(v, indent=indent + indent0)
                else:
                    # totally ugly -- I promise I'll fix it when we have a better qualifier
                    if k == "cabs" and not v:
                        return
                    if type(v) in (list, tuple):
                        vstr = ', '.join([str(x) or '""' for x in v])
                        if len(v) < 2:
                            vstr = f"[{vstr}]"
                    else:
                        vstr = str(v) or '""'
                    k += ":"
                    caracal.log.info(f"{indent}{k:30}{vstr}")
            for k, v in other.items():
                _printval(k, v)
            for k, v in dicts.items():
                _printval(k, v)
        ordered_groups = OrderedDict(sorted(list(config.items()),
                                            key=lambda p: p[1].get("order", 0)))
        _tree_print(ordered_groups)
| 17,997 | 43.330049 | 134 | py |
caracal | caracal-master/caracal/dispatch_crew/__init__.py | 0 | 0 | 0 | py | |
caracal | caracal-master/caracal/dispatch_crew/catalog_parser.py | import re
import numpy as np
import copy
from caracal.utils.requires import extras
class catalog_parser:
    """Parses a text-format calibrator catalog into a dictionary database."""

    def __init__(self, filename):
        """
        The all-knowning catalog class
        Give me a filename and I shall pass on divine knowledge
        """
        cls = self.__class__
        self._cat = cls.read_caltable(filename)

    # Comment by Josh: @property is a means to protect private
    # Variables
    # if a is an instance of catalog_parser, then the expression
    # a.db is transformed into a.db(). So it looks like a class
    # variable but it isn't. Something nasty like
    # a.db = XXX is then impossible.
    @property
    def db(self):
        """ Returns a copy of divine sky knowledge """
        return copy.deepcopy(self._cat)

    def __str__(self):
        """ Return multiline string describing the calibrator database """
        lines = [""]
        lines.extend(["\t%s\tEpoch:%d\tRA:%3.2f\tDEC:%3.2f\t"
                      "S_v0:%.4f\tv0:%.4e\ta:%.4f\tb:%.4f\tc:%.4f\td:%.4f\t"
                      "lsm:%s\tlsm epoch:%d" %
                      (str(name).ljust(15),
                       db["epoch"],
                       db["ra"],
                       db["decl"],
                       db["S_v0"],
                       db["v0"],
                       db["a_casa"],
                       db["b_casa"],
                       db["c_casa"],
                       db["d_casa"],
                       db.get("lsm", "<none>").ljust(30),
                       db.get("lsm_epoch", db["epoch"]))
                      for name, db in self._cat.items()])
        return '\n'.join(lines)

    @classmethod
    def read_caltable(cls, filename):
        """
        Read calibrator database (specified in MHz)
        and returns a dictionary containing the following
        :filename: filename of caltable database
        :returns: for every source (name = key):
                 Epoch, RA, Declination,
                 a_ghz, b_ghz, c_ghz, d_ghz,
                 a_mhz, b_mhz, c_mhz, d_mhz,
                 S_v0, a_casa, b_casa, c_casa, d_casa, v0
        :side-effects: none
        """
        calibrator_db = {}
        with open(filename) as f:
            line = f.readline()
            ln_no = 1
            while line:
                # discard comments
                command = line.split("//")[0]
                # empty line ?
                if command.strip() == "":
                    line = f.readline()
                    ln_no += 1
                    continue
                cmd = None
                # source ?
                valset = re.match(r"^name=(?P<name>[0-9A-Za-z\-+_ ]+)[ ]+"
                                  r"epoch=(?P<epoch>[0-9]+)[ ]+"
                                  r"ra=(?P<ra>[+\-]?[0-9]+h[0-9]+m[0-9]+(?:.[0-9]+)?s)[ ]+"
                                  r"dec=(?P<decl>[+\-]?[0-9]+d[0-9]+m[0-9]+(?:.[0-9]+)?s)[ ]+"
                                  r"a=(?P<a>[+\-]?[0-9]+(?:.[0-9]+)?)[ ]+"
                                  r"b=(?P<b>[+\-]?[0-9]+(?:.[0-9]+)?)[ ]+"
                                  r"c=(?P<c>[+\-]?[0-9]+(?:.[0-9]+)?)[ ]+"
                                  r"d=(?P<d>[+\-]?[0-9]+(?:.[0-9]+)?)$",
                                  command)
                # else alias ?
                if not valset:
                    valset = re.match(r"^alias src=(?P<src>[0-9A-Za-z\-+_ ]+)[ ]+"
                                      r"dest=(?P<dest>[0-9A-Za-z\-+_ ]+)$",
                                      command)
                    # else lsm?
                    if not valset:
                        valset = re.match(r"^lsm name=(?P<src>[0-9A-Za-z\-+_ ]+)[ ]+"
                                          r"epoch=(?P<epoch>[0-9]+)[ ]+"
                                          r"(?P<lsmname>[0-9a-zA-Z\-.]+)$",
                                          command)
                        if not valset:
                            valset = re.match(r"^crystal name=(?P<src>[0-9A-Za-z\-+_ ]+)[ ]+"
                                              r"epoch=(?P<epoch>[0-9]+)[ ]+"
                                              r"(?P<lsmname>[0-9a-zA-Z\-.]+)$",
                                              command)
                            if not valset:
                                raise RuntimeError("Illegal line encountered while parsing"
                                                   "southern standard at line %d:'%s'" %
                                                   (ln_no, line))
                            else:
                                cmd = "crystal"
                        else:
                            cmd = "lsm"
                    else:
                        cmd = "alias"
                else:
                    cmd = "add"
                if cmd == "add":
                    # parse sources (spectra in MHz)
                    name = valset.group("name").strip()
                    epoch = int(valset.group("epoch"))
                    ra = valset.group("ra")
                    valset_ra = re.match(r"^(?P<h>[+\-]?[0-9]+)h"
                                         r"(?P<m>[0-9]+)m"
                                         r"(?P<s>[0-9]+(?:.[0-9]+)?)s$",
                                         ra)
                    ra = np.deg2rad((float(valset_ra.group("h")) +
                                     float(valset_ra.group("m")) / 60.0 +
                                     float(valset_ra.group("s")) / 3600) / 24.0 * 360)
                    decl = valset.group("decl")
                    valset_decl = re.match(r"^(?P<d>[+\-]?[0-9]+)d"
                                           r"(?P<m>[0-9]+)m"
                                           r"(?P<s>[0-9]+(?:.[0-9]+)?)s$",
                                           decl)
                    # arcminutes/arcseconds carry the sign of the degrees field
                    signum = 1.
                    if decl[0] == '-':
                        signum = -1.
                    decl = np.deg2rad(float(valset_decl.group("d")) +
                                      signum * float(valset_decl.group("m")) / 60. +
                                      signum * float(valset_decl.group("s")) / 3600.)
                    a = float(valset.group("a"))
                    b = float(valset.group("b"))
                    c = float(valset.group("c"))
                    d = float(valset.group("d"))
                    # convert models to Perley Butler GHz format
                    k = np.log10(1000)
                    ag = a + (b * k) + (c * k ** 2) + (d * k ** 3)
                    bg = b + (2 * c * k) + (3 * d * k ** 2)
                    cg = c + (3 * d * k)
                    dg = d
                    # convert model components to CASA/MT format
                    s_v0, a_casa, b_casa, c_casa, d_casa = cls.convert_pb_to_casaspi(0.8, 1.8, 1.4,
                                                                                     ag, bg, cg, dg)
                    calibrator_db[name] = {"epoch": epoch, "ra": ra, "decl": decl,
                                           "a_ghz": ag, "b_ghz": bg, "c_ghz": cg, "d_ghz": dg,
                                           "a_mhz": a, "b_mhz": b, "c_mhz": c, "d_mhz": d,
                                           "S_v0": s_v0,
                                           "a_casa": a_casa, "b_casa": b_casa,
                                           "c_casa": c_casa, "d_casa": d_casa,
                                           "v0": 1.4e9}
                elif cmd == "alias":
                    src = valset.group("src")
                    dest = valset.group("dest")
                    if src not in calibrator_db:
                        raise RuntimeError("%s has not been defined. Cannot alias "
                                           "%s to %s in line %d" %
                                           (src, dest, src, ln_no))
                    calibrator_db[dest] = calibrator_db[src]
                elif cmd == "lsm":
                    src = valset.group("src")
                    epoch = valset.group("epoch")
                    lsm = valset.group("lsmname")
                    if src not in calibrator_db:
                        # Fix: the message has four placeholders; supply all
                        # four arguments (src was previously passed only once).
                        raise RuntimeError("%s has not been defined. Cannot link lsm "
                                           "%s to %s in line %d" %
                                           (src, lsm, src, ln_no))
                    # Fix: attach the lsm to `src` (the field named on this
                    # line), not to `name` left over from the last 'add'.
                    calibrator_db[src]["lsm"] = lsm
                    calibrator_db[src]["lsm_epoch"] = int(epoch)
                elif cmd == "crystal":
                    src = valset.group("src")
                    epoch = valset.group("epoch")
                    crystal = valset.group("lsmname")
                    if src not in calibrator_db:
                        # Fix: supply all four format arguments (see lsm above).
                        raise RuntimeError("%s has not been defined. Cannot link to crystalball model "
                                           "%s to %s in line %d" %
                                           (src, crystal, src, ln_no))
                    # Fix: attach the crystalball model to `src`, not `name`.
                    calibrator_db[src]["crystal"] = crystal
                    calibrator_db[src]["lsm_epoch"] = int(epoch)
                else:
                    raise RuntimeError(
                        "Invalid command processed. This is a bug")
                # finally parse next line
                line = f.readline()
                ln_no += 1
        return calibrator_db

    @classmethod
    def convert_pb_to_casaspi(cls, vlower, vupper, v0, a, b, c, d):
        """
        Coverts between the different conventions:
        PB: 10 ** [a + b * log10(v) + c * log10(v) ** 2 + d * log10(v) ** 3]
        CASA/Meqtrees SPI: S(v0) * (v/v0) ** [a' + b'*log10(v/v0) + c'*log10(v/v0) ** 2 + d'*log10(v/v0) ** 3]
        args:
        :vlower, vupper: range (same unit as a, b, c, d coefficients!) to fit for a',b',c',d'
        :v0: reference frequency (same unit as vlower, vupper!)
        :a,b,c,d: PB coefficients (for the unit used in vlower, vupper and v0!)
        side-effects: none
        """
        if vlower > vupper:
            raise ValueError("vlower must be lower than vupper")

        def pbspi(v, a, b, c, d):
            return 10 ** (a + b * np.log10(v) + c * np.log10(v) ** 2 + d * np.log10(v) ** 3)

        def casaspi(v, v0, I, a, b, c, d):
            return I * (v / v0) ** (a + b * np.log10(v / v0) + c * np.log10(v / v0) ** 2 + d * np.log10(v / v0) ** 3)
        I = pbspi(v0, a, b, c, d)
        if a == 0 and b == 0 and c == 0 and d == 0:
            # Flat spectrum: the fit is trivially all-zero coefficients.
            popt = [0., 0., 0., 0.]
        else:
            # Wrap in useless function to hide scipy
            # Importing midway is non-kosher, but what you gonna do
            @extras("scipy.optimize")
            def needs_curve_fit():
                from scipy.optimize import curve_fit
                v = np.linspace(vlower, vupper, 10000)
                popt, pcov = curve_fit(lambda v, a, b, c, d: casaspi(
                    v, v0, I, a, b, c, d), v, pbspi(v, a, b, c, d))
                return popt, pcov
            popt, pcov = needs_curve_fit()
            # Fix: pcov only exists (and the covariance sanity check only
            # makes sense) when a fit was actually performed; previously this
            # raised NameError for all-zero coefficients.
            perr = np.sqrt(np.diag(pcov))
            assert np.all(perr < 1.0e-6)
        # returns (S(v0), a', b', c', d')
        return I, popt[0], popt[1], popt[2], popt[3]
| 11,235 | 45.04918 | 117 | py |
caracal | caracal-master/caracal/dispatch_crew/interruptable_process.py | from multiprocessing import Process
import os
import signal
class interruptable_process(Process):
    """A multiprocessing.Process that the parent can send SIGINT to.

    Fix: the previous implementation recorded the child's PID by assigning a
    private attribute from *inside* the child process. That assignment only
    happens in the child's copy of the object, so the parent's copy kept its
    initial value of 0 and interrupt() called ``os.kill(0, SIGINT)`` --
    signalling the parent's entire process group instead of the child.
    ``multiprocessing.Process`` already exposes the child PID to the parent
    as ``self.pid`` once start() has been called, so that is used instead.
    """

    def __init__(self, target):
        """
        Interruptable process
        Args:
            @target: method to execute in separate process
        """
        Process.__init__(self, target=target)

    def interrupt(self):
        """Send SIGINT to the child process; a no-op before start()."""
        if not self.pid:
            return
        try:
            os.kill(self.pid, signal.SIGINT)
        except (ProcessLookupError, KeyboardInterrupt):
            pass  # child already gone, or signal bounced back; ignore
| 608 | 22.423077 | 60 | py |
CP2 | CP2-main/main.py | import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import logging
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from mmcv.utils import Config
import loader
import builder
# Module-level logger: INFO-level progress lines are mirrored into ./log_cp2.txt
# (ProgressMeter.display writes here from rank 0).
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
_file_handler = logging.FileHandler('./log_cp2.txt')
_file_handler.setLevel(logging.INFO)
_file_handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(_file_handler)
# Command-line interface for CP2 pretraining.
# Fixes: (1) '--num-images' help text was copy-pasted from '--epochs';
# (2) added '--arch' — main_worker's checkpoint dict reads args.arch, which
# was previously never defined and raised AttributeError at the first save.
parser = argparse.ArgumentParser(description='Copy-Paste Contrastive Pretraining on ImageNet')
parser.add_argument('--config', help='path to configuration file')
parser.add_argument('--data', metavar='DIR', help='path to dataset')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
                    help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--num-images', default=1281167, type=int, metavar='N',
                    help='number of training images '
                         '(default: 1281167, the ImageNet-1k train-set size)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N', help='total batch size over all GPUs')
parser.add_argument('--lr', '--learning-rate', default=0.03, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum of SGD solver')
parser.add_argument('--optim', default='sgd', help='optimizer')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--scalar-freq', default=100, type=int,
                    help='metrics writing frequency')
parser.add_argument('--ckpt-freq', default=1, type=int,
                    help='checkpoint saving frequency')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--world-size', default=1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:10001', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multiple GPUs by default')
parser.set_defaults(multiprocessing_distributed=True)
parser.add_argument('--arch', metavar='ARCH', default='cp2_moco',
                    help='architecture label recorded in saved checkpoints')
parser.add_argument('--output-stride', default=16, type=int,
                    help='output stride of encoder')
def main():
    """Parse CLI arguments and launch one training worker per GPU."""
    args = parser.parse_args()
    # Seeding makes runs repeatable; cudnn.deterministic disables
    # non-deterministic autotuned kernels.
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
    if args.world_size == -1 and args.dist_url == "env://":
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.multiprocessing_distributed or args.world_size > 1
    n_gpus = torch.cuda.device_count()
    if not args.multiprocessing_distributed:
        main_worker(args.gpu, n_gpus, args)
        return
    # One process per GPU; the total world size spans all nodes.
    args.world_size = n_gpus * args.world_size
    mp.spawn(main_worker, nprocs=n_gpus, args=(n_gpus, args))
def main_worker(gpu, ngpus_per_node, args):
    """Per-process training entry point for CP2 pretraining.

    Builds the CP2_MOCO model from the mmcv config, wraps it in
    DistributedDataParallel, constructs three ImageNet loaders (one
    two-crop foreground stream plus two independently shuffled background
    streams), then runs the epoch loop and periodically checkpoints.

    Args:
        gpu: local GPU index assigned to this process (None in CPU-less modes).
        ngpus_per_node: number of GPUs on this node.
        args: parsed CLI namespace; gpu, rank, batch_size and workers are
            rescaled/mutated in place for this process.
    """
    cfg = Config.fromfile(args.config)
    data_dir = args.data
    args.gpu = gpu
    # Silence print() on every process except local rank 0 to avoid
    # interleaved console output.
    if args.multiprocessing_distributed and args.gpu != 0:
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # Global rank = node rank * GPUs-per-node + local GPU index.
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    model = builder.CP2_MOCO(cfg)
    print(model)
    if args.distributed:
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # args.batch_size is the total over all GPUs; split it (and the
            # data-loading workers, rounded up) across this node's processes.
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        raise NotImplementedError("Only DistributedDataParallel is supported.")
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    if args.optim == 'adamw':
        # NOTE(review): AdamW ignores args.weight_decay and hard-codes 0.01
        # — confirm this is intentional.
        optimizer = torch.optim.AdamW(model.parameters(), args.lr,
                                      weight_decay=0.01)
    elif args.optim == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    else:
        raise NotImplementedError("Only sgd and adamw optimizers are supported.")
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    traindir = os.path.join(data_dir, 'train')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # MoCo-v2-style augmentation for the foreground two-crop stream.
    augmentation = [
        transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
        transforms.RandomApply([
            transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)
        ], p=0.8),
        transforms.RandomGrayscale(p=0.2),
        transforms.RandomApply([loader.GaussianBlur([.1, 2.])], p=0.5),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ]
    # simply use RandomErasing for Copy-Paste implementation:
    # erase a random block of background image and replace the erased positions by foreground
    augmentation_bg = [
        transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
        transforms.RandomApply([
            transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)
        ], p=0.8),
        transforms.RandomGrayscale(p=0.2),
        transforms.RandomApply([loader.GaussianBlur([.1, 2.])], p=0.5),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
        transforms.RandomErasing(p=1., scale=(0.5, 0.8), ratio=(0.8, 1.25), value=0.)
    ]
    train_dataset = datasets.ImageFolder(
        traindir,
        loader.TwoCropsTransform(transforms.Compose(augmentation)))
    train_dataset_bg = datasets.ImageFolder(
        traindir,
        transforms.Compose(augmentation_bg))
    if args.distributed:
        # Distinct sampler seeds keep the two background streams shuffled
        # differently from the foreground stream and from each other.
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, seed=0)
        train_sampler_bg0 = torch.utils.data.distributed.DistributedSampler(train_dataset_bg, seed=1024)
        train_sampler_bg1 = torch.utils.data.distributed.DistributedSampler(train_dataset_bg, seed=2048)
    else:
        train_sampler = None
        train_sampler_bg0 = None
        train_sampler_bg1 = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
    train_loader_bg0 = torch.utils.data.DataLoader(
        train_dataset_bg, batch_size=args.batch_size, shuffle=(train_sampler_bg0 is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler_bg0, drop_last=True)
    train_loader_bg1 = torch.utils.data.DataLoader(
        train_dataset_bg, batch_size=args.batch_size, shuffle=(train_sampler_bg1 is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler_bg1, drop_last=True)
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Re-seed samplers so each epoch sees a different shuffle.
            train_sampler.set_epoch(epoch)
            train_sampler_bg0.set_epoch(epoch)
            train_sampler_bg1.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        # train for one epoch
        train([train_loader, train_loader_bg0, train_loader_bg1], model, criterion, optimizer, epoch, args)
        if epoch % args.ckpt_freq == args.ckpt_freq - 1:
            # Only one process per node writes checkpoints.
            if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                    and args.rank % ngpus_per_node == 0):
                # NOTE(review): this reads args.arch — verify the argument
                # parser defines --arch, otherwise this raises AttributeError
                # at the first checkpoint.
                save_checkpoint({
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'optimizer' : optimizer.state_dict(),
                }, is_best=False, filename='checkpoint_{:04d}.pth.tar'.format(epoch))
def train(train_loader_list, model, criterion, optimizer, epoch, args):
    """Run one epoch of CP2 copy-paste contrastive training.

    Args:
        train_loader_list: [foreground two-crop loader, background loader 0,
            background loader 1]; the three are iterated in lockstep.
        model: CP2_MOCO (usually DDP-wrapped); called with composited query/key
            images and downsampled foreground masks.
        criterion: CrossEntropyLoss for the instance-level contrastive branch.
        optimizer: optimizer whose lr was set by adjust_learning_rate.
        epoch: current epoch index (used for logging only).
        args: CLI namespace (gpu, output_stride, print_freq, ...).
    """
    batch_time = AverageMeter('Time', ':6.3f')
    # data_time = AverageMeter('Data', ':6.3f')
    loss_i = AverageMeter('Loss_ins', ':.4f')
    loss_d = AverageMeter('Loss_den', ':.4f')
    acc_ins = AverageMeter('Acc_ins', ':6.2f')
    acc_seg = AverageMeter('Acc_seg', ':6.2f')
    train_loader, train_loader_bg0, train_loader_bg1 = train_loader_list
    progress = ProgressMeter(
        len(train_loader),
        [batch_time, loss_i, loss_d, acc_ins, acc_seg],
        prefix="Epoch: [{}]".format(epoch))
    # cre_dense = nn.LogSoftmax(dim=1)
    model.train()
    end = time.time()
    for i, ((images, _), (bg0, _), (bg1, _)) in enumerate(zip(train_loader, train_loader_bg0, train_loader_bg1)):
        # data_time.update(time.time() - end)
        if args.gpu is not None:
            images[0] = images[0].cuda(args.gpu, non_blocking=True)
            images[1] = images[1].cuda(args.gpu, non_blocking=True)
            bg0 = bg0.cuda(args.gpu, non_blocking=True)
            bg1 = bg1.cuda(args.gpu, non_blocking=True)
        # mask_q = mask_q.cuda(args.gpu, non_blocking=True)
        # mask_k = mask_k.cuda(args.gpu, non_blocking=True)
        # RandomErasing zeroed a block of each background image; channel 0 == 0
        # marks those pixels, which become the foreground paste region.
        mask_q, mask_k = (bg0[:, 0] == 0).float(), (bg1[:, 0] == 0).float()
        # Composite: foreground crop inside the erased region, background elsewhere.
        image_q = images[0] * mask_q.unsqueeze(1) + bg0
        image_k = images[1] * mask_k.unsqueeze(1) + bg1
        # compute output
        # Masks are subsampled to feature-map resolution: one sample per
        # stride x stride cell, taken at the cell centre.
        stride = args.output_stride
        output_instance, output_dense, target_instance, target_dense, mask_dense = model(
            image_q, image_k,
            mask_q[:, stride//2::stride, stride//2::stride],
            mask_k[:, stride//2::stride, stride//2::stride])
        loss_instance = criterion(output_instance, target_instance)
        # dense loss of softmax
        # Negative log-softmax over flattened positions, averaged over the
        # positive entries of target_dense for each sample.
        output_dense_log = (-1.) * nn.LogSoftmax(dim=1)(output_dense)
        output_dense_log = output_dense_log.reshape(output_dense_log.shape[0], -1)
        loss_dense = torch.mean(
            torch.mul(output_dense_log, target_dense).sum(dim=1) / target_dense.sum(dim=1))
        # Dense term is down-weighted by 0.2 relative to the instance term.
        loss = loss_instance + loss_dense * .2
        acc1, acc5 = accuracy(output_instance, target_instance, topk=(1, 5))
        # Dense accuracy: fraction of samples whose arg-max position lands on
        # a positive target (scaled to percent).
        acc_dense_pos = output_dense.reshape(output_dense.shape[0], -1).argmax(dim=1)
        acc_dense = target_dense[torch.arange(0, target_dense.shape[0]), acc_dense_pos].float().mean() * 100.
        loss_i.update(loss_instance.item(), images[0].size(0))
        loss_d.update(loss_dense.item(), images[0].size(0))
        acc_ins.update(acc1[0], images[0].size(0))
        acc_seg.update(acc_dense.item(), images[0].size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize a training checkpoint to disk.

    Args:
        state: picklable dict (epoch, arch, model and optimizer state).
        is_best: when True, duplicate the checkpoint as 'model_best.pth.tar'.
        filename: destination path for the checkpoint file.
    """
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the latest value and the running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += n * val
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. with fmt ':.4f' -> 'Loss_ins 0.1234 (0.2345)'
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Formats and prints a list of AverageMeters for a given batch index."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one progress line; rank 0 also appends it to the log file."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print(' '.join(parts))
        if torch.distributed.get_rank() == 0:
            logger.info('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # '[  7/250]'-style counter, width-padded to the total batch count.
        width = len(str(num_batches))
        fmt = '{:%dd}' % width
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Apply half-cosine decay of the base lr over args.epochs (no restarts)."""
    scale = 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
    new_lr = args.lr * scale
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def accuracy(output, target, topk=(1,)):
    """Return top-k accuracies (as percentages) of `output` against `target`.

    Args:
        output: (N, C) score tensor.
        target: (N,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of one-element tensors, one accuracy per requested k.
    """
    with torch.no_grad():
        k_max = max(topk)
        n = target.size(0)
        # Indices of the k_max highest scores per sample, as (k_max, N).
        _, top_idx = output.topk(k_max, 1, True, True)
        top_idx = top_idx.t()
        # hits[r, j] is True when rank-r prediction for sample j is correct.
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
        return [
            hits[:k].contiguous().view(-1).float().sum(0, keepdim=True).mul_(100.0 / n)
            for k in topk
        ]
# Script entry point: parse CLI arguments and spawn the training workers.
if __name__ == '__main__':
    main()
| 16,052 | 39.537879 | 120 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.