sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def filenames_to_uniq(names, new_delim='.'):
    """Reduce a set of file names to their unique parts.

    Each name is split into chunks on '.' and '-'; chunks common to
    *all* names (compared from the end of the name) are dropped and the
    surviving chunks are re-joined with ``new_delim``.

    For example:
        A.foo.bar.txt, B.foo.bar.txt  ->  ['A', 'B']
        AA.BB.foo.txt, CC.foo.txt     ->  ['AA.BB', 'CC']

    :param names: iterable of file names
    :param new_delim: delimiter used to join the surviving chunks
    :return: list of uniquified names, in the same order as `names`
    """
    # Split each name into chunks and reverse so index 0 is the *last*
    # chunk -- commonality is detected from the end of the name.
    name_words = []
    maxlen = 0
    for name in names:
        words = name.replace('.', ' ').replace('-', ' ').strip().split()
        words.reverse()
        name_words.append(words)
        maxlen = max(maxlen, len(words))
    # common[i] is True when chunk i (from the end) is identical across
    # all names; a name shorter than i+1 chunks makes i non-common.
    common = [False] * maxlen
    for i in range(maxlen):
        last = None
        same = True
        for words in name_words:
            if i >= len(words):
                same = False
                break
            if last is None:
                # chunks from split() are never empty, so an explicit
                # None check (not truthiness) marks "unset" robustly
                last = words[i]
            elif words[i] != last:
                same = False
                break
        common[i] = same
    # Keep only the non-common chunks and restore the original order.
    newnames = []
    for words in name_words:
        nn = [words[i] for i, is_common in enumerate(common)
              if not is_common and i < len(words)]
        nn.reverse()
        newnames.append(new_delim.join(nn))
    return newnames
def run_process(self, analysis, action_name, message='__nomessagetoken__'):
    """Execute an action in the analysis with the given message.

    On ``connect`` the zmq connection is set up first and this
    coroutine waits for the kernel handshake before sending anything.
    On ``disconnected`` the kernel is given a short grace period to
    process the message before the local handler runs.
    """
    if action_name == 'connect':
        analysis.on_connect(self.executable, self.zmq_publish)
        # Cooperatively wait until the kernel answers the handshake.
        while not analysis.zmq_handshake:
            yield tornado.gen.sleep(0.1)
    log.debug('sending action {}'.format(action_name))
    analysis.zmq_send({'signal': action_name, 'load': message})
    if action_name == 'disconnected':
        # Give kernel time to process disconnected message.
        yield tornado.gen.sleep(0.1)
        analysis.on_disconnected()
def check_git_unchanged(filename, yes=False):
    """Check git to avoid overwriting user changes.

    Prompts (unless `yes`) when staged or unstaged changes touch
    `filename`; raises RuntimeError when the user declines.
    """
    # The staged and unstaged branches were duplicated; factor them.
    for check, kind in ((check_staged, 'staged'),
                        (check_unstaged, 'unstaged')):
        if check(filename):
            prompt = ('There are {} changes in {}, '
                      'overwrite? [y/n] '.format(kind, filename))
            if yes or input(prompt) in ('y', 'yes'):
                # As before: accepting the first prompt skips any
                # further checks.
                return
            raise RuntimeError('There are {} changes in '
                               '{}, aborting.'.format(kind, filename))
def check_staged(filename=None):
    """Check if there are 'changes to be committed' in the index."""
    retcode, _, stdout = git['diff-index', '--quiet', '--cached', 'HEAD',
                             filename].run(retcode=None)
    # git diff-index --quiet: 0 = clean, 1 = differences found.
    if retcode == 0:
        return False
    if retcode == 1:
        return True
    raise RuntimeError(stdout)
def run_process(analysis, action_name, message='__nomessagetoken__'):
    """Executes an action in the analysis with the given message.

    It also handles the start and stop signals in the case that message
    is a `dict` with a key ``__process_id``.

    :param analysis: analysis instance; ``None`` makes this a no-op.
    :param str action_name: Name of the action to trigger.
    :param message: Message. A list is used as positional arguments, a
        dict as keyword arguments, the ``'__nomessagetoken__'`` sentinel
        as no arguments, and anything else as a single argument.
    :rtype: tornado.concurrent.Future
    """
    if analysis is None:
        return
    # detect (and strip) a process_id so start/end status events can be
    # emitted around the handler calls
    process_id = None
    if isinstance(message, dict) and '__process_id' in message:
        process_id = message['__process_id']
        del message['__process_id']
    if process_id:
        yield analysis.emit('__process',
                            {'id': process_id, 'status': 'start'})
    fns = [
        functools.partial(handler, analysis)
        for handler in (analysis._action_handlers.get(action_name, []) +
                        analysis._action_handlers.get('*', []))
    ]
    if fns:
        args, kwargs = [], {}
        # Check whether this is a list (positional arguments)
        # or a dictionary (keyword arguments).
        if isinstance(message, list):
            args = message
        elif isinstance(message, dict):
            kwargs = message
        elif message == '__nomessagetoken__':
            pass
        else:
            args = [message]
        for fn in fns:
            log.debug('calling {}'.format(fn))
            try:
                yield tornado.gen.maybe_future(fn(*args, **kwargs))
            except Exception:
                # fixed message typo ("occured"); bare raise preserves
                # the original traceback (raise e appended a frame)
                yield analysis.emit('error', 'an Exception occurred')
                raise
    else:
        yield analysis.emit('warn',
                            'no handler for {}'.format(action_name))
    if process_id:
        yield analysis.emit('__process',
                            {'id': process_id, 'status': 'end'})
def dl_file(url, dest, chunk_size=6553):
    """Download `url` to `dest` (a pathlib.Path) in chunks."""
    import urllib3
    http = urllib3.PoolManager()
    r = http.request('GET', url, preload_content=False)
    try:
        with dest.open('wb') as out:
            while True:
                data = r.read(chunk_size)
                if not data:  # b'' signals EOF
                    break
                out.write(data)
    finally:
        # Return the connection to the pool even if writing fails
        # (previously leaked on error).
        r.release_conn()
def extract_lzma(path):
    """Decompress an lzma file into a temporary .tar file.

    :param path: path to the lzma-compressed file
    :return: name of the temporary tar file (caller removes it)
    """
    source = pathlib.Path(path)
    # read and decompress the whole payload
    with source.open("rb") as compressed:
        payload = lzma.decompress(compressed.read())
    # write it to a fresh temporary tar file
    handle, tmpname = tempfile.mkstemp(prefix="odt_ex_", suffix=".tar")
    with open(handle, "wb") as target:
        target.write(payload)
    return tmpname
def get_file(fname, datapath=datapath):
    """Return path of an example data file.

    If the file does not exist in the `datapath` directory, try to
    download it from the ODTbrain GitHub repository (`webloc`).
    """
    target_dir = pathlib.Path(datapath)
    target_dir.mkdir(parents=True, exist_ok=True)
    target = target_dir / fname
    if not target.exists():
        print("Attempting to download file {} from {} to {}.".format(
            fname, webloc, target_dir))
        try:
            dl_file(url=webloc + fname, dest=target)
        except BaseException:
            # warn, then let the original error propagate
            warnings.warn("Download failed: {}".format(fname))
            raise
    return target
def load_data(fname, **kwargs):
    """Load example data (.tar.lzma sinograms or .zip archives).

    :raises ValueError: if the file type is not supported
    """
    fname = get_file(fname)
    if fname.suffix == ".lzma":
        return load_tar_lzma_data(fname)
    elif fname.suffix == ".zip":
        return load_zip_data(fname, **kwargs)
    else:
        # previously fell through and returned None silently
        raise ValueError("Unsupported file type: {}".format(fname))
def load_tar_lzma_data(tlfile):
    """Load example sinogram data from a .tar.lzma file.

    :return: tuple ``(sino, angles, phantom, parms)``
    """
    tarname = extract_lzma(tlfile)
    re_fields = []   # real parts of the recorded fields
    im_fields = []   # imaginary parts of the recorded fields
    phantom = []
    parms = {}
    with tarfile.open(tarname, "r") as arc:
        for member in sorted(arc.getmembers(), key=lambda m: m.name):
            name = member.name
            fobj = arc.extractfile(member)
            if name.startswith("fdtd_info"):
                # "key = value" lines holding float parameters
                for raw in fobj.readlines():
                    line = raw.decode()
                    if line.count("=") == 1:
                        key, val = line.split("=")
                        parms[key.strip()] = float(val.strip())
            elif name.startswith("phantom"):
                phantom.append(np.loadtxt(fobj))
            elif name.startswith("field"):
                if name.endswith("imag.txt"):
                    im_fields.append(np.loadtxt(fobj))
                elif name.endswith("real.txt"):
                    re_fields.append(np.loadtxt(fobj))
    # best-effort cleanup of the temporary tar file
    try:
        os.remove(tarname)
    except OSError:
        pass
    phantom = np.array(phantom)
    sino = np.array(re_fields) + 1j * np.array(im_fields)
    angles = np.linspace(0, 2 * np.pi, sino.shape[0], endpoint=False)
    return sino, angles, phantom, parms
def load_zip_data(zipname, f_sino_real, f_sino_imag,
                  f_angles=None, f_phantom=None, f_info=None):
    """Load example sinogram data from a .zip file.

    :return: list ``[sino(, angles)(, phantom)(, cfg)]`` depending on
        which optional archive member names are given
    """
    out = []
    with zipfile.ZipFile(str(zipname)) as arc:
        real_part = np.loadtxt(arc.open(f_sino_real))
        imag_part = np.loadtxt(arc.open(f_sino_imag))
        out.append(real_part + 1j * imag_part)
        if f_angles:
            out.append(np.loadtxt(arc.open(f_angles)))
        if f_phantom:
            out.append(np.loadtxt(arc.open(f_phantom)))
        if f_info:
            # parse "key = value" float pairs into a dict
            cfg = {}
            with arc.open(f_info) as info:
                for raw in info.readlines():
                    line = raw.decode()
                    if line.count("=") == 1:
                        key, val = line.split("=")
                        cfg[key.strip()] = float(val.strip())
            out.append(cfg)
    return out
def fourier_map_2d(uSin, angles, res, nm, lD=0, semi_coverage=False,
                   coords=None, count=None, max_count=None, verbose=0):
    r"""2D Fourier mapping with the Fourier diffraction theorem

    Two-dimensional diffraction tomography reconstruction
    algorithm for scattering of a plane wave
    :math:`u_0(\mathbf{r}) = u_0(x,z)`
    by a dielectric object with refractive index
    :math:`n(x,z)`.

    This function implements the solution by interpolation in
    Fourier space.

    Parameters
    ----------
    uSin: (A,N) ndarray
        Two-dimensional sinogram of line recordings
        :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
        divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
        measured at the detector.
    angles: (A,) ndarray
        Angular positions :math:`\phi_j` of `uSin` in radians.
    res: float
        Vacuum wavelength of the light :math:`\lambda` in pixels.
    nm: float
        Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
    lD: float
        Distance from center of rotation to detector plane
        :math:`l_\mathrm{D}` in pixels.
    semi_coverage: bool
        If set to `True`, it is assumed that the sinogram does not
        necessarily cover the full angular range from 0 to 2π, but an
        equidistant coverage over 2π can be achieved by inferring point
        (anti)symmetry of the (imaginary) real parts of the Fourier
        transform of f. Valid for any set of angles {X} that result in
        a 2π coverage with the union set {X}U{X+π}.
    coords: None [(2,M) ndarray]
        Computes only the output image at these coordinates. This
        keyword is reserved for future versions and is not
        implemented yet.
    count, max_count: multiprocessing.Value or `None`
        Can be used to monitor the progress of the algorithm.
        Initially, the value of `max_count.value` is incremented
        by the total number of steps. At each step, the value
        of `count.value` is incremented.
    verbose: int
        Increment to increase verbosity.

    Returns
    -------
    f: ndarray of shape (N,N), complex if `onlyreal` is `False`
        Reconstructed object function :math:`f(\mathbf{r})` as defined
        by the Helmholtz equation.
        :math:`f(x,z) =
        k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`

    See Also
    --------
    backpropagate_2d: implementation by backpropagation
    odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
        to refractive index :math:`n(\mathbf{r})`

    Notes
    -----
    Do not use the parameter `lD` in combination with the Rytov
    approximation - the propagation is not correctly described.
    Instead, numerically refocus the sinogram prior to converting
    it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
    with a numerical focusing algorithm (available in the Python
    package :py:mod:`nrefocus`).

    The interpolation in Fourier space (which is done with
    :func:`scipy.interpolate.griddata`) may be unstable and lead to
    artifacts if the data to interpolate contains sharp spikes. This
    issue is not handled at all by this method (in fact, a test has
    been removed in version 0.2.6 because ``griddata`` gave different
    results on Windows and Linux).
    """
    # TODO:
    # - zero-padding as for backpropagate_2D - However this is not
    #   necessary as Fourier interpolation is not parallelizable with
    #   multiprocessing and thus unattractive. Could be interesting for
    #   specific environments without the Python GIL.
    # - Deal with oversampled data. Maybe issue a warning.
    A = angles.shape[0]
    if max_count is not None:
        max_count.value += 4
    # Check input data
    assert len(uSin.shape) == 2, "Input data `uSin` must have shape (A,N)!"
    assert len(uSin) == A, "`len(angles)` must be equal to `len(uSin)`!"
    if coords is not None:
        raise NotImplementedError("Output coordinates cannot yet be set"
                                  + "for the 2D backrpopagation algorithm.")
    # Cut-Off frequency km [1/px]
    km = (2 * np.pi * nm) / res
    # Fourier transform of all uB's.
    # NOTE: the factor sqrt(2*pi) converts between the unitary angular
    # frequency convention of the derivation and the unitary ordinary
    # frequency convention of np.fft; the physics sign convention
    # u0(x) = exp(-ikx) is already accounted for in the formulas below.
    UB = np.fft.fft(np.fft.ifftshift(uSin, axes=-1)) * np.sqrt(2 * np.pi)
    # Corresponding sample frequencies
    fx = np.fft.fftfreq(len(uSin[0]))  # 1D array
    # kx is a 1D array.
    kx = 2 * np.pi * fx
    if count is not None:
        count.value += 1
    # Undersampling/oversampling?
    # Determine if the resolution of the image is too low by looking
    # at the maximum value for kx. This is no comparison between
    # Nyquist and Rayleigh frequency.
    if verbose:
        # BUG FIX: the oversampled message used to print even when
        # verbose was 0 (the `else` was attached to the verbose check).
        if np.max(kx**2) <= km**2:
            # Detector is not set up properly. Higher resolution
            # can be achieved.
            print("......Measurement data is undersampled.")
        else:
            print("......Measurement data is oversampled.")
    # F(kD-kms0) = -i km sqrt(2/pi) / a0 * M exp(-i km M lD) * UB(kD)
    # kmM = sqrt(km^2 - kx^2);  s0 = (-sin(phi0), cos(phi0))
    # The interpolated griddata may go up to sqrt(2)*km for kx and ky.
    kx = kx.reshape(1, -1)
    filter_klp = (kx**2 < km**2)
    M = 1. / km * np.sqrt(km**2 - kx**2)
    # new in version 0.1.4:
    # We multiply by the factor (M-1) instead of just (M)
    # to take into account that we have a scattered
    # wave that is normalized by u0.
    Fsin = -1j * km * np.sqrt(2 / np.pi) * M * np.exp(-1j * km * (M-1) * lD)
    # UB has same shape (len(angles), len(kx))
    Fsin = Fsin * UB * filter_klp
    ang = angles.reshape(-1, 1)
    if semi_coverage:
        # exploit point (anti)symmetry: conjugate data covers ang + pi
        Fsin = np.vstack((Fsin, np.conj(Fsin)))
        ang = np.vstack((ang, ang + np.pi))
    if count is not None:
        count.value += 1
    # Compute kxl and kyl (in rotated system phi0)
    kxl = kx
    kyl = np.sqrt((km**2 - kx**2) * filter_klp) - km
    # rotate kxl and kyl to where they belong
    krx = np.cos(ang) * kxl + np.sin(ang) * kyl
    kry = - np.sin(ang) * kxl + np.cos(ang) * kyl
    Xf = krx.flatten()
    Yf = kry.flatten()
    Zf = Fsin.flatten()
    # interpolation on grid with same resolution as input data
    kintp = np.fft.fftshift(kx.reshape(-1))
    Fcomp = intp.griddata((Xf, Yf), Zf, (kintp[None, :], kintp[:, None]))
    if count is not None:
        count.value += 1
    # remove nans (points outside the convex hull of the data)
    Fcomp[np.where(np.isnan(Fcomp))] = 0
    # Filter data: zero everything outside the available frequency band
    kinx, kiny = np.meshgrid(np.fft.fftshift(kx), np.fft.fftshift(kx))
    Fcomp[np.where((kinx**2 + kiny**2) > np.sqrt(2) * km)] = 0
    # Fcomp is centered at K = 0 due to the way we chose kintp/coords
    f = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(Fcomp)))
    if count is not None:
        count.value += 1
    return f[::-1]
def transform_to(ext):
    """Decorator factory: derive ``out_file`` by swapping the extension.

    The wrapped function takes ``in_file`` (first positional or
    keyword) plus optional ``out_dir``/``out_file`` keywords.  When
    ``out_file`` is not given it is computed from ``in_file`` with its
    suffix replaced by `ext`, placed in ``out_dir`` (defaulting to the
    input's directory).  The call is memoized: the wrapped function is
    skipped when the output already exists.

    examples:
        @transform(".bam")
        f("the/input/path/file.sam") ->
            f("the/input/path/file.sam", out_file="the/input/path/file.bam")
        @transform(".bam")
        f("the/input/path/file.sam", out_dir="results") ->
            f("the/input/path/file.sam", out_file="results/file.bam")
    """
    def decor(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            out_file = kwargs.get("out_file", None)
            if not out_file:
                in_path = kwargs.get("in_file", args[0])
                target_dir = kwargs.get("out_dir", os.path.dirname(in_path))
                safe_mkdir(target_dir)
                target_name = replace_suffix(os.path.basename(in_path), ext)
                out_file = os.path.join(target_dir, target_name)
            kwargs["out_file"] = out_file
            if file_exists(out_file):
                return out_file
            return f(*args, **kwargs)
        return wrapper
    return decor
def filter_to(word):
    """Decorator factory: derive ``out_file`` by appending `word` to the stem.

    `in_file` is filtered by the function and the results are written
    to `out_file`.  Prefer this over ``transform_to`` when the
    extension of the incoming file is unknown.  The call is memoized:
    the wrapped function is skipped when the output already exists.

    examples:
        @filter_to(".foo")
        f("the/input/path/file.sam") ->
            f("the/input/path/file.sam", out_file="the/input/path/file.foo.bam")
        @filter_to(".foo")
        f("the/input/path/file.sam", out_dir="results") ->
            f("the/input/path/file.sam", out_file="results/file.foo.bam")
    """
    def decor(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            out_file = kwargs.get("out_file", None)
            if not out_file:
                in_path = kwargs.get("in_file", args[0])
                target_dir = kwargs.get("out_dir", os.path.dirname(in_path))
                safe_mkdir(target_dir)
                target_name = append_stem(os.path.basename(in_path), word)
                out_file = os.path.join(target_dir, target_name)
            kwargs["out_file"] = out_file
            if file_exists(out_file):
                return out_file
            return f(*args, **kwargs)
        return wrapper
    return decor
def chdir(new_dir):
    """Context manager to temporarily change to a new directory.

    http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
    """
    previous = os.getcwd()
    safe_mkdir(new_dir)
    os.chdir(new_dir)
    try:
        yield
    finally:
        # always restore the original working directory
        os.chdir(previous)
def file_uptodate(fname, cmp_fname):
    """Check if a file exists, is non-empty and is more recent than cmp_fname."""
    try:
        if not (file_exists(fname) and file_exists(cmp_fname)):
            return False
        return getmtime(fname) >= getmtime(cmp_fname)
    except OSError:
        # a file vanished between the existence check and stat
        return False
def save_diskspace(fname, reason, config):
    """Overwrite a file in place with a short message to save disk.

    Keeps the file as a sanity check on processes working, while
    reclaiming the space.  Acts only when
    ``config["algorithm"]["save_diskspace"]`` is truthy.
    """
    if not config["algorithm"].get("save_diskspace", False):
        return
    with open(fname, "w") as out_handle:
        out_handle.write("File removed to save disk space: %s" % reason)
def symlink_plus(orig, new):
    """Create relative symlinks and handle associated biological index files."""
    # link the primary file plus any sidecar index with the same prefix
    for ext in ("", ".idx", ".gbi", ".tbi", ".bai"):
        if os.path.exists(orig + ext) and not os.path.lexists(new + ext):
            with chdir(os.path.dirname(new)):
                os.symlink(os.path.relpath(orig + ext),
                           os.path.basename(new + ext))
    # also link indexes named after the stem (e.g. "file.bai" beside
    # "file.bam") -- presumably GATK-style naming; confirm with callers
    orig_stem = splitext_plus(orig)[0]
    new_stem = splitext_plus(new)[0]
    for sub_ext in (".bai",):
        if (os.path.exists(orig_stem + sub_ext)
                and not os.path.lexists(new_stem + sub_ext)):
            with chdir(os.path.dirname(new_stem)):
                os.symlink(os.path.relpath(orig_stem + sub_ext),
                           os.path.basename(new_stem + sub_ext))
def partition(pred, iterable):
    """Use a predicate to partition entries into false entries and true entries.

    partition(is_odd, range(10)) --> 0 2 4 6 8  and  1 3 5 7 9

    :return: pair of iterators ``(falses, trues)``
    """
    t1, t2 = itertools.tee(iterable)
    try:
        # Python 2 names
        return itertools.ifilterfalse(pred, t1), itertools.ifilter(pred, t2)
    except AttributeError:
        # Python 3: the "i"-prefixed variants were removed.  Previously
        # a bare `except:` hid any unrelated error here.
        return itertools.filterfalse(pred, t1), filter(pred, t2)
def get_in(d, t, default=None):
    """Follow the key path `t` through the nested dictionary `d`.

    example: get_in({1: {2: 3}}, (1, 2)) -> 3
    example: get_in({1: {2: 3}}, (2, 3)) -> None (the default)

    NOTE(review): a stored falsy value (0, '', {}) is indistinguishable
    from a missing key and also yields `default` -- longstanding quirk.
    """
    found = reduce(lambda acc, key: acc.get(key, {}), t, d)
    return found if found else default
def which(program):
    """Return the path to executable `program`, or None if it can't be found."""
    def _is_executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    head, _ = os.path.split(program)
    if head:
        # an explicit path was given: accept it only if executable
        if _is_executable(program):
            return program
        return None
    # bare name: search each PATH entry in order
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, program)
        if _is_executable(candidate):
            return candidate
    return None
def expanduser(path):
    """Expand ~ and ~user constructs.

    If user or $HOME is unknown, do nothing (return `path` unchanged)."""
    if path[:1] != '~':
        return path
    # find the end of the "~user" prefix (first path separator)
    i, n = 1, len(path)
    while i < n and path[i] not in '/\\':
        i += 1
    # resolve the current user's home, trying POSIX then Windows vars
    if 'HOME' in os.environ:
        userhome = os.environ['HOME']
    elif 'USERPROFILE' in os.environ:
        userhome = os.environ['USERPROFILE']
    elif 'HOMEPATH' not in os.environ:
        return path
    else:
        drive = os.environ.get('HOMEDRIVE', '')
        userhome = join(drive, os.environ['HOMEPATH'])
    if i != 1:  # ~user form: swap the last component for `user`
        userhome = join(dirname(userhome), path[1:i])
    return userhome + path[i:]
def splitext_plus(fname):
    """Split on file extensions, allowing for zipped extensions.

    ``"a.vcf.gz"`` -> ``("a", ".vcf.gz")``
    """
    base, ext = splitext(fname)
    if ext in (".gz", ".bz2", ".zip"):
        # peel one more extension and keep the compression suffix
        inner_base, inner_ext = splitext(base)
        base, ext = inner_base, inner_ext + ext
    return base, ext
def dots_to_empty_cells(config, tsv_fpath):
    """Put dots instead of empty cells in order to view TSV with column -t"""
    def proc_line(line, i):  # `i` (line index) is required by iterate_file
        # repeat until no adjacent tabs remain (handles runs of empties)
        while '\t\t' in line:
            line = line.replace('\t\t', '\t.\t')
        return line
    return iterate_file(config, tsv_fpath, proc_line, suffix='dots')
def file_transaction(work_dir, *rollback_files):
    """Wrap file generation in a transaction, moving to output if finishes.

    Yields transactional temporary paths; on success the temporary
    files (and any known index sidecars) are moved to their final
    locations, on failure they are removed.
    """
    # index sidecar extensions moved along with their primary file
    exts = {".vcf": ".idx", ".bam": ".bai", "vcf.gz": ".tbi"}
    safe_fpaths, orig_names = _flatten_plus_safe(work_dir, rollback_files)
    __remove_files(safe_fpaths)  # remove any half-finished transactions
    try:
        if len(safe_fpaths) == 1:
            yield safe_fpaths[0]
        else:
            yield tuple(safe_fpaths)
    except BaseException:
        # failure (including KeyboardInterrupt, as the previous bare
        # `except:` did) -- delete any temporary files, then re-raise
        __remove_files(safe_fpaths)
        raise
    else:  # worked -- move the temporary files to permanent location
        for safe, orig in zip(safe_fpaths, orig_names):
            if exists(safe):
                shutil.move(safe, orig)
            for check_ext, check_idx in exts.items():
                if safe.endswith(check_ext):
                    safe_idx = safe + check_idx
                    if exists(safe_idx):
                        shutil.move(safe_idx, orig + check_idx)
def tx_tmpdir(base_dir, rollback_dirpath):
    """Context manager to create and remove a transactional temporary directory.

    Creates a temp dir under `base_dir`, yields it, and on exit renames
    it to `rollback_dirpath` (which must not already exist).
    """
    # (dead commented-out uuid-based naming code removed)
    if exists(rollback_dirpath):
        critical(rollback_dirpath + ' already exists')
    tmp_dir = tempfile.mkdtemp(dir=base_dir)
    safe_mkdir(tmp_dir)
    try:
        yield tmp_dir
    finally:
        # preserve the results by renaming into place, even on error
        if tmp_dir and exists(tmp_dir):
            os.rename(tmp_dir, rollback_dirpath)
def _flatten_plus_safe(tmp_dir, rollback_files):
"""Flatten names of files and create temporary file names.
"""
tx_fpaths, orig_files = [], []
for fnames in rollback_files:
if isinstance(fnames, six.string_types):
fnames = [fnames]
for fname in fnames:
tx_file = fname + '.tx'
tx_fpath = join(tmp_dir, tx_file) if tmp_dir else tx_file
tx_fpaths.append(tx_fpath)
orig_files.append(fname)
return tx_fpaths, orig_files | Flatten names of files and create temporary file names. | entailment |
def merge_overlaps(work_dir, bed_fpath, distance=None):
    """Merge bed file intervals to avoid overlapping regions.

    Overlapping regions (1:1-100, 1:90-100) cause issues with callers
    like FreeBayes that don't collapse BEDs prior to using them.
    """
    output_fpath = intermediate_fname(work_dir, bed_fpath, 'merged')
    if isfile(output_fpath) and verify_file(output_fpath, cmp_f=bed_fpath):
        # cached result is newer than the input; reuse it
        return output_fpath
    with file_transaction(work_dir, output_fpath) as tx:
        merge_kwargs = dict(d=distance) if distance else dict()
        BedTool(bed_fpath).merge(**merge_kwargs).saveas(tx)
    return output_fpath
def checkformat(self):
    """Check the format of the bed file.

    Each line must contain at least 3 tab-separated columns; the second
    and third must be integers with end >= start.  Leading '#' comment
    lines are skipped.

    :return: string describing the detected error; empty string when
        the format is correct
    """
    fd = open_gzipsafe(self.filename)
    line = fd.readline()
    # skip header/comment lines
    while line.startswith('#'):
        line = fd.readline()
    fields = line.split('\t')
    lc = 1  # data-line counter for error reporting
    error = ''
    try:
        # advance while each line has >= 3 fields and start <= end;
        # a non-integer start/end raises ValueError
        while line != '' and len(fields) > 2 and int(fields[1]) <= int(fields[2]):
            lc += 1
            line = fd.readline()
            fields = line.split('\t')
    except ValueError:
        error += 'Incorrect start/end values at line ' + str(lc) + '\n'
        error += 'Start/End coordinates must be indicated with integer values. The right value must be greater than the left value.\n'
        error += 'Line found: ' + line
        fd.close()
        return error
    # either EOF was reached (correct file), or a line had fewer than
    # 3 fields / start > end
    if line != '':
        error += 'Incorrect line format at line ' + str(lc) + '\n'
        error += 'At least three columns are expected in each line\n'
        error += 'The right value must be greater than the left value.\n'
        error += 'Line found: ' + line
    fd.close()
    return error
def trigger_all_callbacks(self, callbacks=None):
    """Trigger callbacks for all keys on all or a subset of subscribers.

    :param Iterable callbacks: list of callbacks or None for all subscribed
    :rtype: Iterable[tornado.concurrent.Future]
    """
    # Forward the caller-supplied ``callbacks`` filter instead of the
    # previous hard-coded ``None``, which silently triggered every
    # subscriber regardless of the requested subset.
    return [ret
            for key in self
            for ret in self.trigger_callbacks(key, callbacks=callbacks)]
:param Iterable callbacks: list of callbacks or none for all subscribed
:rtype: Iterable[tornado.concurrent.Future] | entailment |
def set(self, key, value):
    """Store an encoded value under ``key`` and notify subscribers.

    Nothing is stored and no callbacks fire when the encoded value is
    identical to what is already held; an empty list is returned in
    that case.

    :rtype: Iterable[tornado.concurrent.Future]
    """
    encoded = encode(value)
    unchanged = key in self.data and self.data[key] == encoded
    if unchanged:
        return []
    self.data[key] = encoded
    return self.trigger_callbacks(key)
:rtype: Iterable[tornado.concurrent.Future] | entailment |
def set_state(self, updater=None, **kwargs):
    """Update the datastore.

    :param func|dict updater: (state) => state_change or dict state_change
    :rtype: Iterable[tornado.concurrent.Future]
    """
    # Normalize the three accepted call styles into one change mapping:
    # a callable producing changes, an explicit dict, or keyword args.
    if callable(updater):
        changes = updater(self)
    else:
        changes = kwargs if updater is None else updater
    results = []
    for key, value in changes.items():
        results.extend(self.set(key, value))
    return results
:param func|dict updater: (state) => state_change or dict state_change
:rtype: Iterable[tornado.concurrent.Future] | entailment |
def init(self, key_value_pairs=None, **kwargs):
    """Initialize datastore.

    Only sets values for keys that are not in the datastore already.

    :param dict key_value_pairs:
        A set of key value pairs to use to initialize the datastore.
    :rtype: Iterable[tornado.concurrent.Future]
    """
    pairs = kwargs if key_value_pairs is None else key_value_pairs
    results = []
    for key, value in pairs.items():
        # existing keys are left untouched
        if key in self:
            continue
        results.append(self.set(key, value))
    return results
Only sets values for keys that are not in the datastore already.
:param dict key_value_pairs:
A set of key value pairs to use to initialize the datastore.
:rtype: Iterable[tornado.concurrent.Future] | entailment |
def close(self):
    """Close this datastore instance and release shared storage if unused."""
    # detach this instance from the shared registry of its domain
    siblings = Datastore.stores[self.domain]
    siblings.remove(self)
    # drop the domain's backing data once no instance references it anymore
    if self.release_storage and not siblings:
        del Datastore.global_data[self.domain]
    del self
def run_multisample_qualimap(output_dir, work_dir, samples, targqc_full_report):
    """ 1. Generates Qualimap2 plots and put into plots_dirpath
        2. Adds records to targqc_full_report.plots
    """
    plots_dirpath = join(output_dir, 'plots')
    individual_report_fpaths = [s.qualimap_html_fpath for s in samples]
    # Reuse existing plots only if every non-hidden file in the plots dir
    # is newer than all individual per-sample Qualimap reports.
    if isdir(plots_dirpath) and not any(
            not can_reuse(join(plots_dirpath, f), individual_report_fpaths)
            for f in listdir(plots_dirpath) if not f.startswith('.')):
        debug('Qualimap miltisample plots exist - ' + plots_dirpath + ', reusing...')
    else:
        # Qualimap2 run for multi-sample plots
        if len([s.qualimap_html_fpath for s in samples if s.qualimap_html_fpath]) > 0:
            if find_executable() is not None:  # and get_qualimap_type(find_executable()) == 'full':
                qualimap_output_dir = join(work_dir, 'qualimap_multi_bamqc')
                # rewrite per-sample Qualimap outputs so multi-bamqc can parse them
                _correct_qualimap_genome_results(samples)
                _correct_qualimap_insert_size_histogram(samples)
                safe_mkdir(qualimap_output_dir)
                # build the sample-name -> report-path table that multi-bamqc expects
                rows = []
                for sample in samples:
                    if sample.qualimap_html_fpath:
                        rows += [[sample.name, sample.qualimap_html_fpath]]
                data_fpath = write_tsv_rows(([], rows), join(qualimap_output_dir, 'qualimap_results_by_sample.tsv'))
                qualimap_plots_dirpath = join(qualimap_output_dir, 'images_multisampleBamQcReport')
                cmdline = find_executable() + ' multi-bamqc --data {data_fpath} -outdir {qualimap_output_dir}'.format(**locals())
                # DISPLAY is unset to force headless rendering
                run(cmdline, env_vars=dict(DISPLAY=None),
                    checks=[lambda _1, _2: verify_dir(qualimap_output_dir)], reuse=cfg.reuse_intermediate)
                if not verify_dir(qualimap_plots_dirpath):
                    warn('Warning: Qualimap for multi-sample analysis failed to finish. TargQC will not contain plots.')
                    return None
                else:
                    # replace any stale plots with the freshly generated ones
                    if exists(plots_dirpath):
                        shutil.rmtree(plots_dirpath)
                    shutil.move(qualimap_plots_dirpath, plots_dirpath)
            else:
                warn('Warning: Qualimap for multi-sample analysis was not found. TargQC will not contain plots.')
                return None
    # register every generated PNG (path relative to output_dir) on the report
    targqc_full_report.plots = []
    for plot_fpath in listdir(plots_dirpath):
        plot_fpath = join(plots_dirpath, plot_fpath)
        if verify_file(plot_fpath) and plot_fpath.endswith('.png'):
            targqc_full_report.plots.append(relpath(plot_fpath, output_dir))
2. Adds records to targqc_full_report.plots | entailment |
def odt_to_ri(f, res, nm):
    r"""Convert an ODT object function to a complex refractive index.

    Inverts the Helmholtz-equation definition of the object function
    used in :abbr:`ODT (Optical Diffraction Tomography)`:

    .. math::

        n(\mathbf{r}) = n_\mathrm{m}
            \sqrt{\frac{f(\mathbf{r})}{k_\mathrm{m}^2} + 1 },

    with the wave number in the medium
    :math:`k_\mathrm{m} = 2 \pi n_\mathrm{m} / \lambda`.

    Parameters
    ----------
    f: n-dimensional ndarray
        The reconstructed object function :math:`f(\mathbf{r})`.
    res: float
        The size of the vacuum wave length :math:`\lambda` in pixels.
    nm: float
        The refractive index of the medium :math:`n_\mathrm{m}` that
        surrounds the object in :math:`f(\mathbf{r})`.

    Returns
    -------
    ri: n-dimensional ndarray
        The complex refractive index :math:`n(\mathbf{r})`.

    Notes
    -----
    The complex square root is ambiguous; by convention the root whose
    real part is positive is returned (matching Slaney's
    diffract/Src/back.c, line 414).
    """
    wavenumber = (2 * np.pi * nm) / res
    index = nm * np.sqrt(f / wavenumber ** 2 + 1)
    # Flip any entries whose real part came out negative so that the
    # physically meaningful (positive) branch of the root is returned.
    flipped = np.where(index.real < 0)
    index[flipped] *= -1
    return index
In :abbr:`ODT (Optical Diffraction Tomography)`, the object function
is defined by the Helmholtz equation
.. math::
f(\mathbf{r}) = k_\mathrm{m}^2 \left[
\left( \frac{n(\mathbf{r})}{n_\mathrm{m}} \right)^2 - 1
\right]
with :math:`k_\mathrm{m} = \frac{2\pi n_\mathrm{m}}{\lambda}`.
By inverting this equation, we obtain the refractive index
:math:`n(\mathbf{r})`.
.. math::
n(\mathbf{r}) = n_\mathrm{m}
\sqrt{\frac{f(\mathbf{r})}{k_\mathrm{m}^2} + 1 }
Parameters
----------
f: n-dimensional ndarray
The reconstructed object function :math:`f(\mathbf{r})`.
res: float
The size of the vacuum wave length :math:`\lambda` in pixels.
nm: float
The refractive index of the medium :math:`n_\mathrm{m}` that
surrounds the object in :math:`f(\mathbf{r})`.
Returns
-------
ri: n-dimensional ndarray
The complex refractive index :math:`n(\mathbf{r})`.
Notes
-----
Because this function computes the root of a complex number, there
are several solutions to the refractive index. Always the positive
(real) root of the refractive index is used. | entailment |
def opt_to_ri(f, res, nm):
    r"""Convert an OPT phase object function to a refractive index.

    In :abbr:`OPT (Optical Projection Tomography)` the object function
    is computed from the raw phase data; it maps to refractive index as

    .. math::

        n(\mathbf{r}) = n_\mathrm{m} +
            \frac{f(\mathbf{r}) \cdot \lambda}{2 \pi}

    Parameters
    ----------
    f: n-dimensional ndarray
        The reconstructed object function :math:`f(\mathbf{r})`.
    res: float
        The size of the vacuum wave length :math:`\lambda` in pixels.
    nm: float
        The refractive index of the medium :math:`n_\mathrm{m}` that
        surrounds the object in :math:`f(\mathbf{r})`.

    Returns
    -------
    ri: n-dimensional ndarray
        The complex refractive index :math:`n(\mathbf{r})`.

    Notes
    -----
    Not intended for diffraction tomography data; use
    :py:func:`odt_to_ri` for ODT reconstructions.
    """
    return nm + f * res / (2 * np.pi)
In :abbr:`OPT (Optical Projection Tomography)`, the object function
is computed from the raw phase data. This method converts phase data
to refractive index data.
.. math::
n(\mathbf{r}) = n_\mathrm{m} +
\frac{f(\mathbf{r}) \cdot \lambda}{2 \pi}
Parameters
----------
f: n-dimensional ndarray
The reconstructed object function :math:`f(\mathbf{r})`.
res: float
The size of the vacuum wave length :math:`\lambda` in pixels.
nm: float
The refractive index of the medium :math:`n_\mathrm{m}` that
surrounds the object in :math:`f(\mathbf{r})`.
Returns
-------
ri: n-dimensional ndarray
The complex refractive index :math:`n(\mathbf{r})`.
Notes
-----
This function is not meant to be used with diffraction tomography
data. For ODT, use :py:func:`odt_to_ri` instead. | entailment |
def rasterize(vectorobject, reference, outname=None, burn_values=1, expressions=None, nodata=0, append=False):
    """
    rasterize a vector object

    Parameters
    ----------
    vectorobject: Vector
        the vector object to be rasterized
    reference: Raster
        a reference Raster object to retrieve geo information and extent from
    outname: str or None
        the name of the GeoTiff output file; if None, an in-memory object of type :class:`Raster` is returned and
        parameter outname is ignored
    burn_values: int or list
        the values to be written to the raster file
    expressions: list
        SQL expressions to filter the vector object by attributes
    nodata: int
        the nodata value of the target raster file
    append: bool
        if the output file already exists, update this file with new rasterized values?
        If True and the output file exists, parameters `reference` and `nodata` are ignored.

    Returns
    -------
    Raster or None
        if outname is `None`, a raster object pointing to an in-memory dataset else `None`

    Example
    -------
    >>> from spatialist import Vector, Raster, rasterize
    >>> vec = Vector('source.shp')
    >>> ref = Raster('reference.tif')
    >>> outname = 'target.tif'
    >>> expressions = ['ATTRIBUTE=1', 'ATTRIBUTE=2']
    >>> burn_values = [1, 2]
    >>> rasterize(vec, reference, outname, burn_values, expressions)
    """
    if expressions is None:
        expressions = ['']
    if isinstance(burn_values, (int, float)):
        burn_values = [burn_values]
    if len(expressions) != len(burn_values):
        raise RuntimeError('expressions and burn_values of different length')
    # dry-run all filters first so invalid expressions are reported together
    failed = []
    for exp in expressions:
        try:
            vectorobject.layer.SetAttributeFilter(exp)
        except RuntimeError:
            failed.append(exp)
    if len(failed) > 0:
        # bug fix: separator was '", ' which left all but the first
        # expression unquoted (e.g. ["a", b]); '", "' quotes each one
        raise RuntimeError('failed to set the following attribute filter(s): ["{}"]'.format('", "'.join(failed)))
    if append and outname is not None and os.path.isfile(outname):
        # update mode: burn into the existing file as-is
        target_ds = gdal.Open(outname, GA_Update)
    else:
        if not isinstance(reference, Raster):
            raise RuntimeError("parameter 'reference' must be of type Raster")
        if outname is not None:
            target_ds = gdal.GetDriverByName('GTiff').Create(outname, reference.cols, reference.rows, 1, gdal.GDT_Byte)
        else:
            target_ds = gdal.GetDriverByName('MEM').Create('', reference.cols, reference.rows, 1, gdal.GDT_Byte)
        # inherit geotransform/projection from the reference raster
        target_ds.SetGeoTransform(reference.raster.GetGeoTransform())
        target_ds.SetProjection(reference.raster.GetProjection())
        band = target_ds.GetRasterBand(1)
        band.SetNoDataValue(nodata)
        band.FlushCache()
        band = None
    # burn each filtered subset with its corresponding value
    for expression, value in zip(expressions, burn_values):
        vectorobject.layer.SetAttributeFilter(expression)
        gdal.RasterizeLayer(target_ds, [1], vectorobject.layer, burn_values=[value])
    # reset the filter so the vector object is left unmodified
    vectorobject.layer.SetAttributeFilter('')
    if outname is None:
        return Raster(target_ds)
    else:
        # dereferencing flushes and closes the GDAL dataset
        target_ds = None
Parameters
----------
vectorobject: Vector
the vector object to be rasterized
reference: Raster
a reference Raster object to retrieve geo information and extent from
outname: str or None
the name of the GeoTiff output file; if None, an in-memory object of type :class:`Raster` is returned and
parameter outname is ignored
burn_values: int or list
the values to be written to the raster file
expressions: list
SQL expressions to filter the vector object by attributes
nodata: int
the nodata value of the target raster file
append: bool
if the output file already exists, update this file with new rasterized values?
If True and the output file exists, parameters `reference` and `nodata` are ignored.
Returns
-------
Raster or None
if outname is `None`, a raster object pointing to an in-memory dataset else `None`
Example
-------
>>> from spatialist import Vector, Raster, rasterize
>>> vec = Vector('source.shp')
>>> ref = Raster('reference.tif')
>>> outname = 'target.tif'
>>> expressions = ['ATTRIBUTE=1', 'ATTRIBUTE=2']
>>> burn_values = [1, 2]
>>> rasterize(vec, reference, outname, burn_values, expressions) | entailment |
def reproject(rasterobject, reference, outname, targetres=None, resampling='bilinear', format='GTiff'):
    """
    reproject a raster file

    Parameters
    ----------
    rasterobject: Raster or str
        the raster image to be reprojected
    reference: Raster, Vector, str, int or osr.SpatialReference
        either a projection string or a spatial object with an attribute 'projection'
    outname: str
        the name of the output file
    targetres: tuple
        the output resolution in the target SRS; a two-entry tuple is required: (xres, yres)
    resampling: str
        the resampling algorithm to be used
    format: str
        the output file format

    Returns
    -------
    """
    if isinstance(rasterobject, str):
        rasterobject = Raster(rasterobject)
    if not isinstance(rasterobject, Raster):
        raise RuntimeError('rasterobject must be of type Raster or str')
    if isinstance(reference, (Raster, Vector)):
        # spatial reference object: take projection directly; resolution from
        # the explicit parameter or, for Raster references, from the object
        projection = reference.projection
        if targetres is not None:
            xres, yres = targetres
        elif hasattr(reference, 'res'):
            xres, yres = reference.res
        else:
            raise RuntimeError('parameter targetres is missing and cannot be read from the reference')
    elif isinstance(reference, (int, str, osr.SpatialReference)):
        # CRS identifier (EPSG code, WKT/proj4 string, SRS object):
        # normalize to proj4; resolution must then be given explicitly
        try:
            projection = crsConvert(reference, 'proj4')
        except TypeError:
            raise RuntimeError('reference projection cannot be read')
        if targetres is None:
            raise RuntimeError('parameter targetres is missing and cannot be read from the reference')
        else:
            xres, yres = targetres
    else:
        raise TypeError('reference must be of type Raster, Vector, osr.SpatialReference, str or int')
    # source and destination nodata are kept identical on purpose
    options = {'format': format,
               'resampleAlg': resampling,
               'xRes': xres,
               'yRes': yres,
               'srcNodata': rasterobject.nodata,
               'dstNodata': rasterobject.nodata,
               'dstSRS': projection}
    gdalwarp(rasterobject, outname, options)
Parameters
----------
rasterobject: Raster or str
the raster image to be reprojected
reference: Raster, Vector, str, int or osr.SpatialReference
either a projection string or a spatial object with an attribute 'projection'
outname: str
the name of the output file
targetres: tuple
the output resolution in the target SRS; a two-entry tuple is required: (xres, yres)
resampling: str
the resampling algorithm to be used
format: str
the output file format
Returns
------- | entailment |
def stack(srcfiles, dstfile, resampling, targetres, dstnodata, srcnodata=None, shapefile=None, layernames=None,
          sortfun=None, separate=False, overwrite=False, compress=True, cores=4):
    """
    function for mosaicking, resampling and stacking of multiple raster files into a 3D data cube

    Parameters
    ----------
    srcfiles: list
        a list of file names or a list of lists; each sub-list is treated as a task to mosaic its containing files
    dstfile: str
        the destination file or a directory (if `separate` is True)
    resampling: {near, bilinear, cubic, cubicspline, lanczos, average, mode, max, min, med, Q1, Q3}
        the resampling method; see `documentation of gdalwarp <https://www.gdal.org/gdalwarp.html>`_.
    targetres: tuple or list
        two entries for x and y spatial resolution in units of the source CRS
    srcnodata: int, float or None
        the nodata value of the source files; if left at the default (None), the nodata values are read from the files
    dstnodata: int or float
        the nodata value of the destination file(s)
    shapefile: str, Vector or None
        a shapefile for defining the spatial extent of the destination files
    layernames: list
        the names of the output layers; if `None`, the basenames of the input files are used; overrides sortfun
    sortfun: function
        a function for sorting the input files; not used if layernames is not None.
        This is first used for sorting the items in each sub-list of srcfiles;
        the basename of the first item in a sub-list will then be used as the name for the mosaic of this group.
        After mosaicing, the function is again used for sorting the names in the final output
        (only relevant if `separate` is False)
    separate: bool
        should the files be written to a single raster stack (ENVI format) or separate files (GTiff format)?
    overwrite: bool
        overwrite the file if it already exists?
    compress: bool
        compress the geotiff files?
    cores: int
        the number of CPU threads to use; this is only relevant if `separate` is True, in which case each
        mosaicing/resampling job is passed to a different CPU

    Returns
    -------

    Notes
    -----
    This function does not reproject any raster files. Thus, the CRS must be the same for all input raster files.
    This is checked prior to executing gdalwarp. In case a shapefile is defined, it is internally reprojected to the
    raster CRS prior to retrieving its extent.

    Examples
    --------
    .. code-block:: python

        from pyroSAR.ancillary import groupbyTime, find_datasets, seconds
        from spatialist.raster import stack

        # find pyroSAR files by metadata attributes
        archive_s1 = '/.../sentinel1/GRD/processed'
        scenes_s1 = find_datasets(archive_s1, sensor=('S1A', 'S1B'), acquisition_mode='IW')

        # group images by acquisition time
        groups = groupbyTime(images=scenes_s1, function=seconds, time=30)

        # mosaic individual groups and stack the mosaics to a single ENVI file
        # only files overlapping with the shapefile are selected and resampled to its extent
        stack(srcfiles=groups, dstfile='stack', resampling='bilinear', targetres=(20, 20),
              srcnodata=-99, dstnodata=-99, shapefile='site.shp', separate=False)
    """
    # perform some checks on the input data
    if len(dissolve(srcfiles)) == 0:
        raise RuntimeError('no input files provided to function raster.stack')
    if layernames is not None:
        if len(layernames) != len(srcfiles):
            raise RuntimeError('mismatch between number of source file groups and layernames')
    if not isinstance(targetres, (list, tuple)) or len(targetres) != 2:
        raise RuntimeError('targetres must be a list or tuple with two entries for x and y resolution')
    if len(srcfiles) == 1 and not isinstance(srcfiles[0], list):
        raise RuntimeError('only one file specified; nothing to be done')
    if resampling not in ['near', 'bilinear', 'cubic', 'cubicspline', 'lanczos',
                          'average', 'mode', 'max', 'min', 'med', 'Q1', 'Q3']:
        raise RuntimeError('resampling method not supported')
    # all inputs must share one CRS (no reprojection is performed here)
    projections = list()
    for x in dissolve(srcfiles):
        try:
            projection = Raster(x).projection
        except RuntimeError as e:
            print('cannot read file: {}'.format(x))
            raise e
        projections.append(projection)
    projections = list(set(projections))
    if len(projections) > 1:
        raise RuntimeError('raster projection mismatch')
    elif projections[0] == '':
        raise RuntimeError('could not retrieve the projection from any of the {} input images'.format(len(srcfiles)))
    else:
        srs = projections[0]
    ##########################################################################################
    # read shapefile bounding coordinates and reduce list of rasters to those overlapping with the shapefile
    if shapefile is not None:
        shp = shapefile.clone() if isinstance(shapefile, Vector) else Vector(shapefile)
        shp.reproject(srs)
        ext = shp.extent
        arg_ext = (ext['xmin'], ext['ymin'], ext['xmax'], ext['ymax'])
        for i, item in enumerate(srcfiles):
            group = item if isinstance(item, list) else [item]
            if layernames is None and sortfun is not None:
                group = sorted(group, key=sortfun)
            # keep only rasters intersecting the (reprojected) shapefile
            group = [x for x in group if intersect(shp, Raster(x).bbox())]
            if len(group) > 1:
                srcfiles[i] = group
            elif len(group) == 1:
                srcfiles[i] = group[0]
            else:
                srcfiles[i] = None
        shp.close()
        # drop groups with no overlapping raster at all
        srcfiles = list(filter(None, srcfiles))
    else:
        arg_ext = None
    ##########################################################################################
    # set general options and parametrization
    dst_base = os.path.splitext(dstfile)[0]
    options_warp = {'options': ['-q'],
                    'format': 'GTiff' if separate else 'ENVI',
                    'outputBounds': arg_ext, 'multithread': True,
                    'dstNodata': dstnodata,
                    'xRes': targetres[0], 'yRes': targetres[1],
                    'resampleAlg': resampling}
    if overwrite:
        options_warp['options'] += ['-overwrite']
    if separate and compress:
        options_warp['options'] += ['-co', 'COMPRESS=DEFLATE', '-co', 'PREDICTOR=2']
    options_buildvrt = {'outputBounds': arg_ext}
    if srcnodata is not None:
        options_warp['srcNodata'] = srcnodata
        options_buildvrt['srcNodata'] = srcnodata
    ##########################################################################################
    # create VRT files for mosaicing
    for i, group in enumerate(srcfiles):
        if isinstance(group, list):
            if len(group) > 1:
                base = group[0]
                # in-memory VRT files cannot be shared between multiple processes on Windows
                # this has to do with different process forking behaviour
                # see function spatialist.ancillary.multicore and this link:
                # https://stackoverflow.com/questions/38236211/why-multiprocessing-process-behave-differently-on-windows-and-linux-for-global-o
                vrt_base = os.path.splitext(os.path.basename(base))[0] + '.vrt'
                if platform.system() == 'Windows':
                    vrt = os.path.join(tempfile.gettempdir(), vrt_base)
                else:
                    vrt = '/vsimem/' + vrt_base
                gdalbuildvrt(group, vrt, options_buildvrt)
                srcfiles[i] = vrt
            else:
                srcfiles[i] = group[0]
        else:
            srcfiles[i] = group
    ##########################################################################################
    # define the output band names
    # if no specific layernames are defined, sort files by custom function
    if layernames is None and sortfun is not None:
        srcfiles = sorted(srcfiles, key=sortfun)
    # use the file basenames without extension as band names if none are defined
    bandnames = [os.path.splitext(os.path.basename(x))[0] for x in srcfiles] if layernames is None else layernames
    if len(list(set(bandnames))) != len(bandnames):
        raise RuntimeError('output bandnames are not unique')
    ##########################################################################################
    # create the actual image files
    if separate:
        # one GTiff per band name, warped in parallel across `cores` CPUs
        if not os.path.isdir(dstfile):
            os.makedirs(dstfile)
        dstfiles = [os.path.join(dstfile, x) + '.tif' for x in bandnames]
        jobs = [x for x in zip(srcfiles, dstfiles)]
        if not overwrite:
            jobs = [x for x in jobs if not os.path.isfile(x[1])]
            if len(jobs) == 0:
                print('all target tiff files already exist, nothing to be done')
                return
        srcfiles, dstfiles = map(list, zip(*jobs))
        multicore(gdalwarp, cores=cores, multiargs={'src': srcfiles, 'dst': dstfiles}, options=options_warp)
    else:
        if len(srcfiles) == 1:
            # a single source needs no stacking; write a plain GTiff instead
            options_warp['format'] = 'GTiff'
            if not dstfile.endswith('.tif'):
                dstfile = os.path.splitext(dstfile)[0] + '.tif'
            gdalwarp(srcfiles[0], dstfile, options_warp)
        else:
            # create VRT for stacking
            vrt = '/vsimem/' + os.path.basename(dst_base) + '.vrt'
            options_buildvrt['options'] = ['-separate']
            gdalbuildvrt(srcfiles, vrt, options_buildvrt)
            # warp files
            gdalwarp(vrt, dstfile, options_warp)
            # edit ENVI HDR files to contain specific layer names
            with envi.HDRobject(dstfile + '.hdr') as hdr:
                hdr.band_names = bandnames
                hdr.write()
Parameters
----------
srcfiles: list
a list of file names or a list of lists; each sub-list is treated as a task to mosaic its containing files
dstfile: str
the destination file or a directory (if `separate` is True)
resampling: {near, bilinear, cubic, cubicspline, lanczos, average, mode, max, min, med, Q1, Q3}
the resampling method; see `documentation of gdalwarp <https://www.gdal.org/gdalwarp.html>`_.
targetres: tuple or list
two entries for x and y spatial resolution in units of the source CRS
srcnodata: int, float or None
the nodata value of the source files; if left at the default (None), the nodata values are read from the files
dstnodata: int or float
the nodata value of the destination file(s)
shapefile: str, Vector or None
a shapefile for defining the spatial extent of the destination files
layernames: list
the names of the output layers; if `None`, the basenames of the input files are used; overrides sortfun
sortfun: function
a function for sorting the input files; not used if layernames is not None.
This is first used for sorting the items in each sub-list of srcfiles;
the basename of the first item in a sub-list will then be used as the name for the mosaic of this group.
After mosaicing, the function is again used for sorting the names in the final output
(only relevant if `separate` is False)
separate: bool
should the files be written to a single raster stack (ENVI format) or separate files (GTiff format)?
overwrite: bool
overwrite the file if it already exists?
compress: bool
compress the geotiff files?
cores: int
the number of CPU threads to use; this is only relevant if `separate` is True, in which case each
mosaicing/resampling job is passed to a different CPU
Returns
-------
Notes
-----
This function does not reproject any raster files. Thus, the CRS must be the same for all input raster files.
This is checked prior to executing gdalwarp. In case a shapefile is defined, it is internally reprojected to the
raster CRS prior to retrieving its extent.
Examples
--------
.. code-block:: python
from pyroSAR.ancillary import groupbyTime, find_datasets, seconds
from spatialist.raster import stack
# find pyroSAR files by metadata attributes
archive_s1 = '/.../sentinel1/GRD/processed'
scenes_s1 = find_datasets(archive_s1, sensor=('S1A', 'S1B'), acquisition_mode='IW')
# group images by acquisition time
groups = groupbyTime(images=scenes_s1, function=seconds, time=30)
# mosaic individual groups and stack the mosaics to a single ENVI file
# only files overlapping with the shapefile are selected and resampled to its extent
stack(srcfiles=groups, dstfile='stack', resampling='bilinear', targetres=(20, 20),
srcnodata=-99, dstnodata=-99, shapefile='site.shp', separate=False) | entailment |
def allstats(self, approximate=False):
    """
    Compute some basic raster statistics

    Parameters
    ----------
    approximate: bool
        approximate statistics from overviews or a subset of all tiles?

    Returns
    -------
    list of dicts
        a list with a dictionary of statistics for each band, or ``None``
        for a band whose statistics could not be computed.
        Keys: `min`, `max`, `mean`, `sdev`.
        See :osgeo:meth:`gdal.Band.ComputeStatistics`.
    """
    statcollect = []
    for x in self.layers():
        try:
            stats = x.ComputeStatistics(approximate)
        except RuntimeError:
            # keep a None placeholder for this band; previously the code
            # fell through to dict(zip(..., None)) and raised TypeError
            stats = None
        else:
            stats = dict(zip(['min', 'max', 'mean', 'sdev'], stats))
        statcollect.append(stats)
    return statcollect
Parameters
----------
approximate: bool
approximate statistics from overviews or a subset of all tiles?
Returns
-------
list of dicts
a list with a dictionary of statistics for each band. Keys: `min`, `max`, `mean`, `sdev`.
See :osgeo:meth:`gdal.Band.ComputeStatistics`. | entailment |
def array(self):
    """
    read all raster bands into a numpy ndarray

    Returns
    -------
    numpy.ndarray
        the array containing all raster data; nodata pixels are replaced
        with :obj:`numpy.nan`
    """
    if self.bands == 1:
        return self.matrix()
    # bands-last layout: (rows, cols, bands)
    raw = self.raster.ReadAsArray().transpose(1, 2, 0)
    if isinstance(self.nodata, list):
        # per-band nodata values
        for band in range(self.bands):
            layer = raw[:, :, band]
            layer[layer == self.nodata[band]] = np.nan
    else:
        raw[raw == self.nodata] = np.nan
    return raw
Returns
-------
numpy.ndarray
the array containing all raster data | entailment |
def bandnames(self, names):
    """
    set the names of the raster bands

    Parameters
    ----------
    names: list of str
        the names to be set; must be of same length as the number of bands

    Returns
    -------
    """
    if not isinstance(names, list):
        raise TypeError('the names to be set must be of type list')
    expected = self.bands
    if len(names) != expected:
        message = 'length mismatch of names to be set ({}) and number of bands ({})'
        raise ValueError(message.format(len(names), expected))
    self.__bandnames = names
Parameters
----------
names: list of str
the names to be set; must be of same length as the number of bands
Returns
------- | entailment |
def bbox(self, outname=None, format='ESRI Shapefile', overwrite=True):
    """
    Return or write the bounding box of the raster.

    Parameters
    ----------
    outname: str or None
        the name of the file to write; If `None`, the bounding box is returned as vector object
    format: str
        The file format to write
    overwrite: bool
        overwrite an already existing file?

    Returns
    -------
    Vector or None
        the bounding box vector object
    """
    # delegate to the module-level bbox helper in both cases
    if outname is not None:
        bbox(self.geo, self.proj4, outname=outname, format=format, overwrite=overwrite)
        return None
    return bbox(self.geo, self.proj4)
----------
outname: str or None
the name of the file to write; If `None`, the bounding box is returned as vector object
format: str
The file format to write
overwrite: bool
overwrite an already existing file?
Returns
-------
Vector or None
the bounding box vector object | entailment |
def extract(self, px, py, radius=1, nodata=None):
    """
    extract weighted average of pixels intersecting with a defined radius to a point.

    Parameters
    ----------
    px: int or float
        the x coordinate in units of the Raster SRS
    py: int or float
        the y coordinate in units of the Raster SRS
    radius: int or float
        the radius around the point to extract pixel values from; defined as multiples of the pixel resolution
    nodata: int
        a value to ignore from the computations; If `None`, the nodata value of the Raster object is used

    Returns
    -------
    int or float
        the the weighted average of all pixels within the defined radius
    """
    if not self.geo['xmin'] <= px <= self.geo['xmax']:
        raise RuntimeError('px is out of bounds')
    if not self.geo['ymin'] <= py <= self.geo['ymax']:
        raise RuntimeError('py is out of bounds')
    if nodata is None:
        nodata = self.nodata
    xres, yres = self.res
    # half-pixel offsets used to address pixel centers
    hx = xres / 2.0
    hy = yres / 2.0
    # search window extent in map units
    xlim = float(xres * radius)
    ylim = float(yres * radius)
    # compute minimum x and y pixel coordinates
    xmin = int(floor((px - self.geo['xmin'] - xlim) / xres))
    ymin = int(floor((self.geo['ymax'] - py - ylim) / yres))
    xmin = xmin if xmin >= 0 else 0
    ymin = ymin if ymin >= 0 else 0
    # compute maximum x and y pixel coordinates
    xmax = int(ceil((px - self.geo['xmin'] + xlim) / xres))
    ymax = int(ceil((self.geo['ymax'] - py + ylim) / yres))
    xmax = xmax if xmax <= self.cols else self.cols
    ymax = ymax if ymax <= self.rows else self.rows
    # load array subset (from memory if the raster was load()-ed, else from disk)
    if self.__data[0] is not None:
        array = self.__data[0][ymin:ymax, xmin:xmax]
        # print('using loaded array of size {}, '
        #       'indices [{}:{}, {}:{}] (row/y, col/x)'.format(array.shape, ymin, ymax, xmin, xmax))
    else:
        array = self.raster.GetRasterBand(1).ReadAsArray(xmin, ymin, xmax - xmin, ymax - ymin)
        # print('loading array of size {}, '
        #       'indices [{}:{}, {}:{}] (row/y, col/x)'.format(array.shape, ymin, ymax, xmin, xmax))
    sum = 0
    counter = 0
    weightsum = 0
    for x in range(xmin, xmax):
        for y in range(ymin, ymax):
            # check whether point is a valid image index
            val = array[y - ymin, x - xmin]
            if val != nodata:
                # compute distances of pixel center coordinate to requested point
                xc = x * xres + hx + self.geo['xmin']
                # NOTE(review): yc adds hy while xc also adds the half pixel;
                # for a negative-north axis one might expect '- hy' here — confirm
                yc = self.geo['ymax'] - y * yres + hy
                dx = abs(xc - px)
                dy = abs(yc - py)
                # check whether point lies within ellipse: if ((dx ** 2) / xlim ** 2) + ((dy ** 2) / ylim ** 2) <= 1
                # NOTE(review): the weight grows with distance, so pixels farther
                # from (px, py) contribute MORE; an inverse-distance weight may
                # have been intended — confirm against callers
                weight = sqrt(dx ** 2 + dy ** 2)
                sum += val * weight
                weightsum += weight
                counter += 1
    array = None
    # return the weighted mean, or the nodata value if no valid pixel was seen
    if counter > 0:
        return sum / weightsum
    else:
        return nodata
Parameters
----------
px: int or float
the x coordinate in units of the Raster SRS
py: int or float
the y coordinate in units of the Raster SRS
radius: int or float
the radius around the point to extract pixel values from; defined as multiples of the pixel resolution
nodata: int
a value to ignore from the computations; If `None`, the nodata value of the Raster object is used
Returns
-------
int or float
the the weighted average of all pixels within the defined radius | entailment |
def geo(self):
    """
    General image geo information.
    Returns
    -------
    dict
        a dictionary with keys `xmin`, `xmax`, `xres`, `rotation_x`, `ymin`, `ymax`, `yres`, `rotation_y`
    """
    keys = ['xmin', 'xres', 'rotation_x', 'ymax', 'rotation_y', 'yres']
    transform = self.raster.GetGeoTransform()
    out = {key: value for key, value in zip(keys, transform)}
    # yres is negative, so ymin is computed by adding it row-wise to ymax
    out['xmax'] = out['xmin'] + out['xres'] * self.cols
    out['ymin'] = out['ymax'] + out['yres'] * self.rows
    return out
Returns
-------
dict
a dictionary with keys `xmin`, `xmax`, `xres`, `rotation_x`, `ymin`, `ymax`, `yres`, `rotation_y` | entailment |
def is_valid(self):
    """
    Check image integrity.
    Tries to compute the checksum for each raster layer and returns False if this fails.
    See this forum entry:
    `How to check if image is valid? <https://lists.osgeo.org/pipermail/gdal-dev/2013-November/037520.html>`_.
    Returns
    -------
    bool
        is the file valid?
    """
    for band_index in range(self.raster.RasterCount):
        try:
            # computing the checksum forces GDAL to read the band;
            # a corrupt file raises RuntimeError here. The checksum value
            # itself is irrelevant, so it is deliberately not stored.
            self.raster.GetRasterBand(band_index + 1).Checksum()
        except RuntimeError:
            return False
    return True
Tries to compute the checksum for each raster layer and returns False if this fails.
See this forum entry:
`How to check if image is valid? <https://lists.osgeo.org/pipermail/gdal-dev/2013-November/037520.html>`_.
Returns
-------
bool
is the file valid? | entailment |
def load(self):
    """
    load all raster data to internal memory arrays.
    This shortens the read time of other methods like :meth:`matrix`.
    """
    # cache each band (1-based in GDAL) into the 0-based internal list
    for index in range(self.bands):
        self.__data[index] = self.matrix(index + 1)
This shortens the read time of other methods like :meth:`matrix`. | entailment |
def matrix(self, band=1, mask_nan=True):
    """
    read a raster band (subset) into a numpy ndarray
    Parameters
    ----------
    band: int
        the band to read the matrix from; 1-based indexing
    mask_nan: bool
        convert nodata values to :obj:`numpy.nan`? As :obj:`numpy.nan` requires at least float values, any integer array is cast
        to float32.
    Returns
    -------
    numpy.ndarray
        the matrix (subset) of the selected band
    """
    data = self.__data[band - 1]
    if data is None:
        # band not cached in memory yet; read it from disk
        data = self.raster.GetRasterBand(band).ReadAsArray()
    if mask_nan:
        if isinstance(self.nodata, list):
            fill = self.nodata[band - 1]
        else:
            fill = self.nodata
        try:
            data[data == fill] = np.nan
        except ValueError:
            # integer arrays cannot hold NaN; promote to float32 first
            data = data.astype('float32')
            data[data == fill] = np.nan
    return data
Parameters
----------
band: int
the band to read the matrix from; 1-based indexing
mask_nan: bool
convert nodata values to :obj:`numpy.nan`? As :obj:`numpy.nan` requires at least float values, any integer array is cast
to float32.
Returns
-------
numpy.ndarray
the matrix (subset) of the selected band | entailment |
def res(self):
    """
    the raster resolution in x and y direction
    Returns
    -------
    tuple
        (xres, yres)
    """
    # yres is stored negative in the geotransform; report magnitudes only
    xres = abs(float(self.geo['xres']))
    yres = abs(float(self.geo['yres']))
    return (xres, yres)
Returns
-------
tuple
(xres, yres) | entailment |
def rescale(self, fun):
    """
    perform raster computations with custom functions and assign them to the existing raster object in memory
    Parameters
    ----------
    fun: function
        the custom function to compute on the data
    Examples
    --------
    >>> with Raster('filename') as ras:
    >>>     ras.rescale(lambda x: 10 * x)
    """
    if self.bands != 1:
        raise ValueError('only single band images are currently supported')
    # apply the user function to the (single) band and write the
    # result back into the in-memory raster object
    transformed = fun(self.matrix())
    self.assign(transformed, band=0)
Parameters
----------
fun: function
the custom function to compute on the data
Examples
--------
>>> with Raster('filename') as ras:
>>> ras.rescale(lambda x: 10 * x) | entailment |
def write(self, outname, dtype='default', format='ENVI', nodata='default', compress_tif=False, overwrite=False):
    """
    write the raster object to a file.
    Parameters
    ----------
    outname: str
        the file to be written
    dtype: str
        the data type of the written file;
        data type notations of GDAL (e.g. `Float32`) and numpy (e.g. `int8`) are supported.
    format: str
        the file format; e.g. 'GTiff'
    nodata: int or float
        the nodata value to write to the file
    compress_tif: bool
        if the format is GeoTiff, compress the written file?
    overwrite: bool
        overwrite an already existing file?
    Returns
    -------
    None
    """
    if os.path.isfile(outname) and not overwrite:
        raise RuntimeError('target file already exists')
    # GeoTiff output gets a .tif extension appended if none is present
    if format == 'GTiff' and not re.search(r'\.tif[f]*$', outname):
        outname += '.tif'
    # 'default' means: reuse the dtype/nodata of the source raster
    dtype = Dtype(self.dtype if dtype == 'default' else dtype).gdalint
    nodata = self.nodata if nodata == 'default' else nodata
    options = []
    if format == 'GTiff' and compress_tif:
        options += ['COMPRESS=DEFLATE', 'PREDICTOR=2']
    driver = gdal.GetDriverByName(format)
    outDataset = driver.Create(outname, self.cols, self.rows, self.bands, dtype, options)
    driver = None
    # carry metadata, geotransform and projection over from the source
    outDataset.SetMetadata(self.raster.GetMetadata())
    outDataset.SetGeoTransform([self.geo[x] for x in ['xmin', 'xres', 'rotation_x', 'ymax', 'rotation_y', 'yres']])
    if self.projection is not None:
        outDataset.SetProjection(self.projection)
    # write the bands one by one (GDAL bands are 1-based)
    for i in range(1, self.bands + 1):
        outband = outDataset.GetRasterBand(i)
        if nodata is not None:
            outband.SetNoDataValue(nodata)
        mat = self.matrix(band=i)
        dtype_mat = str(mat.dtype)
        dtype_ras = Dtype(dtype).numpystr
        # warn (but still write) if the array dtype cannot be safely cast
        # to the requested output dtype
        if not np.can_cast(dtype_mat, dtype_ras):
            warnings.warn("writing band {}: unsafe casting from type {} to {}".format(i, dtype_mat, dtype_ras))
        outband.WriteArray(mat)
        del mat
        outband.FlushCache()
        outband = None
    if format == 'GTiff':
        # stamp the creation time into the TIFF metadata (UTC)
        outDataset.SetMetadataItem('TIFFTAG_DATETIME', strftime('%Y:%m:%d %H:%M:%S', gmtime()))
    # dereferencing the dataset flushes it to disk
    outDataset = None
    if format == 'ENVI':
        # ENVI needs a sidecar header file carrying the band names
        hdrfile = os.path.splitext(outname)[0] + '.hdr'
        with HDRobject(hdrfile) as hdr:
            hdr.band_names = self.bandnames
            hdr.write()
Parameters
----------
outname: str
the file to be written
dtype: str
the data type of the written file;
data type notations of GDAL (e.g. `Float32`) and numpy (e.g. `int8`) are supported.
format: str
the file format; e.g. 'GTiff'
nodata: int or float
the nodata value to write to the file
compress_tif: bool
if the format is GeoTiff, compress the written file?
overwrite: bool
overwrite an already existing file?
Returns
------- | entailment |
def numpy2gdalint(self):
    """
    create a dictionary for mapping numpy data types to GDAL data type codes
    The map is computed once and cached on the instance; subsequent calls
    return the cached dictionary.
    Returns
    -------
    dict
        the type map
    """
    # BUG FIX: the original used hasattr(self, '__numpy2gdalint') together
    # with self.__numpy2gdalint. Inside a class body the attribute access is
    # name-mangled to _ClassName__numpy2gdalint while the hasattr() string
    # stays literal, so the cache check always failed and the map was rebuilt
    # on every call. A single-underscore attribute avoids the mismatch.
    if not hasattr(self, '_numpy2gdalint'):
        tmap = {}
        # NOTE(review): np.sctypes is deprecated (removed in numpy 2.0) --
        # migrate to an explicit dtype list when dropping old numpy support
        for group in ['int', 'uint', 'float', 'complex']:
            for dtype in np.sctypes[group]:
                code = gdal_array.NumericTypeCodeToGDALTypeCode(dtype)
                if code is not None:
                    tmap[dtype().dtype.name] = code
        self._numpy2gdalint = tmap
    return self._numpy2gdalint
Returns
-------
dict
the type map | entailment |
def static_parser(static):
    """Parse object describing static routes.
    Might be a list, a dict or a list of dicts.
    Yields either plain entries or ``(key, value)`` pairs.
    """
    if static is None:
        return
    # a bare dict is treated as a sequence of its (key, value) pairs
    groups = static.items() if isinstance(static, dict) else static
    for entry in groups:
        if isinstance(entry, dict):
            # expand nested dicts into individual pairs
            yield from entry.items()
        else:
            yield entry
Might be a list, a dict or a list of dicts. | entailment |
def analyses_info(self):
    """Add analyses from the analyses folder."""
    config_path = os.path.join(self.analyses_path, 'index.yaml')
    # watch the config so the server reloads when it changes
    tornado.autoreload.watch(config_path)
    with io.open(config_path, 'r', encoding='utf8') as stream:
        self.info.update(yaml.safe_load(stream))
    if self.debug:
        # random suffix busts browser caches while debugging
        suffix = '.debug-{:04X}'.format(int(random.random() * 0xffff))
        self.info['version'] += suffix
    readme = Readme(self.analyses_path)
    if self.info['description'] is None:
        self.info['description'] = readme.text.strip()
    self.info['description_html'] = readme.html
def register_metas(self):
    """Register meta information for all analyses.

    Collects the per-analysis ``build`` and ``watch`` attributes,
    registers each meta's routes, adds watched files to tornado's
    autoreload and stores the build commands for :meth:`build`.
    """
    # concatenate some attributes to global lists:
    # the top-level info dict is appended so file-level settings count too
    aggregated = {'build': [], 'watch': []}
    for attribute, values in aggregated.items():
        for info in self.info['analyses'] + [self.info]:
            if attribute in info:
                values.append(info[attribute])
    for meta in self.metas:
        log.debug('Registering meta information {}'.format(meta.name))
        # grab routes
        self.routes += [(r'/{}/{}'.format(meta.name, route),
                         handler, data)
                        for route, handler, data in meta.routes]
    # process files to watch for autoreload
    if aggregated['watch']:
        # flatten the list of watch-pattern lists
        to_watch = [expr for w in aggregated['watch'] for expr in w]
        log.info('watching additional files: {}'.format(to_watch))
        # glob patterns are relative to the analyses folder, so chdir
        # there temporarily and restore the cwd afterwards
        cwd = os.getcwd()
        os.chdir(self.analyses_path)
        if glob2:
            # glob2 supports recursive '**' patterns
            files = [os.path.join(self.analyses_path, fn)
                     for expr in to_watch for fn in glob2.glob(expr)]
        else:
            files = [os.path.join(self.analyses_path, fn)
                     for expr in to_watch for fn in glob.glob(expr)]
            if any('**' in expr for expr in to_watch):
                log.warning('Please run "pip install glob2" to properly '
                            'process watch patterns with "**".')
        os.chdir(cwd)
        for fn in files:
            log.debug('watch file {}'.format(fn))
            tornado.autoreload.watch(fn)
    # save build commands
    self.build_cmds = aggregated['build']
def build(self):
    """Run the build command specified in index.yaml."""
    for command in self.build_cmds:
        log.info('building command: {}'.format(command))
        # run from inside the analyses folder so relative paths resolve
        shell_command = 'cd {}; {}'.format(self.analyses_path, command)
        log.debug('full command: {}'.format(shell_command))
        subprocess.call(shell_command, shell=True)
    log.info('build done')
def get(self):
    """Serve the overview page that lists all available analyses."""
    meta_infos = self.meta_infos()
    return self.render('index.html',
                       databench_version=DATABENCH_VERSION,
                       meta_infos=meta_infos,
                       **self.info)
def Reset(self):
    """Reset the axis controller and program default H-bridge parameters.

    Sends a reset command over SPI, then writes the default motion profile
    (full-step speed, acceleration, deceleration), the KVAL drive levels
    and the step-mode/configuration registers.
    NOTE(review): register addresses and values look like they target an
    L6470-style stepper driver -- confirm against the device datasheet.
    """
    spi.SPI_write_byte(self.CS, 0xC0)  # reset
    # spi.SPI_write_byte(self.CS, 0x14) # Stall Treshold setup
    # spi.SPI_write_byte(self.CS, 0xFF)
    # spi.SPI_write_byte(self.CS, 0x13) # Over Current Treshold setup
    # spi.SPI_write_byte(self.CS, 0xFF)
    # each register write below is: register address byte, then value byte(s)
    spi.SPI_write_byte(self.CS, 0x15)  # Full Step speed
    spi.SPI_write_byte(self.CS, 0xFF)
    spi.SPI_write_byte(self.CS, 0xFF)
    spi.SPI_write_byte(self.CS, 0x05)  # ACC
    spi.SPI_write_byte(self.CS, 0x00)
    spi.SPI_write_byte(self.CS, 0x20)
    spi.SPI_write_byte(self.CS, 0x06)  # DEC
    spi.SPI_write_byte(self.CS, 0x00)
    spi.SPI_write_byte(self.CS, 0x20)
    # KVAL registers set the drive voltage level for each motion phase
    spi.SPI_write_byte(self.CS, 0x0A)  # KVAL_RUN
    spi.SPI_write_byte(self.CS, 0xd0)
    spi.SPI_write_byte(self.CS, 0x0B)  # KVAL_ACC
    spi.SPI_write_byte(self.CS, 0xd0)
    spi.SPI_write_byte(self.CS, 0x0C)  # KVAL_DEC
    spi.SPI_write_byte(self.CS, 0xd0)
    spi.SPI_write_byte(self.CS, 0x16)  # STEPPER
    spi.SPI_write_byte(self.CS, 0b00000000)
    spi.SPI_write_byte(self.CS, 0x18)  # CONFIG
    spi.SPI_write_byte(self.CS, 0b00111000)
    spi.SPI_write_byte(self.CS, 0b00000000)
def _init_worker(X, X_shape, X_dtype):
    """Pool initializer for _mprotate: stash shared-array info for workers."""
    # The module-level dict makes the shared array visible to the worker
    # processes spawned by the pool (global variables would work as well).
    mprotate_dict.update(X=X, X_shape=X_shape, X_dtype=X_dtype)
def _mprotate(ang, lny, pool, order):
    """Uses multiprocessing to wrap around _rotate
    4x speedup on an intel i7-3820 CPU @ 3.60GHz with 8 cores.
    The function calls _rotate which accesses the `mprotate_dict`.
    Data is rotated in-place.
    Parameters
    ----------
    ang: float
        rotation angle in degrees
    lny: int
        total number of rotations to perform
    pool: instance of multiprocessing.pool.Pool
        the pool object used for the computation
    order: int
        interpolation order
    """
    targ_args = list()
    # BUG FIX: np.int was removed in numpy 1.24; integer floor division
    # of the two non-negative ints is equivalent to int(np.floor(lny / ncores))
    slsize = lny // ncores
    for t in range(ncores):
        ymin = t * slsize
        ymax = (t + 1) * slsize
        # the last worker picks up any remainder rows
        if t == ncores - 1:
            ymax = lny
        targ_args.append((ymin, ymax, ang, order))
    pool.map(_rotate, targ_args)
4x speedup on an intel i7-3820 CPU @ 3.60GHz with 8 cores.
The function calls _rotate which accesses the `mprotate_dict`.
Data is rotated in-place.
Parameters
----------
ang: float
rotation angle in degrees
lny: int
total number of rotations to perform
pool: instance of multiprocessing.pool.Pool
the pool object used for the computation
order: int
interpolation order | entailment |
def backpropagate_3d(uSin, angles, res, nm, lD=0, coords=None,
                     weight_angles=True, onlyreal=False,
                     padding=(True, True), padfac=1.75, padval=None,
                     intp_order=2, dtype=None,
                     num_cores=ncores,
                     save_memory=False,
                     copy=True,
                     count=None, max_count=None,
                     verbose=0):
    r"""3D backpropagation
    Three-dimensional diffraction tomography reconstruction
    algorithm for scattering of a plane wave
    :math:`u_0(\mathbf{r}) = u_0(x,y,z)`
    by a dielectric object with refractive index
    :math:`n(x,y,z)`.
    This method implements the 3D backpropagation algorithm
    :cite:`Mueller2015arxiv`.
    .. math::
        f(\mathbf{r}) =
            -\frac{i k_\mathrm{m}}{2\pi}
            \sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\!
            \left \{
            \text{FFT}^{-1}_{\mathrm{2D}}
            \left \{
            \left| k_\mathrm{Dx} \right|
            \frac{\text{FFT}_{\mathrm{2D}} \left \{
            u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}}
            {u_0(l_\mathrm{D})}
            \exp \! \left[i k_\mathrm{m}(M - 1) \cdot
            (z_{\phi_j}-l_\mathrm{D}) \right]
            \right \}
            \right \}
    with the forward :math:`\text{FFT}_{\mathrm{2D}}` and inverse
    :math:`\text{FFT}^{-1}_{\mathrm{2D}}` 2D fast Fourier transform, the
    rotational operator :math:`D_{-\phi_j}`, the angular distance between the
    projections :math:`\Delta \phi_0`, the ramp filter in Fourier space
    :math:`|k_\mathrm{Dx}|`, and the propagation distance
    :math:`(z_{\phi_j}-l_\mathrm{D})`.
    Parameters
    ----------
    uSin: (A, Ny, Nx) ndarray
        Three-dimensional sinogram of plane recordings
        :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})`
        divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
        measured at the detector.
    angles: (A,) ndarray
        Angular positions :math:`\phi_j` of `uSin` in radians.
    res: float
        Vacuum wavelength of the light :math:`\lambda` in pixels.
    nm: float
        Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
    lD: float
        Distance from center of rotation to detector plane
        :math:`l_\mathrm{D}` in pixels.
    coords: None [(3, M) ndarray]
        Only compute the output image at these coordinates. This
        keyword is reserved for future versions and is not
        implemented yet.
    weight_angles: bool
        If `True`, weights each backpropagated projection with a factor
        proportional to the angular distance between the neighboring
        projections.
        .. math::
            \Delta \phi_0 \longmapsto \Delta \phi_j =
                \frac{\phi_{j+1} - \phi_{j-1}}{2}
        .. versionadded:: 0.1.1
    onlyreal: bool
        If `True`, only the real part of the reconstructed image
        will be returned. This saves computation time.
    padding: tuple of bool
        Pad the input data to the second next power of 2 before
        Fourier transforming. This reduces artifacts and speeds up
        the process for input image sizes that are not powers of 2.
        The default is padding in x and y: `padding=(True, True)`.
        For padding only in x-direction (e.g. for cylindrical
        symmetries), set `padding` to `(True, False)`. To turn off
        padding, set it to `(False, False)`.
    padfac: float
        Increase padding size of the input data. A value greater
        than one will trigger padding to the second-next power of
        two. For example, a value of 1.75 will lead to a padded
        size of 256 for an initial size of 144, whereas it will
        lead to a padded size of 512 for an initial size of 150.
        Values geater than 2 are allowed. This parameter may
        greatly increase memory usage!
    padval: float
        The value used for padding. This is important for the Rytov
        approximation, where an approximat zero in the phase might
        translate to 2πi due to the unwrapping algorithm. In that
        case, this value should be a multiple of 2πi.
        If `padval` is `None`, then the edge values are used for
        padding (see documentation of :func:`numpy.pad`).
    intp_order: int between 0 and 5
        Order of the interpolation for rotation.
        See :func:`scipy.ndimage.interpolation.rotate` for details.
    dtype: dtype object or argument for :func:`numpy.dtype`
        The data type that is used for calculations (float or double).
        Defaults to `numpy.float64`.
    num_cores: int
        The number of cores to use for parallel operations. This value
        defaults to the number of cores on the system.
    save_memory: bool
        Saves memory at the cost of longer computation time.
        .. versionadded:: 0.1.5
    copy: bool
        Copy input sinogram `uSin` for data processing. If `copy`
        is set to `False`, then `uSin` will be overridden.
        .. versionadded:: 0.1.5
    count, max_count: multiprocessing.Value or `None`
        Can be used to monitor the progress of the algorithm.
        Initially, the value of `max_count.value` is incremented
        by the total number of steps. At each step, the value
        of `count.value` is incremented.
    verbose: int
        Increment to increase verbosity.
    Returns
    -------
    f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False`
        Reconstructed object function :math:`f(\mathbf{r})` as defined
        by the Helmholtz equation.
        :math:`f(x,z) =
        k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
    See Also
    --------
    odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
        to refractive index :math:`n(\mathbf{r})`
    Notes
    -----
    Do not use the parameter `lD` in combination with the Rytov
    approximation - the propagation is not correctly described.
    Instead, numerically refocus the sinogram prior to converting
    it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
    with a numerical focusing algorithm (available in the Python
    package :py:mod:`nrefocus`).
    """
    A = angles.size
    if len(uSin.shape) != 3:
        raise ValueError("Input data `uSin` must have shape (A,Ny,Nx).")
    if len(uSin) != A:
        raise ValueError("`len(angles)` must be equal to `len(uSin)`.")
    if len(list(padding)) != 2:
        raise ValueError("`padding` must be boolean tuple of length 2!")
    if np.array(padding).dtype is not np.dtype(bool):
        raise ValueError("Parameter `padding` must be boolean tuple.")
    if coords is not None:
        raise NotImplementedError("Setting coordinates is not yet supported.")
    if num_cores > ncores:
        raise ValueError("`num_cores` must not exceed number "
                         + "of physical cores: {}".format(ncores))
    # setup dtype
    if dtype is None:
        # BUG FIX: np.float_ was removed in numpy 2.0; np.float64 is the
        # identical concrete type.
        dtype = np.float64
    dtype = np.dtype(dtype)
    if dtype.name not in ["float32", "float64"]:
        raise ValueError("dtype must be float32 or float64!")
    # BUG FIX: np.int was removed in numpy 1.24; the builtin int is equivalent
    # here (and everywhere below where np.int was used).
    dtype_complex = np.dtype("complex{}".format(
        2 * int(dtype.name.strip("float"))))
    # set ctype
    ct_dt_map = {np.dtype(np.float32): ctypes.c_float,
                 np.dtype(np.float64): ctypes.c_double
                 }
    # progress
    if max_count is not None:
        max_count.value += A + 2
    ne.set_num_threads(num_cores)
    uSin = np.array(uSin, copy=copy)
    # lengths of the input data
    lny, lnx = uSin.shape[1], uSin.shape[2]
    # The z-size of the output array must match the x-size.
    # The rotation is performed about the y-axis (lny).
    ln = lnx
    # We perform zero-padding before performing the Fourier transform.
    # This gets rid of artifacts due to false periodicity and also
    # speeds up Fourier transforms of the input image size is not
    # a power of 2.
    orderx = int(max(64., 2**np.ceil(np.log(lnx * padfac) / np.log(2))))
    ordery = int(max(64., 2**np.ceil(np.log(lny * padfac) / np.log(2))))
    if padding[0]:
        padx = orderx - lnx
    else:
        padx = 0
    if padding[1]:
        pady = ordery - lny
    else:
        pady = 0
    padyl = int(np.ceil(pady / 2))
    padyr = pady - padyl
    padxl = int(np.ceil(padx / 2))
    padxr = padx - padxl
    # zero-padded length of sinogram.
    lNx, lNy = lnx + padx, lny + pady
    lNz = ln
    if verbose > 0:
        print("......Image size (x,y): {}x{}, padded: {}x{}".format(
            lnx, lny, lNx, lNy))
    # Perform weighting
    if weight_angles:
        weights = util.compute_angle_weights_1d(angles).reshape(-1, 1, 1)
        uSin *= weights
    # Cut-Off frequency
    # km [1/px]
    km = (2 * np.pi * nm) / res
    # Here, the notation for
    # a wave propagating to the right is:
    #
    # u0(x) = exp(ikx)
    #
    # However, in physics usually we use the other sign convention:
    #
    # u0(x) = exp(-ikx)
    #
    # In order to be consistent with programs like Meep or our
    # scattering script for a dielectric cylinder, we want to use the
    # latter sign convention.
    # This is not a big problem. We only need to multiply the imaginary
    # part of the scattered wave by -1.
    # Ask for the filter. Do not include zero (first element).
    #
    # Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
    #   - double coverage factor 1/2 already included
    #   - unitary angular frequency to unitary ordinary frequency
    #     conversion performed in calculation of UB=FT(uB).
    #
    # f(r) = -i kₘ / ((2π)² a₀)                 (prefactor)
    #      * iiint dϕ₀ dkx dky                  (prefactor)
    #      * |kx|                               (prefactor)
    #      * exp(-i kₘ M lD )                   (prefactor)
    #      * UBϕ₀(kx)                           (dependent on ϕ₀)
    #      * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
    # (r and s₀ are vectors. The last term contains a dot-product)
    #
    # kₘM = sqrt( kₘ² - kx² - ky² )
    # t⊥  = (  cos(ϕ₀), ky/kx, sin(ϕ₀) )
    # s₀  = ( -sin(ϕ₀), 0    , cos(ϕ₀) )
    #
    # The filter can be split into two parts
    #
    # 1) part without dependence on the z-coordinate
    #
    #        -i kₘ / ((2π)² a₀)
    #      * iiint dϕ₀ dkx dky
    #      * |kx|
    #      * exp(-i kₘ M lD )
    #
    # 2) part with dependence of the z-coordinate
    #
    #        exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
    #
    # The filter (1) can be performed using the classical filter process
    # as in the backprojection algorithm.
    #
    #
    # Corresponding sample frequencies
    fx = np.fft.fftfreq(lNx)  # 1D array
    fy = np.fft.fftfreq(lNy)  # 1D array
    # kx is a 1D array.
    kx = 2 * np.pi * fx
    ky = 2 * np.pi * fy
    # Differentials for integral
    dphi0 = 2 * np.pi / A
    # We will later multiply with phi0.
    #               y, x
    kx = kx.reshape(1, -1)
    ky = ky.reshape(-1, 1)
    # Low-pass filter:
    # less-than-or-equal would give us zero division error.
    filter_klp = (kx**2 + ky**2 < km**2)
    # Filter M so there are no nans from the root
    M = 1. / km * np.sqrt((km**2 - kx**2 - ky**2) * filter_klp)
    prefactor = -1j * km / (2 * np.pi)
    prefactor *= dphi0
    # Also filter the prefactor, so nothing outside the required
    # low-pass contributes to the sum.
    prefactor *= np.abs(kx) * filter_klp
    # prefactor *= np.sqrt(((kx**2+ky**2)) * filter_klp )
    # new in version 0.1.4:
    # We multiply by the factor (M-1) instead of just (M)
    # to take into account that we have a scattered
    # wave that is normalized by u0.
    prefactor *= np.exp(-1j * km * (M-1) * lD)
    if count is not None:
        count.value += 1
    # filter (2) must be applied before rotation as well
    # exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
    #
    # kₘM = sqrt( kₘ² - kx² - ky² )
    # t⊥  = (  cos(ϕ₀), ky/kx, sin(ϕ₀) )
    # s₀  = ( -sin(ϕ₀), 0    , cos(ϕ₀) )
    #
    # This filter is effectively an inverse Fourier transform
    #
    # exp(i kx xD) exp(i ky yD) exp(i kₘ (M - 1) zD )
    #
    # xD =   x cos(ϕ₀) + z sin(ϕ₀)
    # zD = - x sin(ϕ₀) + z cos(ϕ₀)
    # Everything is in pixels
    center = lNz / 2.0
    z = np.linspace(-center, center, lNz, endpoint=False)
    zv = z.reshape(-1, 1, 1)
    #               z, y, x
    Mp = M.reshape(lNy, lNx)
    # filter2 = np.exp(1j * zv * km * (Mp - 1))
    f2_exp_fac = 1j * km * (Mp - 1)
    if save_memory:
        # compute filter2 later
        pass
    else:
        # compute filter2 now
        filter2 = ne.evaluate("exp(factor * zv)",
                              local_dict={"factor": f2_exp_fac,
                                          "zv": zv})
        # occupies some amount of ram, but yields faster
        # computation later
    if count is not None:
        count.value += 1
    # Prepare complex output image
    if onlyreal:
        outarr = np.zeros((ln, lny, lnx), dtype=dtype)
    else:
        outarr = np.zeros((ln, lny, lnx), dtype=dtype_complex)
    # Create plan for FFTW
    # save memory by in-place operations
    # projection = np.fft.fft2(sino, axes=(-1,-2)) * prefactor
    # FFTW-flag is "estimate":
    #    specifies that, instead of actual measurements of different
    #    algorithms, a simple heuristic is used to pick a (probably
    #    sub-optimal) plan quickly. With this flag, the input/output
    #    arrays are not overwritten during planning.
    # Byte-aligned arrays
    oneslice = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
    myfftw_plan = pyfftw.FFTW(oneslice, oneslice, threads=num_cores,
                              flags=["FFTW_ESTIMATE"], axes=(0, 1))
    # Create plan for IFFTW:
    inarr = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
    # inarr[:] = (projection[0]*filter2)[0,:,:]
    # plan is "patient":
    #    FFTW_PATIENT is like FFTW_MEASURE, but considers a wider range
    #    of algorithms and often produces a “more optimal” plan
    #    (especially for large transforms), but at the expense of
    #    several times longer planning time (especially for large
    #    transforms).
    # print(inarr.flags)
    myifftw_plan = pyfftw.FFTW(inarr, inarr, threads=num_cores,
                               axes=(0, 1),
                               direction="FFTW_BACKWARD",
                               flags=["FFTW_MEASURE"])
    # Setup a shared array
    shared_array = mp.RawArray(ct_dt_map[dtype], ln * lny * lnx)
    arr = np.frombuffer(shared_array, dtype=dtype).reshape(ln, lny, lnx)
    # Initialize the pool with the shared array
    pool4loop = mp.Pool(processes=num_cores,
                        initializer=_init_worker,
                        initargs=(shared_array, (ln, lny, lnx), dtype))
    # filtered projections in loop
    filtered_proj = np.zeros((ln, lny, lnx), dtype=dtype_complex)
    for aa in np.arange(A):
        if padval is None:
            oneslice[:] = np.pad(uSin[aa],
                                 ((padyl, padyr), (padxl, padxr)),
                                 mode="edge")
        else:
            oneslice[:] = np.pad(uSin[aa],
                                 ((padyl, padyr), (padxl, padxr)),
                                 mode="linear_ramp",
                                 end_values=(padval,))
        myfftw_plan.execute()
        # normalize to (lNx * lNy) for FFTW and multiply with prefactor
        oneslice *= prefactor / (lNx * lNy)
        # 14x Speedup with fftw3 compared to numpy fft and
        # memory reduction by a factor of 2!
        # ifft will be computed in-place
        for p in range(len(zv)):
            if save_memory:
                # compute filter2 here;
                # this is comparatively slower than the other case
                ne.evaluate("exp(factor * zvp) * projectioni",
                            local_dict={"zvp": zv[p],
                                        "projectioni": oneslice,
                                        "factor": f2_exp_fac},
                            out=inarr)
            else:
                # use universal functions
                np.multiply(filter2[p], oneslice, out=inarr)
            myifftw_plan.execute()
            filtered_proj[p, :, :] = inarr[padyl:lny+padyl, padxl:lnx+padxl]
    # resize image to original size
    # The copy is necessary to prevent memory leakage.
        arr[:] = filtered_proj.real
        phi0 = np.rad2deg(angles[aa])
        if not onlyreal:
            filtered_proj_imag = filtered_proj.imag
        _mprotate(phi0, lny, pool4loop, intp_order)
        outarr.real += arr
        if not onlyreal:
            arr[:] = filtered_proj_imag
            _mprotate(phi0, lny, pool4loop, intp_order)
            outarr.imag += arr
        if count is not None:
            count.value += 1
    pool4loop.terminate()
    pool4loop.join()
    _cleanup_worker()
    return outarr
Three-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,y,z)`
by a dielectric object with refractive index
:math:`n(x,y,z)`.
This method implements the 3D backpropagation algorithm
:cite:`Mueller2015arxiv`.
.. math::
f(\mathbf{r}) =
-\frac{i k_\mathrm{m}}{2\pi}
\sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\!
\left \{
\text{FFT}^{-1}_{\mathrm{2D}}
\left \{
\left| k_\mathrm{Dx} \right|
\frac{\text{FFT}_{\mathrm{2D}} \left \{
u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}}
{u_0(l_\mathrm{D})}
\exp \! \left[i k_\mathrm{m}(M - 1) \cdot
(z_{\phi_j}-l_\mathrm{D}) \right]
\right \}
\right \}
with the forward :math:`\text{FFT}_{\mathrm{2D}}` and inverse
:math:`\text{FFT}^{-1}_{\mathrm{2D}}` 2D fast Fourier transform, the
rotational operator :math:`D_{-\phi_j}`, the angular distance between the
projections :math:`\Delta \phi_0`, the ramp filter in Fourier space
:math:`|k_\mathrm{Dx}|`, and the propagation distance
:math:`(z_{\phi_j}-l_\mathrm{D})`.
Parameters
----------
uSin: (A, Ny, Nx) ndarray
Three-dimensional sinogram of plane recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
coords: None [(3, M) ndarray]
Only compute the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
weight_angles: bool
If `True`, weights each backpropagated projection with a factor
proportional to the angular distance between the neighboring
projections.
.. math::
\Delta \phi_0 \longmapsto \Delta \phi_j =
\frac{\phi_{j+1} - \phi_{j-1}}{2}
.. versionadded:: 0.1.1
onlyreal: bool
If `True`, only the real part of the reconstructed image
will be returned. This saves computation time.
padding: tuple of bool
Pad the input data to the second next power of 2 before
Fourier transforming. This reduces artifacts and speeds up
the process for input image sizes that are not powers of 2.
The default is padding in x and y: `padding=(True, True)`.
For padding only in x-direction (e.g. for cylindrical
symmetries), set `padding` to `(True, False)`. To turn off
padding, set it to `(False, False)`.
padfac: float
Increase padding size of the input data. A value greater
than one will trigger padding to the second-next power of
two. For example, a value of 1.75 will lead to a padded
size of 256 for an initial size of 144, whereas it will
lead to a padded size of 512 for an initial size of 150.
Values geater than 2 are allowed. This parameter may
greatly increase memory usage!
padval: float
The value used for padding. This is important for the Rytov
approximation, where an approximat zero in the phase might
translate to 2πi due to the unwrapping algorithm. In that
case, this value should be a multiple of 2πi.
If `padval` is `None`, then the edge values are used for
padding (see documentation of :func:`numpy.pad`).
intp_order: int between 0 and 5
Order of the interpolation for rotation.
See :func:`scipy.ndimage.interpolation.rotate` for details.
dtype: dtype object or argument for :func:`numpy.dtype`
The data type that is used for calculations (float or double).
Defaults to `numpy.float_`.
num_cores: int
The number of cores to use for parallel operations. This value
defaults to the number of cores on the system.
save_memory: bool
Saves memory at the cost of longer computation time.
.. versionadded:: 0.1.5
copy: bool
Copy input sinogram `uSin` for data processing. If `copy`
is set to `False`, then `uSin` will be overridden.
.. versionadded:: 0.1.5
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
Notes
-----
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`). | entailment |
def compute_angle_weights_1d(angles):
    """
    Compute the weight for each angle according to the distance between its
    neighbors.
    Parameters
    ----------
    angles: 1d ndarray of length A
        Angles in radians
    Returns
    -------
    weights: 1d ndarray of length A
        The weights for each angle
    Notes
    -----
    To compute the weights, the angles are set modulo PI, not modulo 2PI.
    This reduces artifacts when the angular coverage is between PI and 2PI
    but does not affect the result when the angles cover the full 2PI interval.
    """
    # shift onto [0, pi) relative to the smallest angle
    shifted = (angles.flatten() - angles.min()) % np.pi
    # work in sorted order so neighbors are adjacent
    order = np.argsort(shifted)
    ordered = shifted[order]
    # gap between each angle's two neighbors (wrapping around modulo pi)
    gaps = (np.roll(ordered, -1) - np.roll(ordered, 1)) % np.pi
    # normalize so the weights average to one
    ordered_weights = gaps / gaps.sum() * gaps.shape[0]
    # scatter the weights back to the original ordering
    weights = np.zeros_like(ordered_weights)
    weights[order] = ordered_weights
    return weights
neighbors.
Parameters
----------
angles: 1d ndarray of length A
Angles in radians
Returns
-------
weights: 1d ndarray of length A
The weights for each angle
Notes
-----
To compute the weights, the angles are set modulo PI, not modulo 2PI.
This reduces artifacts when the angular coverage is between PI and 2PI
but does not affect the result when the angles cover the full 2PI interval. | entailment |
def initialize(self):
    """See :meth:`pymlab.sensors.Device.initialize` for more information.

    Calls `initialize()` on all devices connected to the bus.
    """
    Device.initialize(self)
    # propagate initialization to every attached child device
    for device in self.children.values():
        device.initialize()
def write_byte(self, address, value):
    """Writes the byte to unaddressed register in a device. """
    LOGGER.debug("Writing byte %s to device %s!", bin(value), hex(address))
    # delegate the actual bus transaction to the underlying driver
    result = self.driver.write_byte(address, value)
    return result
def read_byte(self, address):
    """Reads unadressed byte from a device. """
    LOGGER.debug("Reading byte from device %s!", hex(address))
    # delegate the actual bus transaction to the underlying driver
    result = self.driver.read_byte(address)
    return result
def write_byte_data(self, address, register, value):
    """Write a byte value to a device's register. """
    LOGGER.debug("Writing byte data %s to register %s on device %s",
                 bin(value), hex(register), hex(address))
    # delegate the actual bus transaction to the underlying driver
    result = self.driver.write_byte_data(address, register, value)
    return result
def write_wdata(self, address, register, value):
    """Write a word (two bytes) value to a device's register.

    .. deprecated::
        Use :meth:`write_word_data` instead.
    """
    # stacklevel=2 attributes the DeprecationWarning to the caller of this
    # deprecated wrapper instead of to the wrapper itself
    warnings.warn("write_wdata() is deprecated and will be removed in future versions replace with write_word_data()", DeprecationWarning, stacklevel=2)
    LOGGER.debug("Writing word data %s to register %s on device %s",
                 bin(value), hex(register), hex(address))
    return self.driver.write_word_data(address, register, value)
async def _raise_for_status(response):
    """Raise an appropriate error for a given response.

    Arguments:
        response (:py:class:`aiohttp.ClientResponse`): The API response.

    Raises:
        :py:class:`aiohttp.web_exceptions.HTTPException`: The appropriate
            error for the response's status.

    This function was taken from the aslack project and modified. The original
    copyright notice:

    Copyright (c) 2015, Jonathan Sharpe

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    """
    try:
        response.raise_for_status()
    except aiohttp.ClientResponseError as exc:
        reason = response.reason
        spacetrack_error_msg = None
        # Try to extract a Space-Track specific error message from the JSON
        # body; errors here are ignored and the raw text is used instead.
        try:
            json = await response.json()
            if isinstance(json, Mapping):
                spacetrack_error_msg = json['error']
        except (ValueError, KeyError, aiohttp.ClientResponseError):
            pass
        if not spacetrack_error_msg:
            spacetrack_error_msg = await response.text()
        # Append the Space-Track message (if any) to the HTTP reason phrase.
        if spacetrack_error_msg:
            reason += '\nSpace-Track response:\n' + spacetrack_error_msg
        payload = dict(
            code=response.status,
            message=reason,
            headers=response.headers,
        )
        # history attribute is only aiohttp >= 2.1
        try:
            payload['history'] = exc.history
        except AttributeError:
            pass
        # Re-raise with the enriched message so callers see the details.
        raise aiohttp.ClientResponseError(**payload)
async def generic_request(self, class_, iter_lines=False, iter_content=False,
                          controller=None, parse_types=False, **kwargs):
    """Generic Space-Track query coroutine.

    The request class methods use this method internally; the public
    API is as follows:

    .. code-block:: python

        st.tle_publish(*args, **st)
        st.basicspacedata.tle_publish(*args, **st)
        st.file(*args, **st)
        st.fileshare.file(*args, **st)
        st.spephemeris.file(*args, **st)

    They resolve to the following calls respectively:

    .. code-block:: python

        st.generic_request('tle_publish', *args, **st)
        st.generic_request('tle_publish', *args, controller='basicspacedata', **st)
        st.generic_request('file', *args, **st)
        st.generic_request('file', *args, controller='fileshare', **st)
        st.generic_request('file', *args, controller='spephemeris', **st)

    Parameters:
        class_: Space-Track request class name
        iter_lines: Yield result line by line
        iter_content: Yield result in 100 KiB chunks.
        controller: Optionally specify request controller to use.
        parse_types: Parse string values in response according to type given
            in predicate information, e.g. ``'2017-01-01'`` ->
            ``datetime.date(2017, 1, 1)``.
        **kwargs: These keywords must match the predicate fields on
            Space-Track. You may check valid keywords with the following
            snippet:

            .. code-block:: python

                spacetrack = AsyncSpaceTrackClient(...)
                await spacetrack.tle.get_predicates()
                # or
                await spacetrack.get_predicates('tle')

            See :func:`~spacetrack.operators._stringify_predicate_value` for
            which Python objects are converted appropriately.

    Yields:
        Lines—stripped of newline characters—if ``iter_lines=True``

    Yields:
        100 KiB chunks if ``iter_content=True``

    Returns:
        Parsed JSON object, unless ``format`` keyword argument is passed.

    .. warning::

        Passing ``format='json'`` will return the JSON **unparsed**. Do
        not set ``format`` if you want the parsed JSON object returned!
    """
    # The two streaming modes are mutually exclusive.
    if iter_lines and iter_content:
        raise ValueError('iter_lines and iter_content cannot both be True')
    if 'format' in kwargs and parse_types:
        raise ValueError('parse_types can only be used if format is unset.')
    # Resolve (or validate) the controller for the requested class.
    if controller is None:
        controller = self._find_controller(class_)
    else:
        classes = self.request_controllers.get(controller, None)
        if classes is None:
            raise ValueError(
                'Unknown request controller {!r}'.format(controller))
        if class_ not in classes:
            raise ValueError(
                'Unknown request class {!r} for controller {!r}'
                .format(class_, controller))
    # Decode unicode unless class == download, including conversion of
    # CRLF newlines to LF.
    decode = (class_ != 'download')
    if not decode and iter_lines:
        error = (
            'iter_lines disabled for binary data, since CRLF newlines '
            'split over chunk boundaries would yield extra blank lines. '
            'Use iter_content=True instead.')
        raise ValueError(error)
    await self.authenticate()
    url = ('{0}{1}/query/class/{2}'
           .format(self.base_url, controller, class_))
    # Classes with offline predicate sets are validated without a modeldef
    # request; otherwise predicates are fetched from Space-Track.
    offline_check = (class_, controller) in self.offline_predicates
    valid_fields = {p.name for p in self.rest_predicates}
    predicates = None
    if not offline_check:
        predicates = await self.get_predicates(class_)
        predicate_fields = {p.name for p in predicates}
        valid_fields = predicate_fields | {p.name for p in self.rest_predicates}
    else:
        valid_fields |= self.offline_predicates[(class_, controller)]
    # Append each keyword as a /key/value path segment of the query URL.
    for key, value in kwargs.items():
        if key not in valid_fields:
            raise TypeError(
                "'{class_}' got an unexpected argument '{key}'"
                .format(class_=class_, key=key))
        value = _stringify_predicate_value(value)
        url += '/{key}/{value}'.format(key=key, value=value)
    logger.debug(url)
    resp = await self._ratelimited_get(url)
    await _raise_for_status(resp)
    if iter_lines:
        return _AsyncLineIterator(resp, decode_unicode=decode)
    elif iter_content:
        return _AsyncChunkIterator(resp, decode_unicode=decode)
    else:
        # If format is specified, return that format unparsed. Otherwise,
        # parse the default JSON response.
        if 'format' in kwargs:
            if decode:
                # Replace CRLF newlines with LF, Python will handle platform
                # specific newlines if written to file.
                data = await resp.text()
                data = data.replace('\r', '')
            else:
                data = await resp.read()
            return data
        else:
            data = await resp.json()
            if predicates is None or not parse_types:
                return data
            else:
                return self._parse_types(data, predicates)
async def _download_predicate_data(self, class_, controller):
    """Get raw predicate information for given request class, and cache for
    subsequent calls.
    """
    await self.authenticate()
    # query the modeldef endpoint for the given controller/class pair
    url = '{0}{1}/modeldef/class/{2}'.format(
        self.base_url, controller, class_)
    response = await self._ratelimited_get(url)
    await _raise_for_status(response)
    payload = await response.json()
    return payload['data']
def get_numeric_value(string_value):
    """ parses string_value and returns only number-like part
    """
    # keep digits plus the sign and decimal-point characters, drop the rest
    return ''.join(ch for ch in string_value if ch.isdigit() or ch in '.+-')
def main(**kwargs):
    """Entry point to run databench.

    Parses command-line arguments, configures logging and optional coverage
    measurement, builds the Tornado application (multi-analysis ``App`` or
    ``SingleApp`` depending on whether ``kwargs`` were given) and starts the
    HTTP (and optionally HTTPS) server until interrupted.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--version', action='version',
                        version='%(prog)s {}'.format(DATABENCH_VERSION))
    parser.add_argument('--log', dest='loglevel', default="INFO",
                        type=str.upper,
                        help=('log level (info, warning, error, critical or '
                              'debug, default info)'))
    parser.add_argument('--no-watch', dest='watch', default=True,
                        action='store_false',
                        help='do not watch and restart when files change')
    parser.add_argument('--host', dest='host',
                        default=os.environ.get('HOST', '127.0.0.1'),
                        help='host address for webserver (default 127.0.0.1)')
    parser.add_argument('--port', dest='port',
                        type=int, default=int(os.environ.get('PORT', 5000)),
                        help='port for webserver')
    # --analyses only makes sense when not running a single hard-wired analysis
    if not kwargs:
        parser.add_argument('--analyses', default=None,
                            help='import path for analyses')
    parser.add_argument('--build', default=False, action='store_true',
                        help='run the build command and exit')
    parser.add_argument('--coverage', default=False,
                        help=argparse.SUPPRESS)
    ssl_args = parser.add_argument_group('SSL')
    ssl_args.add_argument('--ssl-certfile', dest='ssl_certfile',
                          default=os.environ.get('SSLCERTFILE'),
                          help='SSL certificate file')
    ssl_args.add_argument('--ssl-keyfile', dest='ssl_keyfile',
                          default=os.environ.get('SSLKEYFILE'),
                          help='SSL key file')
    ssl_args.add_argument('--ssl-port', dest='ssl_port', type=int,
                          default=int(os.environ.get('SSLPORT', 0)),
                          help='SSL port for webserver')
    # unknown arguments are forwarded to the analyses themselves
    args, analyses_args = parser.parse_known_args()
    # coverage
    cov = None
    if args.coverage:
        import coverage
        cov = coverage.Coverage(data_file=args.coverage, data_suffix=True)
        cov.start()
    # this is included here so that is included in coverage
    from .app import App, SingleApp
    # log
    logging.basicConfig(level=getattr(logging, args.loglevel))
    if args.loglevel != 'INFO':
        logging.info('Set loglevel to {}.'.format(args.loglevel))
    # show versions and setup
    logging.info('Databench {}'.format(DATABENCH_VERSION))
    if args.host in ('localhost', '127.0.0.1'):
        logging.info('Open http://{}:{} in a web browser.'
                     ''.format(args.host, args.port))
    logging.debug('host={}, port={}'.format(args.host, args.port))
    logging.debug('Python {}'.format(sys.version))
    if analyses_args:
        logging.debug('Arguments passed to analyses: {}'.format(analyses_args))
    if not kwargs:
        app = App(args.analyses, cli_args=analyses_args, debug=args.watch)
    else:
        app = SingleApp(cli_args=analyses_args, debug=args.watch, **kwargs)
    # check whether this is just a quick build
    if args.build:
        logging.info('Build mode: only run build command and exit.')
        app.build()
        if cov:
            cov.stop()
            cov.save()
        return
    # HTTP server
    tornado_app = app.tornado_app()
    tornado_app.listen(args.port, args.host)
    # HTTPS server
    if args.ssl_port:
        if args.ssl_certfile and args.ssl_keyfile:
            ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ssl_ctx.load_cert_chain(args.ssl_certfile, args.ssl_keyfile)
        else:
            # use Tornado's self signed certificates
            module_dir = os.path.dirname(tornado.__file__)
            ssl_ctx = {
                'certfile': os.path.join(module_dir, 'test', 'test.crt'),
                'keyfile': os.path.join(module_dir, 'test', 'test.key'),
            }
        logging.info('Open https://{}:{} in a web browser.'
                     ''.format(args.host, args.ssl_port))
        tornado_app.listen(args.ssl_port, ssl_options=ssl_ctx)
    # run the event loop until interrupted
    try:
        tornado.ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        tornado.ioloop.IOLoop.current().stop()
    if cov:
        cov.stop()
        cov.save()
def run(analysis, path=None, name=None, info=None, **kwargs):
    """Run a single analysis.

    :param Analysis analysis: Analysis class to run.
    :param str path: Path of analysis. Can be `__file__`.
    :param str name: Name of the analysis.
    :param dict info: Optional entries are ``version``, ``title``,
        ``readme``, ...
    :param dict static: Map[url regex, root-folder] to serve static content.
    """
    # the explicit keywords take precedence over entries already present in
    # kwargs, mirroring the dict.update() call order of the original code
    merged = dict(kwargs, analysis=analysis, path=path, name=name, info=info)
    main(**merged)
def _stringify_predicate_value(value):
"""Convert Python objects to Space-Track compatible strings
- Booleans (``True`` -> ``'true'``)
- Sequences (``[25544, 34602]`` -> ``'25544,34602'``)
- dates/datetimes (``date(2015, 12, 23)`` -> ``'2015-12-23'``)
- ``None`` -> ``'null-val'``
"""
if isinstance(value, bool):
return str(value).lower()
elif isinstance(value, Sequence) and not isinstance(value, six.string_types):
return ','.join(_stringify_predicate_value(x) for x in value)
elif isinstance(value, datetime.datetime):
return value.isoformat(sep=' ')
elif isinstance(value, datetime.date):
return value.isoformat()
elif value is None:
return 'null-val'
else:
return str(value) | Convert Python objects to Space-Track compatible strings
- Booleans (``True`` -> ``'true'``)
- Sequences (``[25544, 34602]`` -> ``'25544,34602'``)
- dates/datetimes (``date(2015, 12, 23)`` -> ``'2015-12-23'``)
- ``None`` -> ``'null-val'`` | entailment |
def args_repr(*args, **kwargs):
    """
    Returns human-readable string representation of both positional and
    keyword arguments passed to the function.

    This function uses the built-in :func:`repr()` function to convert
    individual arguments to string.

    >>> args_repr("a", (1, 2), some_keyword = list("abc"))
    "'a', (1, 2), some_keyword = ['a', 'b', 'c']"
    """
    parts = [repr(arg) for arg in args]
    parts.extend("%s = %r" % pair for pair in kwargs.items())
    return ", ".join(parts)


def obj_repr(obj, *args, **kwargs):
    """
    Returns human-readable string representation of an object given that it has
    been created by calling constructor with the specified positional and
    keyword arguments.

    This is a convenience function to help implement custom `__repr__()`
    methods. For example:

    >>> class Animal(object):
    ...     def __init__(self, hit_points, color, **kwargs):
    ...         self.hit_points = hit_points
    ...         self.color = color
    ...         self.hostile = kwargs.get("hostile", False)
    ...     def __repr__(self):
    ...         return obj_repr(self, self.hit_points, self.color, hostile = self.hostile)
    >>> dog = Animal(2.3, "purple")
    >>> repr(dog)
    "Animal(2.3, 'purple', hostile = False)"
    """
    # class name comes from the object's runtime type
    return "%s(%s)" % (type(obj).__name__, args_repr(*args, **kwargs))
def decode_filter(text, encoding='utf-8'):
    """
    decode a binary object to str and filter out non-printable characters

    Parameters
    ----------
    text: bytes
        the binary object to be decoded
    encoding: str
        the encoding to be used

    Returns
    -------
    str
        the decoded and filtered string
    """
    if text is None:
        return None
    # undecodable bytes are silently dropped
    decoded = text.decode(encoding, errors='ignore')
    allowed = set(string.printable)
    return ''.join(ch for ch in decoded if ch in allowed)
def dictmerge(x, y):
    """
    merge two dictionaries
    """
    # entries of y take precedence over entries of x
    merged = dict(x)
    merged.update(y)
    return merged
def dissolve(inlist):
    """
    list and tuple flattening

    Parameters
    ----------
    inlist: list
        the list with sub-lists or tuples to be flattened

    Returns
    -------
    list
        the flattened result

    Examples
    --------
    >>> dissolve([[1, 2], [3, 4]])
    [1, 2, 3, 4]

    >>> dissolve([(1, 2, (3, 4)), [5, (6, 7)]])
    [1, 2, 3, 4, 5, 6, 7]
    """
    flat = []
    for item in inlist:
        # treat tuples like lists so that both get flattened
        if isinstance(item, tuple):
            item = list(item)
        if isinstance(item, list):
            flat.extend(dissolve(item))
        else:
            flat.append(item)
    return flat
def finder(target, matchlist, foldermode=0, regex=False, recursive=True):
    """
    function for finding files/folders in folders and their subdirectories

    Parameters
    ----------
    target: str or list of str
        a directory, zip- or tar-archive or a list of them to be searched
    matchlist: list
        a list of search patterns
    foldermode: int
        * 0: only files
        * 1: files and folders
        * 2: only folders
    regex: bool
        are the search patterns in matchlist regular expressions or unix shell standard (default)?
    recursive: bool
        search target recursively into all subdirectories or only in the top level?
        This is currently only implemented for parameter `target` being a directory.

    Returns
    -------
    list of str
        the absolute names of files/folders matching the patterns
    """
    if foldermode not in [0, 1, 2]:
        raise ValueError("'foldermode' must be either 0, 1 or 2")
    # match patterns
    if isinstance(target, str):
        # combine all search patterns into a single alternation regex
        pattern = r'|'.join(matchlist if regex else [fnmatch.translate(x) for x in matchlist])
        if os.path.isdir(target):
            if recursive:
                out = dissolve([[os.path.join(root, x)
                                 for x in dirs + files
                                 if re.search(pattern, x)]
                                for root, dirs, files in os.walk(target)])
            else:
                out = [os.path.join(target, x)
                       for x in os.listdir(target)
                       if re.search(pattern, x)]
            if foldermode == 0:
                out = [x for x in out if not os.path.isdir(x)]
            if foldermode == 2:
                out = [x for x in out if os.path.isdir(x)]
            return sorted(out)
        elif os.path.isfile(target):
            if zf.is_zipfile(target):
                # renamed from 'zip' to avoid shadowing the builtin
                with zf.ZipFile(target, 'r') as archive:
                    out = [os.path.join(target, name)
                           for name in archive.namelist()
                           if re.search(pattern, os.path.basename(name.strip('/')))]
                # zip entries ending with '/' are directories
                if foldermode == 0:
                    out = [x for x in out if not x.endswith('/')]
                elif foldermode == 1:
                    out = [x.strip('/') for x in out]
                elif foldermode == 2:
                    out = [x.strip('/') for x in out if x.endswith('/')]
                return sorted(out)
            elif tf.is_tarfile(target):
                # bug fix: the archive previously leaked when an exception
                # occurred between open() and close(); use a context manager
                with tf.open(target) as tar:
                    out = [name for name in tar.getnames()
                           if re.search(pattern, os.path.basename(name.strip('/')))]
                    if foldermode == 0:
                        out = [x for x in out if not tar.getmember(x).isdir()]
                    elif foldermode == 2:
                        out = [x for x in out if tar.getmember(x).isdir()]
                out = [os.path.join(target, x) for x in out]
                return sorted(out)
            else:
                raise TypeError("if parameter 'target' is a file, "
                                "it must be a zip or tar archive:\n    {}"
                                .format(target))
        else:
            raise TypeError("if parameter 'target' is of type str, "
                            "it must be a directory or a file:\n    {}"
                            .format(target))
    elif isinstance(target, list):
        # search every target individually and concatenate the results
        groups = [finder(x, matchlist, foldermode, regex, recursive) for x in target]
        return list(itertools.chain(*groups))
    else:
        raise TypeError("parameter 'target' must be of type str or list")
def multicore(function, cores, multiargs, **singleargs):
    """
    wrapper for multicore process execution

    Parameters
    ----------
    function
        individual function to be applied to each process item
    cores: int
        the number of subprocesses started/CPUs used;
        this value is reduced in case the number of subprocesses is smaller
    multiargs: dict
        a dictionary containing sub-function argument names as keys and lists of arguments to be
        distributed among the processes as values
    singleargs
        all remaining arguments which are invariant among the subprocesses

    Returns
    -------
    None or list
        the return of the function for all subprocesses

    Notes
    -----
    - all `multiargs` value lists must be of same length, i.e. all argument keys must be explicitly defined for each
      subprocess
    - all function arguments passed via `singleargs` must be provided with the full argument name and its value
      (i.e. argname=argval); default function args are not accepted
    - if the processes return anything else than None, this function will return a list of results
    - if all processes return None, this function will be of type void

    Examples
    --------
    >>> def add(x, y, z):
    ...     return x + y + z
    >>> multicore(add, cores=2, multiargs={'x': [1, 2]}, y=5, z=9)
    [15, 16]
    >>> multicore(add, cores=2, multiargs={'x': [1, 2], 'y': [5, 6]}, z=9)
    [15, 17]

    See Also
    --------
    :mod:`pathos.multiprocessing`
    """
    # enable pickling of tracebacks so child exceptions can be re-raised
    tblib.pickling_support.install()
    # compare the function arguments with the multi and single arguments and raise errors if mismatches occur
    if sys.version_info >= (3, 0):
        check = inspect.getfullargspec(function)
        varkw = check.varkw
    else:
        check = inspect.getargspec(function)
        varkw = check.keywords
    # argument-name validation is only possible if the function does not
    # accept arbitrary *args/**kwargs
    if not check.varargs and not varkw:
        multiargs_check = [x for x in multiargs if x not in check.args]
        singleargs_check = [x for x in singleargs if x not in check.args]
        if len(multiargs_check) > 0:
            raise AttributeError('incompatible multi arguments: {0}'.format(', '.join(multiargs_check)))
        if len(singleargs_check) > 0:
            raise AttributeError('incompatible single arguments: {0}'.format(', '.join(singleargs_check)))
    # compare the list lengths of the multi arguments and raise errors if they are of different length
    arglengths = list(set([len(multiargs[x]) for x in multiargs]))
    if len(arglengths) > 1:
        raise AttributeError('multi argument lists of different length')
    # prevent starting more threads than necessary
    cores = cores if arglengths[0] >= cores else arglengths[0]
    # create a list of dictionaries each containing the arguments for individual
    # function calls to be passed to the multicore processes
    processlist = [dictmerge(dict([(arg, multiargs[arg][i]) for arg in multiargs]), singleargs)
                   for i in range(len(multiargs[list(multiargs.keys())[0]]))]
    if platform.system() == 'Windows':
        # in Windows parallel processing needs to strictly be in a "if __name__ == '__main__':" wrapper
        # it was thus necessary to outsource this to a different script and try to serialize all input for sharing objects
        # https://stackoverflow.com/questions/38236211/why-multiprocessing-process-behave-differently-on-windows-and-linux-for-global-o
        # a helper script to perform the parallel processing
        script = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'multicore_helper.py')
        # a temporary file to write the serialized function variables
        tmpfile = os.path.join(tempfile.gettempdir(), 'spatialist_dump')
        # check if everything can be serialized
        if not dill.pickles([function, cores, processlist]):
            raise RuntimeError('cannot fully serialize function arguments;\n'
                               ' see https://github.com/uqfoundation/dill for supported types')
        # write the serialized variables
        with open(tmpfile, 'wb') as tmp:
            dill.dump([function, cores, processlist], tmp, byref=False)
        # run the helper script
        proc = sp.Popen([sys.executable, script], stdin=sp.PIPE, stderr=sp.PIPE)
        out, err = proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError(err.decode())
        # retrieve the serialized output of the processing which was written to the temporary file by the helper script
        with open(tmpfile, 'rb') as tmp:
            result = dill.load(tmp)
        return result
    else:
        results = None

        # wrap the user function so that exceptions in the child process are
        # captured and transported back to the parent instead of being lost
        def wrapper(**kwargs):
            try:
                return function(**kwargs)
            except Exception as e:
                return ExceptionWrapper(e)
        # block printing of the executed function
        with HiddenPrints():
            # start pool of processes and do the work
            try:
                pool = mp.Pool(processes=cores)
            except NameError:
                raise ImportError("package 'pathos' could not be imported")
            results = pool.imap(lambda x: wrapper(**x), processlist)
            pool.close()
            pool.join()
        i = 0
        out = []
        for item in results:
            if isinstance(item, ExceptionWrapper):
                # annotate the child exception with the arguments that caused it
                item.ee = type(item.ee)(str(item.ee) +
                                        "\n(called function '{}' with args {})"
                                        .format(function.__name__, processlist[i]))
                raise (item.re_raise())
            out.append(item)
            i += 1
        # evaluate the return of the processing function;
        # if any value is not None then the whole list of results is returned
        eval = [x for x in out if x is not None]
        if len(eval) == 0:
            return None
        else:
            return out
def parse_literal(x):
    """
    return the smallest possible data type for a string or list of strings

    Parameters
    ----------
    x: str or list
        a string to be parsed

    Returns
    -------
    int, float or str
        the parsing result

    Examples
    --------
    >>> isinstance(parse_literal('1.5'), float)
    True

    >>> isinstance(parse_literal('1'), int)
    True

    >>> isinstance(parse_literal('foobar'), str)
    True
    """
    if isinstance(x, list):
        return [parse_literal(item) for item in x]
    if not isinstance(x, (bytes, str)):
        raise TypeError('input must be a string or a list of strings')
    # try the narrowest type first; fall through on failure
    for cast in (int, float):
        try:
            return cast(x)
        except ValueError:
            pass
    return x
def rescale(inlist, newrange=(0, 1)):
    """
    rescale the values in a list between the values in newrange (a tuple with the new minimum and maximum)
    """
    old_max = max(inlist)
    old_min = min(inlist)
    # a constant list cannot be mapped onto a range
    if old_min == old_max:
        raise RuntimeError('list contains of only one unique value')
    old_span = old_max - old_min
    new_span = newrange[1] - newrange[0]
    # keep the exact arithmetic order of operations for float stability
    return [(((float(v) - old_min) * new_span) / old_span) + newrange[0]
            for v in inlist]
def run(cmd, outdir=None, logfile=None, inlist=None, void=True, errorpass=False, env=None):
    """
    | wrapper for subprocess execution including logfile writing and command prompt piping
    | this is a convenience wrapper around the :mod:`subprocess` module and calls
      its class :class:`~subprocess.Popen` internally.
    Parameters
    ----------
    cmd: list
        the command arguments
    outdir: str
        the directory to execute the command in
    logfile: str
        a file to write stdout to
    inlist: list
        a list of arguments passed to stdin, i.e. arguments passed to interactive input of the program
    void: bool
        return stdout and stderr?
    errorpass: bool
        if False, a :class:`subprocess.CalledProcessError` is raised if the command fails
    env: dict or None
        the environment to be passed to the subprocess
    Returns
    -------
    None or Tuple
        a tuple of (stdout, stderr) if `void` is False otherwise None
    """
    cmd = [str(x) for x in dissolve(cmd)]
    if outdir is None:
        outdir = os.getcwd()
    log = sp.PIPE if logfile is None else open(logfile, 'a')
    # the try/finally guarantees the log file handle is closed even when the
    # command fails and CalledProcessError is raised (previously it leaked)
    try:
        proc = sp.Popen(cmd, stdin=sp.PIPE, stdout=log, stderr=sp.PIPE, cwd=outdir, env=env)
        instream = None if inlist is None \
            else ''.join([str(x) + '\n' for x in inlist]).encode('utf-8')
        out, err = proc.communicate(instream)
        out = decode_filter(out)
        err = decode_filter(err)
        if not errorpass and proc.returncode != 0:
            raise sp.CalledProcessError(proc.returncode, cmd, err)
        # add line for separating log entries of repeated function calls;
        # only written on success, matching the original behavior
        if logfile:
            log.write('#####################################################################\n')
    finally:
        if logfile:
            log.close()
    if not void:
        return out, err
| this is a convenience wrapper around the :mod:`subprocess` module and calls
its class :class:`~subprocess.Popen` internally.
Parameters
----------
cmd: list
the command arguments
outdir: str
the directory to execute the command in
logfile: str
a file to write stdout to
inlist: list
a list of arguments passed to stdin, i.e. arguments passed to interactive input of the program
void: bool
return stdout and stderr?
errorpass: bool
if False, a :class:`subprocess.CalledProcessError` is raised if the command fails
env: dict or None
the environment to be passed to the subprocess
Returns
-------
None or Tuple
a tuple of (stdout, stderr) if `void` is False otherwise None | entailment |
def urlQueryParser(url, querydict):
    """
    rebuild `url` with its query string replaced by the URL-encoded `querydict`
    """
    parsed = urlparse(url)
    new_query = urlencode(querydict)
    return urlunparse(parsed._replace(query=new_query))
def push(self, x):
    """
    append items to the stack; input can be a single value or a list
    """
    # a list is appended element-wise, anything else as a single item
    items = x if isinstance(x, list) else [x]
    self.stack.extend(items)
def pop(self):
    """
    remove and return the last stack element; returns None when the stack is empty
    """
    if self.empty():
        return None
    return self.stack.pop()
def axes(self, offset=False):
    """Return the measured magnetic field as an (x, y, z) tuple in milligauss.

    A raw reading of -4096 is the sensor's saturation marker and is replaced
    by OVERFLOW before scaling.  When `offset` is True the calibration offset
    is applied via ``__offset`` — presumably a per-axis correction; confirm
    against the class implementation.
    """
    # look up the gain setting; only the scale factor is needed here
    # (the register value is applied elsewhere when the gain is configured)
    _, self._scale = self.SCALES[self._gauss]

    def read_axis(register):
        # read one signed 16-bit axis value and convert to milligauss;
        # NOTE: OVERFLOW is scaled too, matching the original behavior
        raw = self.bus.read_int16_data(self.address, register)
        if raw == -4096:
            raw = OVERFLOW
        return raw * self._scale

    x = read_axis(self.HMC5883L_DXRA)
    y = read_axis(self.HMC5883L_DYRA)
    z = read_axis(self.HMC5883L_DZRA)
    if offset:
        (x, y, z) = self.__offset((x, y, z))
    return (x, y, z)
def _ToString(x):
"""The default default formatter!."""
# Some cross-language values for primitives. This is tested in
# jsontemplate_test.py.
if x is None:
return 'null'
if isinstance(x, six.string_types):
return x
return pprint.pformat(x) | The default default formatter!. | entailment |
def _Pairs(data):
"""dictionary -> list of pairs"""
keys = sorted(data)
return [{'@key': k, '@value': data[k]} for k in keys] | dictionary -> list of pairs | entailment |
def _Pluralize(value, unused_context, args):
"""Formatter to pluralize words."""
if len(args) == 0:
s, p = '', 's'
elif len(args) == 1:
s, p = '', args[0]
elif len(args) == 2:
s, p = args
else:
# Should have been checked at compile time
raise AssertionError
if value > 1:
return p
else:
return s | Formatter to pluralize words. | entailment |
def _StrftimeGm(value, unused_context, args):
    """Convert a timestamp in seconds to a string based on the format string.
    Returns GM time.
    """
    return _StrftimeHelper(args, time.gmtime(value))
Returns GM time. | entailment |
def _StrftimeLocal(value, unused_context, args):
    """Convert a timestamp in seconds to a string based on the format string.
    Returns local time.
    """
    return _StrftimeHelper(args, time.localtime(value))
Returns local time. | entailment |
def _TemplateExists(unused_value, context, args):
"""Returns whether the given name is in the current Template's template group."""
try:
name = args[0]
except IndexError:
raise EvaluationError('The "template" predicate requires an argument.')
return context.HasTemplate(name) | Returns whether the given name is in the current Template's template group. | entailment |
def SplitMeta(meta):
    """Split and validate metacharacters.
    Example: '{}' -> ('{', '}')
    This is public so the syntax highlighter and other tools can use it.
    """
    length = len(meta)
    if length % 2:
        raise ConfigurationError(
            '%r has an odd number of metacharacters' % meta)
    half = length // 2
    return meta[:half], meta[half:]
Example: '{}' -> ('{', '}')
This is public so the syntax highlighter and other tools can use it. | entailment |
def MakeTokenRegex(meta_left, meta_right):
    """Return a (compiled) regular expression for tokenization.
    Args:
      meta_left, meta_right: e.g. '{' and '}'
    - The regular expressions are memoized.
    - This function is public so the syntax highlighter can use it.
    """
    key = (meta_left, meta_right)
    cached = _token_re_cache.get(key)
    if cached is None:
        # - The () grouping is required so re.split keeps the directives.
        # - The first char inside the metacharacters must be a non-space, so
        #   literals like "function() { return 1; }" are left alone; this also
        #   means {} must contain at least one non-space character.
        pattern = (r'(' +
                   re.escape(meta_left) +
                   r'\S.*?' +
                   re.escape(meta_right) +
                   r')')
        cached = re.compile(pattern)
        _token_re_cache[key] = cached
    return cached
Args:
meta_left, meta_right: e.g. '{' and '}'
- The regular expressions are memoized.
- This function is public so the syntax highlighter can use it. | entailment |
def _MatchDirective(token):
    """Helper function for matching certain directives.

    Returns a (token_type, payload) pair, or (None, None) when `token` is
    not a recognized directive.  `token` is the text found between the
    metacharacters.
    """
    # Tokens below must start with '.'
    if token.startswith('.'):
        token = token[1:]
    else:
        return None, None
    if token == 'end':
        return END_TOKEN, None
    if token == 'alternates with':
        return ALTERNATES_TOKEN, token
    if token.startswith('or'):
        if token.strip() == 'or':
            # bare {.or} clause with no predicate
            return OR_TOKEN, None
        else:
            # {.or pred} -- carry the predicate expression as the payload
            pred_str = token[2:].strip()
            return OR_TOKEN, pred_str
    match = _SECTION_RE.match(token)
    if match:
        # the regex distinguishes 'repeated section foo' from 'section foo'
        repeated, section_name = match.groups()
        if repeated:
            return REPEATED_SECTION_TOKEN, section_name
        else:
            return SECTION_TOKEN, section_name
    if token.startswith('template '):
        # 9 == len('template '); payload is the template name
        return SUBST_TEMPLATE_TOKEN, token[9:].strip()
    if token.startswith('define '):
        # 7 == len('define '); payload is the definition name
        return DEF_TOKEN, token[7:].strip()
    if token.startswith('if '):
        # 3 == len('if '); payload is the predicate expression
        return IF_TOKEN, token[3:].strip()
    if token.endswith('?'):
        # attribute-test predicates like {.debug?}
        return PREDICATE_TOKEN, token
    return None, None
def _Tokenize(template_str, meta_left, meta_right, whitespace):
    """Yields tokens, which are 2-tuples (TOKEN_TYPE, token_string).

    Splits `template_str` line by line on the directive regex.  `whitespace`
    is either 'smart' or 'strip-line'; in 'strip-line' mode every line is
    stripped, and the {.OPTION strip-line} ... {.END} markers toggle the same
    stripping for a region in 'smart' mode.
    """
    trimlen = len(meta_left)  # number of chars to trim off each end of a directive
    token_re = MakeTokenRegex(meta_left, meta_right)
    do_strip = (whitespace == 'strip-line')  # Do this outside loop
    do_strip_part = False  # region-scoped stripping toggled by OPTION markers
    for line in template_str.splitlines(True):  # retain newlines
        if do_strip or do_strip_part:
            line = line.strip()
        tokens = token_re.split(line)
        # Check for a special case first. If a comment or "block" directive is on a
        # line by itself (with only space surrounding it), then the space is
        # omitted. For simplicity, we don't handle the case where we have 2
        # directives, say '{.end} # {#comment}' on a line.
        if len(tokens) == 3:
            # ''.isspace() == False, so work around that
            if (tokens[0].isspace() or not tokens[0]) and \
               (tokens[2].isspace() or not tokens[2]):
                token = tokens[1][trimlen: -trimlen]
                # Check the ones that begin with ## before #
                if token == COMMENT_BEGIN:
                    yield COMMENT_BEGIN_TOKEN, None
                    continue
                if token == COMMENT_END:
                    yield COMMENT_END_TOKEN, None
                    continue
                if token == OPTION_STRIP_LINE:
                    do_strip_part = True
                    continue
                if token == OPTION_END:
                    do_strip_part = False
                    continue
                if token.startswith('#'):
                    continue  # The whole line is omitted
                token_type, token = _MatchDirective(token)
                if token_type is not None:
                    yield token_type, token  # Only yield the token, not space
                    continue
        # The line isn't special; process it normally.
        # re.split with a capturing group alternates literal/directive, so
        # even indices are literals and odd indices are directives.
        for i, token in enumerate(tokens):
            if i % 2 == 0:
                yield LITERAL_TOKEN, token
            else:  # It's a "directive" in metachracters
                assert token.startswith(meta_left), repr(token)
                assert token.endswith(meta_right), repr(token)
                token = token[trimlen: -trimlen]
                # Check the ones that begin with ## before #
                if token == COMMENT_BEGIN:
                    yield COMMENT_BEGIN_TOKEN, None
                    continue
                if token == COMMENT_END:
                    yield COMMENT_END_TOKEN, None
                    continue
                if token == OPTION_STRIP_LINE:
                    do_strip_part = True
                    continue
                if token == OPTION_END:
                    do_strip_part = False
                    continue
                # A single-line comment
                if token.startswith('#'):
                    continue
                if token.startswith('.'):
                    # special literal directives expanding to raw characters
                    literal = {
                        '.meta-left': meta_left,
                        '.meta-right': meta_right,
                        '.space': ' ',
                        '.tab': '\t',
                        '.newline': '\n',
                    }.get(token)
                    if literal is not None:
                        yield META_LITERAL_TOKEN, literal
                        continue
                token_type, token = _MatchDirective(token)
                if token_type is not None:
                    yield token_type, token
                else:  # Now we know the directive is a substitution.
                    yield SUBST_TOKEN, token
def _CompileTemplate(
    template_str, builder, meta='{}', format_char='|', default_formatter='str',
    whitespace='smart'):
    """Compile the template string, calling methods on the 'program builder'.
    Args:
      template_str: The template string.  It should not have any compilation
          options in the header -- those are parsed by FromString/FromFile
      builder: The interface of _ProgramBuilder isn't fixed.  Use at your own
          risk.
      meta: The metacharacters to use, e.g. '{}', '[]'.
      default_formatter: The formatter to use for substitutions that are missing a
          formatter.  The 'str' formatter the "default default" -- it just tries
          to convert the context value to a string in some unspecified manner.
      whitespace: 'smart' or 'strip-line'.  In smart mode, if a directive is alone
          on a line, with only whitespace on either side, then the whitespace is
          removed.  In 'strip-line' mode, every line is stripped of its
          leading and trailing whitespace.
    Returns:
      The compiled program (obtained from the builder)
    Raises:
      The various subclasses of CompilationError.  For example, if
      default_formatter=None, and a variable is missing a formatter, then
      MissingFormatter is raised.
    This function is public so it can be used by other tools, e.g. a syntax
    checking tool run before submitting a template to source control.
    """
    meta_left, meta_right = SplitMeta(meta)
    # : is meant to look like Python 3000 formatting {foo:.3f}.  According to
    # PEP 3101, that's also what .NET uses.
    # | is more readable, but, more importantly, reminiscent of pipes, which is
    # useful for multiple formatters, e.g. {name|js-string|html}
    if format_char not in (':', '|'):
        raise ConfigurationError(
            'Only format characters : and | are accepted (got %r)' % format_char)
    if whitespace not in ('smart', 'strip-line'):
        raise ConfigurationError('Invalid whitespace mode %r' % whitespace)
    # If we go to -1, then we got too many {end}.  If end at 1, then we're missing
    # an {end}.
    balance_counter = 0
    comment_counter = 0  # ditto for ##BEGIN/##END
    has_defines = False
    # Dispatch each token from the tokenizer to the matching builder callback.
    for token_type, token in _Tokenize(template_str, meta_left, meta_right,
                                       whitespace):
        if token_type == COMMENT_BEGIN_TOKEN:
            comment_counter += 1
            continue
        if token_type == COMMENT_END_TOKEN:
            comment_counter -= 1
            if comment_counter < 0:
                raise CompilationError('Got too many ##END markers')
            continue
        # Don't process any tokens (we are inside a block comment)
        if comment_counter > 0:
            continue
        if token_type in (LITERAL_TOKEN, META_LITERAL_TOKEN):
            if token:
                builder.Append(token)
            continue
        if token_type in (SECTION_TOKEN, REPEATED_SECTION_TOKEN, DEF_TOKEN):
            # A section name may carry formatters: {.section name|fmt1|fmt2}
            parts = [p.strip() for p in token.split(format_char)]
            if len(parts) == 1:
                name = parts[0]
                formatters = []
            else:
                name = parts[0]
                formatters = parts[1:]
            builder.NewSection(token_type, name, formatters)
            balance_counter += 1
            if token_type == DEF_TOKEN:
                has_defines = True
            continue
        if token_type == PREDICATE_TOKEN:
            # {.attr?} lookups
            builder.NewPredicateSection(token, test_attr=True)
            balance_counter += 1
            continue
        if token_type == IF_TOKEN:
            builder.NewPredicateSection(token, test_attr=False)
            balance_counter += 1
            continue
        if token_type == OR_TOKEN:
            builder.NewOrClause(token)
            continue
        if token_type == ALTERNATES_TOKEN:
            builder.AlternatesWith()
            continue
        if token_type == END_TOKEN:
            balance_counter -= 1
            if balance_counter < 0:
                # TODO: Show some context for errors
                raise TemplateSyntaxError(
                    'Got too many %send%s statements.  You may have mistyped an '
                    "earlier 'section' or 'repeated section' directive."
                    % (meta_left, meta_right))
            builder.EndSection()
            continue
        if token_type == SUBST_TOKEN:
            # A substitution may chain formatters: {name|fmt1|fmt2}
            parts = [p.strip() for p in token.split(format_char)]
            if len(parts) == 1:
                if default_formatter is None:
                    raise MissingFormatter('This template requires explicit formatters.')
                # If no formatter is specified, the default is the 'str' formatter,
                # which the user can define however they desire.
                name = token
                formatters = [default_formatter]
            else:
                name = parts[0]
                formatters = parts[1:]
            builder.AppendSubstitution(name, formatters)
            continue
        if token_type == SUBST_TEMPLATE_TOKEN:
            # no formatters
            builder.AppendTemplateSubstitution(token)
            continue
    if balance_counter != 0:
        raise TemplateSyntaxError('Got too few %send%s statements' %
                                  (meta_left, meta_right))
    if comment_counter != 0:
        raise CompilationError('Got %d more {##BEGIN}s than {##END}s' % comment_counter)
    return builder.Root(), has_defines
Args:
template_str: The template string. It should not have any compilation
options in the header -- those are parsed by FromString/FromFile
builder: The interface of _ProgramBuilder isn't fixed. Use at your own
risk.
meta: The metacharacters to use, e.g. '{}', '[]'.
default_formatter: The formatter to use for substitutions that are missing a
formatter. The 'str' formatter the "default default" -- it just tries
to convert the context value to a string in some unspecified manner.
whitespace: 'smart' or 'strip-line'. In smart mode, if a directive is alone
on a line, with only whitespace on either side, then the whitespace is
removed. In 'strip-line' mode, every line is stripped of its
leading and trailing whitespace.
Returns:
The compiled program (obtained from the builder)
Raises:
The various subclasses of CompilationError. For example, if
default_formatter=None, and a variable is missing a formatter, then
MissingFormatter is raised.
This function is public so it can be used by other tools, e.g. a syntax
checking tool run before submitting a template to source control. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.