| sentence1 (string, 52 to 3.87M chars) | sentence2 (string, 1 to 47.2k chars) | label (1 class: entailment) |
|---|---|---|
def to_nuniq_interval_set(cls, nested_is):
"""
Convert an IntervalSet using the NESTED numbering scheme to an IntervalSet containing UNIQ numbers for HEALPix
cells.
Parameters
----------
nested_is : `IntervalSet`
IntervalSet object storing HEALPix cells as [ipix*4^(29-order), (ipix+1)*4^(29-order)[ intervals.
Returns
-------
interval : `IntervalSet`
IntervalSet object storing HEALPix cells as [ipix + 4*4^(order), ipix+1 + 4*4^(order)[ intervals.
"""
r2 = nested_is.copy()
res = []
if r2.empty():
return IntervalSet()
order = 0
while not r2.empty():
shift = int(2 * (IntervalSet.HPY_MAX_ORDER - order))
ofs = (int(1) << shift) - 1
ofs2 = int(1) << (2 * order + 2)
r4 = []
for iv in r2._intervals:
a = (int(iv[0]) + ofs) >> shift
b = int(iv[1]) >> shift
c = a << shift
d = b << shift
if d > c:
r4.append((c, d))
res.append((a + ofs2, b + ofs2))
if len(r4) > 0:
r4_is = IntervalSet(np.asarray(r4))
r2 = r2.difference(r4_is)
order += 1
return IntervalSet(np.asarray(res))
|
Convert an IntervalSet using the NESTED numbering scheme to an IntervalSet containing UNIQ numbers for HEALPix
cells.
Parameters
----------
nested_is : `IntervalSet`
IntervalSet object storing HEALPix cells as [ipix*4^(29-order), (ipix+1)*4^(29-order)[ intervals.
Returns
-------
interval : `IntervalSet`
IntervalSet object storing HEALPix cells as [ipix + 4*4^(order), ipix+1 + 4*4^(order)[ intervals.
|
entailment
|
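As a quick illustration of the two numbering schemes handled by `to_nuniq_interval_set`, the short sketch below computes, for a hypothetical cell (order, ipix), its UNIQ number (ipix + 4*4^order) and the NESTED interval it occupies at the deepest order (29, matching `IntervalSet.HPY_MAX_ORDER`); the cell values are illustrative.

HPY_MAX_ORDER = 29            # deepest HEALPix order used by IntervalSet
order, ipix = 3, 42           # illustrative cell

# UNIQ number of the cell: ipix + 4 * 4**order
uniq = ipix + (1 << (2 * order + 2))            # 42 + 256 = 298

# NESTED interval covering the same cell at the deepest resolution
shift = 2 * (HPY_MAX_ORDER - order)
nested_interval = (ipix << shift, (ipix + 1) << shift)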
def from_nuniq_interval_set(cls, nuniq_is):
"""
Convert an IntervalSet containing NUNIQ intervals to an IntervalSet representing HEALPix
cells following the NESTED numbering scheme.
Parameters
----------
nuniq_is : `IntervalSet`
IntervalSet object storing HEALPix cells as [ipix + 4*4^(order), ipix+1 + 4*4^(order)[ intervals.
Returns
-------
interval : `IntervalSet`
IntervalSet object storing HEALPix cells as [ipix*4^(29-order), (ipix+1)*4^(29-order)[ intervals.
"""
nested_is = IntervalSet()
# Appending to a Python list is faster than appending to a numpy array.
# For these algorithms we build a list and create the interval set from the finished list.
rtmp = []
last_order = 0
intervals = nuniq_is._intervals
diff_order = IntervalSet.HPY_MAX_ORDER
shift_order = 2 * diff_order
for interval in intervals:
for j in range(interval[0], interval[1]):
order, i_pix = uniq2orderipix(j)
if order != last_order:
nested_is = nested_is.union(IntervalSet(np.asarray(rtmp)))
rtmp = []
last_order = order
diff_order = IntervalSet.HPY_MAX_ORDER - order
shift_order = 2 * diff_order
rtmp.append((i_pix << shift_order, (i_pix + 1) << shift_order))
nested_is = nested_is.union(IntervalSet(np.asarray(rtmp)))
return nested_is
|
Convert an IntervalSet containing NUNIQ intervals to an IntervalSet representing HEALPix
cells following the NESTED numbering scheme.
Parameters
----------
nuniq_is : `IntervalSet`
IntervalSet object storing HEALPix cells as [ipix + 4*4^(order), ipix+1 + 4*4^(order)[ intervals.
Returns
-------
interval : `IntervalSet`
IntervalSet object storing HEALPix cells as [ipix*4^(29-order), (ipix+1)*4^(29-order)[ intervals.
|
entailment
|
def merge(a_intervals, b_intervals, op):
"""
Merge two lists of intervals according to the boolean function op
``a_intervals`` and ``b_intervals`` need to be sorted and consistent (no overlapping intervals).
This operation keeps the resulting interval set consistent.
Parameters
----------
a_intervals : `~numpy.ndarray`
A sorted, merged list of intervals represented as an N x 2 numpy array
b_intervals : `~numpy.ndarray`
A sorted, merged list of intervals represented as an N x 2 numpy array
op : `function`
Lambda function taking two params and returning the result of the operation between
these two params.
Example: ``lambda in_a, in_b: in_a and in_b`` describes the intersection of ``a_intervals`` and
``b_intervals``, whereas ``lambda in_a, in_b: in_a or in_b`` describes the union of ``a_intervals`` and
``b_intervals``.
Returns
-------
array : `numpy.ndarray`
an N x 2 numpy array containing the intervals resulting from the op between ``a_intervals`` and ``b_intervals``.
"""
a_endpoints = a_intervals.flatten().tolist()
b_endpoints = b_intervals.flatten().tolist()
sentinel = max(a_endpoints[-1], b_endpoints[-1]) + 1
a_endpoints += [sentinel]
b_endpoints += [sentinel]
a_index = 0
b_index = 0
res = []
scan = min(a_endpoints[0], b_endpoints[0])
while scan < sentinel:
in_a = not ((scan < a_endpoints[a_index]) ^ (a_index % 2))
in_b = not ((scan < b_endpoints[b_index]) ^ (b_index % 2))
in_res = op(in_a, in_b)
if in_res ^ (len(res) % 2):
res += [scan]
if scan == a_endpoints[a_index]:
a_index += 1
if scan == b_endpoints[b_index]:
b_index += 1
scan = min(a_endpoints[a_index], b_endpoints[b_index])
return np.asarray(res).reshape((-1, 2))
|
Merge two lists of intervals according to the boolean function op
``a_intervals`` and ``b_intervals`` need to be sorted and consistent (no overlapping intervals).
This operation keeps the resulting interval set consistent.
Parameters
----------
a_intervals : `~numpy.ndarray`
A sorted, merged list of intervals represented as an N x 2 numpy array
b_intervals : `~numpy.ndarray`
A sorted, merged list of intervals represented as an N x 2 numpy array
op : `function`
Lambda function taking two params and returning the result of the operation between
these two params.
Example: ``lambda in_a, in_b: in_a and in_b`` describes the intersection of ``a_intervals`` and
``b_intervals``, whereas ``lambda in_a, in_b: in_a or in_b`` describes the union of ``a_intervals`` and
``b_intervals``.
Returns
-------
array : `numpy.ndarray`
an N x 2 numpy array containing the intervals resulting from the op between ``a_intervals`` and ``b_intervals``.
|
entailment
|
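A minimal usage sketch of the `merge` helper above (assuming it is in scope): the boolean `op` selects union or intersection of two sorted, non-overlapping interval lists.

import numpy as np

a = np.array([[1, 4], [6, 9]])
b = np.array([[3, 7]])

# Union: a point is kept if it lies in a OR in b
union = merge(a, b, lambda in_a, in_b: in_a or in_b)    # [[1 9]]
# Intersection: a point is kept if it lies in a AND in b
inter = merge(a, b, lambda in_a, in_b: in_a and in_b)   # [[3 4], [6 7]]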
def delete(self):
""" Delete the object from GPU memory.
Note that the GPU object will also be deleted when this gloo
object is about to be deleted. However, sometimes you want to
delete the GPU object explicitly.
"""
# We only allow the object to be deleted once, otherwise
# we might be deleting another GPU object that got our gl-id
# after our GPU object was deleted. Also note that e.g.
# DataBufferView does not have the _glir attribute.
if hasattr(self, '_glir'):
# Send our final command into the queue
self._glir.command('DELETE', self._id)
# Tell master glir queue that this queue is no longer being used
self._glir._deletable = True
# Detach the queue
del self._glir
|
Delete the object from GPU memory.
Note that the GPU object will also be deleted when this gloo
object is about to be deleted. However, sometimes you want to
delete the GPU object explicitly.
|
entailment
|
def set_data(self, image):
"""Set the data
Parameters
----------
image : array-like
The image data.
"""
data = np.asarray(image)
if self._data is None or self._data.shape != data.shape:
self._need_vertex_update = True
self._data = data
self._need_texture_upload = True
|
Set the data
Parameters
----------
image : array-like
The image data.
|
entailment
|
def _build_interpolation(self):
"""Rebuild the _data_lookup_fn using different interpolations within
the shader
"""
interpolation = self._interpolation
self._data_lookup_fn = self._interpolation_fun[interpolation]
self.shared_program.frag['get_data'] = self._data_lookup_fn
# only 'bilinear' uses 'linear' texture interpolation
if interpolation == 'bilinear':
texture_interpolation = 'linear'
else:
# 'nearest' (and also 'bilinear') doesn't use spatial_filters.frag
# so u_kernel and shape setting is skipped
texture_interpolation = 'nearest'
if interpolation != 'nearest':
self.shared_program['u_kernel'] = self._kerneltex
self._data_lookup_fn['shape'] = self._data.shape[:2][::-1]
if self._texture.interpolation != texture_interpolation:
self._texture.interpolation = texture_interpolation
self._data_lookup_fn['texture'] = self._texture
self._need_interpolation_update = False
|
Rebuild the _data_lookup_fn using different interpolations within
the shader
|
entailment
|
def _build_vertex_data(self):
"""Rebuild the vertex buffers used for rendering the image when using
the subdivide method.
"""
grid = self._grid
w = 1.0 / grid[1]
h = 1.0 / grid[0]
quad = np.array([[0, 0, 0], [w, 0, 0], [w, h, 0],
[0, 0, 0], [w, h, 0], [0, h, 0]],
dtype=np.float32)
quads = np.empty((grid[1], grid[0], 6, 3), dtype=np.float32)
quads[:] = quad
mgrid = np.mgrid[0.:grid[1], 0.:grid[0]].transpose(1, 2, 0)
mgrid = mgrid[:, :, np.newaxis, :]
mgrid[..., 0] *= w
mgrid[..., 1] *= h
quads[..., :2] += mgrid
tex_coords = quads.reshape(grid[1]*grid[0]*6, 3)
tex_coords = np.ascontiguousarray(tex_coords[:, :2])
vertices = tex_coords * self.size
self._subdiv_position.set_data(vertices.astype('float32'))
self._subdiv_texcoord.set_data(tex_coords.astype('float32'))
|
Rebuild the vertex buffers used for rendering the image when using
the subdivide method.
|
entailment
|
def _update_method(self, view):
"""Decide which method to use for *view* and configure it accordingly.
"""
method = self._method
if method == 'auto':
if view.transforms.get_transform().Linear:
method = 'subdivide'
else:
method = 'impostor'
view._method_used = method
if method == 'subdivide':
view.view_program['method'] = 0
view.view_program['a_position'] = self._subdiv_position
view.view_program['a_texcoord'] = self._subdiv_texcoord
elif method == 'impostor':
view.view_program['method'] = 1
view.view_program['a_position'] = self._impostor_coords
view.view_program['a_texcoord'] = self._impostor_coords
else:
raise ValueError("Unknown image draw method '%s'" % method)
self.shared_program['image_size'] = self.size
view._need_method_update = False
self._prepare_transforms(view)
|
Decide which method to use for *view* and configure it accordingly.
|
entailment
|
def append(self, P, closed=False, itemsize=None, **kwargs):
"""
Append a new set of vertices to the collection.
For the kwargs arguments, n is the number of vertices (local) or the number
of items (shared).
Parameters
----------
P : np.array
Vertices positions of the path(s) to be added
closed: bool
Whether path(s) is/are closed
itemsize: int or None
Size of an individual path
caps : list, array or 2-tuple
Path start/end cap
color : list, array or 4-tuple
Path color
linewidth : list, array or float
Path linewidth
antialias : list, array or float
Path antialias area
"""
itemsize = itemsize or len(P)
itemcount = len(P) // itemsize
P = P.reshape(itemcount, itemsize, 3)
if closed:
V = np.empty((itemcount, itemsize + 3), dtype=self.vtype)
# Apply default values on vertices
for name in self.vtype.names:
if name not in ['collection_index', 'prev', 'curr', 'next']:
V[name][1:-2] = kwargs.get(name, self._defaults[name])
V['prev'][:, 2:-1] = P
V['prev'][:, 1] = V['prev'][:, -2]
V['curr'][:, 1:-2] = P
V['curr'][:, -2] = V['curr'][:, 1]
V['next'][:, 0:-3] = P
V['next'][:, -3] = V['next'][:, 0]
V['next'][:, -2] = V['next'][:, 1]
else:
V = np.empty((itemcount, itemsize + 2), dtype=self.vtype)
# Apply default values on vertices
for name in self.vtype.names:
if name not in ['collection_index', 'prev', 'curr', 'next']:
V[name][1:-1] = kwargs.get(name, self._defaults[name])
V['prev'][:, 2:] = P
V['prev'][:, 1] = V['prev'][:, 2]
V['curr'][:, 1:-1] = P
V['next'][:, :-2] = P
V['next'][:, -2] = V['next'][:, -3]
V[:, 0] = V[:, 1]
V[:, -1] = V[:, -2]
V = V.ravel()
V = np.repeat(V, 2, axis=0)
V['id'] = np.tile([1, -1], len(V) // 2)
if closed:
V = V.reshape(itemcount, 2 * (itemsize + 3))
else:
V = V.reshape(itemcount, 2 * (itemsize + 2))
V["id"][:, :2] = 2, -2
V["id"][:, -2:] = 2, -2
V = V.ravel()
# Uniforms
if self.utype:
U = np.zeros(itemcount, dtype=self.utype)
for name in self.utype.names:
if name not in ["__unused__"]:
U[name] = kwargs.get(name, self._defaults[name])
else:
U = None
Collection.append(self, vertices=V, uniforms=U,
itemsize=2 * (itemsize + 2 + closed))
|
Append a new set of vertices to the collection.
For the kwargs arguments, n is the number of vertices (local) or the number
of items (shared).
Parameters
----------
P : np.array
Vertices positions of the path(s) to be added
closed: bool
Whether path(s) is/are closed
itemsize: int or None
Size of an individual path
caps : list, array or 2-tuple
Path start/end cap
color : list, array or 4-tuple
Path color
linewidth : list, array or float
Path linewidth
antialias : list, array or float
Path antialias area
|
entailment
|
def bake(self, P, key='curr', closed=False, itemsize=None):
"""
Given a path P, return the baked vertices as they should be copied in
the collection if the path has already been appended.
Example:
--------
paths.append(P)
P *= 2
paths['prev'][0] = bake(P,'prev')
paths['curr'][0] = bake(P,'curr')
paths['next'][0] = bake(P,'next')
"""
itemsize = itemsize or len(P)
itemcount = len(P) // itemsize  # noqa
n = itemsize
if closed:
I = np.arange(n + 3)
if key == 'prev':
I -= 2
I[0], I[1], I[-1] = n - 1, n - 1, n - 1
elif key == 'next':
I[0], I[-3], I[-2], I[-1] = 1, 0, 1, 1
else:
I -= 1
I[0], I[-1], I[n + 1] = 0, 0, 0
else:
I = np.arange(n + 2)
if key == 'prev':
I -= 2
I[0], I[1], I[-1] = 0, 0, n - 2
elif key == 'next':
I[0], I[-1], I[-2] = 1, n - 1, n - 1
else:
I -= 1
I[0], I[-1] = 0, n - 1
I = np.repeat(I, 2)
return P[I]
|
Given a path P, return the baked vertices as they should be copied in
the collection if the path has already been appended.
Example:
--------
paths.append(P)
P *= 2
paths['prev'][0] = bake(P,'prev')
paths['curr'][0] = bake(P,'curr')
paths['next'][0] = bake(P,'next')
|
entailment
|
def draw(self, mode="triangle_strip"):
""" Draw collection """
gl.glDepthMask(gl.GL_FALSE)
Collection.draw(self, mode)
gl.glDepthMask(gl.GL_TRUE)
|
Draw collection
|
entailment
|
def _stop_timers(canvas):
"""Stop all timers in a canvas."""
for attr in dir(canvas):
try:
attr_obj = getattr(canvas, attr)
except NotImplementedError:
# This try/except is needed because canvas.position raises
# an error (it is not implemented in this backend).
attr_obj = None
if isinstance(attr_obj, Timer):
attr_obj.stop()
|
Stop all timers in a canvas.
|
entailment
|
def _last_stack_str():
"""Print stack trace from call that didn't originate from here"""
stack = extract_stack()
for s in stack[::-1]:
if op.join('vispy', 'gloo', 'buffer.py') not in s[0]:
break
return format_list([s])[0]
|
Print stack trace from call that didn't originate from here
|
entailment
|
def set_subdata(self, data, offset=0, copy=False):
""" Set a sub-region of the buffer (deferred operation).
Parameters
----------
data : ndarray
Data to be uploaded
offset: int
Offset in buffer where to start copying data (in bytes)
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
"""
data = np.array(data, copy=copy)
nbytes = data.nbytes
if offset < 0:
raise ValueError("Offset must be positive")
elif (offset + nbytes) > self._nbytes:
raise ValueError("Data does not fit into buffer")
# If the whole buffer is to be written, we clear any pending data
# (because they will be overwritten anyway)
if nbytes == self._nbytes and offset == 0:
self._glir.command('SIZE', self._id, nbytes)
self._glir.command('DATA', self._id, offset, data)
|
Set a sub-region of the buffer (deferred operation).
Parameters
----------
data : ndarray
Data to be uploaded
offset: int
Offset in buffer where to start copying data (in bytes)
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
|
entailment
|
def set_data(self, data, copy=False):
""" Set data in the buffer (deferred operation).
This completely resets the size and contents of the buffer.
Parameters
----------
data : ndarray
Data to be uploaded
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
"""
data = np.array(data, copy=copy)
nbytes = data.nbytes
if nbytes != self._nbytes:
self.resize_bytes(nbytes)
else:
# Use SIZE to discard any previous data setting
self._glir.command('SIZE', self._id, nbytes)
if nbytes: # Only set data if there *is* data
self._glir.command('DATA', self._id, 0, data)
|
Set data in the buffer (deferred operation).
This completely resets the size and contents of the buffer.
Parameters
----------
data : ndarray
Data to be uploaded
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
|
entailment
|
def resize_bytes(self, size):
""" Resize this buffer (deferred operation).
Parameters
----------
size : int
New buffer size in bytes.
"""
self._nbytes = size
self._glir.command('SIZE', self._id, size)
# Invalidate any view on this buffer
for view in self._views:
if view() is not None:
view()._valid = False
self._views = []
|
Resize this buffer (deferred operation).
Parameters
----------
size : int
New buffer size in bytes.
|
entailment
|
def set_subdata(self, data, offset=0, copy=False, **kwargs):
""" Set a sub-region of the buffer (deferred operation).
Parameters
----------
data : ndarray
Data to be uploaded
offset: int
Offset in buffer where to start copying data (in bytes)
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
**kwargs : dict
Additional keyword arguments.
"""
data = self._prepare_data(data, **kwargs)
offset = offset * self.itemsize
Buffer.set_subdata(self, data=data, offset=offset, copy=copy)
|
Set a sub-region of the buffer (deferred operation).
Parameters
----------
data : ndarray
Data to be uploaded
offset: int
Offset in buffer where to start copying data (in bytes)
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
**kwargs : dict
Additional keyword arguments.
|
entailment
|
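For `DataBuffer.set_subdata` the offset is expressed in elements and converted to a byte offset internally (offset * itemsize) before calling `Buffer.set_subdata`. A minimal sketch, assuming vispy is installed and using a `VertexBuffer` (a `DataBuffer` subclass):

import numpy as np
from vispy.gloo import VertexBuffer

buf = VertexBuffer(np.zeros(10, dtype=np.float32))   # 10 elements, 40 bytes
chunk = np.arange(4, dtype=np.float32)
# Writes elements 3..6; internally this becomes byte offset 3 * 4 = 12.
buf.set_subdata(chunk, offset=3)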
def set_data(self, data, copy=False, **kwargs):
""" Set data (deferred operation)
Parameters
----------
data : ndarray
Data to be uploaded
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
**kwargs : dict
Additional arguments.
"""
data = self._prepare_data(data, **kwargs)
self._dtype = data.dtype
self._stride = data.strides[-1]
self._itemsize = self._dtype.itemsize
Buffer.set_data(self, data=data, copy=copy)
|
Set data (deferred operation)
Parameters
----------
data : ndarray
Data to be uploaded
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
**kwargs : dict
Additional arguments.
|
entailment
|
def glsl_type(self):
""" GLSL declaration strings required for a variable to hold this data.
"""
if self.dtype is None:
return None
dtshape = self.dtype[0].shape
n = dtshape[0] if dtshape else 1
if n > 1:
dtype = 'vec%d' % n
else:
dtype = 'float' if 'f' in self.dtype[0].base.kind else 'int'
return 'attribute', dtype
|
GLSL declaration strings required for a variable to hold this data.
|
entailment
|
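The mapping performed by `glsl_type` can be illustrated with a small standalone helper (hypothetical, not part of vispy's API) that applies the same rule to a one-field structured dtype:

import numpy as np

def glsl_decl(dtype):
    # Same rule as glsl_type: a sub-array length picks vecN, otherwise
    # the base kind picks float/int.
    dtshape = dtype[0].shape
    n = dtshape[0] if dtshape else 1
    if n > 1:
        base = 'vec%d' % n
    else:
        base = 'float' if 'f' in dtype[0].base.kind else 'int'
    return 'attribute', base

print(glsl_decl(np.dtype([('a_position', np.float32, (3,))])))  # ('attribute', 'vec3')
print(glsl_decl(np.dtype([('a_index', np.int32)])))             # ('attribute', 'int')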
def resize_bytes(self, size):
""" Resize the buffer (in-place, deferred operation)
Parameters
----------
size : integer
New buffer size in bytes
Notes
-----
This clears any pending operations.
"""
Buffer.resize_bytes(self, size)
self._size = size // self.itemsize
|
Resize the buffer (in-place, deferred operation)
Parameters
----------
size : integer
New buffer size in bytes
Notes
-----
This clears any pending operations.
|
entailment
|
def compile(self, pretty=True):
""" Compile all code and return a dict {name: code} where the keys
are determined by the keyword arguments passed to __init__().
Parameters
----------
pretty : bool
If True, use a slower method to mangle object names. This produces
GLSL that is more readable.
If False, then the output is mostly unreadable GLSL, but is about
10x faster to compile.
"""
# Authoritative mapping of {obj: name}
self._object_names = {}
#
# 1. collect list of dependencies for each shader
#
# maps {shader_name: [deps]}
self._shader_deps = {}
for shader_name, shader in self.shaders.items():
this_shader_deps = []
self._shader_deps[shader_name] = this_shader_deps
dep_set = set()
for dep in shader.dependencies(sort=True):
# visit each object no more than once per shader
if dep.name is None or dep in dep_set:
continue
this_shader_deps.append(dep)
dep_set.add(dep)
#
# 2. Assign names to all objects.
#
if pretty:
self._rename_objects_pretty()
else:
self._rename_objects_fast()
#
# 3. Now we have a complete namespace; concatenate all definitions
# together in topological order.
#
compiled = {}
obj_names = self._object_names
for shader_name, shader in self.shaders.items():
code = []
for dep in self._shader_deps[shader_name]:
dep_code = dep.definition(obj_names)
if dep_code is not None:
# strip out version pragma if present;
regex = r'#version (\d+)'
m = re.search(regex, dep_code)
if m is not None:
# check requested version
if m.group(1) != '120':
raise RuntimeError("Currently only GLSL #version "
"120 is supported.")
dep_code = re.sub(regex, '', dep_code)
code.append(dep_code)
compiled[shader_name] = '\n'.join(code)
self.code = compiled
return compiled
|
Compile all code and return a dict {name: code} where the keys
are determined by the keyword arguments passed to __init__().
Parameters
----------
pretty : bool
If True, use a slower method to mangle object names. This produces
GLSL that is more readable.
If False, then the output is mostly unreadable GLSL, but is about
10x faster to compile.
|
entailment
|
def _rename_objects_fast(self):
""" Rename all objects quickly to guaranteed-unique names using the
id() of each object.
This produces mostly unreadable GLSL, but is about 10x faster to
compile.
"""
for shader_name, deps in self._shader_deps.items():
for dep in deps:
name = dep.name
if name != 'main':
ext = '_%x' % id(dep)
name = name[:32-len(ext)] + ext
self._object_names[dep] = name
|
Rename all objects quickly to guaranteed-unique names using the
id() of each object.
This produces mostly unreadable GLSL, but is about 10x faster to
compile.
|
entailment
|
def _rename_objects_pretty(self):
""" Rename all objects like "name_1" to avoid conflicts. Objects are
only renamed if necessary.
This method produces more readable GLSL, but is rather slow.
"""
#
# 1. For each object, add its static names to the global namespace
# and make a list of the shaders used by the object.
#
# {name: obj} mapping for finding unique names
# initialize with reserved keywords.
self._global_ns = dict([(kwd, None) for kwd in gloo.util.KEYWORDS])
# functions are local per-shader
self._shader_ns = dict([(shader, {}) for shader in self.shaders])
# for each object, keep a list of shaders the object appears in
obj_shaders = {}
for shader_name, deps in self._shader_deps.items():
for dep in deps:
# Add static names to namespace
for name in dep.static_names():
self._global_ns[name] = None
obj_shaders.setdefault(dep, []).append(shader_name)
#
# 2. Assign new object names
#
name_index = {}
for obj, shaders in obj_shaders.items():
name = obj.name
if self._name_available(obj, name, shaders):
# hooray, we get to keep this name
self._assign_name(obj, name, shaders)
else:
# boo, find a new name
while True:
index = name_index.get(name, 0) + 1
name_index[name] = index
ext = '_%d' % index
new_name = name[:32-len(ext)] + ext
if self._name_available(obj, new_name, shaders):
self._assign_name(obj, new_name, shaders)
break
|
Rename all objects like "name_1" to avoid conflicts. Objects are
only renamed if necessary.
This method produces more readable GLSL, but is rather slow.
|
entailment
|
def _name_available(self, obj, name, shaders):
""" Return True if *name* is available for *obj* in *shaders*.
"""
if name in self._global_ns:
return False
shaders = self.shaders if self._is_global(obj) else shaders
for shader in shaders:
if name in self._shader_ns[shader]:
return False
return True
|
Return True if *name* is available for *obj* in *shaders*.
|
entailment
|
def _assign_name(self, obj, name, shaders):
""" Assign *name* to *obj* in *shaders*.
"""
if self._is_global(obj):
assert name not in self._global_ns
self._global_ns[name] = obj
else:
for shader in shaders:
ns = self._shader_ns[shader]
assert name not in ns
ns[name] = obj
self._object_names[obj] = name
|
Assign *name* to *obj* in *shaders*.
|
entailment
|
def _update(self):
"""Rebuilds the shaders, and repositions the objects
that are used internally by the ColorBarVisual
"""
x, y = self._pos
halfw, halfh = self._halfdim
# test that width and height are non-zero
if halfw <= 0:
raise ValueError("half-width must be positive and non-zero"
", not %s" % halfw)
if halfh <= 0:
raise ValueError("half-height must be positive and non-zero"
", not %s" % halfh)
# test that the given width and height is consistent
# with the orientation
if (self._orientation == "bottom" or self._orientation == "top"):
if halfw < halfh:
raise ValueError("half-width(%s) < half-height(%s) for"
"%s orientation,"
" expected half-width >= half-height" %
(halfw, halfh, self._orientation, ))
else: # orientation == left or orientation == right
if halfw > halfh:
raise ValueError("half-width(%s) > half-height(%s) for"
"%s orientation,"
" expected half-width <= half-height" %
(halfw, halfh, self._orientation, ))
# Set up the attributes that the shaders require
vertices = np.array([[x - halfw, y - halfh],
[x + halfw, y - halfh],
[x + halfw, y + halfh],
# tri 2
[x - halfw, y - halfh],
[x + halfw, y + halfh],
[x - halfw, y + halfh]],
dtype=np.float32)
self.shared_program['a_position'] = vertices
|
Rebuilds the shaders, and repositions the objects
that are used internally by the ColorBarVisual
|
entailment
|
def _update(self):
"""Rebuilds the shaders, and repositions the objects
that are used internally by the ColorBarVisual
"""
self._colorbar.halfdim = self._halfdim
self._border.halfdim = self._halfdim
self._label.text = self._label_str
self._ticks[0].text = str(self._clim[0])
self._ticks[1].text = str(self._clim[1])
self._update_positions()
self._colorbar._update()
self._border._update()
|
Rebuilds the shaders, and repositions the objects
that are used internally by the ColorBarVisual
|
entailment
|
def _update_positions(self):
"""
updates the positions of the colorbars and labels
"""
self._colorbar.pos = self._pos
self._border.pos = self._pos
if self._orientation == "right" or self._orientation == "left":
self._label.rotation = -90
x, y = self._pos
halfw, halfh = self._halfdim
label_anchors = \
ColorBarVisual._get_label_anchors(center=self._pos,
halfdim=self._halfdim,
orientation=self._orientation,
transforms=self.label.transforms)
self._label.anchors = label_anchors
ticks_anchors = \
ColorBarVisual._get_ticks_anchors(center=self._pos,
halfdim=self._halfdim,
orientation=self._orientation,
transforms=self.label.transforms)
self._ticks[0].anchors = ticks_anchors
self._ticks[1].anchors = ticks_anchors
(label_pos, ticks_pos) = \
ColorBarVisual._calc_positions(center=self._pos,
halfdim=self._halfdim,
border_width=self.border_width,
orientation=self._orientation,
transforms=self.transforms)
self._label.pos = label_pos
self._ticks[0].pos = ticks_pos[0]
self._ticks[1].pos = ticks_pos[1]
|
updates the positions of the colorbars and labels
|
entailment
|
def _calc_positions(center, halfdim, border_width,
orientation, transforms):
"""
Calculate the text positions given the ColorBar
parameters.
Note
----
This is static because in principle, this
function does not need access to the state of the ColorBar
at all. It's a computation function that computes coordinate
transforms
Parameters
----------
center: tuple (x, y)
Center of the ColorBar
halfdim: tuple (halfw, halfh)
Half of the dimensions measured from the center
border_width: float
Width of the border of the ColorBar
orientation: "top" | "bottom" | "left" | "right"
Position of the label with respect to the ColorBar
transforms: TransformSystem
the transforms of the ColorBar
"""
(x, y) = center
(halfw, halfh) = halfdim
visual_to_doc = transforms.get_transform('visual', 'document')
doc_to_visual = transforms.get_transform('document', 'visual')
# doc_widths = visual_to_doc.map(np.array([halfw, halfh, 0, 0],
# dtype=np.float32))
doc_x = visual_to_doc.map(np.array([halfw, 0, 0, 0], dtype=np.float32))
doc_y = visual_to_doc.map(np.array([0, halfh, 0, 0], dtype=np.float32))
if doc_x[0] < 0:
doc_x *= -1
if doc_y[1] < 0:
doc_y *= -1
# doc_halfw = np.abs(doc_widths[0])
# doc_halfh = np.abs(doc_widths[1])
if orientation == "top":
doc_perp_vector = -doc_y
elif orientation == "bottom":
doc_perp_vector = doc_y
elif orientation == "left":
doc_perp_vector = -doc_x
if orientation == "right":
doc_perp_vector = doc_x
perp_len = np.linalg.norm(doc_perp_vector)
doc_perp_vector /= perp_len
perp_len += border_width
perp_len += 5 # pixels
perp_len *= ColorBarVisual.text_padding_factor
doc_perp_vector *= perp_len
doc_center = visual_to_doc.map(np.array([x, y, 0, 0],
dtype=np.float32))
doc_label_pos = doc_center + doc_perp_vector
visual_label_pos = doc_to_visual.map(doc_label_pos)[:3]
# next, calculate tick positions
if orientation in ["top", "bottom"]:
doc_ticks_pos = [doc_label_pos - doc_x,
doc_label_pos + doc_x]
else:
doc_ticks_pos = [doc_label_pos + doc_y,
doc_label_pos - doc_y]
visual_ticks_pos = []
visual_ticks_pos.append(doc_to_visual.map(doc_ticks_pos[0])[:3])
visual_ticks_pos.append(doc_to_visual.map(doc_ticks_pos[1])[:3])
return (visual_label_pos, visual_ticks_pos)
|
Calculate the text positions given the ColorBar
parameters.
Note
----
This is static because in principle, this
function does not need access to the state of the ColorBar
at all. It's a computation function that computes coordinate
transforms
Parameters
----------
center: tuple (x, y)
Center of the ColorBar
halfdim: tuple (halfw, halfh)
Half of the dimensions measured from the center
border_width: float
Width of the border of the ColorBar
orientation: "top" | "bottom" | "left" | "right"
Position of the label with respect to the ColorBar
transforms: TransformSystem
the transforms of the ColorBar
|
entailment
|
def size(self):
""" The size of the ColorBar
Returns
-------
size: (major_axis_length, minor_axis_length)
major and minor axis are defined by the
orientation of the ColorBar
"""
(halfw, halfh) = self._halfdim
if self.orientation in ["top", "bottom"]:
return (halfw * 2., halfh * 2.)
else:
return (halfh * 2., halfw * 2.)
|
The size of the ColorBar
Returns
-------
size: (major_axis_length, minor_axis_length)
major and minor axis are defined by the
orientation of the ColorBar
|
entailment
|
def add_pseudo_fields(self):
"""Add 'pseudo' fields (e.g non-displayed fields) to the display."""
fields = []
if self.backlight_on != enums.BACKLIGHT_ON_NEVER:
fields.append(
display_fields.BacklightPseudoField(ref='0', backlight_rule=self.backlight_on)
)
fields.append(
display_fields.PriorityPseudoField(
ref='0',
priority_playing=self.priority_playing,
priority_not_playing=self.priority_not_playing,
)
)
self.pattern.add_pseudo_fields(fields, self.screen)
|
Add 'pseudo' fields (e.g. non-displayed fields) to the display.
|
entailment
|
def padded(self, padding):
"""Return a new Rect padded (smaller) by padding on all sides
Parameters
----------
padding : float
The padding.
Returns
-------
rect : instance of Rect
The padded rectangle.
"""
return Rect(pos=(self.pos[0]+padding, self.pos[1]+padding),
size=(self.size[0]-2*padding, self.size[1]-2*padding))
|
Return a new Rect padded (smaller) by padding on all sides
Parameters
----------
padding : float
The padding.
Returns
-------
rect : instance of Rect
The padded rectangle.
|
entailment
|
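A minimal usage sketch of `padded` (assuming vispy is installed; `Rect` lives in `vispy.geometry`): padding by 1 shrinks the rectangle by 1 unit on every side.

from vispy.geometry import Rect

r = Rect(pos=(0, 0), size=(10, 10))
inner = r.padded(1)
print(inner.pos, inner.size)   # (1, 1) (8, 8)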
def normalized(self):
"""Return a Rect covering the same area, but with height and width
guaranteed to be positive."""
return Rect(pos=(min(self.left, self.right),
min(self.top, self.bottom)),
size=(abs(self.width), abs(self.height)))
|
Return a Rect covering the same area, but with height and width
guaranteed to be positive.
|
entailment
|
def flipped(self, x=False, y=True):
"""Return a Rect with the same bounds but with axes inverted
Parameters
----------
x : bool
Flip the X axis.
y : bool
Flip the Y axis.
Returns
-------
rect : instance of Rect
The flipped rectangle.
"""
pos = list(self.pos)
size = list(self.size)
for i, flip in enumerate((x, y)):
if flip:
pos[i] += size[i]
size[i] *= -1
return Rect(pos, size)
|
Return a Rect with the same bounds but with axes inverted
Parameters
----------
x : bool
Flip the X axis.
y : bool
Flip the Y axis.
Returns
-------
rect : instance of Rect
The flipped rectangle.
|
entailment
|
def _transform_in(self):
"""Return array of coordinates that can be mapped by Transform
classes."""
return np.array([
[self.left, self.bottom, 0, 1],
[self.right, self.top, 0, 1]])
|
Return array of coordinates that can be mapped by Transform
classes.
|
entailment
|
def configure(self, viewport=None, fbo_size=None, fbo_rect=None,
canvas=None):
"""Automatically configure the TransformSystem:
* canvas_transform maps from the Canvas logical pixel
coordinate system to the framebuffer coordinate system, taking into
account the logical/physical pixel scale factor, current FBO
position, and y-axis inversion.
* framebuffer_transform maps from the current GL viewport on the
framebuffer coordinate system to clip coordinates (-1 to 1).
Parameters
==========
viewport : tuple or None
The GL viewport rectangle (x, y, w, h). If None, then it
is assumed to cover the entire canvas.
fbo_size : tuple or None
The size of the active FBO. If None, then it is assumed to have the
same size as the canvas's framebuffer.
fbo_rect : tuple or None
The position and size (x, y, w, h) of the FBO in the coordinate
system of the canvas's framebuffer. If None, then the bounds are
assumed to cover the entire active framebuffer.
canvas : Canvas instance
Optionally set the canvas for this TransformSystem. See the
`canvas` property.
"""
# TODO: check that d2f and f2r transforms still contain a single
# STTransform (if the user has modified these, then auto-config should
# either fail or replace the transforms)
if canvas is not None:
self.canvas = canvas
canvas = self._canvas
if canvas is None:
raise RuntimeError("No canvas assigned to this TransformSystem.")
# By default, this should invert the y axis--canvas origin is in top
# left, whereas framebuffer origin is in bottom left.
map_from = [(0, 0), canvas.size]
map_to = [(0, canvas.physical_size[1]), (canvas.physical_size[0], 0)]
self._canvas_transform.transforms[1].set_mapping(map_from, map_to)
if fbo_rect is None:
self._canvas_transform.transforms[0].scale = (1, 1, 1)
self._canvas_transform.transforms[0].translate = (0, 0, 0)
else:
# Map into FBO coordinates
map_from = [(fbo_rect[0], fbo_rect[1]),
(fbo_rect[0] + fbo_rect[2], fbo_rect[1] + fbo_rect[3])]
map_to = [(0, 0), fbo_size]
self._canvas_transform.transforms[0].set_mapping(map_from, map_to)
if viewport is None:
if fbo_size is None:
# viewport covers entire canvas
map_from = [(0, 0), canvas.physical_size]
else:
# viewport covers entire FBO
map_from = [(0, 0), fbo_size]
else:
map_from = [viewport[:2],
(viewport[0] + viewport[2], viewport[1] + viewport[3])]
map_to = [(-1, -1), (1, 1)]
self._framebuffer_transform.transforms[0].set_mapping(map_from, map_to)
|
Automatically configure the TransformSystem:
* canvas_transform maps from the Canvas logical pixel
coordinate system to the framebuffer coordinate system, taking into
account the logical/physical pixel scale factor, current FBO
position, and y-axis inversion.
* framebuffer_transform maps from the current GL viewport on the
framebuffer coordinate system to clip coordinates (-1 to 1).
Parameters
==========
viewport : tuple or None
The GL viewport rectangle (x, y, w, h). If None, then it
is assumed to cover the entire canvas.
fbo_size : tuple or None
The size of the active FBO. If None, then it is assumed to have the
same size as the canvas's framebuffer.
fbo_rect : tuple or None
The position and size (x, y, w, h) of the FBO in the coordinate
system of the canvas's framebuffer. If None, then the bounds are
assumed to cover the entire active framebuffer.
canvas : Canvas instance
Optionally set the canvas for this TransformSystem. See the
`canvas` property.
|
entailment
|
def dpi(self):
""" Physical resolution of the document coordinate system (dots per
inch).
"""
if self._dpi is None:
if self._canvas is None:
return None
else:
return self.canvas.dpi
else:
return self._dpi
|
Physical resolution of the document coordinate system (dots per
inch).
|
entailment
|
def get_transform(self, map_from='visual', map_to='render'):
"""Return a transform mapping between any two coordinate systems.
Parameters
----------
map_from : str
The starting coordinate system to map from. Must be one of: visual,
scene, document, canvas, framebuffer, or render.
map_to : str
The ending coordinate system to map to. Must be one of: visual,
scene, document, canvas, framebuffer, or render.
"""
tr = ['visual', 'scene', 'document', 'canvas', 'framebuffer', 'render']
ifrom = tr.index(map_from)
ito = tr.index(map_to)
if ifrom < ito:
trs = [getattr(self, '_' + t + '_transform')
for t in tr[ifrom:ito]][::-1]
else:
trs = [getattr(self, '_' + t + '_transform').inverse
for t in tr[ito:ifrom]]
return self._cache.get(trs)
|
Return a transform mapping between any two coordinate systems.
Parameters
----------
map_from : str
The starting coordinate system to map from. Must be one of: visual,
scene, document, canvas, framebuffer, or render.
map_to : str
The ending coordinate system to map to. Must be one of: visual,
scene, document, canvas, framebuffer, or render.
|
entailment
|
def _calculate_delta_pos(adjacency_arr, pos, t, optimal):
"""Helper to calculate the delta position"""
# XXX eventually this should be refactored for the sparse case to only
# do the necessary pairwise distances
delta = pos[:, np.newaxis, :] - pos
# Distance between points
distance2 = (delta*delta).sum(axis=-1)
# Enforce minimum distance of 0.01
distance2 = np.where(distance2 < 0.0001, 0.0001, distance2)
distance = np.sqrt(distance2)
# Displacement "force"
displacement = np.zeros((len(delta), 2))
for ii in range(2):
displacement[:, ii] = (
delta[:, :, ii] *
((optimal * optimal) / (distance*distance) -
(adjacency_arr * distance) / optimal)).sum(axis=1)
length = np.sqrt((displacement**2).sum(axis=1))
length = np.where(length < 0.01, 0.1, length)
delta_pos = displacement * t / length[:, np.newaxis]
return delta_pos
|
Helper to calculate the delta position
|
entailment
|
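A minimal sketch of the outer Fruchterman-Reingold loop that this helper supports (assuming `_calculate_delta_pos` as defined above is in scope); the cooling schedule and iteration count below are illustrative, not vispy's exact defaults.

import numpy as np

def simple_layout(adjacency_arr, n_iter=50):
    n = adjacency_arr.shape[0]
    optimal = 1.0 / np.sqrt(n)                  # target edge length
    pos = np.random.RandomState(0).rand(n, 2)   # random initial layout
    t = 0.1                                     # initial "temperature"
    dt = t / float(n_iter + 1)                  # linear cooling
    for _ in range(n_iter):
        pos += _calculate_delta_pos(adjacency_arr, pos, t, optimal)
        t -= dt
    return pos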
def get_recipe_intent_handler(request):
"""
You can insert arbitrary business logic code here
"""
# Get variables like userId, slots, intent name etc from the 'Request' object
ingredient = request.slots["Ingredient"] # Gets an Ingredient Slot from the Request object.
if ingredient is None:
return alexa.create_response("Could not find an ingredient!")
# All manipulations to the request's session object are automatically reflected in the request returned to Amazon.
# For example, this statement adds a new session attribute (automatically returned with the response) storing the
# last seen ingredient value in the 'last_ingredient' key.
request.session['last_ingredient'] = ingredient # Automatically returned as a sessionAttribute
# Modifying state like this saves us from explicitly having to return Session objects after every response
# alexa can also build cards which can be sent as part of the response
card = alexa.create_card(title="GetRecipeIntent activated", subtitle=None,
content="asked alexa to find a recipe using {}".format(ingredient))
return alexa.create_response("Finding a recipe with the ingredient {}".format(ingredient),
end_session=False, card_obj=card)
|
You can insert arbitrary business logic code here
|
entailment
|
def use(app=None, gl=None):
""" Set the usage options for vispy
Specify what app backend and GL backend to use.
Parameters
----------
app : str
The app backend to use (case insensitive). Standard backends:
* 'PyQt4': use Qt widget toolkit via PyQt4.
* 'PyQt5': use Qt widget toolkit via PyQt5.
* 'PySide': use Qt widget toolkit via PySide.
* 'PyGlet': use Pyglet backend.
* 'Glfw': use Glfw backend (successor of Glut). Widely available
on Linux.
* 'SDL2': use SDL v2 backend.
* 'osmesa': Use OSMesa backend
Additional backends:
* 'ipynb_vnc': render in the IPython notebook via a VNC approach
(experimental)
gl : str
The gl backend to use (case insensitive). Options are:
* 'gl2': use Vispy's desktop OpenGL API.
* 'pyopengl2': use PyOpenGL's desktop OpenGL API. Mostly for
testing.
* 'es2': (TO COME) use real OpenGL ES 2.0 on Windows via Angle.
Availability of ES 2.0 is larger for Windows, since it relies
on DirectX.
* 'gl+': use the full OpenGL functionality available on
your system (via PyOpenGL).
Notes
-----
If the app option is given, ``vispy.app.use_app()`` is called. If
the gl option is given, ``vispy.gloo.use_gl()`` is called.
If an app backend name is provided, and that backend could not be
loaded, an error is raised.
If no backend name is provided, Vispy will first check if the GUI
toolkit corresponding to each backend is already imported, and try
that backend first. If this is unsuccessful, it will try the
'default_backend' provided in the vispy config. If still not
successful, it will try each backend in a predetermined order.
See Also
--------
vispy.app.use_app
vispy.gloo.gl.use_gl
"""
if app is None and gl is None:
raise TypeError('Must specify at least one of "app" or "gl".')
# Example for future. This wont work (yet).
if app == 'ipynb_webgl':
app = 'headless'
gl = 'webgl'
if app == 'osmesa':
from ..util.osmesa_gl import fix_osmesa_gl_lib
fix_osmesa_gl_lib()
if gl is not None:
raise ValueError("Do not specify gl when using osmesa")
# Apply now
if gl:
from .. import gloo, config
config['gl_backend'] = gl
gloo.gl.use_gl(gl)
if app:
from ..app import use_app
use_app(app)
|
Set the usage options for vispy
Specify what app backend and GL backend to use.
Parameters
----------
app : str
The app backend to use (case insensitive). Standard backends:
* 'PyQt4': use Qt widget toolkit via PyQt4.
* 'PyQt5': use Qt widget toolkit via PyQt5.
* 'PySide': use Qt widget toolkit via PySide.
* 'PyGlet': use Pyglet backend.
* 'Glfw': use Glfw backend (successor of Glut). Widely available
on Linux.
* 'SDL2': use SDL v2 backend.
* 'osmesa': Use OSMesa backend
Additional backends:
* 'ipynb_vnc': render in the IPython notebook via a VNC approach
(experimental)
gl : str
The gl backend to use (case insensitive). Options are:
* 'gl2': use Vispy's desktop OpenGL API.
* 'pyopengl2': use PyOpenGL's desktop OpenGL API. Mostly for
testing.
* 'es2': (TO COME) use real OpenGL ES 2.0 on Windows via Angle.
Availability of ES 2.0 is larger for Windows, since it relies
on DirectX.
* 'gl+': use the full OpenGL functionality available on
your system (via PyOpenGL).
Notes
-----
If the app option is given, ``vispy.app.use_app()`` is called. If
the gl option is given, ``vispy.gloo.use_gl()`` is called.
If an app backend name is provided, and that backend could not be
loaded, an error is raised.
If no backend name is provided, Vispy will first check if the GUI
toolkit corresponding to each backend is already imported, and try
that backend first. If this is unsuccessful, it will try the
'default_backend' provided in the vispy config. If still not
successful, it will try each backend in a predetermined order.
See Also
--------
vispy.app.use_app
vispy.gloo.gl.use_gl
|
entailment
|
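A minimal usage sketch of `use` (the chosen backends must be installed; 'glfw' and 'gl2' here are just examples):

import vispy

# Select the GUI backend and the GL backend before creating any Canvas.
vispy.use(app='glfw', gl='gl2')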
def run_subprocess(command, return_code=False, **kwargs):
"""Run command using subprocess.Popen
Run command and wait for command to complete. If the return code was zero
then return, otherwise raise CalledProcessError.
By default, this will also add stdout= and stderr=subprocess.PIPE
to the call to Popen to suppress printing to the terminal.
Parameters
----------
command : list of str
Command to run as subprocess (see subprocess.Popen documentation).
return_code : bool
If True, the returncode will be returned, and no error checking
will be performed (so this function should always return without
error).
**kwargs : dict
Additional kwargs to pass to ``subprocess.Popen``.
Returns
-------
stdout : str
Stdout returned by the process.
stderr : str
Stderr returned by the process.
code : int
The command exit code. Only returned if ``return_code`` is True.
"""
# code adapted with permission from mne-python
use_kwargs = dict(stderr=subprocess.PIPE, stdout=subprocess.PIPE)
use_kwargs.update(kwargs)
p = subprocess.Popen(command, **use_kwargs)
output = p.communicate()
# communicate() may return bytes, str, or None depending on the kwargs
# passed to Popen(). Convert all to unicode str:
output = ['' if s is None else s for s in output]
output = [s.decode('utf-8') if isinstance(s, bytes) else s for s in output]
output = tuple(output)
if not return_code and p.returncode:
print(output[0])
print(output[1])
err_fun = subprocess.CalledProcessError.__init__
if 'output' in inspect.getfullargspec(err_fun).args:
raise subprocess.CalledProcessError(p.returncode, command, output)
else:
raise subprocess.CalledProcessError(p.returncode, command)
if return_code:
output = output + (p.returncode,)
return output
|
Run command using subprocess.Popen
Run command and wait for command to complete. If the return code was zero
then return, otherwise raise CalledProcessError.
By default, this will also add stdout= and stderr=subprocess.PIPE
to the call to Popen to suppress printing to the terminal.
Parameters
----------
command : list of str
Command to run as subprocess (see subprocess.Popen documentation).
return_code : bool
If True, the returncode will be returned, and no error checking
will be performed (so this function should always return without
error).
**kwargs : dict
Additional kwargs to pass to ``subprocess.Popen``.
Returns
-------
stdout : str
Stdout returned by the process.
stderr : str
Stderr returned by the process.
code : int
The command exit code. Only returned if ``return_code`` is True.
|
entailment
|
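A minimal usage sketch of `run_subprocess`, assuming it is importable from `vispy.util` (where it lives in vispy):

import sys
from vispy.util import run_subprocess

# stdout/stderr are returned as unicode strings; a non-zero exit raises.
stdout, stderr = run_subprocess([sys.executable, '--version'])

# With return_code=True no error checking is done and the code is appended.
stdout, stderr, code = run_subprocess([sys.executable, '--version'],
                                      return_code=True)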
def start(self, interval=None, iterations=None):
"""Start the timer.
A timeout event will be generated every *interval* seconds.
If *interval* is None, then self.interval will be used.
If *iterations* is specified, the timer will stop after
emitting that number of events. If unspecified, then
the previous value of self.iterations will be used. If the value is
negative, then the timer will continue running until stop() is called.
If the timer is already running when this function is called, nothing
happens (timer continues running as it did previously, without
changing the interval, number of iterations, or emitting a timer
start event).
"""
if self.running:
return # don't do anything if already running
self.iter_count = 0
if interval is not None:
self.interval = interval
if iterations is not None:
self.max_iterations = iterations
self._backend._vispy_start(self.interval)
self._running = True
self._first_emit_time = precision_time()
self._last_emit_time = precision_time()
self.events.start(type='timer_start')
|
Start the timer.
A timeout event will be generated every *interval* seconds.
If *interval* is None, then self.interval will be used.
If *iterations* is specified, the timer will stop after
emitting that number of events. If unspecified, then
the previous value of self.iterations will be used. If the value is
negative, then the timer will continue running until stop() is called.
If the timer is already running when this function is called, nothing
happens (timer continues running as it did previously, without
changing the interval, number of iterations, or emitting a timer
start event).
|
entailment
|
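A minimal usage sketch of starting a timer (assuming vispy and an app backend are available; the callback and values are illustrative):

from vispy import app

def on_timeout(event):
    print('tick', event.iteration)

timer = app.Timer(interval=0.5, connect=on_timeout, iterations=4)
timer.start()                                # uses interval/iterations set above
# timer.start(interval=0.1, iterations=-1)   # or override them; -1 runs until stop()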
def stop(self):
"""Stop the timer."""
self._backend._vispy_stop()
self._running = False
self.events.stop(type='timer_stop')
|
Stop the timer.
|
entailment
|
def _best_res_pixels(self):
"""
Returns a numpy array of all the HEALPix indexes contained in the MOC at its max order.
Returns
-------
result : `~numpy.ndarray`
The array of HEALPix at ``max_order``
"""
factor = 2 * (AbstractMOC.HPY_MAX_NORDER - self.max_order)
pix_l = []
for iv in self._interval_set._intervals:
for val in range(iv[0] >> factor, iv[1] >> factor):
pix_l.append(val)
return np.asarray(pix_l)
|
Returns a numpy array of all the HEALPix indexes contained in the MOC at its max order.
Returns
-------
result : `~numpy.ndarray`
The array of HEALPix at ``max_order``
|
entailment
|
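A standalone illustration of the shift used by `_best_res_pixels`: intervals are stored at order 29, so shifting right by 2*(29 - max_order) recovers the pixel indices at ``max_order`` (the numbers below are illustrative).

import numpy as np

HPY_MAX_NORDER = 29
max_order = 2
factor = 2 * (HPY_MAX_NORDER - max_order)

ipix = 10                                          # one order-2 HEALPix cell
interval = (ipix << factor, (ipix + 1) << factor)  # how the MOC stores it
pixels = np.arange(interval[0] >> factor, interval[1] >> factor)
print(pixels)                                      # [10]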
def contains(self, ra, dec, keep_inside=True):
"""
Returns a boolean mask array of the positions lying inside (or outside) the MOC instance.
Parameters
----------
ra : `astropy.units.Quantity`
Right ascension array
dec : `astropy.units.Quantity`
Declination array
keep_inside : bool, optional
True by default. If so the mask describes coordinates lying inside the MOC. If ``keep_inside``
is false, contains will return the mask of the coordinates lying outside the MOC.
Returns
-------
array : `~np.ndarray`
A mask boolean array
"""
depth = self.max_order
m = np.zeros(nside2npix(1 << depth), dtype=bool)
pix_id = self._best_res_pixels()
m[pix_id] = True
if not keep_inside:
m = np.logical_not(m)
hp = HEALPix(nside=(1 << depth), order='nested')
pix = hp.lonlat_to_healpix(ra, dec)
return m[pix]
|
Returns a boolean mask array of the positions lying inside (or outside) the MOC instance.
Parameters
----------
ra : `astropy.units.Quantity`
Right ascension array
dec : `astropy.units.Quantity`
Declination array
keep_inside : bool, optional
True by default. If so the mask describes coordinates lying inside the MOC. If ``keep_inside``
is false, contains will return the mask of the coordinates lying outside the MOC.
Returns
-------
array : `~np.ndarray`
A mask boolean array
|
entailment
|
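A minimal usage sketch of `contains` (assumes mocpy and astropy are installed; 'moc.fits' is an illustrative filename):

import astropy.units as u
from mocpy import MOC

moc = MOC.from_fits('moc.fits')
ra = [0.0, 10.0, 20.0] * u.deg
dec = [0.0, 5.0, -5.0] * u.deg
inside = moc.contains(ra, dec)                      # True where (ra, dec) lies in the MOC
outside = moc.contains(ra, dec, keep_inside=False)  # complementary mask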
def add_neighbours(self):
"""
Extends the MOC instance so that it includes the HEALPix cells touching its border.
The depth of the HEALPix cells added at the border is equal to the maximum depth of the MOC instance.
Returns
-------
moc : `~mocpy.moc.MOC`
self extended by one degree of neighbours.
"""
# Get the pixel array of the MOC at its max order.
ipix = self._best_res_pixels()
hp = HEALPix(nside=(1 << self.max_order), order='nested')
# Get the HEALPix array containing the neighbors of ``ipix``.
# This array "extends" ``ipix`` by one degree of neighbors.
extend_ipix = AbstractMOC._neighbour_pixels(hp, ipix)
# Compute the difference between ``extend_ipix`` and ``ipix`` to get only the neighboring pixels
# located at the border of the MOC.
neigh_ipix = np.setdiff1d(extend_ipix, ipix)
shift = 2 * (AbstractMOC.HPY_MAX_NORDER - self.max_order)
neigh_itv = np.vstack((neigh_ipix << shift, (neigh_ipix + 1) << shift)).T
# This array of HEALPix neighbors are added to the MOC to get an ``extended`` MOC at its max order.
self._interval_set = self._interval_set.union(IntervalSet(neigh_itv))
return self
|
Extends the MOC instance so that it includes the HEALPix cells touching its border.
The depth of the HEALPix cells added at the border is equal to the maximum depth of the MOC instance.
Returns
-------
moc : `~mocpy.moc.MOC`
self extended by one degree of neighbours.
|
entailment
|
def remove_neighbours(self):
"""
Removes from the MOC instance the HEALPix cells located at its border.
The depth of the HEALPix cells removed is equal to the maximum depth of the MOC instance.
Returns
-------
moc : `~mocpy.moc.MOC`
self minus its HEALPix cells located at its border.
"""
# Get the HEALPix cells of the MOC at its max depth
ipix = self._best_res_pixels()
hp = HEALPix(nside=(1 << self.max_order), order='nested')
# Extend it to include the max depth neighbor cells.
extend_ipix = AbstractMOC._neighbour_pixels(hp, ipix)
# Get only the max depth HEALPix cells lying at the border of the MOC
neigh_ipix = np.setxor1d(extend_ipix, ipix)
# Remove these pixels from ``ipix``
border_ipix = AbstractMOC._neighbour_pixels(hp, neigh_ipix)
reduced_ipix = np.setdiff1d(ipix, border_ipix)
# Build the reduced MOC, i.e. MOC without its pixels which were located at its border.
shift = 2 * (AbstractMOC.HPY_MAX_NORDER - self.max_order)
reduced_itv = np.vstack((reduced_ipix << shift, (reduced_ipix + 1) << shift)).T
self._interval_set = IntervalSet(reduced_itv)
return self
|
Removes from the MOC instance the HEALPix cells located at its border.
The depth of the HEALPix cells removed is equal to the maximum depth of the MOC instance.
Returns
-------
moc : `~mocpy.moc.MOC`
self minus its HEALPix cells located at its border.
|
entailment
|
def fill(self, ax, wcs, **kw_mpl_pathpatch):
"""
Draws the MOC on a matplotlib axis.
This performs the projection of the cells from the world coordinate system to the pixel image coordinate system.
You are able to specify various styling kwargs for `matplotlib.patches.PathPatch`
(see the `list of valid keywords <https://matplotlib.org/api/_as_gen/matplotlib.patches.PathPatch.html#matplotlib.patches.PathPatch>`__).
Parameters
----------
ax : `matplotlib.axes.Axes`
Matplotlib axis.
wcs : `astropy.wcs.WCS`
WCS defining the World system <-> Image system projection.
kw_mpl_pathpatch
Plotting arguments for `matplotlib.patches.PathPatch`.
Examples
--------
>>> from mocpy import MOC, WCS
>>> from astropy.coordinates import Angle, SkyCoord
>>> import astropy.units as u
>>> # Load a MOC, e.g. the MOC of GALEXGR6-AIS-FUV
>>> filename = './../resources/P-GALEXGR6-AIS-FUV.fits'
>>> moc = MOC.from_fits(filename)
>>> # Plot the MOC using matplotlib
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(111, figsize=(15, 15))
>>> # Define a WCS as a context
>>> with WCS(fig,
... fov=50 * u.deg,
... center=SkyCoord(0, 20, unit='deg', frame='icrs'),
... coordsys="icrs",
... rotation=Angle(0, u.degree),
... projection="AIT") as wcs:
... ax = fig.add_subplot(1, 1, 1, projection=wcs)
... # Call fill giving the matplotlib axis and the `~astropy.wcs.WCS` object.
... # We will set the matplotlib keyword linewidth to 0 so that it does not plot
... # the border of each HEALPix cell.
... # The color can also be specified along with an alpha value.
... moc.fill(ax=ax, wcs=wcs, linewidth=0, alpha=0.5, fill=True, color="green")
>>> plt.xlabel('ra')
>>> plt.ylabel('dec')
>>> plt.grid(color="black", linestyle="dotted")
"""
fill.fill(self, ax, wcs, **kw_mpl_pathpatch)
|
Draws the MOC on a matplotlib axis.
This performs the projection of the cells from the world coordinate system to the pixel image coordinate system.
You are able to specify various styling kwargs for `matplotlib.patches.PathPatch`
(see the `list of valid keywords <https://matplotlib.org/api/_as_gen/matplotlib.patches.PathPatch.html#matplotlib.patches.PathPatch>`__).
Parameters
----------
ax : `matplotlib.axes.Axes`
Matplotlib axis.
wcs : `astropy.wcs.WCS`
WCS defining the World system <-> Image system projection.
kw_mpl_pathpatch
Plotting arguments for `matplotlib.patches.PathPatch`.
Examples
--------
>>> from mocpy import MOC, WCS
>>> from astropy.coordinates import Angle, SkyCoord
>>> import astropy.units as u
>>> # Load a MOC, e.g. the MOC of GALEXGR6-AIS-FUV
>>> filename = './../resources/P-GALEXGR6-AIS-FUV.fits'
>>> moc = MOC.from_fits(filename)
>>> # Plot the MOC using matplotlib
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(111, figsize=(15, 15))
>>> # Define a WCS as a context
>>> with WCS(fig,
... fov=50 * u.deg,
... center=SkyCoord(0, 20, unit='deg', frame='icrs'),
... coordsys="icrs",
... rotation=Angle(0, u.degree),
... projection="AIT") as wcs:
... ax = fig.add_subplot(1, 1, 1, projection=wcs)
... # Call fill giving the matplotlib axis and the `~astropy.wcs.WCS` object.
... # We will set the matplotlib keyword linewidth to 0 so that it does not plot
... # the border of each HEALPix cell.
... # The color can also be specified along with an alpha value.
... moc.fill(ax=ax, wcs=wcs, linewidth=0, alpha=0.5, fill=True, color="green")
>>> plt.xlabel('ra')
>>> plt.ylabel('dec')
>>> plt.grid(color="black", linestyle="dotted")
|
entailment
|
def border(self, ax, wcs, **kw_mpl_pathpatch):
"""
Draws the MOC border(s) on a matplotlib axis.
This performs the projection of the sky coordinates defining the perimeter of the MOC to the pixel image coordinate system.
You are able to specify various styling kwargs for `matplotlib.patches.PathPatch`
(see the `list of valid keywords <https://matplotlib.org/api/_as_gen/matplotlib.patches.PathPatch.html#matplotlib.patches.PathPatch>`__).
Parameters
----------
ax : `matplotlib.axes.Axes`
Matplotlib axis.
wcs : `astropy.wcs.WCS`
WCS defining the World system <-> Image system projection.
kw_mpl_pathpatch
Plotting arguments for `matplotlib.patches.PathPatch`
Examples
--------
>>> from mocpy import MOC, WCS
>>> from astropy.coordinates import Angle, SkyCoord
>>> import astropy.units as u
>>> # Load a MOC, e.g. the MOC of GALEXGR6-AIS-FUV
>>> filename = './../resources/P-GALEXGR6-AIS-FUV.fits'
>>> moc = MOC.from_fits(filename)
>>> # Plot the MOC using matplotlib
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(111, figsize=(15, 15))
>>> # Define a WCS as a context
>>> with WCS(fig,
... fov=50 * u.deg,
... center=SkyCoord(0, 20, unit='deg', frame='icrs'),
... coordsys="icrs",
... rotation=Angle(0, u.degree),
... projection="AIT") as wcs:
... ax = fig.add_subplot(1, 1, 1, projection=wcs)
... # Call border giving the matplotlib axis and the `~astropy.wcs.WCS` object.
... moc.border(ax=ax, wcs=wcs, alpha=0.5, color="red")
>>> plt.xlabel('ra')
>>> plt.ylabel('dec')
>>> plt.grid(color="black", linestyle="dotted")
"""
border.border(self, ax, wcs, **kw_mpl_pathpatch)
|
Draws the MOC border(s) on a matplotlib axis.
This performs the projection of the sky coordinates defining the perimeter of the MOC to the pixel image coordinate system.
You are able to specify various styling kwargs for `matplotlib.patches.PathPatch`
(see the `list of valid keywords <https://matplotlib.org/api/_as_gen/matplotlib.patches.PathPatch.html#matplotlib.patches.PathPatch>`__).
Parameters
----------
ax : `matplotlib.axes.Axes`
Matplotlib axis.
wcs : `astropy.wcs.WCS`
WCS defining the World system <-> Image system projection.
kw_mpl_pathpatch
Plotting arguments for `matplotlib.patches.PathPatch`
Examples
--------
>>> from mocpy import MOC, WCS
>>> from astropy.coordinates import Angle, SkyCoord
>>> import astropy.units as u
>>> # Load a MOC, e.g. the MOC of GALEXGR6-AIS-FUV
>>> filename = './../resources/P-GALEXGR6-AIS-FUV.fits'
>>> moc = MOC.from_fits(filename)
>>> # Plot the MOC using matplotlib
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(111, figsize=(15, 15))
>>> # Define a WCS as a context
>>> with WCS(fig,
... fov=50 * u.deg,
... center=SkyCoord(0, 20, unit='deg', frame='icrs'),
... coordsys="icrs",
... rotation=Angle(0, u.degree),
... projection="AIT") as wcs:
... ax = fig.add_subplot(1, 1, 1, projection=wcs)
... # Call border giving the matplotlib axis and the `~astropy.wcs.WCS` object.
... moc.border(ax=ax, wcs=wcs, alpha=0.5, color="red")
>>> plt.xlabel('ra')
>>> plt.ylabel('dec')
>>> plt.grid(color="black", linestyle="dotted")
|
entailment
|
def from_image(cls, header, max_norder, mask=None):
"""
Creates a `~mocpy.moc.MOC` from an image stored as a FITS file.
Parameters
----------
header : `astropy.io.fits.Header`
FITS header containing all the information about where the image is located (position, size, etc.).
max_norder : int
The moc resolution.
mask : `numpy.ndarray`, optional
A boolean array of the same size as the image where pixels having the value 1 are part of
the final MOC and pixels having the value 0 are not.
Returns
-------
moc : `~mocpy.moc.MOC`
The resulting MOC.
"""
# load the image data
height = header['NAXIS2']
width = header['NAXIS1']
# use wcs from astropy to locate the image in the world coordinates
w = wcs.WCS(header)
if mask is not None:
# We have an array of pixels that are part of the survey
y, x = np.where(mask)
pix_crd = np.dstack((x, y))[0]
else:
# If we do not have a mask array we create the moc of all the image
#
step_pix = 1
"""
Coords returned by the wcs_pix2world method correspond to pixel centers. We want to retrieve the MOC pixels
crossing the borders of the image, so we have to add 1/2 to the pixel coords before computing the lonlat.
The step between two pix_crd is set to `step_pix` but can be reduced for better precision at the
borders, so that the whole image is covered (too large a step does not retrieve all
the MOC pixels crossing the borders of the image).
"""
x, y = np.mgrid[0.5:(width + 0.5 + step_pix):step_pix, 0.5:(height + 0.5 + step_pix):step_pix]
pix_crd = np.dstack((x.ravel(), y.ravel()))[0]
frame = wcs.utils.wcs_to_celestial_frame(w)
world_pix_crd = SkyCoord(w.wcs_pix2world(pix_crd, 1), unit='deg', frame=frame)
hp = HEALPix(nside=(1 << max_norder), order='nested', frame=ICRS())
ipix = hp.skycoord_to_healpix(world_pix_crd)
# remove doubles
ipix = np.unique(ipix)
shift = 2 * (AbstractMOC.HPY_MAX_NORDER - max_norder)
intervals_arr = np.vstack((ipix << shift, (ipix + 1) << shift)).T
# This MOC will be consistent when one will do operations on the moc (union, inter, ...) or
# simply write it to a fits or json file
interval_set = IntervalSet(intervals_arr)
return cls(interval_set=interval_set)
|
Creates a `~mocpy.moc.MOC` from an image stored as a FITS file.
Parameters
----------
header : `astropy.io.fits.Header`
FITS header containing all the information about where the image is located (position, size, etc.).
max_norder : int
The moc resolution.
mask : `numpy.ndarray`, optional
A boolean array of the same size as the image where pixels having the value 1 are part of
the final MOC and pixels having the value 0 are not.
Returns
-------
moc : `~mocpy.moc.MOC`
The resulting MOC.
|
entailment
|
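A minimal usage sketch for the from_image classmethod above (not part of the original source; the FITS path is hypothetical):
from astropy.io import fits
from mocpy import MOC

# Read the header of a (hypothetical) FITS image and build the MOC of its footprint
header = fits.getheader('image.fits')
moc = MOC.from_image(header=header, max_norder=10)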
def from_fits_images(cls, path_l, max_norder):
"""
Loads a MOC from a set of FITS file images.
Parameters
----------
path_l : [str]
A list of paths where the FITS images are located.
max_norder : int
The MOC resolution.
Returns
-------
moc : `~mocpy.moc.MOC`
The union of all the MOCs created from the paths found in ``path_l``.
"""
moc = MOC()
for path in path_l:
header = fits.getheader(path)
current_moc = MOC.from_image(header=header, max_norder=max_norder)
moc = moc.union(current_moc)
return moc
|
Loads a MOC from a set of FITS file images.
Parameters
----------
path_l : [str]
A list of paths where the FITS images are located.
max_norder : int
The MOC resolution.
Returns
-------
moc : `~mocpy.moc.MOC`
The union of all the MOCs created from the paths found in ``path_l``.
|
entailment
|
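A usage sketch for from_fits_images above, assuming a hypothetical list of FITS image paths; each image contributes its own coverage and the results are unioned:
from mocpy import MOC

paths = ['img1.fits', 'img2.fits']  # hypothetical files
moc = MOC.from_fits_images(paths, max_norder=10)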
def from_vizier_table(cls, table_id, nside=256):
"""
Creates a `~mocpy.moc.MOC` object from a VizieR table.
**Info**: This method is already implemented in `astroquery.cds <https://astroquery.readthedocs.io/en/latest/cds/cds.html>`__. You can ask it to return a `mocpy.moc.MOC` object
from a VizieR catalog ID.
Parameters
----------
table_id : str
The VizieR table identifier.
nside : int, optional
256 by default
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
"""
nside_possible_values = (8, 16, 32, 64, 128, 256, 512)
if nside not in nside_possible_values:
raise ValueError('Bad value for nside. Must be in {0}'.format(nside_possible_values))
result = cls.from_ivorn('ivo://CDS/' + table_id, nside)
return result
|
Creates a `~mocpy.moc.MOC` object from a VizieR table.
**Info**: This method is already implemented in `astroquery.cds <https://astroquery.readthedocs.io/en/latest/cds/cds.html>`__. You can ask it to return a `mocpy.moc.MOC` object
from a VizieR catalog ID.
Parameters
----------
table_id : str
The VizieR table identifier.
nside : int, optional
256 by default
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
|
entailment
|
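A usage sketch for from_vizier_table above; the table ID shown is only an example and nside must be one of the allowed powers of two:
from mocpy import MOC

# e.g. the Gaia DR2 main table (example ID)
moc = MOC.from_vizier_table('I/345/gaia2', nside=256)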
def from_ivorn(cls, ivorn, nside=256):
"""
Creates a `~mocpy.moc.MOC` object from a given ivorn.
Parameters
----------
ivorn : str
The IVORN (IVOA resource name) of the data collection for which the MOC is retrieved.
nside : int, optional
256 by default
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
"""
return cls.from_url('%s?%s' % (MOC.MOC_SERVER_ROOT_URL,
urlencode({
'ivorn': ivorn,
'get': 'moc',
'order': int(np.log2(nside))
})))
|
Creates a `~mocpy.moc.MOC` object from a given ivorn.
Parameters
----------
ivorn : str
The IVORN (IVOA resource name) of the data collection for which the MOC is retrieved.
nside : int, optional
256 by default
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
|
entailment
|
def from_url(cls, url):
"""
Creates a `~mocpy.moc.MOC` object from a given url.
Parameters
----------
url : str
The url of a FITS file storing a MOC.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
"""
path = download_file(url, show_progress=False, timeout=60)
return cls.from_fits(path)
|
Creates a `~mocpy.moc.MOC` object from a given url.
Parameters
----------
url : str
The url of a FITS file storing a MOC.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
|
entailment
|
def from_skycoords(cls, skycoords, max_norder):
"""
Creates a MOC from an `astropy.coordinates.SkyCoord`.
Parameters
----------
skycoords : `astropy.coordinates.SkyCoord`
The sky coordinates that will belong to the MOC.
max_norder : int
The depth of the smallest HEALPix cells contained in the MOC.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
"""
hp = HEALPix(nside=(1 << max_norder), order='nested')
ipix = hp.lonlat_to_healpix(skycoords.icrs.ra, skycoords.icrs.dec)
shift = 2 * (AbstractMOC.HPY_MAX_NORDER - max_norder)
intervals = np.vstack((ipix << shift, (ipix + 1) << shift)).T
interval_set = IntervalSet(intervals)
return cls(interval_set)
|
Creates a MOC from an `astropy.coordinates.SkyCoord`.
Parameters
----------
skycoords : `astropy.coordinates.SkyCoord`
The sky coordinates that will belong to the MOC.
max_norder : int
The depth of the smallest HEALPix cells contained in the MOC.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
|
entailment
|
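A usage sketch for from_skycoords above, assuming a small set of made-up coordinates:
import astropy.units as u
from astropy.coordinates import SkyCoord
from mocpy import MOC

coords = SkyCoord(ra=[0.0, 1.0, 2.0] * u.deg, dec=[10.0, 11.0, 12.0] * u.deg, frame='icrs')
moc = MOC.from_skycoords(coords, max_norder=7)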
def from_lonlat(cls, lon, lat, max_norder):
"""
Creates a MOC from astropy lon, lat `astropy.units.Quantity`.
Parameters
----------
lon : `astropy.units.Quantity`
The longitudes of the sky coordinates belonging to the MOC.
lat : `astropy.units.Quantity`
The latitudes of the sky coordinates belonging to the MOC.
max_norder : int
The depth of the smallest HEALPix cells contained in the MOC.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
"""
hp = HEALPix(nside=(1 << max_norder), order='nested')
ipix = hp.lonlat_to_healpix(lon, lat)
shift = 2 * (AbstractMOC.HPY_MAX_NORDER - max_norder)
intervals = np.vstack((ipix << shift, (ipix + 1) << shift)).T
interval_set = IntervalSet(intervals)
return cls(interval_set)
|
Creates a MOC from astropy lon, lat `astropy.units.Quantity`.
Parameters
----------
lon : `astropy.units.Quantity`
The longitudes of the sky coordinates belonging to the MOC.
lat : `astropy.units.Quantity`
The latitudes of the sky coordinates belonging to the MOC.
max_norder : int
The depth of the smallest HEALPix cells contained in the MOC.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
|
entailment
|
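The same MOC can be built without constructing a SkyCoord by using from_lonlat above (coordinates are made up):
import astropy.units as u
from mocpy import MOC

moc = MOC.from_lonlat(lon=[0.0, 1.0, 2.0] * u.deg, lat=[10.0, 11.0, 12.0] * u.deg, max_norder=7)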
def from_polygon_skycoord(cls, skycoord, inside=None, max_depth=10):
"""
Creates a MOC from a polygon.
The polygon is given as an `astropy.coordinates.SkyCoord` that contains the
vertices of the polygon. Concave and convex polygons are accepted but
self-intersecting ones are currently not properly handled.
Parameters
----------
skycoord : `astropy.coordinates.SkyCoord`
The sky coordinates defining the vertices of a polygon. It can describe a convex or
concave polygon but not a self-intersecting one.
inside : `astropy.coordinates.SkyCoord`, optional
A point that will be inside the MOC is needed as it is not possible to determine the inside area of a polygon
on the unit sphere (there is no infinite area that can be considered as the outside because on the sphere,
a closed polygon delimits two finite areas).
Possible improvement: take the inside area as the one covering the smallest region on the sphere.
If inside=None (default behavior), the mean of all the vertices is taken as lying inside the polygon. That approach may not work for
concave polygons.
max_depth : int, optional
The resolution of the MOC. Set to 10 by default.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
"""
return MOC.from_polygon(lon=skycoord.icrs.ra, lat=skycoord.icrs.dec,
inside=inside, max_depth=max_depth)
|
Creates a MOC from a polygon.
The polygon is given as an `astropy.coordinates.SkyCoord` that contains the
vertices of the polygon. Concave and convex polygons are accepted but
self-intersecting ones are currently not properly handled.
Parameters
----------
skycoord : `astropy.coordinates.SkyCoord`
The sky coordinates defining the vertices of a polygon. It can describe a convex or
concave polygon but not a self-intersecting one.
inside : `astropy.coordinates.SkyCoord`, optional
A point that will be inside the MOC is needed as it is not possible to determine the inside area of a polygon
on the unit sphere (there is no infinite area that can be considered as the outside because on the sphere,
a closed polygon delimits two finite areas).
Possible improvement: take the inside area as the one covering the smallest region on the sphere.
If inside=None (default behavior), the mean of all the vertices is taken as lying inside the polygon. That approach may not work for
concave polygons.
max_depth : int, optional
The resolution of the MOC. Set to 10 by default.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
|
entailment
|
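A usage sketch for from_polygon_skycoord above, with hypothetical vertices of a non self-intersecting quadrilateral:
import astropy.units as u
from astropy.coordinates import SkyCoord
from mocpy import MOC

vertices = SkyCoord(ra=[10, 20, 20, 10] * u.deg, dec=[10, 10, 20, 20] * u.deg, frame='icrs')
moc = MOC.from_polygon_skycoord(vertices, max_depth=9)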
def from_polygon(cls, lon, lat, inside=None, max_depth=10):
"""
Creates a MOC from a polygon
The polygon is given as lon and lat `astropy.units.Quantity` that define the
vertices of the polygon. Concave and convex polygons are accepted but
self-intersecting ones are currently not properly handled.
Parameters
----------
lon : `astropy.units.Quantity`
The longitudes defining the polygon. Can describe convex and
concave polygons but not self-intersecting ones.
lat : `astropy.units.Quantity`
The latitudes defining the polygon. Can describe convex and concave
polygons but not self-intersecting ones.
inside : `astropy.coordinates.SkyCoord`, optional
A point that will be inside the MOC is needed as it is not possible to determine the inside area of a polygon
on the unit sphere (there is no infinite area that can be considered as the outside because on the sphere,
a closed polygon delimits two finite areas).
Possible improvement: take the inside area as the one covering the smallest region on the sphere.
If inside=None (default behavior), the mean of all the vertices is taken as lying inside the polygon. That approach may not work for
concave polygons.
max_depth : int, optional
The resolution of the MOC. Set to 10 by default.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
"""
from .polygon import PolygonComputer
polygon_computer = PolygonComputer(lon, lat, inside, max_depth)
# Create the moc from the python dictionary
moc = MOC.from_json(polygon_computer.ipix)
# We degrade it to the user-requested order
if polygon_computer.degrade_to_max_depth:
moc = moc.degrade_to_order(max_depth)
return moc
|
Creates a MOC from a polygon
The polygon is given as lon and lat `astropy.units.Quantity` that define the
vertices of the polygon. Concave and convex polygons are accepted but
self-intersecting ones are currently not properly handled.
Parameters
----------
lon : `astropy.units.Quantity`
The longitudes defining the polygon. Can describe convex and
concave polygons but not self-intersecting ones.
lat : `astropy.units.Quantity`
The latitudes defining the polygon. Can describe convex and concave
polygons but not self-intersecting ones.
inside : `astropy.coordinates.SkyCoord`, optional
A point that will be inside the MOC is needed as it is not possible to determine the inside area of a polygon
on the unit sphere (there is no infinite area that can be considered as the outside because on the sphere,
a closed polygon delimits two finite areas).
Possible improvement: take the inside area as the one covering the smallest region on the sphere.
If inside=None (default behavior), the mean of all the vertices is taken as lying inside the polygon. That approach may not work for
concave polygons.
max_depth : int, optional
The resolution of the MOC. Set to 10 by default.
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC
|
entailment
|
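The lon/lat variant, from_polygon above, takes the same vertices as plain Quantity arrays (values are hypothetical):
import astropy.units as u
from mocpy import MOC

moc = MOC.from_polygon(lon=[10, 20, 20, 10] * u.deg, lat=[10, 10, 20, 20] * u.deg, max_depth=9)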
def sky_fraction(self):
"""
Sky fraction covered by the MOC
"""
pix_id = self._best_res_pixels()
nb_pix_filled = pix_id.size
return nb_pix_filled / float(3 << (2*(self.max_order + 1)))
|
Sky fraction covered by the MOC
|
entailment
|
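A short check of the denominator used by sky_fraction above: at order n, HEALPix has 12 * 4**n cells, which is exactly 3 << (2*(n+1)):
order = 5
assert 3 << (2 * (order + 1)) == 12 * 4**order == 12288
# e.g. a MOC made of 614 order-5 cells would cover 614 / 12288, about 5% of the sky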
def _query(self, resource_id, max_rows):
"""
Internal method to query Simbad or a VizieR table
for sources in the coverage of the MOC instance
"""
from astropy.io.votable import parse_single_table
if max_rows is not None and max_rows >= 0:
max_rows_str = str(max_rows)
else:
max_rows_str = str(9999999999)
tmp_moc = tempfile.NamedTemporaryFile(delete=False)
self.write(tmp_moc.name)
r = requests.post('http://cdsxmatch.u-strasbg.fr/QueryCat/QueryCat',
data={'mode': 'mocfile',
'catName': resource_id,
'format': 'votable',
'limit': max_rows_str},
files={'moc': open(tmp_moc.name, 'rb')},
headers={'User-Agent': 'MOCPy'},
stream=True)
tmp_vot = BytesIO()
tmp_vot.write(r.content)
table = parse_single_table(tmp_vot).to_table()
# finally delete temp files
os.unlink(tmp_moc.name)
return table
|
Internal method to query Simbad or a VizieR table
for sources in the coverage of the MOC instance
|
entailment
|
def plot(self, title='MOC', frame=None):
"""
Plot the MOC object using a mollweide projection.
**Deprecated**: New `fill` and `border` methods produce more reliable results and allow you to specify additional
matplotlib style parameters.
Parameters
----------
title : str
The title of the plot
frame : `astropy.coordinates.BaseCoordinateFrame`, optional
Describes the coordinate system of the plot (ICRS and Galactic are the only coordinate systems supported).
"""
frame = ICRS() if frame is None else frame
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.pyplot as plt
plot_order = 8
if self.max_order > plot_order:
plotted_moc = self.degrade_to_order(plot_order)
else:
plotted_moc = self
num_pixels_map = 1024
delta = 2. * np.pi / num_pixels_map
x = np.arange(-np.pi, np.pi, delta)
y = np.arange(-np.pi/2, np.pi/2, delta)
lon_rad, lat_rad = np.meshgrid(x, y)
hp = HEALPix(nside=(1 << plotted_moc.max_order), order='nested')
if frame and not isinstance(frame, BaseCoordinateFrame):
raise ValueError("Only Galactic/ICRS coordinate systems are supported."
"Please set `coord` to either 'C' or 'G'.")
pix_map = hp.lonlat_to_healpix(lon_rad * u.rad, lat_rad * u.rad)
m = np.zeros(nside2npix(1 << plotted_moc.max_order))
pix_id = plotted_moc._best_res_pixels()
# change the HEALPix cells if the frame of the MOC is not the same as the one associated with the plot method.
if isinstance(frame, Galactic):
lon, lat = hp.boundaries_lonlat(pix_id, step=2)
sky_crd = SkyCoord(lon, lat, unit='deg')
pix_id = hp.lonlat_to_healpix(sky_crd.galactic.l, sky_crd.galactic.b)
m[pix_id] = 1
z = np.flip(m[pix_map], axis=1)
plt.figure(figsize=(10, 10))
ax = plt.subplot(111, projection="mollweide")
ax.set_xticklabels(['150°', '120°', '90°', '60°', '30°', '0°', '330°', '300°', '270°', '240°', '210°', '180°'])
color_map = LinearSegmentedColormap.from_list('w2r', ['#eeeeee', '#aa0000'])
color_map.set_under('w')
color_map.set_bad('gray')
ax.pcolormesh(x, y, z, cmap=color_map, vmin=0, vmax=1)
ax.tick_params(labelsize=14, labelcolor='#000000')
plt.title(title)
plt.grid(True, linestyle='--', linewidth=1, color='#555555')
plt.show()
|
Plot the MOC object using a mollweide projection.
**Deprecated**: New `fill` and `border` methods produce more reliable results and allow you to specify additional
matplotlib style parameters.
Parameters
----------
title : str
The title of the plot
frame : `astropy.coordinates.BaseCoordinateFrame`, optional
Describes the coordinate system of the plot (ICRS and Galactic are the only coordinate systems supported).
|
entailment
|
def inverse(self):
""" The inverse of this transform.
"""
if self._inverse is None:
self._inverse = InverseTransform(self)
return self._inverse
|
The inverse of this transform.
|
entailment
|
def _tile_ticks(self, frac, tickvec):
"""Tiles tick marks along the axis."""
origins = np.tile(self.axis._vec, (len(frac), 1))
origins = self.axis.pos[0].T + (origins.T*frac).T
endpoints = tickvec + origins
return origins, endpoints
|
Tiles tick marks along the axis.
|
entailment
|
def _get_tick_frac_labels(self):
"""Get the major ticks, minor ticks, and major labels"""
minor_num = 4 # number of minor ticks per major division
if (self.axis.scale_type == 'linear'):
domain = self.axis.domain
if domain[1] < domain[0]:
flip = True
domain = domain[::-1]
else:
flip = False
offset = domain[0]
scale = domain[1] - domain[0]
transforms = self.axis.transforms
length = self.axis.pos[1] - self.axis.pos[0] # in logical coords
n_inches = np.sqrt(np.sum(length ** 2)) / transforms.dpi
# major = np.linspace(domain[0], domain[1], num=11)
# major = MaxNLocator(10).tick_values(*domain)
major = _get_ticks_talbot(domain[0], domain[1], n_inches, 2)
labels = ['%g' % x for x in major]
majstep = major[1] - major[0]
minor = []
minstep = majstep / (minor_num + 1)
minstart = 0 if self.axis._stop_at_major[0] else -1
minstop = -1 if self.axis._stop_at_major[1] else 0
for i in range(minstart, len(major) + minstop):
maj = major[0] + i * majstep
minor.extend(np.linspace(maj + minstep,
maj + majstep - minstep,
minor_num))
major_frac = (major - offset) / scale
minor_frac = (np.array(minor) - offset) / scale
major_frac = major_frac[::-1] if flip else major_frac
use_mask = (major_frac > -0.0001) & (major_frac < 1.0001)
major_frac = major_frac[use_mask]
labels = [l for li, l in enumerate(labels) if use_mask[li]]
minor_frac = minor_frac[(minor_frac > -0.0001) &
(minor_frac < 1.0001)]
elif self.axis.scale_type == 'logarithmic':
raise NotImplementedError
elif self.axis.scale_type == 'power':
raise NotImplementedError
return major_frac, minor_frac, labels
|
Get the major ticks, minor ticks, and major labels
|
entailment
|
def interleave_planes(ipixels, apixels, ipsize, apsize):
"""
Interleave (colour) planes, e.g. RGB + A = RGBA.
Return an array of pixels consisting of the `ipsize` elements of
data from each pixel in `ipixels` followed by the `apsize` elements
of data from each pixel in `apixels`. Conventionally `ipixels`
and `apixels` are byte arrays so the sizes are bytes, but it
actually works with any arrays of the same type. The returned
array is the same type as the input arrays which should be the
same type as each other.
"""
itotal = len(ipixels)
atotal = len(apixels)
newtotal = itotal + atotal
newpsize = ipsize + apsize
# Set up the output buffer
# See http://www.python.org/doc/2.4.4/lib/module-array.html#l2h-1356
out = array(ipixels.typecode)
# It's annoying that there is no cheap way to set the array size :-(
out.extend(ipixels)
out.extend(apixels)
# Interleave in the pixel data
for i in range(ipsize):
out[i:newtotal:newpsize] = ipixels[i:itotal:ipsize]
for i in range(apsize):
out[i+ipsize:newtotal:newpsize] = apixels[i:atotal:apsize]
return out
|
Interleave (colour) planes, e.g. RGB + A = RGBA.
Return an array of pixels consisting of the `ipsize` elements of
data from each pixel in `ipixels` followed by the `apsize` elements
of data from each pixel in `apixels`. Conventionally `ipixels`
and `apixels` are byte arrays so the sizes are bytes, but it
actually works with any arrays of the same type. The returned
array is the same type as the input arrays which should be the
same type as each other.
|
entailment
|
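A worked example of interleave_planes above (assuming the module-level function is in scope): two RGB pixels plus a one-byte alpha plane become two RGBA pixels.
from array import array

rgb = array('B', [1, 2, 3, 4, 5, 6])   # two RGB pixels
alpha = array('B', [7, 8])             # one alpha byte per pixel
rgba = interleave_planes(rgb, alpha, 3, 1)
assert list(rgba) == [1, 2, 3, 7, 4, 5, 6, 8]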
def write_chunks(out, chunks):
"""Create a PNG file by writing out the chunks."""
out.write(_signature)
for chunk in chunks:
write_chunk(out, *chunk)
|
Create a PNG file by writing out the chunks.
|
entailment
|
def filter_scanline(type, line, fo, prev=None):
"""Apply a scanline filter to a scanline. `type` specifies the
filter type (0 to 4); `line` specifies the current (unfiltered)
scanline as a sequence of bytes; `prev` specifies the previous
(unfiltered) scanline as a sequence of bytes. `fo` specifies the
filter offset; normally this is the size of a pixel in bytes (the number
of bytes per sample times the number of channels), but when this is
< 1 (for bit depths < 8) then the filter offset is 1.
"""
assert 0 <= type < 5
# The output array. Which, pathetically, we extend one-byte at a
# time (fortunately this is linear).
out = array('B', [type])
def sub():
ai = -fo
for x in line:
if ai >= 0:
x = (x - line[ai]) & 0xff
out.append(x)
ai += 1
def up():
for i,x in enumerate(line):
x = (x - prev[i]) & 0xff
out.append(x)
def average():
ai = -fo
for i,x in enumerate(line):
if ai >= 0:
x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff
else:
x = (x - (prev[i] >> 1)) & 0xff
out.append(x)
ai += 1
def paeth():
# http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
ai = -fo # also used for ci
for i,x in enumerate(line):
a = 0
b = prev[i]
c = 0
if ai >= 0:
a = line[ai]
c = prev[ai]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
Pr = a
elif pb <= pc:
Pr = b
else:
Pr = c
x = (x - Pr) & 0xff
out.append(x)
ai += 1
if not prev:
# We're on the first line. Some of the filters can be reduced
# to simpler cases which makes handling the line "off the top"
# of the image simpler. "up" becomes "none"; "paeth" becomes
# "left" (non-trivial, but true). "average" needs to be handled
# specially.
if type == 2: # "up"
type = 0
elif type == 3:
prev = [0]*len(line)
elif type == 4: # "paeth"
type = 1
if type == 0:
out.extend(line)
elif type == 1:
sub()
elif type == 2:
up()
elif type == 3:
average()
else: # type == 4
paeth()
return out
|
Apply a scanline filter to a scanline. `type` specifies the
filter type (0 to 4); `line` specifies the current (unfiltered)
scanline as a sequence of bytes; `prev` specifies the previous
(unfiltered) scanline as a sequence of bytes. `fo` specifies the
filter offset; normally this is the size of a pixel in bytes (the number
of bytes per sample times the number of channels), but when this is
< 1 (for bit depths < 8) then the filter offset is 1.
|
entailment
|
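A worked example of filter_scanline above (assuming the module-level function is in scope): applying the 'sub' filter (type 1) to a 4-byte scanline with a 1-byte filter offset prepends the type byte and stores byte differences.
from array import array

line = array('B', [10, 20, 30, 40])
filtered = filter_scanline(1, line, fo=1, prev=None)   # first row, so no previous scanline
assert list(filtered) == [1, 10, 10, 10, 10]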
def from_array(a, mode=None, info={}):
"""Create a PNG :class:`Image` object from a 2- or 3-dimensional
array. One application of this function is easy PIL-style saving:
``png.from_array(pixels, 'L').save('foo.png')``.
.. note :
The use of the term *3-dimensional* is for marketing purposes
only. It doesn't actually work. Please bear with us. Meanwhile
enjoy the complimentary snacks (on request) and please use a
2-dimensional array.
Unless they are specified using the *info* parameter, the PNG's
height and width are taken from the array size. For a 3 dimensional
array the first axis is the height; the second axis is the width;
and the third axis is the channel number. Thus an RGB image that is
16 pixels high and 8 wide will use an array that is 16x8x3. For 2
dimensional arrays the first axis is the height, but the second axis
is ``width*channels``, so an RGB image that is 16 pixels high and 8
wide will use a 2-dimensional array that is 16x24 (each row will be
8*3==24 sample values).
*mode* is a string that specifies the image colour format in a
PIL-style mode. It can be:
``'L'``
greyscale (1 channel)
``'LA'``
greyscale with alpha (2 channel)
``'RGB'``
colour image (3 channel)
``'RGBA'``
colour image with alpha (4 channel)
The mode string can also specify the bit depth (overriding how this
function normally derives the bit depth, see below). Appending
``';16'`` to the mode will cause the PNG to be 16 bits per channel;
any decimal from 1 to 16 can be used to specify the bit depth.
When a 2-dimensional array is used *mode* determines how many
channels the image has, and so allows the width to be derived from
the second array dimension.
The array is expected to be a ``numpy`` array, but it can be any
suitable Python sequence. For example, a list of lists can be used:
``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``. The exact
rules are: ``len(a)`` gives the first dimension, height;
``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
third dimension, unless an exception is raised in which case a
2-dimensional array is assumed. It's slightly more complicated than
that because an iterator of rows can be used, and it all still
works. Using an iterator allows data to be streamed efficiently.
The bit depth of the PNG is normally taken from the array element's
datatype (but if *mode* specifies a bitdepth then that is used
instead). The array element's datatype is determined in a way which
is supposed to work both for ``numpy`` arrays and for Python
``array.array`` objects. A 1 byte datatype will give a bit depth of
8, a 2 byte datatype will give a bit depth of 16. If the datatype
does not have an implicit size, for example it is a plain Python
list of lists, as above, then a default of 8 is used.
The *info* parameter is a dictionary that can be used to specify
metadata (in the same style as the arguments to the
:class:``png.Writer`` class). For this function the keys that are
useful are:
height
overrides the height derived from the array dimensions and allows
*a* to be an iterable.
width
overrides the width derived from the array dimensions.
bitdepth
overrides the bit depth derived from the element datatype (but
must match *mode* if that also specifies a bit depth).
Generally anything specified in the
*info* dictionary will override any implicit choices that this
function would otherwise make, but must match any explicit ones.
For example, if the *info* dictionary has a ``greyscale`` key then
this must be true when mode is ``'L'`` or ``'LA'`` and false when
mode is ``'RGB'`` or ``'RGBA'``.
"""
# We abuse the *info* parameter by modifying it. Take a copy here.
# (Also typechecks *info* to some extent).
info = dict(info)
# Syntax check mode string.
bitdepth = None
try:
# Assign the 'L' or 'RGBA' part to `gotmode`.
if mode.startswith('L'):
gotmode = 'L'
mode = mode[1:]
elif mode.startswith('RGB'):
gotmode = 'RGB'
mode = mode[3:]
else:
raise Error()
if mode.startswith('A'):
gotmode += 'A'
mode = mode[1:]
# Skip any optional ';'
while mode.startswith(';'):
mode = mode[1:]
# Parse optional bitdepth
if mode:
try:
bitdepth = int(mode)
except (TypeError, ValueError):
raise Error()
except Error:
raise Error("mode string should be 'RGB' or 'L;16' or similar.")
mode = gotmode
# Get bitdepth from *mode* if possible.
if bitdepth:
if info.get('bitdepth') and bitdepth != info['bitdepth']:
raise Error("mode bitdepth (%d) should match info bitdepth (%d)." %
(bitdepth, info['bitdepth']))
info['bitdepth'] = bitdepth
# Fill in and/or check entries in *info*.
# Dimensions.
if 'size' in info:
# Check width, height, size all match where used.
for dimension,axis in [('width', 0), ('height', 1)]:
if dimension in info:
if info[dimension] != info['size'][axis]:
raise Error(
"info[%r] should match info['size'][%r]." %
(dimension, axis))
info['width'],info['height'] = info['size']
if 'height' not in info:
try:
l = len(a)
except TypeError:
raise Error(
"len(a) does not work, supply info['height'] instead.")
info['height'] = l
# Colour format.
if 'greyscale' in info:
if bool(info['greyscale']) != ('L' in mode):
raise Error("info['greyscale'] should match mode.")
info['greyscale'] = 'L' in mode
if 'alpha' in info:
if bool(info['alpha']) != ('A' in mode):
raise Error("info['alpha'] should match mode.")
info['alpha'] = 'A' in mode
planes = len(mode)
if 'planes' in info:
if info['planes'] != planes:
raise Error("info['planes'] should match mode.")
# In order to work out whether the array is 2D or 3D we need its
# first row, which requires that we take a copy of its iterator.
# We may also need the first row to derive width and bitdepth.
a,t = itertools.tee(a)
row = t.next()
del t
try:
row[0][0]
threed = True
testelement = row[0]
except (IndexError, TypeError):
threed = False
testelement = row
if 'width' not in info:
if threed:
width = len(row)
else:
width = len(row) // planes
info['width'] = width
# Not implemented yet
assert not threed
if 'bitdepth' not in info:
try:
dtype = testelement.dtype
# goto the "else:" clause. Sorry.
except AttributeError:
try:
# Try a Python array.array.
bitdepth = 8 * testelement.itemsize
except AttributeError:
# We can't determine it from the array element's
# datatype, use a default of 8.
bitdepth = 8
else:
# If we got here without exception, we now assume that
# the array is a numpy array.
if dtype.kind == 'b':
bitdepth = 1
else:
bitdepth = 8 * dtype.itemsize
info['bitdepth'] = bitdepth
for thing in 'width height bitdepth greyscale alpha'.split():
assert thing in info
return Image(a, info)
|
Create a PNG :class:`Image` object from a 2- or 3-dimensional
array. One application of this function is easy PIL-style saving:
``png.from_array(pixels, 'L').save('foo.png')``.
.. note :
The use of the term *3-dimensional* is for marketing purposes
only. It doesn't actually work. Please bear with us. Meanwhile
enjoy the complimentary snacks (on request) and please use a
2-dimensional array.
Unless they are specified using the *info* parameter, the PNG's
height and width are taken from the array size. For a 3 dimensional
array the first axis is the height; the second axis is the width;
and the third axis is the channel number. Thus an RGB image that is
16 pixels high and 8 wide will use an array that is 16x8x3. For 2
dimensional arrays the first axis is the height, but the second axis
is ``width*channels``, so an RGB image that is 16 pixels high and 8
wide will use a 2-dimensional array that is 16x24 (each row will be
8*3==24 sample values).
*mode* is a string that specifies the image colour format in a
PIL-style mode. It can be:
``'L'``
greyscale (1 channel)
``'LA'``
greyscale with alpha (2 channel)
``'RGB'``
colour image (3 channel)
``'RGBA'``
colour image with alpha (4 channel)
The mode string can also specify the bit depth (overriding how this
function normally derives the bit depth, see below). Appending
``';16'`` to the mode will cause the PNG to be 16 bits per channel;
any decimal from 1 to 16 can be used to specify the bit depth.
When a 2-dimensional array is used *mode* determines how many
channels the image has, and so allows the width to be derived from
the second array dimension.
The array is expected to be a ``numpy`` array, but it can be any
suitable Python sequence. For example, a list of lists can be used:
``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``. The exact
rules are: ``len(a)`` gives the first dimension, height;
``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
third dimension, unless an exception is raised in which case a
2-dimensional array is assumed. It's slightly more complicated than
that because an iterator of rows can be used, and it all still
works. Using an iterator allows data to be streamed efficiently.
The bit depth of the PNG is normally taken from the array element's
datatype (but if *mode* specifies a bitdepth then that is used
instead). The array element's datatype is determined in a way which
is supposed to work both for ``numpy`` arrays and for Python
``array.array`` objects. A 1 byte datatype will give a bit depth of
8, a 2 byte datatype will give a bit depth of 16. If the datatype
does not have an implicit size, for example it is a plain Python
list of lists, as above, then a default of 8 is used.
The *info* parameter is a dictionary that can be used to specify
metadata (in the same style as the arguments to the
:class:``png.Writer`` class). For this function the keys that are
useful are:
height
overrides the height derived from the array dimensions and allows
*a* to be an iterable.
width
overrides the width derived from the array dimensions.
bitdepth
overrides the bit depth derived from the element datatype (but
must match *mode* if that also specifies a bit depth).
Generally anything specified in the
*info* dictionary will override any implicit choices that this
function would otherwise make, but must match any explicit ones.
For example, if the *info* dictionary has a ``greyscale`` key then
this must be true when mode is ``'L'`` or ``'LA'`` and false when
mode is ``'RGB'`` or ``'RGBA'``.
|
entailment
|
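A usage sketch for from_array above: a 2x4 greyscale image given as a list of rows, saved PIL-style (the file name is hypothetical, and Image.save is assumed to be the companion helper of this module):
import png

img = png.from_array([[0, 85, 170, 255],
                      [255, 170, 85, 0]], mode='L')
img.save('gradient.png')   # write the PNG to the given path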
def make_palette(self):
"""Create the byte sequences for a ``PLTE`` and if necessary a
``tRNS`` chunk. Returned as a pair (*p*, *t*). *t* will be
``None`` if no ``tRNS`` chunk is necessary.
"""
p = array('B')
t = array('B')
for x in self.palette:
p.extend(x[0:3])
if len(x) > 3:
t.append(x[3])
p = tostring(p)
t = tostring(t)
if t:
return p,t
return p,None
|
Create the byte sequences for a ``PLTE`` and if necessary a
``tRNS`` chunk. Returned as a pair (*p*, *t*). *t* will be
``None`` if no ``tRNS`` chunk is necessary.
|
entailment
|
def write(self, outfile, rows):
"""Write a PNG image to the output file. `rows` should be
an iterable that yields each row in boxed row flat pixel
format. The rows should be the rows of the original image,
so there should be ``self.height`` rows of ``self.width *
self.planes`` values. If `interlace` is specified (when
creating the instance), then an interlaced PNG file will
be written. Supply the rows in the normal image order;
the interlacing is carried out internally.
.. note ::
Interlacing will require the entire image to be in working
memory.
"""
if self.interlace:
fmt = 'BH'[self.bitdepth > 8]
a = array(fmt, itertools.chain(*rows))
return self.write_array(outfile, a)
nrows = self.write_passes(outfile, rows)
if nrows != self.height:
raise ValueError(
"rows supplied (%d) does not match height (%d)" %
(nrows, self.height))
|
Write a PNG image to the output file. `rows` should be
an iterable that yields each row in boxed row flat pixel
format. The rows should be the rows of the original image,
so there should be ``self.height`` rows of ``self.width *
self.planes`` values. If `interlace` is specified (when
creating the instance), then an interlaced PNG file will
be written. Supply the rows in the normal image order;
the interlacing is carried out internally.
.. note ::
Interlacing will require the entire image to be in working
memory.
|
entailment
|
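A usage sketch for the write method above, assuming the Writer class from the same module: an 8-bit greyscale image, two rows of four pixels, written to a hypothetical file.
import png

writer = png.Writer(width=4, height=2, greyscale=True, bitdepth=8)
with open('tiny.png', 'wb') as out:
    writer.write(out, [[0, 85, 170, 255],
                       [255, 170, 85, 0]])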
def write_passes(self, outfile, rows, packed=False):
"""
Write a PNG image to the output file.
Most users are expected to find the :meth:`write` or
:meth:`write_array` method more convenient.
The rows should be given to this method in the order that
they appear in the output file. For straightlaced images,
this is the usual top to bottom ordering, but for interlaced
images the rows should have already been interlaced before
passing them to this function.
`rows` should be an iterable that yields each row. When
`packed` is ``False`` the rows should be in boxed row flat pixel
format; when `packed` is ``True`` each row should be a packed
sequence of bytes.
"""
# http://www.w3.org/TR/PNG/#5PNG-file-signature
outfile.write(_signature)
# http://www.w3.org/TR/PNG/#11IHDR
write_chunk(outfile, 'IHDR',
struct.pack("!2I5B", self.width, self.height,
self.bitdepth, self.color_type,
0, 0, self.interlace))
# See :chunk:order
# http://www.w3.org/TR/PNG/#11gAMA
if self.gamma is not None:
write_chunk(outfile, 'gAMA',
struct.pack("!L", int(round(self.gamma*1e5))))
# See :chunk:order
# http://www.w3.org/TR/PNG/#11sBIT
if self.rescale:
write_chunk(outfile, 'sBIT',
struct.pack('%dB' % self.planes,
*[self.rescale[0]]*self.planes))
# :chunk:order: Without a palette (PLTE chunk), ordering is
# relatively relaxed. With one, gAMA chunk must precede PLTE
# chunk which must precede tRNS and bKGD.
# See http://www.w3.org/TR/PNG/#5ChunkOrdering
if self.palette:
p,t = self.make_palette()
write_chunk(outfile, 'PLTE', p)
if t:
# tRNS chunk is optional. Only needed if palette entries
# have alpha.
write_chunk(outfile, 'tRNS', t)
# http://www.w3.org/TR/PNG/#11tRNS
if self.transparent is not None:
if self.greyscale:
write_chunk(outfile, 'tRNS',
struct.pack("!1H", *self.transparent))
else:
write_chunk(outfile, 'tRNS',
struct.pack("!3H", *self.transparent))
# http://www.w3.org/TR/PNG/#11bKGD
if self.background is not None:
if self.greyscale:
write_chunk(outfile, 'bKGD',
struct.pack("!1H", *self.background))
else:
write_chunk(outfile, 'bKGD',
struct.pack("!3H", *self.background))
# http://www.w3.org/TR/PNG/#11IDAT
if self.compression is not None:
compressor = zlib.compressobj(self.compression)
else:
compressor = zlib.compressobj()
# Choose an extend function based on the bitdepth. The extend
# function packs/decomposes the pixel values into bytes and
# stuffs them onto the data array.
data = array('B')
if self.bitdepth == 8 or packed:
extend = data.extend
elif self.bitdepth == 16:
# Decompose into bytes
def extend(sl):
fmt = '!%dH' % len(sl)
data.extend(array('B', struct.pack(fmt, *sl)))
else:
# Pack into bytes
assert self.bitdepth < 8
# samples per byte
spb = int(8/self.bitdepth)
def extend(sl):
a = array('B', sl)
# Adding padding bytes so we can group into a whole
# number of spb-tuples.
l = float(len(a))
extra = math.ceil(l / float(spb))*spb - l
a.extend([0]*int(extra))
# Pack into bytes
l = group(a, spb)
l = map(lambda e: reduce(lambda x,y:
(x << self.bitdepth) + y, e), l)
data.extend(l)
if self.rescale:
oldextend = extend
factor = \
float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1)
def extend(sl):
oldextend(map(lambda x: int(round(factor*x)), sl))
# Build the first row, testing mostly to see if we need to
# change the extend function to cope with NumPy integer types
# (they cause our ordinary definition of extend to fail, so we
# wrap it). See
# http://code.google.com/p/pypng/issues/detail?id=44
enumrows = enumerate(rows)
del rows
# First row's filter type.
data.append(0)
# :todo: Certain exceptions in the call to ``.next()`` or the
# following try would indicate no row data supplied.
# Should catch.
i,row = enumrows.next()
try:
# If this fails...
extend(row)
except:
# ... try a version that converts the values to int first.
# Not only does this work for the (slightly broken) NumPy
# types, there are probably lots of other, unknown, "nearly"
# int types it works for.
def wrapmapint(f):
return lambda sl: f(map(int, sl))
extend = wrapmapint(extend)
del wrapmapint
extend(row)
for i,row in enumrows:
# Add "None" filter type. Currently, it's essential that
# this filter type be used for every scanline as we do not
# mark the first row of a reduced pass image; that means we
# could accidentally compute the wrong filtered scanline if
# we used "up", "average", or "paeth" on such a line.
data.append(0)
extend(row)
if len(data) > self.chunk_limit:
compressed = compressor.compress(tostring(data))
if len(compressed):
write_chunk(outfile, 'IDAT', compressed)
# Because of our very witty definition of ``extend``,
# above, we must re-use the same ``data`` object. Hence
# we use ``del`` to empty this one, rather than create a
# fresh one (which would be my natural FP instinct).
del data[:]
if len(data):
compressed = compressor.compress(tostring(data))
else:
compressed = strtobytes('')
flushed = compressor.flush()
if len(compressed) or len(flushed):
write_chunk(outfile, 'IDAT', compressed + flushed)
# http://www.w3.org/TR/PNG/#11IEND
write_chunk(outfile, 'IEND')
return i+1
|
Write a PNG image to the output file.
Most users are expected to find the :meth:`write` or
:meth:`write_array` method more convenient.
The rows should be given to this method in the order that
they appear in the output file. For straightlaced images,
this is the usual top to bottom ordering, but for interlaced
images the rows should have already been interlaced before
passing them to this function.
`rows` should be an iterable that yields each row. When
`packed` is ``False`` the rows should be in boxed row flat pixel
format; when `packed` is ``True`` each row should be a packed
sequence of bytes.
|
entailment
|
def write_packed(self, outfile, rows):
"""
Write PNG file to `outfile`. The pixel data comes from `rows`
which should be in boxed row packed format. Each row should be
a sequence of packed bytes.
Technically, this method does work for interlaced images but it
is best avoided. For interlaced images, the rows should be
presented in the order that they appear in the file.
This method should not be used when the source image bit depth
is not one naturally supported by PNG; the bit depth should be
1, 2, 4, 8, or 16.
"""
if self.rescale:
raise Error("write_packed method not suitable for bit depth %d" %
self.rescale[0])
return self.write_passes(outfile, rows, packed=True)
|
Write PNG file to `outfile`. The pixel data comes from `rows`
which should be in boxed row packed format. Each row should be
a sequence of packed bytes.
Technically, this method does work for interlaced images but it
is best avoided. For interlaced images, the rows should be
presented in the order that they appear in the file.
This method should not be used when the source image bit depth
is not one naturally supported by PNG; the bit depth should be
1, 2, 4, 8, or 16.
|
entailment
|
def convert_pnm(self, infile, outfile):
"""
Convert a PNM file containing raw pixel data into a PNG file
with the parameters set in the writer object. Works for
(binary) PGM, PPM, and PAM formats.
"""
if self.interlace:
pixels = array('B')
pixels.fromfile(infile,
(self.bitdepth/8) * self.color_planes *
self.width * self.height)
self.write_passes(outfile, self.array_scanlines_interlace(pixels))
else:
self.write_passes(outfile, self.file_scanlines(infile))
|
Convert a PNM file containing raw pixel data into a PNG file
with the parameters set in the writer object. Works for
(binary) PGM, PPM, and PAM formats.
|
entailment
|
def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile):
"""
Convert a PPM and PGM file containing raw pixel data into a
PNG outfile with the parameters set in the writer object.
"""
pixels = array('B')
pixels.fromfile(ppmfile,
(self.bitdepth/8) * self.color_planes *
self.width * self.height)
apixels = array('B')
apixels.fromfile(pgmfile,
(self.bitdepth/8) *
self.width * self.height)
pixels = interleave_planes(pixels, apixels,
(self.bitdepth/8) * self.color_planes,
(self.bitdepth/8))
if self.interlace:
self.write_passes(outfile, self.array_scanlines_interlace(pixels))
else:
self.write_passes(outfile, self.array_scanlines(pixels))
|
Convert a PPM and PGM file containing raw pixel data into a
PNG outfile with the parameters set in the writer object.
|
entailment
|
def file_scanlines(self, infile):
"""
Generates boxed rows in flat pixel format, from the input file
`infile`. It assumes that the input file is in a "Netpbm-like"
binary format, and is positioned at the beginning of the first
pixel. The number of pixels to read is taken from the image
dimensions (`width`, `height`, `planes`) and the number of bytes
per value is implied by the image `bitdepth`.
"""
# Values per row
vpr = self.width * self.planes
row_bytes = vpr
if self.bitdepth > 8:
assert self.bitdepth == 16
row_bytes *= 2
fmt = '>%dH' % vpr
def line():
return array('H', struct.unpack(fmt, infile.read(row_bytes)))
else:
def line():
scanline = array('B', infile.read(row_bytes))
return scanline
for y in range(self.height):
yield line()
|
Generates boxed rows in flat pixel format, from the input file
`infile`. It assumes that the input file is in a "Netpbm-like"
binary format, and is positioned at the beginning of the first
pixel. The number of pixels to read is taken from the image
dimensions (`width`, `height`, `planes`) and the number of bytes
per value is implied by the image `bitdepth`.
|
entailment
|
def array_scanlines_interlace(self, pixels):
"""
Generator for interlaced scanlines from an array. `pixels` is
the full source image in flat row flat pixel format. The
generator yields each scanline of the reduced passes in turn, in
boxed row flat pixel format.
"""
# http://www.w3.org/TR/PNG/#8InterlaceMethods
# Array type.
fmt = 'BH'[self.bitdepth > 8]
# Value per row
vpr = self.width * self.planes
for xstart, ystart, xstep, ystep in _adam7:
if xstart >= self.width:
continue
# Pixels per row (of reduced image)
ppr = int(math.ceil((self.width-xstart)/float(xstep)))
# number of values in reduced image row.
row_len = ppr*self.planes
for y in range(ystart, self.height, ystep):
if xstep == 1:
offset = y * vpr
yield pixels[offset:offset+vpr]
else:
row = array(fmt)
# There's no easier way to set the length of an array
row.extend(pixels[0:row_len])
offset = y * vpr + xstart * self.planes
end_offset = (y+1) * vpr
skip = self.planes * xstep
for i in range(self.planes):
row[i::self.planes] = \
pixels[offset+i:end_offset:skip]
yield row
|
Generator for interlaced scanlines from an array. `pixels` is
the full source image in flat row flat pixel format. The
generator yields each scanline of the reduced passes in turn, in
boxed row flat pixel format.
|
entailment
|
def undo_filter(self, filter_type, scanline, previous):
"""Undo the filter for a scanline. `scanline` is a sequence of
bytes that does not include the initial filter type byte.
`previous` is decoded previous scanline (for straightlaced
images this is the previous pixel row, but for interlaced
images, it is the previous scanline in the reduced image, which
in general is not the previous pixel row in the final image).
When there is no previous scanline (the first row of a
straightlaced image, or the first row in one of the passes in an
interlaced image), then this argument should be ``None``.
The scanline will have the effects of filtering removed, and the
result will be returned as a fresh sequence of bytes.
"""
# :todo: Would it be better to update scanline in place?
# Yes, with the Cython extension making the undo_filter fast,
# updating scanline inplace makes the code 3 times faster
# (reading 50 images of 800x800 went from 40s to 16s)
result = scanline
if filter_type == 0:
return result
if filter_type not in (1,2,3,4):
raise FormatError('Invalid PNG Filter Type.'
' See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
# Filter unit. The stride from one pixel to the corresponding
# byte from the previous pixel. Normally this is the pixel
# size in bytes, but when this is smaller than 1, the previous
# byte is used instead.
fu = max(1, self.psize)
# For the first line of a pass, synthesize a dummy previous
# line. An alternative approach would be to observe that on the
# first line 'up' is the same as 'null', 'paeth' is the same
# as 'sub', with only 'average' requiring any special case.
if not previous:
previous = array('B', [0]*len(scanline))
def sub():
"""Undo sub filter."""
ai = 0
# Loop starts at index fu. Observe that the initial part
# of the result is already filled in correctly with
# scanline.
for i in range(fu, len(result)):
x = scanline[i]
a = result[ai]
result[i] = (x + a) & 0xff
ai += 1
def up():
"""Undo up filter."""
for i in range(len(result)):
x = scanline[i]
b = previous[i]
result[i] = (x + b) & 0xff
def average():
"""Undo average filter."""
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = 0
else:
a = result[ai]
b = previous[i]
result[i] = (x + ((a + b) >> 1)) & 0xff
ai += 1
def paeth():
"""Undo Paeth filter."""
# Also used for ci.
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = c = 0
else:
a = result[ai]
c = previous[ai]
b = previous[i]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
result[i] = (x + pr) & 0xff
ai += 1
# Call appropriate filter algorithm. Note that 0 has already
# been dealt with.
(None,
pngfilters.undo_filter_sub,
pngfilters.undo_filter_up,
pngfilters.undo_filter_average,
pngfilters.undo_filter_paeth)[filter_type](fu, scanline, previous, result)
return result
|
Undo the filter for a scanline. `scanline` is a sequence of
bytes that does not include the initial filter type byte.
`previous` is decoded previous scanline (for straightlaced
images this is the previous pixel row, but for interlaced
images, it is the previous scanline in the reduced image, which
in general is not the previous pixel row in the final image).
When there is no previous scanline (the first row of a
straightlaced image, or the first row in one of the passes in an
interlaced image), then this argument should be ``None``.
The scanline will have the effects of filtering removed, and the
result will be returned as a fresh sequence of bytes.
|
entailment
|
def deinterlace(self, raw):
"""
Read raw pixel data, undo filters, deinterlace, and flatten.
Return in flat row flat pixel format.
"""
# Values per row (of the target image)
vpr = self.width * self.planes
# Make a result array, and make it big enough. Interleaving
# writes to the output array randomly (well, not quite), so the
# entire output array must be in memory.
fmt = 'BH'[self.bitdepth > 8]
a = array(fmt, [0]*vpr*self.height)
source_offset = 0
for xstart, ystart, xstep, ystep in _adam7:
if xstart >= self.width:
continue
# The previous (reconstructed) scanline. None at the
# beginning of a pass to indicate that there is no previous
# line.
recon = None
# Pixels per row (reduced pass image)
ppr = int(math.ceil((self.width-xstart)/float(xstep)))
# Row size in bytes for this pass.
row_size = int(math.ceil(self.psize * ppr))
for y in range(ystart, self.height, ystep):
filter_type = raw[source_offset]
source_offset += 1
scanline = raw[source_offset:source_offset+row_size]
source_offset += row_size
recon = self.undo_filter(filter_type, scanline, recon)
# Convert so that there is one element per pixel value
flat = self.serialtoflat(recon, ppr)
if xstep == 1:
assert xstart == 0
offset = y * vpr
a[offset:offset+vpr] = flat
else:
offset = y * vpr + xstart * self.planes
end_offset = (y+1) * vpr
skip = self.planes * xstep
for i in range(self.planes):
a[offset+i:end_offset:skip] = \
flat[i::self.planes]
return a
|
Read raw pixel data, undo filters, deinterlace, and flatten.
Return in flat row flat pixel format.
|
entailment
|
def iterboxed(self, rows):
"""Iterator that yields each scanline in boxed row flat pixel
format. `rows` should be an iterator that yields the bytes of
each row in turn.
"""
def asvalues(raw):
"""Convert a row of raw bytes into a flat row. Result will
be a freshly allocated object, not shared with
argument.
"""
if self.bitdepth == 8:
return array('B', raw)
if self.bitdepth == 16:
raw = tostring(raw)
return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
assert self.bitdepth < 8
width = self.width
# Samples per byte
spb = 8//self.bitdepth
out = array('B')
mask = 2**self.bitdepth - 1
shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
for o in raw:
out.extend(map(lambda i: mask&(o>>i), shifts))
return out[:width]
return imap(asvalues, rows)
|
Iterator that yields each scanline in boxed row flat pixel
format. `rows` should be an iterator that yields the bytes of
each row in turn.
|
entailment
|
def serialtoflat(self, bytes, width=None):
"""Convert serial format (byte stream) pixel data to flat row
flat pixel.
"""
if self.bitdepth == 8:
return bytes
if self.bitdepth == 16:
bytes = tostring(bytes)
return array('H',
struct.unpack('!%dH' % (len(bytes)//2), bytes))
assert self.bitdepth < 8
if width is None:
width = self.width
# Samples per byte
spb = 8//self.bitdepth
out = array('B')
mask = 2**self.bitdepth - 1
shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
l = width
for o in bytes:
out.extend([(mask&(o>>s)) for s in shifts][:l])
l -= spb
if l <= 0:
l = width
return out
|
Convert serial format (byte stream) pixel data to flat row
flat pixel.
|
entailment
|
def iterstraight(self, raw):
"""Iterator that undoes the effect of filtering, and yields
each row in serialised format (as a sequence of bytes).
Assumes input is straightlaced. `raw` should be an iterable
that yields the raw bytes in chunks of arbitrary size.
"""
# length of row, in bytes
rb = self.row_bytes
a = array('B')
# The previous (reconstructed) scanline. None indicates first
# line of image.
recon = None
for some in raw:
a.extend(some)
while len(a) >= rb + 1:
filter_type = a[0]
scanline = a[1:rb+1]
del a[:rb+1]
recon = self.undo_filter(filter_type, scanline, recon)
yield recon
if len(a) != 0:
# :file:format We get here with a file format error:
# when the available bytes (after decompressing) do not
# pack into exact rows.
raise FormatError(
'Wrong size for decompressed IDAT chunk.')
assert len(a) == 0
|
Iterator that undoes the effect of filtering, and yields
each row in serialised format (as a sequence of bytes).
Assumes input is straightlaced. `raw` should be an iterable
that yields the raw bytes in chunks of arbitrary size.
|
entailment
|
def chunklentype(self):
"""Reads just enough of the input to determine the next
chunk's length and type, returned as a (*length*, *type*) pair
where *type* is a string. If there are no more chunks, ``None``
is returned.
"""
x = self.file.read(8)
if not x:
return None
if len(x) != 8:
raise FormatError(
'End of file whilst reading chunk length and type.')
length,type = struct.unpack('!I4s', x)
type = bytestostr(type)
if length > 2**31-1:
raise FormatError('Chunk %s is too large: %d.' % (type,length))
return length,type
|
Reads just enough of the input to determine the next
chunk's length and type, returned as a (*length*, *type*) pair
where *type* is a string. If there are no more chunks, ``None``
is returned.
|
entailment
|
def read(self, lenient=False):
"""
Read the PNG file and decode it. Returns (`width`, `height`,
`pixels`, `metadata`).
May use excessive memory.
`pixels` are returned in boxed row flat pixel format.
If the optional `lenient` argument evaluates to True,
checksum failures will raise warnings rather than exceptions.
"""
def iteridat():
"""Iterator that yields all the ``IDAT`` chunks as strings."""
while True:
try:
type, data = self.chunk(lenient=lenient)
except ValueError as e:
raise ChunkError(e.args[0])
if type == 'IEND':
# http://www.w3.org/TR/PNG/#11IEND
break
if type != 'IDAT':
continue
# type == 'IDAT'
# http://www.w3.org/TR/PNG/#11IDAT
if self.colormap and not self.plte:
warnings.warn("PLTE chunk is required before IDAT chunk")
yield data
def iterdecomp(idat):
"""Iterator that yields decompressed strings. `idat` should
be an iterator that yields the ``IDAT`` chunk data.
"""
# Currently, with no max_length parameter to decompress,
# this routine will do one yield per IDAT chunk: Not very
# incremental.
d = zlib.decompressobj()
# Each IDAT chunk is passed to the decompressor, then any
# remaining state is decompressed out.
for data in idat:
# :todo: add a max_length argument here to limit output
# size.
yield array('B', d.decompress(data))
yield array('B', d.flush())
self.preamble(lenient=lenient)
raw = iterdecomp(iteridat())
if self.interlace:
raw = array('B', itertools.chain(*raw))
arraycode = 'BH'[self.bitdepth>8]
# Like :meth:`group` but producing an array.array object for
# each row.
pixels = imap(lambda *row: array(arraycode, row),
*[iter(self.deinterlace(raw))]*self.width*self.planes)
else:
pixels = self.iterboxed(self.iterstraight(raw))
meta = dict()
for attr in 'greyscale alpha planes bitdepth interlace'.split():
meta[attr] = getattr(self, attr)
meta['size'] = (self.width, self.height)
for attr in 'gamma transparent background'.split():
a = getattr(self, attr, None)
if a is not None:
meta[attr] = a
if self.plte:
meta['palette'] = self.palette()
return self.width, self.height, pixels, meta
|
Read the PNG file and decode it. Returns (`width`, `height`,
`pixels`, `metadata`).
May use excessive memory.
`pixels` are returned in boxed row flat pixel format.
If the optional `lenient` argument evaluates to True,
checksum failures will raise warnings rather than exceptions.
|
entailment
|
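In use the method looks like the sketch below; it assumes the class is available as png.Reader and that 'example.png' exists, both of which are assumptions of this sketch rather than anything guaranteed by the code above:

import png                                      # assumed module name

r = png.Reader(filename='example.png')          # hypothetical input file
width, height, pixels, meta = r.read()
print(width, height, meta['bitdepth'], meta['planes'])
for row in pixels:                              # boxed row flat pixel format
    first_pixel = tuple(row[:meta['planes']])
    break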
def palette(self, alpha='natural'):
"""Returns a palette that is a sequence of 3-tuples or 4-tuples,
synthesizing it from the ``PLTE`` and ``tRNS`` chunks. These
chunks should have already been processed (for example, by
calling the :meth:`preamble` method). All the tuples are the
same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
there is a ``tRNS`` chunk. Assumes that the image is colour type
3 and therefore a ``PLTE`` chunk is required.
If the `alpha` argument is ``'force'`` then an alpha channel is
always added, forcing the result to be a sequence of 4-tuples.
"""
if not self.plte:
raise FormatError(
"Required PLTE chunk is missing in colour type 3 image.")
plte = group(array('B', self.plte), 3)
if self.trns or alpha == 'force':
trns = array('B', self.trns or '')
trns.extend([255]*(len(plte)-len(trns)))
plte = map(operator.add, plte, group(trns, 1))
return plte
|
Returns a palette that is a sequence of 3-tuples or 4-tuples,
synthesizing it from the ``PLTE`` and ``tRNS`` chunks. These
chunks should have already been processed (for example, by
calling the :meth:`preamble` method). All the tuples are the
same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
there is a ``tRNS`` chunk. Assumes that the image is colour type
3 and therefore a ``PLTE`` chunk is required.
If the `alpha` argument is ``'force'`` then an alpha channel is
always added, forcing the result to be a sequence of 4-tuples.
|
entailment
|
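The same synthesis can be reproduced on raw chunk bytes without a reader object: group the PLTE bytes into RGB triples, then pad the tRNS values with fully opaque alpha. The byte values here are invented for the example:

def build_palette(plte_bytes, trns_bytes=b'', force_alpha=False):
    # Group PLTE into (r, g, b) triples.
    plte = [tuple(plte_bytes[i:i + 3]) for i in range(0, len(plte_bytes), 3)]
    if trns_bytes or force_alpha:
        # Entries without an explicit tRNS value default to fully opaque.
        alpha = list(trns_bytes) + [255] * (len(plte) - len(trns_bytes))
        plte = [rgb + (a,) for rgb, a in zip(plte, alpha)]
    return plte

print(build_palette(b'\xff\x00\x00\x00\xff\x00', b'\x80'))
# [(255, 0, 0, 128), (0, 255, 0, 255)]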
def asDirect(self):
"""Returns the image data as a direct representation of an
``x * y * planes`` array. This method is intended to remove the
need for callers to deal with palettes and transparency
themselves. Images with a palette (colour type 3)
are converted to RGB or RGBA; images with transparency (a
``tRNS`` chunk) are converted to LA or RGBA as appropriate.
When returned in this format the pixel values represent the
colour value directly without needing to refer to palettes or
transparency information.
Like the :meth:`read` method this method returns a 4-tuple:
(*width*, *height*, *pixels*, *meta*)
This method normally returns pixel values with the bit depth
they have in the source image, but when the source PNG has an
``sBIT`` chunk it is inspected and can reduce the bit depth of
the result pixels; pixel values will be reduced according to
the bit depth specified in the ``sBIT`` chunk (PNG nerds should
note a single result bit depth is used for all channels; the
maximum of the ones specified in the ``sBIT`` chunk. An RGB565
image will be rescaled to 6-bit RGB666).
The *meta* dictionary that is returned reflects the `direct`
format and not the original source image. For example, an RGB
source image with a ``tRNS`` chunk to represent a transparent
colour, will have ``planes=3`` and ``alpha=False`` for the
source image, but the *meta* dictionary returned by this method
will have ``planes=4`` and ``alpha=True`` because an alpha
channel is synthesized and added.
*pixels* is the pixel data in boxed row flat pixel format (just
like the :meth:`read` method).
All the other aspects of the image data are not changed.
"""
self.preamble()
# Simple case, no conversion necessary.
if not self.colormap and not self.trns and not self.sbit:
return self.read()
x,y,pixels,meta = self.read()
if self.colormap:
meta['colormap'] = False
meta['alpha'] = bool(self.trns)
meta['bitdepth'] = 8
meta['planes'] = 3 + bool(self.trns)
plte = self.palette()
def iterpal(pixels):
for row in pixels:
row = map(plte.__getitem__, row)
yield array('B', itertools.chain(*row))
pixels = iterpal(pixels)
elif self.trns:
# It would be nice if there was some reasonable way
# of doing this without generating a whole load of
# intermediate tuples. But tuples does seem like the
# easiest way, with no other way clearly much simpler or
# much faster. (Actually, the L to LA conversion could
# perhaps go faster (all those 1-tuples!), but I still
# wonder whether the code proliferation is worth it)
it = self.transparent
maxval = 2**meta['bitdepth']-1
planes = meta['planes']
meta['alpha'] = True
meta['planes'] += 1
typecode = 'BH'[meta['bitdepth']>8]
def itertrns(pixels):
for row in pixels:
# For each row we group it into pixels, then form a
# characterisation vector that says whether each
# pixel is opaque or not. Then we convert
# True/False to 0/maxval (by multiplication),
# and add it as the extra channel.
row = group(row, planes)
opa = map(it.__ne__, row)
opa = map(maxval.__mul__, opa)
opa = zip(opa) # convert to 1-tuples
yield array(typecode,
itertools.chain(*map(operator.add, row, opa)))
pixels = itertrns(pixels)
targetbitdepth = None
if self.sbit:
sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
targetbitdepth = max(sbit)
if targetbitdepth > meta['bitdepth']:
raise Error('sBIT chunk %r exceeds bitdepth %d' %
(sbit,self.bitdepth))
if min(sbit) <= 0:
raise Error('sBIT chunk %r has a 0-entry' % sbit)
if targetbitdepth == meta['bitdepth']:
targetbitdepth = None
if targetbitdepth:
shift = meta['bitdepth'] - targetbitdepth
meta['bitdepth'] = targetbitdepth
def itershift(pixels):
for row in pixels:
yield map(shift.__rrshift__, row)
pixels = itershift(pixels)
return x,y,pixels,meta
|
Returns the image data as a direct representation of an
``x * y * planes`` array. This method is intended to remove the
need for callers to deal with palettes and transparency
themselves. Images with a palette (colour type 3)
are converted to RGB or RGBA; images with transparency (a
``tRNS`` chunk) are converted to LA or RGBA as appropriate.
When returned in this format the pixel values represent the
colour value directly without needing to refer to palettes or
transparency information.
Like the :meth:`read` method this method returns a 4-tuple:
(*width*, *height*, *pixels*, *meta*)
This method normally returns pixel values with the bit depth
they have in the source image, but when the source PNG has an
``sBIT`` chunk it is inspected and can reduce the bit depth of
the result pixels; pixel values will be reduced according to
the bit depth specified in the ``sBIT`` chunk (PNG nerds should
note a single result bit depth is used for all channels; the
maximum of the ones specified in the ``sBIT`` chunk. An RGB565
image will be rescaled to 6-bit RGB666).
The *meta* dictionary that is returned reflects the `direct`
format and not the original source image. For example, an RGB
source image with a ``tRNS`` chunk to represent a transparent
colour, will have ``planes=3`` and ``alpha=False`` for the
source image, but the *meta* dictionary returned by this method
will have ``planes=4`` and ``alpha=True`` because an alpha
channel is synthesized and added.
*pixels* is the pixel data in boxed row flat pixel format (just
like the :meth:`read` method).
All the other aspects of the image data are not changed.
|
entailment
|
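A hedged usage sketch, again assuming png.Reader as the entry point and a hypothetical colour-type-3 file named 'paletted.png':

import png                                   # assumed module name

r = png.Reader(filename='paletted.png')      # hypothetical paletted file
width, height, pixels, meta = r.asDirect()
# For a paletted source the returned metadata describes the direct form:
# 8-bit samples and 3 or 4 planes depending on whether tRNS was present.
print(meta['bitdepth'], meta['planes'], meta['alpha'])
for row in pixels:
    # row is flat: meta['planes'] values per pixel, width pixels per row.
    pixel0 = tuple(row[:meta['planes']])
    break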
def asFloat(self, maxval=1.0):
"""Return image pixels as per :meth:`asDirect` method, but scale
all pixel values to be floating point values between 0.0 and
*maxval*.
"""
x,y,pixels,info = self.asDirect()
sourcemaxval = 2**info['bitdepth']-1
del info['bitdepth']
info['maxval'] = float(maxval)
factor = float(maxval)/float(sourcemaxval)
def iterfloat():
for row in pixels:
yield map(factor.__mul__, row)
return x,y,iterfloat(),info
|
Return image pixels as per :meth:`asDirect` method, but scale
all pixel values to be floating point values between 0.0 and
*maxval*.
|
entailment
|
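The conversion is a single multiplication by maxval / (2**bitdepth - 1); a quick check with assumed 8-bit values:

bitdepth = 8                        # assumed source bit depth
maxval = 1.0
factor = maxval / (2 ** bitdepth - 1)
row = [0, 128, 255]                 # one made-up greyscale row
print([v * factor for v in row])    # [0.0, 0.50196..., 1.0]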
def _as_rescale(self, get, targetbitdepth):
"""Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
width,height,pixels,meta = get()
maxval = 2**meta['bitdepth'] - 1
targetmaxval = 2**targetbitdepth - 1
factor = float(targetmaxval) / float(maxval)
meta['bitdepth'] = targetbitdepth
def iterscale():
for row in pixels:
yield map(lambda x: int(round(x*factor)), row)
if maxval == targetmaxval:
return width, height, pixels, meta
else:
return width, height, iterscale(), meta
|
Helper used by :meth:`asRGB8` and :meth:`asRGBA8`.
|
entailment
|
def asRGB(self):
"""Return image as RGB pixels. RGB colour images are passed
through unchanged; greyscales are expanded into RGB
triplets (there is a small speed overhead for doing this).
An alpha channel in the source image will raise an
exception.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``.
"""
width,height,pixels,meta = self.asDirect()
if meta['alpha']:
raise Error("will not convert image with alpha channel to RGB")
if not meta['greyscale']:
return width,height,pixels,meta
meta['greyscale'] = False
typecode = 'BH'[meta['bitdepth'] > 8]
def iterrgb():
for row in pixels:
a = array(typecode, [0]) * 3 * width
for i in range(3):
a[i::3] = row
yield a
return width,height,iterrgb(),meta
|
Return image as RGB pixels. RGB colour images are passed
through unchanged; greyscales are expanded into RGB
triplets (there is a small speed overhead for doing this).
An alpha channel in the source image will raise an
exception.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``.
|
entailment
|
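The expansion relies on extended slice assignment (a[i::3] = row) to interleave the single grey channel into all three positions. A self-contained sketch with one made-up 8-bit row:

from array import array

def grey_row_to_rgb(row, width):
    # Replicate one greyscale row into an interleaved R, G, B row.
    a = array('B', [0]) * 3 * width
    for i in range(3):
        a[i::3] = array('B', row)
    return a

print(list(grey_row_to_rgb([10, 200, 30], 3)))
# [10, 10, 10, 200, 200, 200, 30, 30, 30]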
def asRGBA(self):
"""Return image as RGBA pixels. Greyscales are expanded into
RGB triplets; an alpha channel is synthesized if necessary.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``, and
``metadata['alpha']`` will be ``True``.
"""
width,height,pixels,meta = self.asDirect()
if meta['alpha'] and not meta['greyscale']:
return width,height,pixels,meta
typecode = 'BH'[meta['bitdepth'] > 8]
maxval = 2**meta['bitdepth'] - 1
maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width
def newarray():
return array(typecode, maxbuffer)
if meta['alpha'] and meta['greyscale']:
# LA to RGBA
def convert():
for row in pixels:
# Create a fresh target row, then copy L channel
# into first three target channels, and A channel
# into fourth channel.
a = newarray()
pngfilters.convert_la_to_rgba(row, a)
yield a
elif meta['greyscale']:
# L to RGBA
def convert():
for row in pixels:
a = newarray()
pngfilters.convert_l_to_rgba(row, a)
yield a
else:
assert not meta['alpha'] and not meta['greyscale']
# RGB to RGBA
def convert():
for row in pixels:
a = newarray()
pngfilters.convert_rgb_to_rgba(row, a)
yield a
meta['alpha'] = True
meta['greyscale'] = False
return width,height,convert(),meta
|
Return image as RGBA pixels. Greyscales are expanded into
RGB triplets; an alpha channel is synthesized if necessary.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``, and
``metadata['alpha']`` will be ``True``.
|
entailment
|
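The convert_* helpers used above copy channels into a row that has been pre-filled with the maximum (fully opaque) value. A pure-Python sketch of the RGB-to-RGBA case; the helper name is chosen here, not taken from the library:

from array import array

def rgb_row_to_rgba(row, maxval=255):
    # Pre-fill the target with opaque alpha, then copy R, G, B across.
    width = len(row) // 3
    a = array('B', [maxval]) * 4 * width
    for i in range(3):
        a[i::4] = array('B', row[i::3])
    return a

print(list(rgb_row_to_rgba([1, 2, 3, 4, 5, 6])))
# [1, 2, 3, 255, 4, 5, 6, 255]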
def load_data_file(fname, directory=None, force_download=False):
"""Get a standard vispy demo data file
Parameters
----------
fname : str
The filename on the remote ``demo-data`` repository to download,
e.g. ``'molecular_viewer/micelle.npy'``. These correspond to paths
on ``https://github.com/vispy/demo-data/``.
directory : str | None
Directory to use to save the file. By default, the vispy
configuration directory is used.
force_download : bool | str
If True, the file will be downloaded even if a local copy exists
(and this copy will be overwritten). Can also be a YYYY-MM-DD date
to ensure a file is up-to-date (modified date of a file on disk,
if present, is checked).
Returns
-------
fname : str
The path to the file on the local system.
"""
_url_root = 'http://github.com/vispy/demo-data/raw/master/'
url = _url_root + fname
if directory is None:
directory = config['data_path']
if directory is None:
raise ValueError('config["data_path"] is not defined, '
'so directory must be supplied')
fname = op.join(directory, op.normcase(fname)) # convert to native
if op.isfile(fname):
if not force_download: # we're done
return fname
if isinstance(force_download, string_types):
ntime = time.strptime(force_download, '%Y-%m-%d')
ftime = time.gmtime(op.getctime(fname))
if ftime >= ntime:
return fname
else:
print('File older than %s, updating...' % force_download)
if not op.isdir(op.dirname(fname)):
os.makedirs(op.abspath(op.dirname(fname)))
# let's go get the file
_fetch_file(url, fname)
return fname
|
Get a standard vispy demo data file
Parameters
----------
fname : str
The filename on the remote ``demo-data`` repository to download,
e.g. ``'molecular_viewer/micelle.npy'``. These correspond to paths
on ``https://github.com/vispy/demo-data/``.
directory : str | None
Directory to use to save the file. By default, the vispy
configuration directory is used.
force_download : bool | str
If True, the file will be downloaded even if a local copy exists
(and this copy will be overwritten). Can also be a YYYY-MM-DD date
to ensure a file is up-to-date (modified date of a file on disk,
if present, is checked).
Returns
-------
fname : str
The path to the file on the local system.
|
entailment
|
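Typical usage, assuming the function is importable as vispy.io.load_data_file (the import path is an assumption of this sketch) and that the remote path exists in the demo-data repository:

from vispy.io import load_data_file      # assumed import path

# First call downloads and caches the file; later calls reuse the copy.
local_path = load_data_file('molecular_viewer/micelle.npy')
print(local_path)

# Refresh the cached copy only if it is older than the given date.
local_path = load_data_file('molecular_viewer/micelle.npy',
                            force_download='2015-01-01')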
def _chunk_read(response, local_file, chunk_size=65536, initial_size=0):
"""Download a file chunk by chunk and show advancement
Can also be used when resuming downloads over http.
Parameters
----------
response: urllib.response.addinfourl
Response to the download request in order to get file size.
local_file: file
Hard disk file where data should be written.
chunk_size: integer, optional
        Size of downloaded chunks. Default: 65536
initial_size: int, optional
If resuming, indicate the initial size of the file.
"""
# Adapted from NISL:
# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
bytes_so_far = initial_size
# Returns only amount left to download when resuming, not the size of the
# entire file
total_size = int(response.headers['Content-Length'].strip())
total_size += initial_size
progress = ProgressBar(total_size, initial_value=bytes_so_far,
max_chars=40, spinner=True, mesg='downloading')
while True:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
sys.stderr.write('\n')
break
_chunk_write(chunk, local_file, progress)
|
Download a file chunk by chunk and show advancement
Can also be used when resuming downloads over http.
Parameters
----------
response: urllib.response.addinfourl
Response to the download request in order to get file size.
local_file: file
Hard disk file where data should be written.
chunk_size: integer, optional
    Size of downloaded chunks. Default: 65536
initial_size: int, optional
If resuming, indicate the initial size of the file.
|
entailment
|
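Stripped of the progress bar, the loop reduces to the usual read-in-chunks pattern; the URL and path below are placeholders:

import urllib.request

def download(url, path, chunk_size=65536):
    # Stream the response body to disk chunk by chunk instead of
    # reading it into memory in one go.
    response = urllib.request.urlopen(url, timeout=15.)
    total_size = int(response.headers['Content-Length'].strip())
    bytes_so_far = 0
    with open(path, 'wb') as local_file:
        while True:
            chunk = response.read(chunk_size)
            if not chunk:
                break
            local_file.write(chunk)
            bytes_so_far += len(chunk)
    return bytes_so_far, total_size

# download('http://example.com/file.bin', '/tmp/file.bin')  # placeholder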
def _chunk_write(chunk, local_file, progress):
"""Write a chunk to file and update the progress bar"""
local_file.write(chunk)
progress.update_with_increment_value(len(chunk))
|
Write a chunk to file and update the progress bar
|
entailment
|
def _fetch_file(url, file_name, print_destination=True):
"""Load requested file, downloading it if needed or requested
Parameters
----------
url: string
The url of file to be downloaded.
file_name: string
Name, along with the path, of where downloaded file will be saved.
print_destination: bool, optional
If true, destination of where file was saved will be printed after
download finishes.
"""
# Adapted from NISL:
# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
temp_file_name = file_name + ".part"
local_file = None
initial_size = 0
# Checking file size and displaying it alongside the download url
n_try = 3
for ii in range(n_try):
try:
data = urllib.request.urlopen(url, timeout=15.)
        except Exception as e:
            if ii == n_try - 1:
                raise RuntimeError('Error while fetching file %s.\n'
                                   'Dataset fetching aborted (%s)' % (url, e))
        else:
            # Stop retrying once the URL has been opened successfully.
            break
try:
file_size = int(data.headers['Content-Length'].strip())
print('Downloading data from %s (%s)' % (url, sizeof_fmt(file_size)))
local_file = open(temp_file_name, "wb")
_chunk_read(data, local_file, initial_size=initial_size)
# temp file must be closed prior to the move
if not local_file.closed:
local_file.close()
shutil.move(temp_file_name, file_name)
if print_destination is True:
sys.stdout.write('File saved as %s.\n' % file_name)
except Exception as e:
raise RuntimeError('Error while fetching file %s.\n'
'Dataset fetching aborted (%s)' % (url, e))
finally:
if local_file is not None:
if not local_file.closed:
local_file.close()
|
Load requested file, downloading it if needed or requested
Parameters
----------
url: string
The url of file to be downloaded.
file_name: string
Name, along with the path, of where downloaded file will be saved.
print_destination: bool, optional
If true, destination of where file was saved will be printed after
download finishes.
|
entailment
|
def update(self, cur_value, mesg=None):
"""Update progressbar with current value of process
Parameters
----------
cur_value : number
Current value of process. Should be <= max_value (but this is not
enforced). The percent of the progressbar will be computed as
(cur_value / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''.
"""
# Ensure floating-point division so we can get fractions of a percent
# for the progressbar.
self.cur_value = cur_value
progress = float(self.cur_value) / self.max_value
num_chars = int(progress * self.max_chars)
num_left = self.max_chars - num_chars
# Update the message
if mesg is not None:
self.mesg = mesg
# The \r tells the cursor to return to the beginning of the line rather
# than starting a new line. This allows us to have a progressbar-style
# display in the console window.
bar = self.template.format(self.progress_character * num_chars,
' ' * num_left,
progress * 100,
self.spinner_symbols[self.spinner_index],
self.mesg)
sys.stdout.write(bar)
        # Increment the spinner
if self.spinner:
self.spinner_index = (self.spinner_index + 1) % self.n_spinner
# Force a flush because sometimes when using bash scripts and pipes,
# the output is not printed until after the program exits.
sys.stdout.flush()
|
Update progressbar with current value of process
Parameters
----------
cur_value : number
Current value of process. Should be <= max_value (but this is not
enforced). The percent of the progressbar will be computed as
(cur_value / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''.
|
entailment
|
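The carriage return is what keeps the bar on a single console line: each update rewrites the same line instead of printing a new one. A minimal standalone version (template and bar characters invented for the sketch):

import sys
import time

def show_progress(cur_value, max_value, max_chars=40, mesg=''):
    # Redraw one console line in place using '\r' rather than '\n'.
    progress = float(cur_value) / max_value
    num_chars = int(progress * max_chars)
    bar = '\r[{0}{1}] {2:5.1f}% {3}'.format('=' * num_chars,
                                            ' ' * (max_chars - num_chars),
                                            progress * 100, mesg)
    sys.stdout.write(bar)
    sys.stdout.flush()            # needed so pipes show progress promptly

for i in range(101):
    show_progress(i, 100, mesg='downloading')
    time.sleep(0.01)
sys.stdout.write('\n')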
def update_with_increment_value(self, increment_value, mesg=None):
"""Update progressbar with the value of the increment instead of the
current value of process as in update()
Parameters
----------
increment_value : int
Value of the increment of process. The percent of the progressbar
will be computed as
            ((self.cur_value + increment_value) / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''.
"""
self.cur_value += increment_value
self.update(self.cur_value, mesg)
|
Update progressbar with the value of the increment instead of the
current value of process as in update()
Parameters
----------
increment_value : int
Value of the increment of process. The percent of the progressbar
will be computed as
        ((self.cur_value + increment_value) / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''.
|
entailment
|
def central_widget(self):
""" Returns the default widget that occupies the entire area of the
canvas.
"""
if self._central_widget is None:
self._central_widget = Widget(size=self.size, parent=self.scene)
return self._central_widget
|
Returns the default widget that occupies the entire area of the
canvas.
|
entailment
|
def render(self, region=None, size=None, bgcolor=None):
"""Render the scene to an offscreen buffer and return the image array.
Parameters
----------
region : tuple | None
Specifies the region of the canvas to render. Format is
(x, y, w, h). By default, the entire canvas is rendered.
size : tuple | None
Specifies the size of the image array to return. If no size is
given, then the size of the *region* is used, multiplied by the
pixel scaling factor of the canvas (see `pixel_scale`). This
argument allows the scene to be rendered at resolutions different
from the native canvas resolution.
bgcolor : instance of Color | None
The background color to use.
Returns
-------
image : array
Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
upper-left corner of the rendered region.
"""
self.set_current()
# Set up a framebuffer to render to
offset = (0, 0) if region is None else region[:2]
csize = self.size if region is None else region[2:]
s = self.pixel_scale
size = tuple([x * s for x in csize]) if size is None else size
fbo = gloo.FrameBuffer(color=gloo.RenderBuffer(size[::-1]),
depth=gloo.RenderBuffer(size[::-1]))
self.push_fbo(fbo, offset, csize)
try:
self._draw_scene(bgcolor=bgcolor)
return fbo.read()
finally:
self.pop_fbo()
|
Render the scene to an offscreen buffer and return the image array.
Parameters
----------
region : tuple | None
Specifies the region of the canvas to render. Format is
(x, y, w, h). By default, the entire canvas is rendered.
size : tuple | None
Specifies the size of the image array to return. If no size is
given, then the size of the *region* is used, multiplied by the
pixel scaling factor of the canvas (see `pixel_scale`). This
argument allows the scene to be rendered at resolutions different
from the native canvas resolution.
bgcolor : instance of Color | None
The background color to use.
Returns
-------
image : array
Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
upper-left corner of the rendered region.
|
entailment
|
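A hedged usage sketch of offscreen rendering, assuming vispy's usual public API (vispy.scene.SceneCanvas, vispy.io.write_png) and a working OpenGL backend:

from vispy import scene, io

canvas = scene.SceneCanvas(size=(640, 480), show=False)
view = canvas.central_widget.add_view()     # empty scene is enough here

image = canvas.render(bgcolor='black')      # full canvas at native size
print(image.shape, image.dtype)             # (480, 640, 4) uint8 if pixel_scale == 1

# Render only a region, scaled up to a larger output array, and save it.
crop = canvas.render(region=(0, 0, 320, 240), size=(640, 480))
io.write_png('render.png', crop)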
def draw_visual(self, visual, event=None):
""" Draw a visual and its children to the canvas or currently active
framebuffer.
Parameters
----------
visual : Visual
The visual to draw
event : None or DrawEvent
Optionally specifies the original canvas draw event that initiated
this draw.
"""
prof = Profiler()
# make sure this canvas's context is active
self.set_current()
try:
self._drawing = True
# get order to draw visuals
if visual not in self._draw_order:
self._draw_order[visual] = self._generate_draw_order()
order = self._draw_order[visual]
# draw (while avoiding branches with visible=False)
stack = []
invisible_node = None
for node, start in order:
if start:
stack.append(node)
if invisible_node is None:
if not node.visible:
# disable drawing until we exit this node's subtree
invisible_node = node
else:
if hasattr(node, 'draw'):
node.draw()
prof.mark(str(node))
else:
if node is invisible_node:
invisible_node = None
stack.pop()
finally:
self._drawing = False
|
Draw a visual and its children to the canvas or currently active
framebuffer.
Parameters
----------
visual : Visual
The visual to draw
event : None or DrawEvent
Optionally specifies the original canvas draw event that initiated
this draw.
|
entailment
|
def _generate_draw_order(self, node=None):
"""Return a list giving the order to draw visuals.
Each node appears twice in the list--(node, True) appears before the
node's children are drawn, and (node, False) appears after.
"""
if node is None:
node = self._scene
order = [(node, True)]
children = node.children
children.sort(key=lambda ch: ch.order)
for ch in children:
order.extend(self._generate_draw_order(ch))
order.append((node, False))
return order
|
Return a list giving the order to draw visuals.
Each node appears twice in the list--(node, True) appears before the
node's children are drawn, and (node, False) appears after.
|
entailment
|
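The ordering is a plain pre/post traversal with children sorted by their order attribute; the same idea written independently of vispy, using a tiny stand-in node class:

class Node:
    # Minimal stand-in for a scene-graph node, for this sketch only.
    def __init__(self, name, order=0, children=()):
        self.name, self.order, self.children = name, order, list(children)

def generate_draw_order(node):
    # (node, True) appears before the subtree is drawn, (node, False) after.
    out = [(node, True)]
    for ch in sorted(node.children, key=lambda c: c.order):
        out.extend(generate_draw_order(ch))
    out.append((node, False))
    return out

root = Node('root', children=[Node('b', order=2), Node('a', order=1)])
print([(n.name, start) for n, start in generate_draw_order(root)])
# [('root', True), ('a', True), ('a', False),
#  ('b', True), ('b', False), ('root', False)]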