| sentence1 (string, 52 to 3.87M chars) | sentence2 (string, 1 to 47.2k chars) | label (1 class: entailment) |
|---|---|---|
def visual_at(self, pos):
"""Return the visual at a given position
Parameters
----------
pos : tuple
The position in logical coordinates to query.
Returns
-------
visual : instance of Visual | None
The visual at the position, if it exists.
"""
tr = self.transforms.get_transform('canvas', 'framebuffer')
fbpos = tr.map(pos)[:2]
try:
id_ = self._render_picking(region=(fbpos[0], fbpos[1],
1, 1))
vis = VisualNode._visual_ids.get(id_[0, 0], None)
except RuntimeError:
# Don't have read_pixels() support for IPython. Fall back to
# bounds checking.
return self._visual_bounds_at(pos)
return vis
|
Return the visual at a given position
Parameters
----------
pos : tuple
The position in logical coordinates to query.
Returns
-------
visual : instance of Visual | None
The visual at the position, if it exists.
|
entailment
|
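A hedged usage sketch (not part of the source above): on a vispy SceneCanvas, visual_at can be wired to mouse presses to report which visual was clicked. The canvas construction details here are assumptions.

from vispy import app, scene

canvas = scene.SceneCanvas(keys='interactive', show=True)

@canvas.events.mouse_press.connect
def on_mouse_press(event):
    # visual_at returns the picked visual, or None when nothing is hit
    print('clicked on:', canvas.visual_at(event.pos))

if __name__ == '__main__':
    app.run()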
def _visual_bounds_at(self, pos, node=None):
"""Find a visual whose bounding rect encompasses *pos*.
"""
if node is None:
node = self.scene
for ch in node.children:
hit = self._visual_bounds_at(pos, ch)
if hit is not None:
return hit
if (not isinstance(node, VisualNode) or not node.visible or
not node.interactive):
return None
bounds = [node.bounds(axis=i) for i in range(2)]
if None in bounds:
return None
tr = self.scene.node_transform(node).inverse
corners = np.array([
[bounds[0][0], bounds[1][0]],
[bounds[0][0], bounds[1][1]],
[bounds[0][1], bounds[1][0]],
[bounds[0][1], bounds[1][1]]])
bounds = tr.map(corners)
xhit = bounds[:, 0].min() < pos[0] < bounds[:, 0].max()
yhit = bounds[:, 1].min() < pos[1] < bounds[:, 1].max()
if xhit and yhit:
return node
|
Find a visual whose bounding rect encompasses *pos*.
|
entailment
|
def visuals_at(self, pos, radius=10):
"""Return a list of visuals within *radius* pixels of *pos*.
Visuals are sorted by their proximity to *pos*.
Parameters
----------
pos : tuple
(x, y) position at which to find visuals.
radius : int
Distance away from *pos* to search for visuals.
"""
tr = self.transforms.get_transform('canvas', 'framebuffer')
pos = tr.map(pos)[:2]
id_img = self._render_picking(region=(pos[0]-radius, pos[1]-radius,
radius * 2 + 1, radius * 2 + 1))
ids = []
seen = set()
# Include the outermost ring (distance == radius) so the search truly
# covers *radius* pixels; IDs are collected in order of proximity.
for i in range(radius + 1):
subr = id_img[radius-i:radius+i+1, radius-i:radius+i+1]
subr_ids = set(list(np.unique(subr)))
ids.extend(list(subr_ids - seen))
seen |= subr_ids
visuals = [VisualNode._visual_ids.get(x, None) for x in ids]
return [v for v in visuals if v is not None]
|
Return a list of visuals within *radius* pixels of *pos*.
Visuals are sorted by their proximity to *pos*.
Parameters
----------
pos : tuple
(x, y) position at which to find visuals.
radius : int
Distance away from *pos* to search for visuals.
|
entailment
|
def _render_picking(self, **kwargs):
"""Render the scene in picking mode, returning a 2D array of visual
IDs.
"""
try:
self._scene.picking = True
img = self.render(bgcolor=(0, 0, 0, 0), **kwargs)
finally:
self._scene.picking = False
img = img.astype('int32') * [2**0, 2**8, 2**16, 2**24]
id_ = img.sum(axis=2).astype('int32')
return id_
|
Render the scene in picking mode, returning a 2D array of visual
IDs.
|
entailment
|
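The channel-weighting line above decodes a 32-bit visual ID that was packed into the four 8-bit RGBA channels of the picking render. A minimal standalone sketch of that round trip (the helper names are illustrative, not from the library):

import numpy as np

def encode_id(id_):
    # Split a 32-bit integer into four bytes, one per R, G, B, A channel.
    return np.array([(id_ >> shift) & 0xFF for shift in (0, 8, 16, 24)],
                    dtype=np.uint8)

def decode_id(rgba):
    # Weighted channel sum, mirroring the expression in _render_picking.
    return int((rgba.astype('int32') * [2**0, 2**8, 2**16, 2**24]).sum())

assert decode_id(encode_id(123456789)) == 123456789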
def on_resize(self, event):
"""Resize handler
Parameters
----------
event : instance of Event
The resize event.
"""
self._update_transforms()
if self._central_widget is not None:
self._central_widget.size = self.size
if len(self._vp_stack) == 0:
self.context.set_viewport(0, 0, *self.physical_size)
|
Resize handler
Parameters
----------
event : instance of Event
The resize event.
|
entailment
|
def on_close(self, event):
"""Close event handler
Parameters
----------
event : instance of Event
The event.
"""
self.events.mouse_press.disconnect(self._process_mouse_event)
self.events.mouse_move.disconnect(self._process_mouse_event)
self.events.mouse_release.disconnect(self._process_mouse_event)
self.events.mouse_wheel.disconnect(self._process_mouse_event)
|
Close event handler
Parameters
----------
event : instance of Event
The event.
|
entailment
|
def push_viewport(self, viewport):
""" Push a viewport (x, y, w, h) on the stack. Values must be integers
relative to the active framebuffer.
Parameters
----------
viewport : tuple
The viewport as (x, y, w, h).
"""
vp = list(viewport)
# Normalize the viewport before setting it.
if vp[2] < 0:
vp[0] += vp[2]
vp[2] *= -1
if vp[3] < 0:
vp[1] += vp[3]
vp[3] *= -1
self._vp_stack.append(vp)
try:
self.context.set_viewport(*vp)
except:
self._vp_stack.pop()
raise
self._update_transforms()
|
Push a viewport (x, y, w, h) on the stack. Values must be integers
relative to the active framebuffer.
Parameters
----------
viewport : tuple
The viewport as (x, y, w, h).
|
entailment
|
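The normalization step in push_viewport folds a negative width or height into the origin so the stored viewport always has non-negative extents. A standalone sketch of the same arithmetic:

def normalize_viewport(viewport):
    # Fold negative extents into the origin: result has w, h >= 0.
    x, y, w, h = viewport
    if w < 0:
        x, w = x + w, -w
    if h < 0:
        y, h = y + h, -h
    return (x, y, w, h)

assert normalize_viewport((10, 10, -4, 5)) == (6, 10, 4, 5)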
def pop_viewport(self):
""" Pop a viewport from the stack.
"""
vp = self._vp_stack.pop()
# Activate latest
if len(self._vp_stack) > 0:
self.context.set_viewport(*self._vp_stack[-1])
else:
self.context.set_viewport(0, 0, *self.physical_size)
self._update_transforms()
return vp
|
Pop a viewport from the stack.
|
entailment
|
def push_fbo(self, fbo, offset, csize):
""" Push an FBO on the stack.
This activates the framebuffer and causes subsequent rendering to be
written to the framebuffer rather than the canvas's back buffer. This
will also set the canvas viewport to cover the boundaries of the
framebuffer.
Parameters
----------
fbo : instance of FrameBuffer
The framebuffer object.
offset : tuple
The location of the fbo origin relative to the canvas's framebuffer
origin.
csize : tuple
The size of the region in the canvas's framebuffer that should be
covered by this framebuffer object.
"""
self._fb_stack.append((fbo, offset, csize))
try:
fbo.activate()
h, w = fbo.color_buffer.shape[:2]
self.push_viewport((0, 0, w, h))
except Exception:
self._fb_stack.pop()
raise
self._update_transforms()
|
Push an FBO on the stack.
This activates the framebuffer and causes subsequent rendering to be
written to the framebuffer rather than the canvas's back buffer. This
will also set the canvas viewport to cover the boundaries of the
framebuffer.
Parameters
----------
fbo : instance of FrameBuffer
The framebuffer object.
offset : tuple
The location of the fbo origin relative to the canvas's framebuffer
origin.
csize : tuple
The size of the region in the canvas's framebuffer that should be
covered by this framebuffer object.
|
entailment
|
def pop_fbo(self):
""" Pop an FBO from the stack.
"""
fbo = self._fb_stack.pop()
fbo[0].deactivate()
self.pop_viewport()
if len(self._fb_stack) > 0:
old_fbo = self._fb_stack[-1]
old_fbo[0].activate()
self._update_transforms()
return fbo
|
Pop an FBO from the stack.
|
entailment
|
def _update_transforms(self):
"""Update the canvas's TransformSystem to correct for the current
canvas size, framebuffer, and viewport.
"""
if len(self._fb_stack) == 0:
fb_size = fb_rect = None
else:
fb, origin, fb_size = self._fb_stack[-1]
fb_rect = origin + fb_size
if len(self._vp_stack) == 0:
viewport = None
else:
viewport = self._vp_stack[-1]
self.transforms.configure(viewport=viewport, fbo_size=fb_size,
fbo_rect=fb_rect)
|
Update the canvas's TransformSystem to correct for the current
canvas size, framebuffer, and viewport.
|
entailment
|
def wrapping(self):
""" Texture wrapping mode """
value = self._wrapping
return value[0] if all([v == value[0] for v in value]) else value
|
Texture wrapping mode
|
entailment
|
def resize(self, shape, format=None, internalformat=None):
"""Set the texture size and format
Parameters
----------
shape : tuple of integers
New texture shape in zyx order. Optionally, an extra dimension
may be specified to indicate the number of color channels.
format : str | enum | None
The format of the texture: 'luminance', 'alpha',
'luminance_alpha', 'rgb', or 'rgba'. If not given the format
is chosen automatically based on the number of channels.
When the data has one channel, 'luminance' is assumed.
internalformat : str | enum | None
The internal (storage) format of the texture: 'luminance',
'alpha', 'r8', 'r16', 'r16f', 'r32f'; 'luminance_alpha',
'rg8', 'rg16', 'rg16f', 'rg32f'; 'rgb', 'rgb8', 'rgb16',
'rgb16f', 'rgb32f'; 'rgba', 'rgba8', 'rgba16', 'rgba16f',
'rgba32f'. If None, the internalformat is chosen
automatically based on the number of channels. This is a
hint which may be ignored by the OpenGL implementation.
"""
return self._resize(shape, format, internalformat)
|
Set the texture size and format
Parameters
----------
shape : tuple of integers
New texture shape in zyx order. Optionally, an extra dimension
may be specified to indicate the number of color channels.
format : str | enum | None
The format of the texture: 'luminance', 'alpha',
'luminance_alpha', 'rgb', or 'rgba'. If not given the format
is chosen automatically based on the number of channels.
When the data has one channel, 'luminance' is assumed.
internalformat : str | enum | None
The internal (storage) format of the texture: 'luminance',
'alpha', 'r8', 'r16', 'r16f', 'r32f'; 'luminance_alpha',
'rg8', 'rg16', 'rg16f', 'rg32f'; 'rgb', 'rgb8', 'rgb16',
'rgb16f', 'rgb32f'; 'rgba', 'rgba8', 'rgba16', 'rgba16f',
'rgba32f'. If None, the internalformat is chosen
automatically based on the number of channels. This is a
hint which may be ignored by the OpenGL implementation.
|
entailment
|
def _resize(self, shape, format=None, internalformat=None):
"""Internal method for resize.
"""
shape = self._normalize_shape(shape)
# Check
if not self._resizable:
raise RuntimeError("Texture is not resizable")
# Determine format
if format is None:
format = self._formats[shape[-1]]
# Keep current format if channels match
if self._format and \
self._inv_formats[self._format] == self._inv_formats[format]:
format = self._format
else:
format = check_enum(format)
if internalformat is None:
# Keep current internalformat if channels match
if self._internalformat and \
self._inv_internalformats[self._internalformat] == shape[-1]:
internalformat = self._internalformat
else:
internalformat = check_enum(internalformat)
# Check
if format not in self._inv_formats:
raise ValueError('Invalid texture format: %r.' % format)
elif shape[-1] != self._inv_formats[format]:
raise ValueError('Format does not match with given shape. '
'(format expects %d elements, data has %d)' %
(self._inv_formats[format], shape[-1]))
if internalformat is None:
pass
elif internalformat not in self._inv_internalformats:
raise ValueError(
'Invalid texture internalformat: %r. Allowed formats: %r'
% (internalformat, self._inv_internalformats)
)
elif shape[-1] != self._inv_internalformats[internalformat]:
raise ValueError('Internalformat does not match with given shape.')
# Store and send GLIR command
self._shape = shape
self._format = format
self._internalformat = internalformat
self._glir.command('SIZE', self._id, self._shape, self._format,
self._internalformat)
|
Internal method for resize.
|
entailment
|
def set_data(self, data, offset=None, copy=False):
"""Set texture data
Parameters
----------
data : ndarray
Data to be uploaded
offset: int | tuple of ints
Offset in texture where to start copying data
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory. Asking explicitly
for a copy will prevent this behavior.
Notes
-----
This operation implicitly resizes the texture to the shape of
the data if the given offset is None.
"""
return self._set_data(data, offset, copy)
|
Set texture data
Parameters
----------
data : ndarray
Data to be uploaded
offset: int | tuple of ints
Offset in texture where to start copying data
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory. Asking explicitly
for a copy will prevent this behavior.
Notes
-----
This operation implicitly resizes the texture to the shape of
the data if the given offset is None.
|
entailment
|
def _set_data(self, data, offset=None, copy=False):
"""Internal method for set_data.
"""
# Copy if needed, check/normalize shape
data = np.array(data, copy=copy)
data = self._normalize_shape(data)
# Maybe resize to purge DATA commands?
if offset is None:
self._resize(data.shape)
elif all([i == 0 for i in offset]) and data.shape == self._shape:
self._resize(data.shape)
# Convert offset to something usable
offset = offset or tuple([0 for i in range(self._ndim)])
assert len(offset) == self._ndim
# Check if data fits
for i in range(len(data.shape)-1):
if offset[i] + data.shape[i] > self._shape[i]:
raise ValueError("Data is too large")
# Send GLIR command
self._glir.command('DATA', self._id, offset, data)
|
Internal method for set_data.
|
entailment
|
def set_data(self, data, offset=None, copy=False):
"""Set texture data
Parameters
----------
data : ndarray
Data to be uploaded
offset: int | tuple of ints
Offset in texture where to start copying data
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory. Asking explicitly
for a copy will prevent this behavior.
Notes
-----
This operation implicitly resizes the texture to the shape of
the data if the given offset is None.
"""
self._set_emulated_shape(data)
Texture2D.set_data(self, self._normalize_emulated_shape(data),
offset, copy)
self._update_variables()
|
Set texture data
Parameters
----------
data : ndarray
Data to be uploaded
offset: int | tuple of ints
Offset in texture where to start copying data
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory. Asking explicitly
for a copy will prevent this behavior.
Notes
-----
This operation implicitly resizes the texture to the shape of
the data if the given offset is None.
|
entailment
|
def resize(self, shape, format=None, internalformat=None):
"""Set the texture size and format
Parameters
----------
shape : tuple of integers
New texture shape in zyx order. Optionally, an extra dimension
may be specified to indicate the number of color channels.
format : str | enum | None
The format of the texture: 'luminance', 'alpha',
'luminance_alpha', 'rgb', or 'rgba'. If not given the format
is chosen automatically based on the number of channels.
When the data has one channel, 'luminance' is assumed.
internalformat : str | enum | None
The internal (storage) format of the texture: 'luminance',
'alpha', 'r8', 'r16', 'r16f', 'r32f'; 'luminance_alpha',
'rg8', 'rg16', 'rg16f', 'rg32f'; 'rgb', 'rgb8', 'rgb16',
'rgb16f', 'rgb32f'; 'rgba', 'rgba8', 'rgba16', 'rgba16f',
'rgba32f'. If None, the internalformat is chosen
automatically based on the number of channels. This is a
hint which may be ignored by the OpenGL implementation.
"""
self._set_emulated_shape(shape)
Texture2D.resize(self, self._normalize_emulated_shape(shape),
format, internalformat)
self._update_variables()
|
Set the texture size and format
Parameters
----------
shape : tuple of integers
New texture shape in zyx order. Optionally, an extra dimension
may be specified to indicate the number of color channels.
format : str | enum | None
The format of the texture: 'luminance', 'alpha',
'luminance_alpha', 'rgb', or 'rgba'. If not given the format
is chosen automatically based on the number of channels.
When the data has one channel, 'luminance' is assumed.
internalformat : str | enum | None
The internal (storage) format of the texture: 'luminance',
'alpha', 'r8', 'r16', 'r16f', 'r32f'; 'luminance_alpha',
'rg8', 'rg16', 'rg16f', 'rg32f'; 'rgb', 'rgb8', 'rgb16',
'rgb16f', 'rgb32f'; 'rgba', 'rgba8', 'rgba16', 'rgba16f',
'rgba32f'. If None, the internalformat is chosen
automatically based on the number of channels. This is a
hint which may be ignored by the OpenGL implementation.
|
entailment
|
def get_free_region(self, width, height):
"""Get a free region of given size and allocate it
Parameters
----------
width : int
Width of region to allocate
height : int
Height of region to allocate
Returns
-------
bounds : tuple | None
A newly allocated region as (x, y, w, h) or None
(if failed).
"""
best_height = best_width = np.inf
best_index = -1
for i in range(len(self._atlas_nodes)):
y = self._fit(i, width, height)
if y >= 0:
node = self._atlas_nodes[i]
if (y+height < best_height or
(y+height == best_height and node[2] < best_width)):
best_height = y+height
best_index = i
best_width = node[2]
region = node[0], y, width, height
if best_index == -1:
return None
node = region[0], region[1] + height, width
self._atlas_nodes.insert(best_index, node)
i = best_index+1
while i < len(self._atlas_nodes):
node = self._atlas_nodes[i]
prev_node = self._atlas_nodes[i-1]
if node[0] < prev_node[0]+prev_node[2]:
shrink = prev_node[0]+prev_node[2] - node[0]
x, y, w = self._atlas_nodes[i]
self._atlas_nodes[i] = x+shrink, y, w-shrink
if self._atlas_nodes[i][2] <= 0:
del self._atlas_nodes[i]
i -= 1
else:
break
else:
break
i += 1
# Merge nodes
i = 0
while i < len(self._atlas_nodes)-1:
node = self._atlas_nodes[i]
next_node = self._atlas_nodes[i+1]
if node[1] == next_node[1]:
self._atlas_nodes[i] = node[0], node[1], node[2]+next_node[2]
del self._atlas_nodes[i+1]
else:
i += 1
return region
|
Get a free region of given size and allocate it
Parameters
----------
width : int
Width of region to allocate
height : int
Height of region to allocate
Returns
-------
bounds : tuple | None
A newly allocated region as (x, y, w, h) or None
(if failed).
|
entailment
|
def _fit(self, index, width, height):
"""Test if region (width, height) fit into self._atlas_nodes[index]"""
node = self._atlas_nodes[index]
x, y = node[0], node[1]
width_left = width
if x+width > self._shape[1]:
return -1
i = index
while width_left > 0:
node = self._atlas_nodes[i]
y = max(y, node[1])
if y+height > self._shape[0]:
return -1
width_left -= node[2]
i += 1
return y
|
Test if a region (width, height) fits into self._atlas_nodes[index]
|
entailment
|
def _vector_or_scalar(x, type='row'):
"""Convert an object to either a scalar or a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x)
if isinstance(x, np.ndarray):
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
|
Convert an object to either a scalar or a row or column vector.
|
entailment
|
def _vector(x, type='row'):
"""Convert an object to a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x, dtype=np.float32)
elif not isinstance(x, np.ndarray):
x = np.array([x], dtype=np.float32)
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
|
Convert an object to a row or column vector.
|
entailment
|
def _normalize(x, cmin=None, cmax=None, clip=True):
"""Normalize an array from the range [cmin, cmax] to [0,1],
with optional clipping."""
if not isinstance(x, np.ndarray):
x = np.array(x)
if cmin is None:
cmin = x.min()
if cmax is None:
cmax = x.max()
if cmin == cmax:
return .5 * np.ones(x.shape)
else:
cmin, cmax = float(cmin), float(cmax)
y = (x - cmin) * 1. / (cmax - cmin)
if clip:
y = np.clip(y, 0., 1.)
return y
|
Normalize an array from the range [cmin, cmax] to [0,1],
with optional clipping.
|
entailment
|
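A quick numeric check of _normalize (assuming numpy imported as np and the function above in scope): values map linearly from [cmin, cmax] onto [0, 1] and are clipped by default.

import numpy as np

vals = np.array([-5., 0., 5., 10., 15.])
print(_normalize(vals, cmin=0., cmax=10.))              # [0.  0.  0.5 1.  1. ]
print(_normalize(vals, cmin=0., cmax=10., clip=False))  # [-0.5  0.   0.5  1.   1.5]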
def _mix_simple(a, b, x):
"""Mix b (with proportion x) with a."""
x = np.clip(x, 0.0, 1.0)
return (1.0 - x)*a + x*b
|
Mix b (with proportion x) with a.
|
entailment
|
def smoothstep(edge0, edge1, x):
""" performs smooth Hermite interpolation
between 0 and 1 when edge0 < x < edge1. """
# Scale, bias and saturate x to 0..1 range
x = np.clip((x - edge0)/(edge1 - edge0), 0.0, 1.0)
# Evaluate polynomial
return x*x*(3 - 2*x)
|
performs smooth Hermite interpolation
between 0 and 1 when edge0 < x < edge1.
|
entailment
|
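A worked check of smoothstep at a few points (assuming numpy as np and the definition above): the Hermite polynomial 3t^2 - 2t^3 gives 0 at edge0, 1 at edge1, 0.5 at the midpoint, and zero slope at both ends.

import numpy as np

x = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
print(smoothstep(0.0, 1.0, x))  # [0.      0.15625 0.5     0.84375 1.     ]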
def step(colors, x, controls=None):
"""Step interpolation from a set of colors. x belongs in [0, 1]."""
x = x.ravel()
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(colors)
assert ncolors == len(controls) - 1
assert ncolors >= 2
x_step = _find_controls(x, controls, ncolors-1)
return colors[x_step, ...]
|
Step interpolation from a set of colors. x belongs in [0, 1].
|
entailment
|
def _glsl_mix(controls=None):
"""Generate a GLSL template function from a given interpolation patterns
and control points."""
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(controls)
assert ncolors >= 2
if ncolors == 2:
s = " return mix($color_0, $color_1, t);\n"
else:
s = ""
for i in range(ncolors-1):
if i == 0:
ifs = 'if (t < %.6f)' % (controls[i+1])
elif i == (ncolors-2):
ifs = 'else'
else:
ifs = 'else if (t < %.6f)' % (controls[i+1])
adj_t = '(t - %s) / %s' % (controls[i],
controls[i+1] - controls[i])
s += ("%s {\n return mix($color_%d, $color_%d, %s);\n} " %
(ifs, i, i+1, adj_t))
return "vec4 colormap(float t) {\n%s\n}" % s
|
Generate a GLSL template function from a given interpolation pattern
and control points.
|
entailment
|
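For concreteness, this is roughly what _glsl_mix emits for three evenly spaced control points [0., 0.5, 1.] (whitespace approximated; the $color_i placeholders are later substituted by _process_glsl_template, shown next):

vec4 colormap(float t) {
if (t < 0.500000) {
    return mix($color_0, $color_1, (t - 0.0) / 0.5);
} else {
    return mix($color_1, $color_2, (t - 0.5) / 0.5);
}
}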
def _process_glsl_template(template, colors):
"""Replace $color_i by color #i in the GLSL template."""
for i in range(len(colors) - 1, -1, -1):
color = colors[i]
assert len(color) == 4
vec4_color = 'vec4(%.3f, %.3f, %.3f, %.3f)' % tuple(color)
template = template.replace('$color_%d' % i, vec4_color)
return template
|
Replace $color_i by color #i in the GLSL template.
|
entailment
|
def get_colormap(name, *args, **kwargs):
"""Obtain a colormap
Some colormaps can have additional configuration parameters. Refer to
their corresponding documentation for more information.
Parameters
----------
name : str | Colormap
Colormap name. Can also be a Colormap for pass-through.
Examples
--------
>>> get_colormap('autumn')
>>> get_colormap('single_hue', hue=10)
"""
if isinstance(name, BaseColormap):
cmap = name
else:
if not isinstance(name, string_types):
raise TypeError('colormap must be a Colormap or string name')
if name not in _colormaps:
raise KeyError('colormap name %s not found' % name)
cmap = _colormaps[name]
if inspect.isclass(cmap):
cmap = cmap(*args, **kwargs)
return cmap
|
Obtain a colormap
Some colormaps can have additional configuration parameters. Refer to
their corresponding documentation for more information.
Parameters
----------
name : str | Colormap
Colormap name. Can also be a Colormap for pass-through.
Examples
--------
>>> get_colormap('autumn')
>>> get_colormap('single_hue', hue=10)
|
entailment
|
def map(self, x):
"""The Python mapping function from the [0,1] interval to a
list of rgba colors
Parameters
----------
x : array-like
The values to map.
Returns
-------
colors : list
List of rgba colors.
"""
return self._map_function(self.colors.rgba, x, self._controls)
|
The Python mapping function from the [0,1] interval to a
list of rgba colors
Parameters
----------
x : array-like
The values to map.
Returns
-------
colors : list
List of rgba colors.
|
entailment
|
def visual_border_width(self):
""" The border width in visual coordinates
"""
render_to_doc = \
self.transforms.get_transform('document', 'visual')
vec = render_to_doc.map([self.border_width, self.border_width, 0])
origin = render_to_doc.map([0, 0, 0])
visual_border_width = [vec[0] - origin[0], vec[1] - origin[1]]
# we need to flip the y axis because coordinate systems are inverted
visual_border_width[1] *= -1
return visual_border_width
|
The border width in visual coordinates
|
entailment
|
def run(self, fig):
"""
Run the exporter on the given figure
Parameters
----------
fig : matplotlib.Figure instance
The figure to export
"""
# Calling savefig executes the draw() command, putting elements
# in the correct place.
fig.savefig(io.BytesIO(), format='png', dpi=fig.dpi)
if self.close_mpl:
import matplotlib.pyplot as plt
plt.close(fig)
self.crawl_fig(fig)
|
Run the exporter on the given figure
Parameters
----------
fig : matplotlib.Figure instance
The figure to export
|
entailment
|
def process_transform(transform, ax=None, data=None, return_trans=False,
force_trans=None):
"""Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
"""
if isinstance(transform, transforms.BlendedGenericTransform):
warnings.warn("Blended transforms not yet supported. "
"Zoom behavior may not work as expected.")
if force_trans is not None:
if data is not None:
data = (transform - force_trans).transform(data)
transform = force_trans
code = "display"
if ax is not None:
for (c, trans) in [("data", ax.transData),
("axes", ax.transAxes),
("figure", ax.figure.transFigure),
("display", transforms.IdentityTransform())]:
if transform.contains_branch(trans):
code, transform = (c, transform - trans)
break
if data is not None:
if return_trans:
return code, transform.transform(data), transform
else:
return code, transform.transform(data)
else:
if return_trans:
return code, transform
else:
return code
|
Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
|
entailment
|
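A hedged usage sketch for process_transform (assumes matplotlib is available and that the function is reachable as a plain helper, as in mplexporter): data drawn with ax.transData comes back tagged "data" with the points unchanged.

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
pts = np.array([[0.0, 0.0], [1.0, 1.0]])
code, out = process_transform(ax.transData, ax=ax, data=pts)
print(code, out)  # 'data' followed by the original points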
def crawl_fig(self, fig):
"""Crawl the figure and process all axes"""
with self.renderer.draw_figure(fig=fig,
props=utils.get_figure_properties(fig)):
for ax in fig.axes:
self.crawl_ax(ax)
|
Crawl the figure and process all axes
|
entailment
|
def crawl_ax(self, ax):
"""Crawl the axes and process all elements within"""
with self.renderer.draw_axes(ax=ax,
props=utils.get_axes_properties(ax)):
for line in ax.lines:
self.draw_line(ax, line)
for text in ax.texts:
self.draw_text(ax, text)
for (text, ttp) in zip([ax.xaxis.label, ax.yaxis.label, ax.title],
["xlabel", "ylabel", "title"]):
if(hasattr(text, 'get_text') and text.get_text()):
self.draw_text(ax, text, force_trans=ax.transAxes,
text_type=ttp)
for artist in ax.artists:
# TODO: process other artists
if isinstance(artist, matplotlib.text.Text):
self.draw_text(ax, artist)
for patch in ax.patches:
self.draw_patch(ax, patch)
for collection in ax.collections:
self.draw_collection(ax, collection)
for image in ax.images:
self.draw_image(ax, image)
legend = ax.get_legend()
if legend is not None:
props = utils.get_legend_properties(ax, legend)
with self.renderer.draw_legend(legend=legend, props=props):
if props['visible']:
self.crawl_legend(ax, legend)
|
Crawl the axes and process all elements within
|
entailment
|
def crawl_legend(self, ax, legend):
"""
Recursively look through objects in legend children
"""
legendElements = list(utils.iter_all_children(legend._legend_box,
skipContainers=True))
legendElements.append(legend.legendPatch)
for child in legendElements:
# force a large zorder so it appears on top
child.set_zorder(1E6 + child.get_zorder())
try:
# What kind of object...
if isinstance(child, matplotlib.patches.Patch):
self.draw_patch(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.text.Text):
if not (child is legend.get_children()[-1]
and child.get_text() == 'None'):
self.draw_text(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.lines.Line2D):
self.draw_line(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.collections.Collection):
self.draw_collection(ax, child,
force_pathtrans=ax.transAxes)
else:
warnings.warn("Legend element %s not impemented" % child)
except NotImplementedError:
warnings.warn("Legend element %s not impemented" % child)
|
Recursively look through objects in legend children
|
entailment
|
def draw_line(self, ax, line, force_trans=None):
"""Process a matplotlib line and call renderer.draw_line"""
coordinates, data = self.process_transform(line.get_transform(),
ax, line.get_xydata(),
force_trans=force_trans)
linestyle = utils.get_line_style(line)
if linestyle['dasharray'] is None:
linestyle = None
markerstyle = utils.get_marker_style(line)
if (markerstyle['marker'] in ['None', 'none', None]
or markerstyle['markerpath'][0].size == 0):
markerstyle = None
label = line.get_label()
if markerstyle or linestyle:
self.renderer.draw_marked_line(data=data, coordinates=coordinates,
linestyle=linestyle,
markerstyle=markerstyle,
label=label,
mplobj=line)
|
Process a matplotlib line and call renderer.draw_line
|
entailment
|
def draw_text(self, ax, text, force_trans=None, text_type=None):
"""Process a matplotlib text object and call renderer.draw_text"""
content = text.get_text()
if content:
transform = text.get_transform()
position = text.get_position()
coords, position = self.process_transform(transform, ax,
position,
force_trans=force_trans)
style = utils.get_text_style(text)
self.renderer.draw_text(text=content, position=position,
coordinates=coords,
text_type=text_type,
style=style, mplobj=text)
|
Process a matplotlib text object and call renderer.draw_text
|
entailment
|
def draw_patch(self, ax, patch, force_trans=None):
"""Process a matplotlib patch object and call renderer.draw_path"""
vertices, pathcodes = utils.SVG_path(patch.get_path())
transform = patch.get_transform()
coordinates, vertices = self.process_transform(transform,
ax, vertices,
force_trans=force_trans)
linestyle = utils.get_path_style(patch, fill=patch.get_fill())
self.renderer.draw_path(data=vertices,
coordinates=coordinates,
pathcodes=pathcodes,
style=linestyle,
mplobj=patch)
|
Process a matplotlib patch object and call renderer.draw_path
|
entailment
|
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
path_coords = self.process_transform(
transform, ax, force_trans=force_pathtrans)
processed_paths = [utils.SVG_path(path) for path in paths]
processed_paths = [(self.process_transform(
transform, ax, path[0], force_trans=force_pathtrans)[1], path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection)
|
Process a matplotlib collection and call renderer.draw_collection
|
entailment
|
def draw_image(self, ax, image):
"""Process a matplotlib image object and call renderer.draw_image"""
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image)
|
Process a matplotlib image object and call renderer.draw_image
|
entailment
|
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
|
Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
|
entailment
|
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
|
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
|
entailment
|
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
|
Build an iterator over the elements of the path collection
|
entailment
|
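The cycle-and-slice idiom above lets per-element style lists shorter than N repeat. A pure-Python illustration:

import itertools

paths = ['p0', 'p1', 'p2']
colors = ['red']    # a single color recycled for every path
N = max(len(paths), len(colors))
elements = (paths, colors)
for path, color in itertools.islice(
        zip(*(itertools.cycle(e) for e in elements)), N):
    print(path, color)  # p0 red / p1 red / p2 red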
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
|
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
|
entailment
|
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
|
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
|
entailment
|
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
|
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
|
entailment
|
def from_times(cls, times, delta_t=DEFAULT_OBSERVATION_TIME):
"""
Create a TimeMOC from an `astropy.time.Time`
Parameters
----------
times : `astropy.time.Time`
astropy observation times
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This value is used to compute the
most efficient TimeMOC order to represent the observations (best order = the least precise order
that is able to discriminate two observations separated by ``delta_t``).
Returns
-------
time_moc : `~mocpy.tmoc.TimeMOC`
"""
times_arr = np.asarray(times.jd * TimeMOC.DAY_MICRO_SEC, dtype=int)
intervals_arr = np.vstack((times_arr, times_arr + 1)).T
# degrade the TimeMOC to the order computed from ``delta_t``
order = TimeMOC.time_resolution_to_order(delta_t)
return TimeMOC(IntervalSet(intervals_arr)).degrade_to_order(order)
|
Create a TimeMOC from an `astropy.time.Time`
Parameters
----------
times : `astropy.time.Time`
astropy observation times
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This value is used to compute the
most efficient TimeMOC order to represent the observations (best order = the least precise order
that is able to discriminate two observations separated by ``delta_t``).
Returns
-------
time_moc : `~mocpy.tmoc.TimeMOC`
|
entailment
|
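The key step in from_times is mapping each Julian date to an integer microsecond index, which is the cell number at the deepest order. A standalone sketch (DAY_MICRO_SEC here mirrors the TimeMOC constant as an assumption; 64-bit integers avoid overflow):

import numpy as np

DAY_MICRO_SEC = 86400 * 1e6            # microseconds per day (assumption)
jd = np.array([2451545.0])             # J2000.0 as a Julian date
cells = np.asarray(jd * DAY_MICRO_SEC, dtype=np.int64)
intervals = np.vstack((cells, cells + 1)).T   # half-open [t, t+1) cells
print(intervals)   # [[211813488000000000 211813488000000001]]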
def from_time_ranges(cls, min_times, max_times, delta_t=DEFAULT_OBSERVATION_TIME):
"""
Create a TimeMOC from a range defined by two `astropy.time.Time`
Parameters
----------
min_times : `astropy.time.Time`
astropy times defining the left part of the intervals
max_times : `astropy.time.Time`
astropy times defining the right part of the intervals
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This value is used to compute the
most efficient TimeMOC order to represent the observations (best order = the least precise order
that is able to discriminate two observations separated by ``delta_t``).
Returns
-------
time_moc : `~mocpy.tmoc.TimeMOC`
"""
min_times_arr = np.asarray(min_times.jd * TimeMOC.DAY_MICRO_SEC, dtype=int)
max_times_arr = np.asarray(max_times.jd * TimeMOC.DAY_MICRO_SEC, dtype=int)
intervals_arr = np.vstack((min_times_arr, max_times_arr + 1)).T
# degrade the TimeMOC to the order computed from ``delta_t``
order = TimeMOC.time_resolution_to_order(delta_t)
return TimeMOC(IntervalSet(intervals_arr)).degrade_to_order(order)
|
Create a TimeMOC from a range defined by two `astropy.time.Time`
Parameters
----------
min_times : `astropy.time.Time`
astropy times defining the left part of the intervals
max_times : `astropy.time.Time`
astropy times defining the right part of the intervals
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This value is used to compute the
most efficient TimeMOC order to represent the observations (best order = the least precise order
that is able to discriminate two observations separated by ``delta_t``).
Returns
-------
time_moc : `~mocpy.tmoc.TimeMOC`
|
entailment
|
def add_neighbours(self):
"""
Add all the pixels at max order in the neighbourhood of the moc
"""
time_delta = 1 << (2*(IntervalSet.HPY_MAX_ORDER - self.max_order))
intervals_arr = self._interval_set._intervals
intervals_arr[:, 0] = np.maximum(intervals_arr[:, 0] - time_delta, 0)
intervals_arr[:, 1] = np.minimum(intervals_arr[:, 1] + time_delta, (1 << 58) - 1)
self._interval_set = IntervalSet(intervals_arr)
|
Add all the pixels at max order in the neighbourhood of the moc
|
entailment
|
def remove_neighbours(self):
"""
Remove all the pixels at max order located at the bound of the moc
"""
time_delta = 1 << (2*(IntervalSet.HPY_MAX_ORDER - self.max_order))
intervals_arr = self._interval_set._intervals
intervals_arr[:, 0] = np.minimum(intervals_arr[:, 0] + time_delta, (1 << 58) - 1)
intervals_arr[:, 1] = np.maximum(intervals_arr[:, 1] - time_delta, 0)
good_intervals = intervals_arr[:, 1] > intervals_arr[:, 0]
self._interval_set = IntervalSet(intervals_arr[good_intervals])
|
Remove all the pixels at max order located at the bound of the moc
|
entailment
|
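Both neighbour methods derive the width of one cell at the current order from the deepest order by a bit shift; each order removed quadruples the width. A quick check (HPY_MAX_ORDER = 29 is an assumption mirroring IntervalSet.HPY_MAX_ORDER, consistent with the (1 << 58) - 1 bound above):

HPY_MAX_ORDER = 29
for order in (29, 28, 27):
    print(order, 1 << (2 * (HPY_MAX_ORDER - order)))   # 1, 4, 16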
def _process_degradation(self, another_moc, order_op):
"""
Degrade (down-sample) self and ``another_moc`` to order ``order_op``
Parameters
----------
another_moc : `~mocpy.tmoc.TimeMoc`
order_op : int
the order to which self and ``another_moc`` will be down-sampled.
Returns
-------
result : (`~mocpy.tmoc.TimeMoc`, `~mocpy.tmoc.TimeMoc`)
self and ``another_moc`` degraded TimeMocs
"""
max_order = max(self.max_order, another_moc.max_order)
if order_op > max_order:
message = 'Requested time resolution for the operation cannot be applied.\n' \
'The TimeMoc object resulting from the operation is of time resolution {0} sec.'.format(
TimeMOC.order_to_time_resolution(max_order).sec)
warnings.warn(message, UserWarning)
self_degradation = self.degrade_to_order(order_op)
another_moc_degradation = another_moc.degrade_to_order(order_op)
result = self_degradation, another_moc_degradation
return result
|
Degrade (down-sample) self and ``another_moc`` to order ``order_op``
Parameters
----------
another_moc : `~mocpy.tmoc.TimeMoc`
order_op : int
the order to which self and ``another_moc`` will be down-sampled.
Returns
-------
result : (`~mocpy.tmoc.TimeMoc`, `~mocpy.tmoc.TimeMoc`)
self and ``another_moc`` degraded TimeMocs
|
entailment
|
def intersection(self, another_moc, delta_t=DEFAULT_OBSERVATION_TIME):
"""
Intersection between self and ``another_moc``. ``delta_t`` lets the user set
a time resolution for performing the TMOC intersection.
Parameters
----------
another_moc : `~mocpy.abstract_moc.AbstractMOC`
the MOC/TimeMOC used for performing the intersection with self
delta_t : `~astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This value is used to compute the
most efficient TimeMOC order to represent the observations (best order = the least precise order
that is able to discriminate two observations separated by ``delta_t``).
Returns
-------
result : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
MOC object whose interval set corresponds to: self & ``another_moc``
"""
order_op = TimeMOC.time_resolution_to_order(delta_t)
self_degraded, moc_degraded = self._process_degradation(another_moc, order_op)
return super(TimeMOC, self_degraded).intersection(moc_degraded)
|
Intersection between self and ``another_moc``. ``delta_t`` lets the user set
a time resolution for performing the TMOC intersection.
Parameters
----------
another_moc : `~mocpy.abstract_moc.AbstractMOC`
the MOC/TimeMOC used for performing the intersection with self
delta_t : `~astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This value is used to compute the
most efficient TimeMOC order to represent the observations (best order = the least precise order
that is able to discriminate two observations separated by ``delta_t``).
Returns
-------
result : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
MOC object whose interval set corresponds to: self & ``another_moc``
|
entailment
|
def total_duration(self):
"""
Get the total duration covered by the temporal moc
Returns
-------
duration : `~astropy.time.TimeDelta`
total duration of all the observation times of the tmoc
"""
if self._interval_set.empty():
return 0
total_time_us = 0
# The interval set is checked for consistency before looping over all the intervals
for (start_time, stop_time) in self._interval_set._intervals:
total_time_us = total_time_us + (stop_time - start_time)
duration = TimeDelta(total_time_us / 1e6, format='sec', scale='tdb')
return duration
|
Get the total duration covered by the temporal moc
Returns
-------
duration : `~astropy.time.TimeDelta`
total duration of all the observation times of the tmoc
|
entailment
|
def consistency(self):
"""
Get the fill percentage between the min and max times over which the moc is defined.
A value near 0 indicates a sparse temporal moc (i.e. the moc does not cover a lot
of time and covers very distant times). A value near 1 means that the moc covers
a lot of time without big gaps.
Returns
-------
result : float
fill percentage (between 0 and 1.)
"""
result = self.total_duration.jd / (self.max_time - self.min_time).jd
return result
|
Get the fill percentage between the min and max times over which the moc is defined.
A value near 0 indicates a sparse temporal moc (i.e. the moc does not cover a lot
of time and covers very distant times). A value near 1 means that the moc covers
a lot of time without big gaps.
Returns
-------
result : float
fill percentage (between 0 and 1.)
|
entailment
|
def min_time(self):
"""
Get the `~astropy.time.Time` time of the tmoc's first observation
Returns
-------
min_time : `astropy.time.Time`
time of the first observation
"""
min_time = Time(self._interval_set.min / TimeMOC.DAY_MICRO_SEC, format='jd', scale='tdb')
return min_time
|
Get the `~astropy.time.Time` time of the tmoc's first observation
Returns
-------
min_time : `astropy.time.Time`
time of the first observation
|
entailment
|
def max_time(self):
"""
Get the `~astropy.time.Time` time of the tmoc's last observation
Returns
-------
max_time : `~astropy.time.Time`
time of the last observation
"""
max_time = Time(self._interval_set.max / TimeMOC.DAY_MICRO_SEC, format='jd', scale='tdb')
return max_time
|
Get the `~astropy.time.Time` time of the tmoc's last observation
Returns
-------
max_time : `~astropy.time.Time`
time of the last observation
|
entailment
|
def contains(self, times, keep_inside=True, delta_t=DEFAULT_OBSERVATION_TIME):
"""
Get a mask array (i.e. a numpy boolean array) of times being inside (or outside) the
TMOC instance.
Parameters
----------
times : `astropy.time.Time`
astropy times to check whether they are contained in the TMOC or not.
keep_inside : bool, optional
True by default. If so, the filtered table contains only observations that are located inside the MOC.
If ``keep_inside`` is False, the filtered table contains all observations lying outside the MOC.
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This value is used to compute the
most efficient TimeMOC order to represent the observations (best order = the least precise order
that is able to discriminate two observations separated by ``delta_t``).
Returns
-------
array : `~numpy.ndarray`
A mask boolean array
"""
# the requested order for filtering the astropy observations table is more precise than the order
# of the TimeMoc object
current_max_order = self.max_order
new_max_order = TimeMOC.time_resolution_to_order(delta_t)
if new_max_order > current_max_order:
message = 'Requested time resolution filtering cannot be applied.\n' \
'Filtering is applied with a time resolution of {0} sec.'.format(
TimeMOC.order_to_time_resolution(current_max_order).sec)
warnings.warn(message, UserWarning)
rough_tmoc = self.degrade_to_order(new_max_order)
pix_arr = (times.jd * TimeMOC.DAY_MICRO_SEC)
pix_arr = pix_arr.astype(int)
intervals_arr = rough_tmoc._interval_set._intervals
inf_arr = np.vstack([pix_arr[i] >= intervals_arr[:, 0] for i in range(pix_arr.shape[0])])
sup_arr = np.vstack([pix_arr[i] <= intervals_arr[:, 1] for i in range(pix_arr.shape[0])])
if keep_inside:
res = inf_arr & sup_arr
filtered_rows = np.any(res, axis=1)
else:
res = ~inf_arr | ~sup_arr
filtered_rows = np.all(res, axis=1)
return filtered_rows
|
Get a mask array (i.e. a numpy boolean array) of times being inside (or outside) the
TMOC instance.
Parameters
----------
times : `astropy.time.Time`
astropy times to check whether they are contained in the TMOC or not.
keep_inside : bool, optional
True by default. If so, the filtered table contains only observations that are located inside the MOC.
If ``keep_inside`` is False, the filtered table contains all observations lying outside the MOC.
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This value is used to compute the
most efficient TimeMOC order to represent the observations (best order = the least precise order
that is able to discriminate two observations separated by ``delta_t``).
Returns
-------
array : `~numpy.ndarray`
A mask boolean array
|
entailment
|
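A hedged end-to-end sketch of contains (assumes mocpy and astropy are installed; the import path may differ between mocpy versions): build a TMOC from two observation times, then test which probe times fall inside it.

from astropy.time import Time, TimeDelta
from mocpy import TimeMOC

obs = Time(['2019-01-01T00:00:00', '2019-01-01T06:00:00'], scale='tdb')
tmoc = TimeMOC.from_times(obs, delta_t=TimeDelta(1800, format='sec'))

probe = Time(['2019-01-01T00:00:00', '2019-06-01T00:00:00'], scale='tdb')
print(tmoc.contains(probe))   # [ True False]: only the first probe is covered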
def plot(self, title='TimeMoc', view=(None, None)):
"""
Plot the TimeMoc in a time window.
This method uses interactive matplotlib. The user can move their mouse over the plot to see the
time at the mouse position.
Parameters
----------
title : str, optional
The title of the plot. Set to 'TimeMoc' by default.
view : (`~astropy.time.Time`, `~astropy.time.Time`), optional
Define the view window in which the observations are plotted. Set to (None, None) by default (i.e.
the whole observation time window is rendered).
"""
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.pyplot as plt
if self._interval_set.empty():
print('Nothing to print. This TimeMoc object is empty.')
return
plot_order = 15
if self.max_order > plot_order:
plotted_moc = self.degrade_to_order(plot_order)
else:
plotted_moc = self
min_jd = plotted_moc.min_time.jd if not view[0] else view[0].jd
max_jd = plotted_moc.max_time.jd if not view[1] else view[1].jd
if max_jd < min_jd:
raise ValueError("Invalid selection: max_jd = {0} must be > to min_jd = {1}".format(max_jd, min_jd))
fig1 = plt.figure(figsize=(9.5, 5))
ax = fig1.add_subplot(111)
ax.set_xlabel('iso')
ax.get_yaxis().set_visible(False)
size = 2000
delta = (max_jd - min_jd) / size
min_jd_time = min_jd
ax.set_xticks([0, size])
ax.set_xticklabels(Time([min_jd_time, max_jd], format='jd', scale='tdb').iso, rotation=70)
y = np.zeros(size)
for (s_time_us, e_time_us) in plotted_moc._interval_set._intervals:
s_index = int((s_time_us / TimeMOC.DAY_MICRO_SEC - min_jd_time) / delta)
e_index = int((e_time_us / TimeMOC.DAY_MICRO_SEC - min_jd_time) / delta)
y[s_index:(e_index+1)] = 1.0
# hack in case of full time mocs.
if np.all(y):
y[0] = 0
z = np.tile(y, (int(size//10), 1))
plt.title(title)
color_map = LinearSegmentedColormap.from_list('w2r', ['#fffff0', '#aa0000'])
color_map.set_under('w')
color_map.set_bad('gray')
plt.imshow(z, interpolation='bilinear', cmap=color_map)
def on_mouse_motion(event):
for txt in ax.texts:
txt.set_visible(False)
text = ax.text(0, 0, "", va="bottom", ha="left")
time = Time(event.xdata * delta + min_jd_time, format='jd', scale='tdb')
tx = '{0}'.format(time.iso)
text.set_position((event.xdata - 50, 700))
text.set_rotation(70)
text.set_text(tx)
cid = fig1.canvas.mpl_connect('motion_notify_event', on_mouse_motion)
plt.show()
|
Plot the TimeMoc in a time window.
This method uses interactive matplotlib. The user can move the mouse over the plot to see the
time at the cursor position.
Parameters
----------
title : str, optional
The title of the plot. Set to 'TimeMoc' by default.
view : (`~astropy.time.Time`, `~astropy.time.Time`), optional
Define the view window in which the observations are plotted. Set to (None, None) by default (i.e.
all the observation time window is rendered).
|
entailment
|
def handle(self, client, subhooks=()):
"""Handle a new update.
Fetches new data from the client, then compares it to the previous
lookup.
Returns:
(bool, new_data): whether changes occurred, and the new value.
"""
new_data = self.fetch(client)
# Holds the list of updated fields.
updated = {}
if not subhooks:
# We always want to compare to previous values.
subhooks = [self.name]
for subhook in subhooks:
new_key = self.extract_key(new_data, subhook)
if new_key != self.previous_keys.get(subhook):
updated[subhook] = new_key
if updated:
logger.debug("Hook %s: data changed from %r to %r", self.name, self.previous_keys, updated)
self.previous_keys.update(updated)
return (True, new_data)
return (False, None)
|
Handle a new update.
Fetches new data from the client, then compares it to the previous
lookup.
Returns:
(bool, new_data): whether changes occurred, and the new value.
|
entailment
|
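A minimal sketch of the pieces handle() expects its class to provide; the hook name, the callable "client" and the 'rev' field below are hypothetical, and the handle function above is assumed to be attachable in the same module:

import logging

logger = logging.getLogger(__name__)

class StatusHook:
    name = 'status'

    def __init__(self):
        self.previous_keys = {}

    def fetch(self, client):
        return client()  # hypothetical: here the client is just a callable

    def extract_key(self, data, subhook):
        return data.get('rev')

StatusHook.handle = handle  # reuse the method defined above

hook = StatusHook()
hook.handle(lambda: {'rev': 1})  # -> (True, {'rev': 1})
hook.handle(lambda: {'rev': 1})  # -> (False, None): 'rev' unchanged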
def _text_to_vbo(text, font, anchor_x, anchor_y, lowres_size):
"""Convert text characters to VBO"""
# Necessary to flush commands before requesting the current viewport because
# there may be a set_viewport command waiting in the queue.
# TODO: would be nicer if each canvas just remembers and manages its own
# viewport, rather than relying on the context for this.
canvas = context.get_current_canvas()
canvas.context.flush_commands()
text_vtype = np.dtype([('a_position', np.float32, 2),
('a_texcoord', np.float32, 2)])
vertices = np.zeros(len(text) * 4, dtype=text_vtype)
prev = None
width = height = ascender = descender = 0
ratio, slop = 1. / font.ratio, font.slop
x_off = -slop
# Need to make sure we have a unicode string here (Py2.7 mis-interprets
# characters like "•" otherwise)
if sys.version[0] == '2' and isinstance(text, str):
text = text.decode('utf-8')
# Need to store the original viewport, because the font[char] will
# trigger SDF rendering, which changes our viewport
# todo: get rid of call to glGetParameter!
orig_viewport = canvas.context.get_viewport()
for ii, char in enumerate(text):
glyph = font[char]
kerning = glyph['kerning'].get(prev, 0.) * ratio
x0 = x_off + glyph['offset'][0] * ratio + kerning
y0 = glyph['offset'][1] * ratio + slop
x1 = x0 + glyph['size'][0]
y1 = y0 - glyph['size'][1]
u0, v0, u1, v1 = glyph['texcoords']
position = [[x0, y0], [x0, y1], [x1, y1], [x1, y0]]
texcoords = [[u0, v0], [u0, v1], [u1, v1], [u1, v0]]
vi = ii * 4
vertices['a_position'][vi:vi+4] = position
vertices['a_texcoord'][vi:vi+4] = texcoords
x_move = glyph['advance'] * ratio + kerning
x_off += x_move
ascender = max(ascender, y0 - slop)
descender = min(descender, y1 + slop)
width += x_move
height = max(height, glyph['size'][1] - 2*slop)
prev = char
# Also analyse chars with large ascender and descender, otherwise the
# vertical alignment can be very inconsistent
for char in 'hy':
glyph = font[char]
y0 = glyph['offset'][1] * ratio + slop
y1 = y0 - glyph['size'][1]
ascender = max(ascender, y0 - slop)
descender = min(descender, y1 + slop)
height = max(height, glyph['size'][1] - 2*slop)
if orig_viewport is not None:
canvas.context.set_viewport(*orig_viewport)
# Tight bounding box (loose would be width, font.height /.asc / .desc)
width -= glyph['advance'] * ratio - (glyph['size'][0] - 2*slop)
dx = dy = 0
if anchor_y == 'top':
dy = -ascender
elif anchor_y in ('center', 'middle'):
dy = -(height / 2 + descender)
elif anchor_y == 'bottom':
dy = -descender
# Already referenced to baseline
# elif anchor_y == 'baseline':
# dy = -descender
if anchor_x == 'right':
dx = -width
elif anchor_x == 'center':
dx = -width / 2.
vertices['a_position'] += (dx, dy)
vertices['a_position'] /= lowres_size
return vertices
|
Convert text characters to VBO
|
entailment
|
def _load_char(self, char):
"""Build and store a glyph corresponding to an individual character
Parameters
----------
char : str
A single character to be represented.
"""
assert isinstance(char, string_types) and len(char) == 1
assert char not in self._glyphs
# load new glyph data from font
_load_glyph(self._font, char, self._glyphs)
# put new glyph into the texture
glyph = self._glyphs[char]
bitmap = glyph['bitmap']
# convert to padded array
data = np.zeros((bitmap.shape[0] + 2*self._spread,
bitmap.shape[1] + 2*self._spread), np.uint8)
data[self._spread:-self._spread, self._spread:-self._spread] = bitmap
# Store, while scaling down to proper size
height = data.shape[0] // self.ratio
width = data.shape[1] // self.ratio
region = self._atlas.get_free_region(width + 2, height + 2)
if region is None:
raise RuntimeError('Cannot store glyph')
x, y, w, h = region
x, y, w, h = x + 1, y + 1, w - 2, h - 2
self._renderer.render_to_texture(data, self._atlas, (x, y), (w, h))
u0 = x / float(self._atlas.shape[1])
v0 = y / float(self._atlas.shape[0])
u1 = (x+w) / float(self._atlas.shape[1])
v1 = (y+h) / float(self._atlas.shape[0])
texcoords = (u0, v0, u1, v1)
glyph.update(dict(size=(w, h), texcoords=texcoords))
|
Build and store a glyph corresponding to an individual character
Parameters
----------
char : str
A single character to be represented.
|
entailment
|
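The normalized texture coordinates at the end of _load_char come down to dividing the glyph's atlas region by the atlas dimensions. A quick sketch with made-up numbers (self._atlas.shape is (rows, cols)):

atlas_shape = (256, 512)  # hypothetical atlas: 256 rows, 512 columns
x, y, w, h = 100, 40, 32, 48  # region assumed returned for the glyph
u0 = x / float(atlas_shape[1])          # 0.1953125
v0 = y / float(atlas_shape[0])          # 0.15625
u1 = (x + w) / float(atlas_shape[1])    # 0.2578125
v1 = (y + h) / float(atlas_shape[0])    # 0.34375
texcoords = (u0, v0, u1, v1)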
def get_font(self, face, bold=False, italic=False):
"""Get a font described by face and size"""
key = '%s-%s-%s' % (face, bold, italic)
if key not in self._fonts:
font = dict(face=face, bold=bold, italic=italic)
self._fonts[key] = TextureFont(font, self._renderer)
return self._fonts[key]
|
Get a font described by face and size
|
entailment
|
def stft(x, n_fft=1024, step=512, fs=2*np.pi, window='hann'):
"""Compute the STFT
Parameters
----------
x : array-like
1D signal to operate on. If ``len(x) < n_fft``, x will be zero-padded
to length ``n_fft``.
n_fft : int
Number of FFT points. Much faster for powers of two.
step : int | None
Step size between calculations. If None, ``n_fft // 2``
will be used.
fs : float
The sample rate of the data.
window : str | None
Window function to use. Can be ``'hann'`` for Hann window, or None
for no windowing.
Returns
-------
stft : ndarray
Spectrogram of the data, shape (n_freqs, n_steps).
See also
--------
fft_freqs
"""
x = np.asarray(x, float)
if x.ndim != 1:
raise ValueError('x must be 1D')
if window is not None:
if window not in ('hann',):
raise ValueError('window must be "hann" or None')
w = np.hanning(n_fft)
else:
w = np.ones(n_fft)
n_fft = int(n_fft)
step = max(n_fft // 2, 1) if step is None else int(step)
fs = float(fs)
zero_pad = n_fft - len(x)
if zero_pad > 0:
x = np.concatenate((x, np.zeros(zero_pad, float)))
n_freqs = n_fft // 2 + 1
n_estimates = (len(x) - n_fft) // step + 1
result = np.empty((n_freqs, n_estimates), np.complex128)
for ii in range(n_estimates):
result[:, ii] = np.fft.rfft(w * x[ii * step:ii * step + n_fft]) / n_fft
return result
|
Compute the STFT
Parameters
----------
x : array-like
1D signal to operate on. If ``len(x) < n_fft``, x will be zero-padded
to length ``n_fft``.
n_fft : int
Number of FFT points. Much faster for powers of two.
step : int | None
Step size between calculations. If None, ``n_fft // 2``
will be used.
fs : float
The sample rate of the data.
window : str | None
Window function to use. Can be ``'hann'`` for Hann window, or None
for no windowing.
Returns
-------
stft : ndarray
Spectrogram of the data, shape (n_freqs, n_steps).
See also
--------
fft_freqs
|
entailment
|
def fft_freqs(n_fft, fs):
"""Return frequencies for DFT
Parameters
----------
n_fft : int
Number of points in the FFT.
fs : float
The sampling rate.
"""
return np.arange(0, (n_fft // 2 + 1)) / float(n_fft) * float(fs)
|
Return frequencies for DFT
Parameters
----------
n_fft : int
Number of points in the FFT.
fs : float
The sampling rate.
|
entailment
|
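A short usage sketch pairing stft with fft_freqs: a pure 100 Hz tone should peak in the frequency bin nearest 100 Hz.

import numpy as np

fs = 1000.0
t = np.arange(0, 1, 1 / fs)
x = np.sin(2 * np.pi * 100 * t)             # 100 Hz tone
spec = stft(x, n_fft=256, step=128, fs=fs)  # shape (129, 6)
freqs = fft_freqs(256, fs)                  # 129 bins from 0 to 500 Hz
peak = freqs[np.abs(spec).mean(axis=1).argmax()]
# peak is the bin nearest 100 Hz (101.5625 Hz with this n_fft)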
def set_data(self, pos=None, color=None, width=None, connect=None,
arrows=None):
"""Set the data used for this visual
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
Can also be a colormap name, or appropriate `Function`.
width : float
The width of the line in px. Line widths > 1px are only
guaranteed to work when using 'agg' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment.
* numpy arrays specify the exact set of segment pairs to
connect.
arrows : array
A Nx4 matrix where each row contains the x and y coordinate of the
first and second vertex of the arrow body. Remember that the second
vertex is used as center point for the arrow head, and the first
vertex is only used for determining the arrow head orientation.
"""
if arrows is not None:
self._arrows = arrows
self._arrows_changed = True
LineVisual.set_data(self, pos, color, width, connect)
|
Set the data used for this visual
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
Can also be a colormap name, or appropriate `Function`.
width : float
The width of the line in px. Line widths > 1px are only
guaranteed to work when using 'agg' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment.
* numpy arrays specify the exact set of segment pairs to
connect.
arrows : array
A Nx4 matrix where each row contains the x and y coordinate of the
first and second vertex of the arrow body. Remember that the second
vertex is used as center point for the arrow head, and the first
vertex is only used for determining the arrow head orientation.
|
entailment
|
def strip_html(text):
""" Get rid of ugly twitter html """
def reply_to(text):
replying_to = []
message = []  # stays empty if every token is a mention (was unbound before)
split_text = text.split()
for index, token in enumerate(split_text):
if token.startswith('@'):
replying_to.append(token[1:])
else:
message = split_text[index:]
break
rply_msg = ""
if len(replying_to) > 0:
rply_msg = "Replying to "
for token in replying_to[:-1]: rply_msg += token+","
if len(replying_to)>1: rply_msg += 'and '
rply_msg += replying_to[-1]+". "
return rply_msg + " ".join(message)
text = reply_to(text)
text = text.replace('@', ' ')
return " ".join([token for token in text.split()
if ('http:' not in token) and ('https:' not in token)])
|
Get rid of ugly twitter html
|
entailment
|
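A worked example of strip_html (note the quirk that the joined mentions read 'alice,and bob'):

tweet = "@alice @bob check this out http://t.co/xyz"
strip_html(tweet)
# -> 'Replying to alice,and bob. check this out'
# leading mentions become a spoken-style prefix; bare links are dropped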
def post_tweet(user_id, message, additional_params={}):
"""
Helper function to post a tweet
"""
url = "https://api.twitter.com/1.1/statuses/update.json"
params = { "status" : message }
params.update(additional_params)
r = make_twitter_request(url, user_id, params, request_type='POST')
print(r.text)
return "Successfully posted a tweet {}".format(message)
|
Helper function to post a tweet
|
entailment
|
def make_twitter_request(url, user_id, params={}, request_type='GET'):
""" Generically make a request to twitter API using a particular user's authorization """
if request_type == "GET":
return requests.get(url, auth=get_twitter_auth(user_id), params=params)
elif request_type == "POST":
return requests.post(url, auth=get_twitter_auth(user_id), params=params)
|
Generically make a request to twitter API using a particular user's authorization
|
entailment
|
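A hedged usage sketch: get_twitter_auth is assumed to return a requests-compatible auth object for the user, and the endpoint below is the standard v1.1 credentials check.

url = "https://api.twitter.com/1.1/account/verify_credentials.json"
response = make_twitter_request(url, user_id='some-user-id')  # hypothetical id
if response.status_code == 200:
    print(response.json()['screen_name'])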
def geo_search(user_id, search_location):
"""
Search for a location - free form
"""
url = "https://api.twitter.com/1.1/geo/search.json"
params = {"query" : search_location }
response = make_twitter_request(url, user_id, params).json()
return response
|
Search for a location - free form
|
entailment
|
def read_out_tweets(processed_tweets, speech_convertor=None):
"""
Input - list of processed 'Tweets'
output - list of spoken responses
"""
return ["tweet number {num} by {user}. {text}.".format(num=index+1, user=user, text=text)
for index, (user, text) in enumerate(processed_tweets)]
|
Input - list of processed 'Tweets'
output - list of spoken responses
|
entailment
|
def search_for_tweets_about(user_id, params):
""" Search twitter API """
url = "https://api.twitter.com/1.1/search/tweets.json"
response = make_twitter_request(url, user_id, params)
return process_tweets(response.json()["statuses"])
|
Search twitter API
|
entailment
|
def add_val(self, val):
"""add value in form of dict"""
if not isinstance(val, dict):
raise ValueError('val must be a dict, got %s' % type(val))
self.read()
self.config.update(val)
self.save()
|
add value in form of dict
|
entailment
|
def read_mesh(fname):
"""Read mesh data from file.
Parameters
----------
fname : str
File name to read. Format will be inferred from the filename.
Currently only '.obj' and '.obj.gz' are supported.
Returns
-------
vertices : array
Vertices.
faces : array | None
Triangle face definitions.
normals : array
Normals for the mesh.
texcoords : array | None
Texture coordinates.
"""
# Check format
fmt = op.splitext(fname)[1].lower()
if fmt == '.gz':
fmt = op.splitext(op.splitext(fname)[0])[1].lower()
if fmt in ('.obj',):
return WavefrontReader.read(fname)
elif not fmt:
raise ValueError('read_mesh could not determine the file format.')
else:
raise ValueError('read_mesh does not understand format %s.' % fmt)
|
Read mesh data from file.
Parameters
----------
fname : str
File name to read. Format will be inferred from the filename.
Currently only '.obj' and '.obj.gz' are supported.
Returns
-------
vertices : array
Vertices.
faces : array | None
Triangle face definitions.
normals : array
Normals for the mesh.
texcoords : array | None
Texture coordinates.
|
entailment
|
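Typical usage, with a hypothetical file name (the format is inferred from the '.obj' suffix):

vertices, faces, normals, texcoords = read_mesh('teapot.obj')  # hypothetical file
print(vertices.shape, None if faces is None else faces.shape)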
def write_mesh(fname, vertices, faces, normals, texcoords, name='',
format='obj', overwrite=False, reshape_faces=True):
""" Write mesh data to file.
Parameters
----------
fname : str
Filename to write. Must end with ".obj" or ".gz".
vertices : array
Vertices.
faces : array | None
Triangle face definitions.
normals : array
Normals for the mesh.
texcoords : array | None
Texture coordinates.
name : str
Name of the object.
format : str
Currently only "obj" is supported.
overwrite : bool
If the file exists, overwrite it.
reshape_faces : bool
Reshape the `faces` array to (Nf, 3). Set to `False`
if you need to write a mesh with non triangular faces.
"""
# Check file
if op.isfile(fname) and not overwrite:
raise IOError('file "%s" exists, use overwrite=True' % fname)
# Check format
if format not in ('obj',):
raise ValueError('Only "obj" format writing currently supported')
WavefrontWriter.write(fname, vertices, faces,
normals, texcoords, name, reshape_faces)
|
Write mesh data to file.
Parameters
----------
fname : str
Filename to write. Must end with ".obj" or ".gz".
vertices : array
Vertices.
faces : array | None
Triangle face definitions.
normals : array
Normals for the mesh.
texcoords : array | None
Texture coordinates.
name : str
Name of the object.
format : str
Currently only "obj" is supported.
overwrite : bool
If the file exists, overwrite it.
reshape_faces : bool
Reshape the `faces` array to (Nf, 3). Set to `False`
if you need to write a mesh with non triangular faces.
|
entailment
|
def _set_config(config):
"""Set gl configuration"""
pyglet_config = pyglet.gl.Config()
pyglet_config.red_size = config['red_size']
pyglet_config.green_size = config['green_size']
pyglet_config.blue_size = config['blue_size']
pyglet_config.alpha_size = config['alpha_size']
pyglet_config.accum_red_size = 0
pyglet_config.accum_green_size = 0
pyglet_config.accum_blue_size = 0
pyglet_config.accum_alpha_size = 0
pyglet_config.depth_size = config['depth_size']
pyglet_config.stencil_size = config['stencil_size']
pyglet_config.double_buffer = config['double_buffer']
pyglet_config.stereo = config['stereo']
pyglet_config.samples = config['samples']
return pyglet_config
|
Set gl configuration
|
entailment
|
def set_shaders(self, vert, frag):
""" Set the vertex and fragment shaders.
Parameters
----------
vert : str
Source code for vertex shader.
frag : str
Source code for fragment shaders.
"""
if not vert or not frag:
raise ValueError('Vertex and fragment code must both be non-empty')
# pre-process shader code for #include directives
vert, frag = preprocess(vert), preprocess(frag)
# Store source code, send it to glir, parse the code for variables
self._shaders = vert, frag
self._glir.command('SHADERS', self._id, vert, frag)
# All current variables become pending variables again
for key, val in self._user_variables.items():
self._pending_variables[key] = val
self._user_variables = {}
# Parse code (and process pending variables)
self._parse_variables_from_code()
|
Set the vertex and fragment shaders.
Parameters
----------
vert : str
Source code for vertex shader.
frag : str
Source code for fragment shaders.
|
entailment
|
def _parse_variables_from_code(self):
""" Parse uniforms, attributes and varyings from the source code.
"""
# Get one string of code with comments removed
code = '\n\n'.join(self._shaders)
code = re.sub(r'(.*)(//.*)', r'\1', code, flags=re.M)  # re.M must be passed as flags; the 4th positional argument of re.sub is count
# Regexp to look for variable names
var_regexp = (r"\s*VARIABLE\s+"  # kind of variable
r"((highp|mediump|lowp)\s+)?"  # Precision (optional)
r"(?P<type>\w+)\s+"  # type
r"(?P<name>\w+)\s*"  # name
r"(\[(?P<size>\d+)\])?"  # size (optional)
r"(\s*\=\s*[0-9.]+)?"  # default value (optional)
r"\s*;"  # end
)
# Parse uniforms, attributes and varyings
self._code_variables = {}
for kind in ('uniform', 'attribute', 'varying', 'const'):
regex = re.compile(var_regexp.replace('VARIABLE', kind),
flags=re.MULTILINE)
for m in re.finditer(regex, code):
gtype = m.group('type')
size = int(m.group('size')) if m.group('size') else -1
this_kind = kind
if size >= 1:
# uniform arrays get added both as individuals and full
for i in range(size):
name = '%s[%d]' % (m.group('name'), i)
self._code_variables[name] = kind, gtype, name, -1
this_kind = 'uniform_array'
name = m.group('name')
self._code_variables[name] = this_kind, gtype, name, size
# Now that our code variables are up-to date, we can process
# the variables that were set but yet unknown.
self._process_pending_variables()
|
Parse uniforms, attributes and varyings from the source code.
|
entailment
|
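To see what the template matches, here is the uniform-flavoured variant of var_regexp checked against a single declaration:

import re

uniform_re = re.compile(
    r"\s*uniform\s+((highp|mediump|lowp)\s+)?(?P<type>\w+)\s+"
    r"(?P<name>\w+)\s*(\[(?P<size>\d+)\])?(\s*\=\s*[0-9.]+)?\s*;",
    flags=re.MULTILINE)
m = uniform_re.search('uniform highp vec3 u_light_position;')
print(m.group('type'), m.group('name'))  # -> vec3 u_light_position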
def bind(self, data):
""" Bind a VertexBuffer that has structured data
Parameters
----------
data : VertexBuffer
The vertex buffer to bind. The field names of the array
are mapped to attribute names in GLSL.
"""
# Check
if not isinstance(data, VertexBuffer):
raise ValueError('Program.bind() requires a VertexBuffer.')
# Apply
for name in data.dtype.names:
self[name] = data[name]
|
Bind a VertexBuffer that has structured data
Parameters
----------
data : VertexBuffer
The vertex buffer to bind. The field names of the array
are mapped to attribute names in GLSL.
|
entailment
|
def _process_pending_variables(self):
""" Try to apply the variables that were set but not known yet.
"""
# Clear our list of pending variables
self._pending_variables, pending = {}, self._pending_variables
# Try to apply it. On failure, it will be added again
for name, data in pending.items():
self[name] = data
|
Try to apply the variables that were set but not known yet.
|
entailment
|
def draw(self, mode='triangles', indices=None, check_error=True):
""" Draw the attribute arrays in the specified mode.
Parameters
----------
mode : str | GL_ENUM
'points', 'lines', 'line_strip', 'line_loop', 'triangles',
'triangle_strip', or 'triangle_fan'.
indices : array
Array of indices to draw.
check_error : bool
Check error after draw.
"""
# Invalidate buffer (data has already been sent)
self._buffer = None
# Check if mode is valid
mode = check_enum(mode)
if mode not in ['points', 'lines', 'line_strip', 'line_loop',
'triangles', 'triangle_strip', 'triangle_fan']:
raise ValueError('Invalid draw mode: %r' % mode)
# Check leftover variables, warn, discard them
# In GLIR we check whether all attributes are indeed set
for name in self._pending_variables:
logger.warning('Variable %r is given but not known.' % name)
self._pending_variables = {}
# Check attribute sizes
attributes = [vbo for vbo in self._user_variables.values()
if isinstance(vbo, DataBuffer)]
sizes = [a.size for a in attributes]
if len(attributes) < 1:
raise RuntimeError('Must have at least one attribute')
if not all(s == sizes[0] for s in sizes[1:]):
msg = '\n'.join(['%s: %s' % (str(a), a.size) for a in attributes])
raise RuntimeError('All attributes must have the same size, got:\n'
'%s' % msg)
# Get the glir queue that we need now
canvas = get_current_canvas()
assert canvas is not None
# Associate canvas
canvas.context.glir.associate(self.glir)
# Indexbuffer
if isinstance(indices, IndexBuffer):
canvas.context.glir.associate(indices.glir)
logger.debug("Program drawing %r with index buffer" % mode)
gltypes = {np.dtype(np.uint8): 'UNSIGNED_BYTE',
np.dtype(np.uint16): 'UNSIGNED_SHORT',
np.dtype(np.uint32): 'UNSIGNED_INT'}
selection = indices.id, gltypes[indices.dtype], indices.size
canvas.context.glir.command('DRAW', self._id, mode, selection)
elif indices is None:
selection = 0, attributes[0].size
logger.debug("Program drawing %r with %r" % (mode, selection))
canvas.context.glir.command('DRAW', self._id, mode, selection)
else:
raise TypeError("Invalid index: %r (must be IndexBuffer)" %
indices)
# Process GLIR commands
canvas.context.flush_commands()
|
Draw the attribute arrays in the specified mode.
Parameters
----------
mode : str | GL_ENUM
'points', 'lines', 'line_strip', 'line_loop', 'triangles',
'triangle_strip', or 'triangle_fan'.
indices : array
Array of indices to draw.
check_error : bool
Check error after draw.
|
entailment
|
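A minimal sketch of the draw path using vispy's gloo API (assumes a working OpenGL backend; the shaders are the simplest possible pass-through):

import numpy as np
from vispy import app, gloo

vert = "attribute vec2 a_position; void main() { gl_Position = vec4(a_position, 0.0, 1.0); }"
frag = "void main() { gl_FragColor = vec4(1.0, 0.5, 0.0, 1.0); }"

canvas = app.Canvas(show=True)
program = gloo.Program(vert, frag)
program['a_position'] = np.array([[-1, -1], [1, -1], [-1, 1]], np.float32)

@canvas.connect
def on_draw(event):
    gloo.clear('black')
    program.draw('triangles')

# app.run()  # uncomment to start the event loop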
def set_data(self, x=None, y=None, z=None, colors=None):
"""Update the data in this surface plot.
Parameters
----------
x : ndarray | None
1D array of values specifying the x positions of vertices in the
grid. If None, values will be assumed to be integers.
y : ndarray | None
1D array of values specifying the x positions of vertices in the
grid. If None, values will be assumed to be integers.
z : ndarray
2D array of height values for each grid vertex.
colors : ndarray
(width, height, 4) array of vertex colors.
"""
if x is not None:
if self._x is None or len(x) != len(self._x):
self.__vertices = None
self._x = x
if y is not None:
if self._y is None or len(y) != len(self._y):
self.__vertices = None
self._y = y
if z is not None:
if self._x is not None and z.shape[0] != len(self._x):
raise TypeError('Z values must have shape (len(x), len(y))')
if self._y is not None and z.shape[1] != len(self._y):
raise TypeError('Z values must have shape (len(x), len(y))')
self._z = z
if (self.__vertices is not None and
self._z.shape != self.__vertices.shape[:2]):
self.__vertices = None
if self._z is None:
return
update_mesh = False
new_vertices = False
# Generate vertex and face array
if self.__vertices is None:
new_vertices = True
self.__vertices = np.empty((self._z.shape[0], self._z.shape[1], 3),
dtype=np.float32)
self.generate_faces()
self.__meshdata.set_faces(self.__faces)
update_mesh = True
# Copy x, y, z data into vertex array
if new_vertices or x is not None:
if x is None:
if self._x is None:
x = np.arange(self._z.shape[0])
else:
x = self._x
self.__vertices[:, :, 0] = x.reshape(len(x), 1)
update_mesh = True
if new_vertices or y is not None:
if y is None:
if self._y is None:
y = np.arange(self._z.shape[1])
else:
y = self._y
self.__vertices[:, :, 1] = y.reshape(1, len(y))
update_mesh = True
if new_vertices or z is not None:
self.__vertices[..., 2] = self._z
update_mesh = True
if colors is not None:
self.__meshdata.set_vertex_colors(colors)
update_mesh = True
# Update MeshData
if update_mesh:
self.__meshdata.set_vertices(
self.__vertices.reshape(self.__vertices.shape[0] *
self.__vertices.shape[1], 3))
MeshVisual.set_data(self, meshdata=self.__meshdata)
|
Update the data in this surface plot.
Parameters
----------
x : ndarray | None
1D array of values specifying the x positions of vertices in the
grid. If None, values will be assumed to be integers.
y : ndarray | None
1D array of values specifying the y positions of vertices in the
grid. If None, values will be assumed to be integers.
z : ndarray
2D array of height values for each grid vertex.
colors : ndarray
(width, height, 4) array of vertex colors.
|
entailment
|
def simplified(self):
"""A simplified representation of the same transformation.
"""
if self._simplified is None:
self._simplified = SimplifiedChainTransform(self)
return self._simplified
|
A simplified representation of the same transformation.
|
entailment
|
def map(self, coords):
"""Map coordinates
Parameters
----------
coords : array-like
Coordinates to map.
Returns
-------
coords : ndarray
Coordinates.
"""
for tr in reversed(self.transforms):
coords = tr.map(coords)
return coords
|
Map coordinates
Parameters
----------
coords : array-like
Coordinates to map.
Returns
-------
coords : ndarray
Coordinates.
|
entailment
|
def imap(self, coords):
"""Inverse map coordinates
Parameters
----------
coords : array-like
Coordinates to inverse map.
Returns
-------
coords : ndarray
Coordinates.
"""
for tr in self.transforms:
coords = tr.imap(coords)
return coords
|
Inverse map coordinates
Parameters
----------
coords : array-like
Coordinates to inverse map.
Returns
-------
coords : ndarray
Coordinates.
|
entailment
|
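The composition order used by map() and imap() can be illustrated with simple stand-in transforms (these are not the real Transform classes):

class Scale:
    def __init__(self, s):
        self.s = s
    def map(self, c):
        return [v * self.s for v in c]
    def imap(self, c):
        return [v / self.s for v in c]

class Shift:
    def __init__(self, d):
        self.d = d
    def map(self, c):
        return [v + self.d for v in c]
    def imap(self, c):
        return [v - self.d for v in c]

transforms = [Scale(2), Shift(1)]
coords = [1.0, 2.0]
for tr in reversed(transforms):  # map: last transform first
    coords = tr.map(coords)
assert coords == [4.0, 6.0]      # shift, then scale
for tr in transforms:            # imap walks the other way
    coords = tr.imap(coords)
assert coords == [1.0, 2.0]      # round trip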
def append(self, tr):
"""
Add a new transform to the end of this chain.
Parameters
----------
tr : instance of Transform
The transform to use.
"""
self.transforms.append(tr)
tr.changed.connect(self._subtr_changed)
self._rebuild_shaders()
self.update()
|
Add a new transform to the end of this chain.
Parameters
----------
tr : instance of Transform
The transform to use.
|
entailment
|
def prepend(self, tr):
"""
Add a new transform to the beginning of this chain.
Parameters
----------
tr : instance of Transform
The transform to use.
"""
self.transforms.insert(0, tr)
tr.changed.connect(self._subtr_changed)
self._rebuild_shaders()
self.update()
|
Add a new transform to the beginning of this chain.
Parameters
----------
tr : instance of Transform
The transform to use.
|
entailment
|
def source_changed(self, event):
"""Generate a simplified chain by joining adjacent transforms.
"""
# bail out early if the chain is empty
transforms = self._chain.transforms[:]
if len(transforms) == 0:
self.transforms = []
return
# If the change signal comes from a transform that already appears in
# our simplified transform list, then there is no need to re-simplify.
if event is not None:
for source in event.sources[::-1]:
if source in self.transforms:
self.update(event)
return
# First flatten the chain by expanding all nested chains
new_chain = []
while len(transforms) > 0:
tr = transforms.pop(0)
if isinstance(tr, ChainTransform) and not tr.dynamic:
transforms = tr.transforms[:] + transforms
else:
new_chain.append(tr)
# Now combine together all compatible adjacent transforms
cont = True
tr = new_chain
while cont:
new_tr = [tr[0]]
cont = False
for t2 in tr[1:]:
t1 = new_tr[-1]
pr = t1 * t2
if (not t1.dynamic and not t2.dynamic and not
isinstance(pr, ChainTransform)):
cont = True
new_tr.pop()
new_tr.append(pr)
else:
new_tr.append(t2)
tr = new_tr
self.transforms = tr
|
Generate a simplified chain by joining adjacent transforms.
|
entailment
|
def pack_iterable(messages):
'''Pack an iterable of messages in the TCP protocol format'''
# [ 4-byte body size ]
# [ 4-byte num messages ]
# [ 4-byte message #1 size ][ N-byte binary data ]
# ... (repeated <num_messages> times)
return pack_string(
struct.pack('>l', len(messages)) +
''.join(map(pack_string, messages)))
|
Pack an iterable of messages in the TCP protocol format
|
entailment
|
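A sketch of the framing, with an assumed pack_string helper (not shown above) that prefixes a 4-byte big-endian length; Python 2 string semantics, matching the function above:

import struct

def pack_string(message):
    # assumed helper: 4-byte big-endian length prefix + payload
    return struct.pack('>l', len(message)) + message

frame = pack_iterable(['one', 'two'])
# layout: [body size][message count][len 'one']['one'][len 'two']['two']
body_size, count = struct.unpack('>ll', frame[:8])
# body_size == 18, count == 2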
def hexify(message):
'''Print out printable characters, but others in hex'''
import string
hexified = []
for char in message:
if (char in '\n\r \t') or (char not in string.printable):
hexified.append('\\x%02x' % ord(char))
else:
hexified.append(char)
return ''.join(hexified)
|
Print out printable characters, but others in hex
|
entailment
|
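Two quick examples; note that spaces, tabs and newlines are hexified too, per the explicit whitespace check above:

hexify('GET /ping\r\n')   # -> 'GET\\x20/ping\\x0d\\x0a' (literal backslashes)
hexify('null\x00byte')    # -> 'null\\x00byte'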
def distribute(total, objects):
'''Generator for (count, object) tuples that distributes count evenly among
the provided objects'''
for index, obj in enumerate(objects):
start = (index * total) // len(objects)   # floor division keeps the counts
stop = ((index + 1) * total) // len(objects)  # integral under Python 3 as well
yield (stop - start, obj)
|
Generator for (count, object) tuples that distributes count evenly among
the provided objects
|
entailment
|
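With integer division (as above), the counts always sum to the total and differ by at most one:

list(distribute(10, ['a', 'b', 'c']))
# -> [(3, 'a'), (3, 'b'), (4, 'c')]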
def clean(self):
"""Clean queue items from a previous session.
In case a previous session crashed and there are still some unfinished
entries in the queue ('paused', 'running', 'stopping', 'killing'), we clean those
and enqueue them again.
"""
for _, item in self.queue.items():
if item['status'] in ['paused', 'running', 'stopping', 'killing']:
item['status'] = 'queued'
item['start'] = ''
item['end'] = ''
|
Clean queue items from a previous session.
In case a previous session crashed and there are still some unfinished
entries in the queue ('paused', 'running', 'stopping', 'killing'), we clean those
and enqueue them again.
|
entailment
|
def clear(self):
"""Remove all completed tasks from the queue."""
for key in list(self.queue.keys()):
if self.queue[key]['status'] in ['done', 'failed']:
del self.queue[key]
self.write()
|
Remove all completed tasks from the queue.
|
entailment
|
def next(self):
"""Get the next processable item of the queue.
A processable item is supposed to have the status `queued`.
Returns:
None : If no key is found.
Int: If a valid entry is found.
"""
smallest = None
for key in self.queue.keys():
if self.queue[key]['status'] == 'queued':
if smallest is None or key < smallest:
smallest = key
return smallest
|
Get the next processable item of the queue.
A processable item is supposed to have the status `queued`.
Returns:
None : If no key is found.
Int: If a valid entry is found.
|
entailment
|
def read(self):
"""Read the queue of the last pueue session or set `self.queue = {}`."""
queue_path = os.path.join(self.config_dir, 'queue')
if os.path.exists(queue_path):
try:
with open(queue_path, 'rb') as queue_file:
self.queue = pickle.load(queue_file)
except Exception:
# close the file before removing it (removing an open file fails on Windows)
print('Queue file corrupted, deleting old queue')
os.remove(queue_path)
self.queue = {}
else:
self.queue = {}
|
Read the queue of the last pueue session or set `self.queue = {}`.
|
entailment
|
def write(self):
"""Write the current queue to a file. We need this to continue an earlier session."""
queue_path = os.path.join(self.config_dir, 'queue')
try:
with open(queue_path, 'wb+') as queue_file:
pickle.dump(self.queue, queue_file, -1)
except Exception:
print('Error while writing to queue file. Wrong file permissions?')
|
Write the current queue to a file. We need this to continue an earlier session.
|
entailment
|
def add_new(self, command):
"""Add a new entry to the queue."""
self.queue[self.next_key] = command
self.queue[self.next_key]['status'] = 'queued'
self.queue[self.next_key]['returncode'] = ''
self.queue[self.next_key]['stdout'] = ''
self.queue[self.next_key]['stderr'] = ''
self.queue[self.next_key]['start'] = ''
self.queue[self.next_key]['end'] = ''
self.next_key += 1
self.write()
|
Add a new entry to the queue.
|
entailment
|
def remove(self, key):
"""Remove a key from the queue, return `False` if no such key exists."""
if key in self.queue:
del self.queue[key]
self.write()
return True
return False
|
Remove a key from the queue, return `False` if no such key exists.
|
entailment
|
def restart(self, key):
"""Restart a previously finished entry."""
if key in self.queue:
if self.queue[key]['status'] in ['failed', 'done']:
new_entry = {'command': self.queue[key]['command'],
'path': self.queue[key]['path']}
self.add_new(new_entry)
self.write()
return True
return False
|
Restart a previously finished entry.
|
entailment
|
def switch(self, first, second):
"""Switch two entries in the queue. Return False if an entry doesn't exist."""
allowed_states = ['queued', 'stashed']
if first in self.queue and second in self.queue \
and self.queue[first]['status'] in allowed_states\
and self.queue[second]['status'] in allowed_states:
tmp = self.queue[second].copy()
self.queue[second] = self.queue[first].copy()
self.queue[first] = tmp
self.write()
return True
return False
|
Switch two entries in the queue. Return False if an entry doesn't exist or isn't 'queued' or 'stashed'.
|
entailment
|