| sentence1 (stringlengths 52 to 3.87M) | sentence2 (stringlengths 1 to 47.2k) | label (stringclasses 1 value) |
|---|---|---|
def compute_positions(cls, screen_width, line):
"""Compute the relative position of the fields on a given line.
Args:
screen_width (int): the width of the screen
line (mpdlcd.display_fields.Field list): the list of fields on the
line
Returns:
((int, mpdlcd.display_fields.Field) list): the positions of fields,
as (position, field) tuples.
Raises:
FormatError: if the line contains more than one flexible field, or
is too long for the screen size.
"""
# First index
left = 1
# Last index
right = screen_width + 1
# Current 'flexible' field
flexible = None
# Compute the space to the left and to the right of the (optional)
# flexible field.
for field in line:
if field.is_flexible():
if flexible:
raise FormatError(
'There can be only one flexible field per line.')
flexible = field
elif not flexible:
left += field.width
else:
# Met a 'flexible', computing from the right
right -= field.width
# Available space for the 'flexible' field
available = right - left
if available <= 0:
raise FormatError("Too much data for screen width")
if flexible:
if available < 1:
raise FormatError(
"Not enough space to display flexible field %s" %
flexible.name)
flexible.width = available
positions = []
left = 1
for field in line:
positions.append((left, field))
left += field.width
logger.debug('Positions are %r', positions)
return positions
|
Compute the relative position of the fields on a given line.
Args:
screen_width (int): the width of the screen
line (mpdlcd.display_fields.Field list): the list of fields on the
line
Returns:
((int, mpdlcd.display_fields.Field) list): the positions of fields,
as (position, field) tuples.
Raises:
FormatError: if the line contains more than one flexible field, or
is too long for the screen size.
|
entailment
|
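For illustration, a minimal standalone sketch of the positioning logic above, using a hypothetical FakeField stand-in for mpdlcd.display_fields.Field and a plain ValueError instead of FormatError; on a 20-column screen the single flexible field absorbs whatever width the fixed fields leave over.
class FakeField(object):
    """Hypothetical stand-in for mpdlcd.display_fields.Field."""
    def __init__(self, name, width, flexible=False):
        self.name = name
        self.width = width
        self._flexible = flexible
    def is_flexible(self):
        return self._flexible

def compute_positions(screen_width, line):
    left, right, flexible = 1, screen_width + 1, None
    for field in line:
        if field.is_flexible():
            if flexible:
                raise ValueError('Only one flexible field per line.')
            flexible = field
        elif not flexible:
            left += field.width      # fixed field before the flexible one
        else:
            right -= field.width     # fixed field after the flexible one
    available = right - left
    if available <= 0:
        raise ValueError('Too much data for screen width')
    if flexible:
        flexible.width = available   # flexible field takes the leftover space
    positions, left = [], 1
    for field in line:
        positions.append((left, field))
        left += field.width
    return positions

line = [FakeField('artist', 6), FakeField('song', 0, flexible=True), FakeField('time', 5)]
print([(pos, f.name, f.width) for pos, f in compute_positions(20, line)])
# [(1, 'artist', 6), (7, 'song', 9), (16, 'time', 5)]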
def add_to_screen(self, screen_width, screen):
"""Add the pattern to a screen.
Also fills self.widgets.
Args:
screen_width (int): the width of the screen
screen (lcdprod.Screen): the screen to fill.
"""
for lineno, fields in enumerate(self.line_fields):
for left, field in self.compute_positions(screen_width, fields):
logger.debug(
"Adding field %s to screen %s at x=%d->%d, y=%d",
field, screen.ref, left, left + field.width - 1, 1 + lineno,
)
self.widgets[field] = field.add_to_screen(screen, left, 1 + lineno)
self.register_hooks(field)
|
Add the pattern to a screen.
Also fills self.widgets.
Args:
screen_width (int): the width of the screen
screen (lcdprod.Screen): the screen to fill.
|
entailment
|
def register_hooks(self, field):
"""Register a field on its target hooks."""
for hook, subhooks in field.register_hooks():
self.hooks[hook].append(field)
self.subhooks[hook] |= set(subhooks)
|
Register a field on its target hooks.
|
entailment
|
def hook_changed(self, hook, new_data):
"""Called whenever the data for a hook changed."""
for field in self.hooks[hook]:
widget = self.widgets[field]
field.hook_changed(hook, widget, new_data)
|
Called whenever the data for a hook changed.
|
entailment
|
def parse_line(self, line):
"""Parse a line of text.
A format contains fields, within curly brackets, and free text.
Maximum one 'variable width' field is allowed per line.
You cannot use the '{' or '}' characters in the various text/... fields.
Format:
'''{<field_kind>[ <field_option>,<field_option>]} text {...}'''
Example:
'''{song text="%(artist)s",speed=4} {elapsed}'''
'''{song text="%(title)s",speed=2} {mode}'''
Args:
line (str): the text to parse
Returns:
PatternLine: the parsed line pattern
"""
logger.debug('Parsing line %s', line)
OUT_FIELD = 0
IN_FIELD_KIND = 1
IN_FIELD_OPTION_NAME = 2
IN_FIELD_OPTION_VALUE = 3
class ParserState(object):
"""Holds the current state of the parser.
Attributes:
quote (str): the current quote character, or None
escaping (bool): whether the next character should be escaped
block (char list): the content of the current 'block'
kind (str): the kind of the current field, or ''
option_name (str): the name of the current option, or ''
options (dict(str => str)): maps option name to option value for
the current field
state (int): state of the parser, one of OUT_FIELD/IN_FIELD_*
fields ((str, dict(str => str)) list): list of fields, as
(kind, options) tuples.
"""
def __init__(self, logger=None):
self.quote = None
self.escaping = False
self.block = []
self.kind = ''
self.option_name = ''
self.options = {}
self.state = OUT_FIELD
self.fields = []
if not logger:
logger = logging.getLogger('%s.parser' % __name__)
self.logger = logger
def _reset(self):
"""Reset buffered state (quote/escape/block)."""
self.quote = None
self.escaping = False
self.block = []
def _register_field(self, kind, options):
"""Register a completed field."""
self.fields.append((kind, dict(options)))
def debug(self, msg, *args, **kwargs):
"""Print a debug message."""
self.logger.debug(msg, *args, **kwargs)
def save_fixed_text(self):
"""Register a completed, fixed text, field."""
assert self.state == OUT_FIELD
self._register_field(FIXED_TEXT_FIELD, {'text': ''.join(self.block)})
def enter_field(self):
"""Enter a new field."""
self.debug('Entering new field')
self.state = IN_FIELD_KIND
self.kind = ''
self.options = {}
self.option_name = ''
self._reset()
def leave_kind(self):
"""Leave the field kind."""
self.state = IN_FIELD_OPTION_NAME
self.kind = ''.join(self.block)
self.debug("Got widget kind '%s'", self.kind)
self._reset()
def leave_option_name(self):
"""Leave an option name."""
self.state = IN_FIELD_OPTION_VALUE
self.option_name = ''.join(self.block)
self.debug("Got option name '%s' for '%s'", self.option_name, self.kind)
self._reset()
def leave_option_value(self):
"""Leave an option value."""
self.state = IN_FIELD_OPTION_NAME
option_value = ''.join(self.block)
self.options[self.option_name] = option_value
self.debug("Got option '%s=%s' for '%s'", self.option_name, option_value, self.kind)
self._reset()
def leave_field(self):
"""Leave a field definition."""
self.state = OUT_FIELD
self._register_field(self.kind, self.options)
self.debug(
"Got widget '%s(%s)'", self.kind,
', '.join('%s=%r' % it for it in self.options.items()),
)
self._reset()
st = ParserState()
for pos, char in enumerate(line):
# Escaping
if st.escaping:
st.escaping = False
st.block.append(char)
elif char == '\\':
st.escaping = True
# Quoting
elif char in ['"', "'"]:
if st.state == IN_FIELD_OPTION_VALUE:
if st.quote: # Already in a quoted block
if char == st.quote:
st.leave_option_value()
else:
st.block.append(char)
elif not st.block: # First char of the block
st.quote = char
continue
else:
raise FormatError("Unexpected '%s' at %d in %s" % (char, pos, line))
elif st.state == OUT_FIELD:
st.block.append(char)
else:
raise FormatError("Unexpected '%s' at %d in %s" % (char, pos, line))
# Entering a field
elif char == '{':
if st.state == OUT_FIELD:
if st.block:
st.save_fixed_text()
st.enter_field()
elif st.state == IN_FIELD_OPTION_VALUE and st.quote:
st.block.append(char)
else:
raise FormatError("Unexpected '{' at %d in %s" % (pos, line))
# Leaving a field
elif char == '}':
if st.state == IN_FIELD_KIND:
st.leave_kind()
st.leave_field()
elif st.state == IN_FIELD_OPTION_NAME:
raise FormatError("Missing option value for %s at %d in %s" % (''.join(st.block), pos, line))
elif st.state == IN_FIELD_OPTION_VALUE:
if st.quote:
st.block.append(char)
else:
st.leave_option_value()
st.leave_field()
elif st.state == OUT_FIELD:
raise FormatError("Unexpected '}' at %d in %s" % (pos, line))
# Between kind and option name
elif char == ' ':
if st.state == IN_FIELD_KIND:
if not st.block:
raise FormatError("Missing field kind at %s in %s" % (pos, line))
st.leave_kind()
elif st.state == IN_FIELD_OPTION_VALUE and st.quote:
st.block.append(char)
elif st.state == OUT_FIELD:
st.block.append(char)
else:
raise FormatError("Unexpected ' ' at %d in %s" % (pos, line))
# Between options
elif char == ',':
if st.state == IN_FIELD_OPTION_NAME:
if st.block:
raise FormatError("Missing option value for %s at %d in %s" % (''.join(st.block), pos, line))
else:
# At the beginning of a new option
continue
elif st.state == IN_FIELD_KIND:
raise FormatError("Unexpected ',' in field definition %s at %d in %s" % (st.kind, pos, line))
elif st.state == IN_FIELD_OPTION_VALUE:
if st.quote:
st.block.append(char)
elif st.block:
st.leave_option_value()
else:
raise FormatError("Missing option value for %s at %d in %s" % (st.option_name, pos, line))
else: # OUT_FIELD
st.block.append(char)
# Between option name and option value
elif char == '=':
if st.state == IN_FIELD_OPTION_NAME:
if st.block:
st.leave_option_name()
else:
raise FormatError("Missing option name at %d in %s" % (pos, line))
elif st.state == OUT_FIELD:
st.block.append(char)
elif st.state == IN_FIELD_OPTION_VALUE:
if st.quote:
st.block.append(char)
elif not st.block:
# At the beginning of an option
continue
else:
raise FormatError(
"Unexpected '=' in option value for %s at %d in %s"
% (st.option_name, pos, line))
else:
raise FormatError("Unexpected '=' at %d in %s" % (pos, line))
# Everything else
else:
st.block.append(char)
# All input parsed
if st.state != OUT_FIELD:
raise FormatError("Unclosed field at %d in '%s'; block: '%s'" % (pos, line, ''.join(st.block)))
if st.block:
st.save_fixed_text()
return st.fields
|
Parse a line of text.
A format contains fields, within curly brackets, and free text.
Maximum one 'variable width' field is allowed per line.
You cannot use the '{' or '}' characters in the various text/... fields.
Format:
'''{<field_kind>[ <field_option>,<field_option>]} text {...}'''
Example:
'''{song text="%(artist)s",speed=4} {elapsed}'''
'''{song text="%(title)s",speed=2} {mode}'''
Args:
line (str): the text to parse
Returns:
PatternLine: the parsed line pattern
|
entailment
|
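For illustration, the grammar above implies that the first documented example line parses into (kind, options) tuples along these lines; 'fixed' stands in for whatever constant the module binds to FIXED_TEXT_FIELD (the free text between the two bracketed fields).
line = '{song text="%(artist)s",speed=4} {elapsed}'
expected_fields = [
    ('song', {'text': '%(artist)s', 'speed': '4'}),
    ('fixed', {'text': ' '}),   # free text between the two bracketed fields
    ('elapsed', {}),
]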
def add(self, pattern_txt):
"""Add a pattern to the list.
Args:
pattern_txt (str list): the pattern, as a list of lines.
"""
self.patterns[len(pattern_txt)] = pattern_txt
low = 0
high = len(pattern_txt) - 1
while not pattern_txt[low]:
low += 1
while not pattern_txt[high]:
high -= 1
min_pattern = pattern_txt[low:high + 1]
self.min_patterns[len(min_pattern)] = min_pattern
|
Add a pattern to the list.
Args:
pattern_txt (str list): the pattern, as a list of lines.
|
entailment
|
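A small sketch of the trimming step above: leading and trailing blank lines are dropped for the "minimal" variant, and both variants are keyed by their line count (assumes the pattern contains at least one non-blank line).
pattern_txt = ['', '{song}', '{elapsed}', '']
low, high = 0, len(pattern_txt) - 1
while not pattern_txt[low]:
    low += 1
while not pattern_txt[high]:
    high -= 1
min_pattern = pattern_txt[low:high + 1]
print(len(pattern_txt), len(min_pattern), min_pattern)
# 4 2 ['{song}', '{elapsed}']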
def _arcball(xy, wh):
"""Convert x,y coordinates to w,x,y,z Quaternion parameters
Adapted from:
linalg library
Copyright (c) 2010-2015, Renaud Blanch <rndblnch at gmail dot com>
Licence at your convenience:
GPLv3 or higher <http://www.gnu.org/licenses/gpl.html>
BSD new <http://opensource.org/licenses/BSD-3-Clause>
"""
x, y = xy
w, h = wh
r = (w + h) / 2.
x, y = -(2. * x - w) / r, (2. * y - h) / r
h = np.sqrt(x*x + y*y)
return (0., x/h, y/h, 0.) if h > 1. else (0., x, y, np.sqrt(1. - h*h))
|
Convert x,y coordinates to w,x,y,z Quaternion parameters
Adapted from:
linalg library
Copyright (c) 2010-2015, Renaud Blanch <rndblnch at gmail dot com>
Licence at your convenience:
GPLv3 or higher <http://www.gnu.org/licenses/gpl.html>
BSD new <http://opensource.org/licenses/BSD-3-Clause>
|
entailment
|
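Repeating the helper verbatim so the mapping can be tried standalone: the viewport centre yields no rotation, and points inside the unit disc get a real w component.
import numpy as np

def _arcball(xy, wh):
    x, y = xy
    w, h = wh
    r = (w + h) / 2.
    x, y = -(2. * x - w) / r, (2. * y - h) / r
    h = np.sqrt(x * x + y * y)
    return (0., x / h, y / h, 0.) if h > 1. else (0., x, y, np.sqrt(1. - h * h))

print(_arcball((200, 200), (400, 400)))   # (0.0, -0.0, 0.0, 1.0): centre, no rotation
print(_arcball((100, 100), (400, 400)))   # roughly (0.0, 0.5, -0.5, 0.707)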
def _update_rotation(self, event):
"""Update rotation parmeters based on mouse movement"""
p2 = event.mouse_event.pos
if self._event_value is None:
self._event_value = p2
wh = self._viewbox.size
self._quaternion = (Quaternion(*_arcball(p2, wh)) *
Quaternion(*_arcball(self._event_value, wh)) *
self._quaternion)
self._event_value = p2
self.view_changed()
|
Update rotation parameters based on mouse movement
|
entailment
|
def _rotate_tr(self):
"""Rotate the transformation matrix based on camera parameters"""
rot, x, y, z = self._quaternion.get_axis_angle()
up, forward, right = self._get_dim_vectors()
self.transform.rotate(180 * rot / np.pi, (x, z, y))
|
Rotate the transformation matrix based on camera parameters
|
entailment
|
def _dist_to_trans(self, dist):
"""Convert mouse x, y movement into x, y, z translations"""
rot, x, y, z = self._quaternion.get_axis_angle()
tr = MatrixTransform()
tr.rotate(180 * rot / np.pi, (x, y, z))
dx, dz, dy = np.dot(tr.matrix[:3, :3], (dist[0], dist[1], 0.))
return dx, dy, dz
|
Convert mouse x, y movement into x, y, z translations
|
entailment
|
def arg_to_array(func):
"""
Decorator to convert argument to array.
Parameters
----------
func : function
The function to decorate.
Returns
-------
func : function
The decorated function.
"""
def fn(self, arg, *args, **kwargs):
"""Function
Parameters
----------
arg : array-like
Argument to convert.
*args : tuple
Arguments.
**kwargs : dict
Keyword arguments.
Returns
-------
value : object
The return value of the function.
"""
return func(self, np.array(arg), *args, **kwargs)
return fn
|
Decorator to convert argument to array.
Parameters
----------
func : function
The function to decorate.
Returns
-------
func : function
The decorated function.
|
entailment
|
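A short usage sketch of the decorator above; Mapper is a hypothetical class, used only to show that a plain list reaches the wrapped method as an ndarray.
import numpy as np

def arg_to_array(func):
    def fn(self, arg, *args, **kwargs):
        return func(self, np.array(arg), *args, **kwargs)
    return fn

class Mapper(object):   # hypothetical example class
    @arg_to_array
    def map(self, coords):
        return coords * 2.0   # works because coords is already an ndarray here

print(Mapper().map([1, 2, 3]))   # [2. 4. 6.]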
def as_vec4(obj, default=(0, 0, 0, 1)):
"""
Convert `obj` to 4-element vector (numpy array with shape[-1] == 4)
Parameters
----------
obj : array-like
Original object.
default : array-like
The defaults to use if the object does not have 4 entries.
Returns
-------
obj : array-like
The object promoted to have 4 elements.
Notes
-----
`obj` will have at least two dimensions.
If `obj` has < 4 elements, then new elements are added from `default`.
For inputs intended as a position or translation, use default=(0,0,0,1).
For inputs intended as scale factors, use default=(1,1,1,1).
"""
obj = np.atleast_2d(obj)
# For multiple vectors, reshape to (..., 4)
if obj.shape[-1] < 4:
new = np.empty(obj.shape[:-1] + (4,), dtype=obj.dtype)
new[:] = default
new[..., :obj.shape[-1]] = obj
obj = new
elif obj.shape[-1] > 4:
raise TypeError("Array shape %s cannot be converted to vec4"
% obj.shape)
return obj
|
Convert `obj` to 4-element vector (numpy array with shape[-1] == 4)
Parameters
----------
obj : array-like
Original object.
default : array-like
The defaults to use if the object does not have 4 entries.
Returns
-------
obj : array-like
The object promoted to have 4 elements.
Notes
-----
`obj` will have at least two dimensions.
If `obj` has < 4 elements, then new elements are added from `default`.
For inputs intended as a position or translation, use default=(0,0,0,1).
For inputs intended as scale factors, use default=(1,1,1,1).
|
entailment
|
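The promotion behaviour, exercised standalone (function body repeated from above):
import numpy as np

def as_vec4(obj, default=(0, 0, 0, 1)):
    obj = np.atleast_2d(obj)
    if obj.shape[-1] < 4:
        new = np.empty(obj.shape[:-1] + (4,), dtype=obj.dtype)
        new[:] = default
        new[..., :obj.shape[-1]] = obj
        obj = new
    elif obj.shape[-1] > 4:
        raise TypeError("Array shape %s cannot be converted to vec4" % (obj.shape,))
    return obj

print(as_vec4([1, 2]))                          # [[1 2 0 1]] -- padded as a position
print(as_vec4([[1, 2, 3], [4, 5, 6]]))          # each row gets a trailing 1
print(as_vec4([2, 2], default=(1, 1, 1, 1)))    # [[2 2 1 1]] -- padded as a scale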
def arg_to_vec4(func, self_, arg, *args, **kwargs):
"""
Decorator for converting argument to vec4 format suitable for 4x4 matrix
multiplication.
[x, y] => [[x, y, 0, 1]]
[x, y, z] => [[x, y, z, 1]]
[[x1, y1], [[x1, y1, 0, 1],
[x2, y2], => [x2, y2, 0, 1],
[x3, y3]] [x3, y3, 0, 1]]
If 1D input is provided, then the return value will be flattened.
Accepts input of any dimension, as long as shape[-1] <= 4
Alternatively, any class may define its own transform conversion interface
by defining a _transform_in() method that returns an array with shape
(.., 4), and a _transform_out() method that accepts the same array shape
and returns a new (mapped) object.
"""
if isinstance(arg, (tuple, list, np.ndarray)):
arg = np.array(arg)
flatten = arg.ndim == 1
arg = as_vec4(arg)
ret = func(self_, arg, *args, **kwargs)
if flatten and ret is not None:
return ret.flatten()
return ret
elif hasattr(arg, '_transform_in'):
arr = arg._transform_in()
ret = func(self_, arr, *args, **kwargs)
return arg._transform_out(ret)
else:
raise TypeError("Cannot convert argument to 4D vector: %s" % arg)
|
Decorator for converting argument to vec4 format suitable for 4x4 matrix
multiplication.
[x, y] => [[x, y, 0, 1]]
[x, y, z] => [[x, y, z, 1]]
[[x1, y1], [[x1, y1, 0, 1],
[x2, y2], => [x2, y2, 0, 1],
[x3, y3]] [x3, y3, 0, 1]]
If 1D input is provided, then the return value will be flattened.
Accepts input of any dimension, as long as shape[-1] <= 4
Alternatively, any class may define its own transform conversion interface
by defining a _transform_in() method that returns an array with shape
(.., 4), and a _transform_out() method that accepts the same array shape
and returns a new (mapped) object.
|
entailment
|
def get(self, path):
""" Get a transform from the cache that maps along *path*, which must
be a list of Transforms to apply in reverse order (last transform is
applied first).
Accessed items have their age reset to 0.
"""
key = tuple(map(id, path))
item = self._cache.get(key, None)
if item is None:
logger.debug("Transform cache miss: %s", key)
item = [0, self._create(path)]
self._cache[key] = item
item[0] = 0 # reset age for this item
# make sure the chain is up to date
#tr = item[1]
#for i, node in enumerate(path[1:]):
# if tr.transforms[i] is not node.transform:
# tr[i] = node.transform
return item[1]
|
Get a transform from the cache that maps along *path*, which must
be a list of Transforms to apply in reverse order (last transform is
applied first).
Accessed items have their age reset to 0.
|
entailment
|
def roll(self):
""" Increase the age of all items in the cache by 1. Items whose age
is greater than self.max_age will be removed from the cache.
"""
rem = []
for key, item in self._cache.items():
if item[0] > self.max_age:
rem.append(key)
item[0] += 1
for key in rem:
logger.debug("TransformCache remove: %s", key)
del self._cache[key]
|
Increase the age of all items in the cache by 1. Items whose age
is greater than self.max_age will be removed from the cache.
|
entailment
|
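A minimal standalone sketch of the same age-based eviction (names are illustrative, not the real TransformCache API): items accessed through get() stay young, everything else ages by one per roll() and is dropped once it exceeds max_age.
class TinyCache(object):
    def __init__(self, max_age=1):
        self.max_age = max_age
        self._cache = {}                  # key -> [age, value]

    def get(self, key, build):
        item = self._cache.setdefault(key, [0, build()])
        item[0] = 0                       # accessed items get their age reset
        return item[1]

    def roll(self):
        rem = [k for k, item in self._cache.items() if item[0] > self.max_age]
        for item in self._cache.values():
            item[0] += 1
        for k in rem:
            del self._cache[k]

cache = TinyCache(max_age=1)
cache.get('a', lambda: 'transform-a')
cache.roll(); cache.roll(); cache.roll()   # never accessed again, so 'a' ages out
print('a' in cache._cache)                 # False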
def histogram(self, data, bins=10, color='w', orientation='h'):
"""Calculate and show a histogram of data
Parameters
----------
data : array-like
Data to histogram. Currently only 1D data is supported.
bins : int | array-like
Number of bins, or bin edges.
color : instance of Color
Color of the histogram.
orientation : {'h', 'v'}
Orientation of the histogram.
Returns
-------
hist : instance of Polygon
The histogram polygon.
"""
self._configure_2d()
hist = scene.Histogram(data, bins, color, orientation)
self.view.add(hist)
self.view.camera.set_range()
return hist
|
Calculate and show a histogram of data
Parameters
----------
data : array-like
Data to histogram. Currently only 1D data is supported.
bins : int | array-like
Number of bins, or bin edges.
color : instance of Color
Color of the histogram.
orientation : {'h', 'v'}
Orientation of the histogram.
Returns
-------
hist : instance of Polygon
The histogram polygon.
|
entailment
|
def image(self, data, cmap='cubehelix', clim='auto', fg_color=None):
"""Show an image
Parameters
----------
data : ndarray
Should have shape (N, M), (N, M, 3) or (N, M, 4).
cmap : str
Colormap name.
clim : str | tuple
Colormap limits. Should be ``'auto'`` or a two-element tuple of
min and max values.
fg_color : Color or None
Sets the plot foreground color if specified.
Returns
-------
image : instance of Image
The image.
Notes
-----
The colormap is only used if the image pixels are scalars.
"""
self._configure_2d(fg_color)
image = scene.Image(data, cmap=cmap, clim=clim)
self.view.add(image)
self.view.camera.aspect = 1
self.view.camera.set_range()
return image
|
Show an image
Parameters
----------
data : ndarray
Should have shape (N, M), (N, M, 3) or (N, M, 4).
cmap : str
Colormap name.
clim : str | tuple
Colormap limits. Should be ``'auto'`` or a two-element tuple of
min and max values.
fg_color : Color or None
Sets the plot foreground color if specified.
Returns
-------
image : instance of Image
The image.
Notes
-----
The colormap is only used if the image pixels are scalars.
|
entailment
|
def mesh(self, vertices=None, faces=None, vertex_colors=None,
face_colors=None, color=(0.5, 0.5, 1.), fname=None,
meshdata=None):
"""Show a 3D mesh
Parameters
----------
vertices : array
Vertices.
faces : array | None
Face definitions.
vertex_colors : array | None
Vertex colors.
face_colors : array | None
Face colors.
color : instance of Color
Color to use.
fname : str | None
Filename to load. If not None, then vertices, faces, and meshdata
must be None.
meshdata : MeshData | None
Meshdata to use. If not None, then vertices, faces, and fname
must be None.
Returns
-------
mesh : instance of Mesh
The mesh.
"""
self._configure_3d()
if fname is not None:
if not all(x is None for x in (vertices, faces, meshdata)):
raise ValueError('vertices, faces, and meshdata must be None '
'if fname is not None')
vertices, faces = read_mesh(fname)[:2]
if meshdata is not None:
if not all(x is None for x in (vertices, faces, fname)):
raise ValueError('vertices, faces, and fname must be None '
'if meshdata is not None')
else:
meshdata = MeshData(vertices, faces)
mesh = scene.Mesh(meshdata=meshdata, vertex_colors=vertex_colors,
face_colors=face_colors, color=color,
shading='smooth')
self.view.add(mesh)
self.view.camera.set_range()
return mesh
|
Show a 3D mesh
Parameters
----------
vertices : array
Vertices.
faces : array | None
Face definitions.
vertex_colors : array | None
Vertex colors.
face_colors : array | None
Face colors.
color : instance of Color
Color to use.
fname : str | None
Filename to load. If not None, then vertices, faces, and meshdata
must be None.
meshdata : MeshData | None
Meshdata to use. If not None, then vertices, faces, and fname
must be None.
Returns
-------
mesh : instance of Mesh
The mesh.
|
entailment
|
def plot(self, data, color='k', symbol=None, line_kind='-', width=1.,
marker_size=10., edge_color='k', face_color='b', edge_width=1.,
title=None, xlabel=None, ylabel=None):
"""Plot a series of data using lines and markers
Parameters
----------
data : array | two arrays
Arguments can be passed as ``(Y,)``, ``(X, Y)`` or
``np.array((X, Y))``.
color : instance of Color
Color of the line.
symbol : str
Marker symbol to use.
line_kind : str
Kind of line to draw. For now, only solid lines (``'-'``)
are supported.
width : float
Line width.
marker_size : float
Marker size. If `size == 0` markers will not be shown.
edge_color : instance of Color
Color of the marker edge.
face_color : instance of Color
Color of the marker face.
edge_width : float
Edge width of the marker.
title : str | None
The title string to be displayed above the plot
xlabel : str | None
The label to display along the bottom axis
ylabel : str | None
The label to display along the left axis.
Returns
-------
line : instance of LinePlot
The line plot.
See also
--------
marker_types, LinePlot
"""
self._configure_2d()
line = scene.LinePlot(data, connect='strip', color=color,
symbol=symbol, line_kind=line_kind,
width=width, marker_size=marker_size,
edge_color=edge_color,
face_color=face_color,
edge_width=edge_width)
self.view.add(line)
self.view.camera.set_range()
self.visuals.append(line)
if title is not None:
self.title.text = title
if xlabel is not None:
self.xlabel.text = xlabel
if ylabel is not None:
self.ylabel.text = ylabel
return line
|
Plot a series of data using lines and markers
Parameters
----------
data : array | two arrays
Arguments can be passed as ``(Y,)``, ``(X, Y)`` or
``np.array((X, Y))``.
color : instance of Color
Color of the line.
symbol : str
Marker symbol to use.
line_kind : str
Kind of line to draw. For now, only solid lines (``'-'``)
are supported.
width : float
Line width.
marker_size : float
Marker size. If `size == 0` markers will not be shown.
edge_color : instance of Color
Color of the marker edge.
face_color : instance of Color
Color of the marker face.
edge_width : float
Edge width of the marker.
title : str | None
The title string to be displayed above the plot
xlabel : str | None
The label to display along the bottom axis
ylabel : str | None
The label to display along the left axis.
Returns
-------
line : instance of LinePlot
The line plot.
See also
--------
marker_types, LinePlot
|
entailment
|
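A hedged usage sketch of the plotting helpers above through vispy.plot (assumes the vispy.plot.Fig grid-indexing API; opens a window only with a working backend).
import numpy as np
import vispy.plot as vp

fig = vp.Fig(show=False)
x = np.linspace(0, 10, 100)
fig[0, 0].plot((x, np.sin(x)), title='sin(x)', xlabel='x', ylabel='y')
fig[1, 0].histogram(np.random.normal(size=1000), bins=20)
if __name__ == '__main__':
    fig.show(run=True)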
def spectrogram(self, x, n_fft=256, step=None, fs=1., window='hann',
color_scale='log', cmap='cubehelix', clim='auto'):
"""Calculate and show a spectrogram
Parameters
----------
x : array-like
1D signal to operate on. If ``len(x) < n_fft``, x will be
zero-padded to length ``n_fft``.
n_fft : int
Number of FFT points. Much faster for powers of two.
step : int | None
Step size between calculations. If None, ``n_fft // 2``
will be used.
fs : float
The sample rate of the data.
window : str | None
Window function to use. Can be ``'hann'`` for Hann window, or None
for no windowing.
color_scale : {'linear', 'log'}
Scale to apply to the result of the STFT.
``'log'`` will use ``10 * log10(power)``.
cmap : str
Colormap name.
clim : str | tuple
Colormap limits. Should be ``'auto'`` or a two-element tuple of
min and max values.
Returns
-------
spec : instance of Spectrogram
The spectrogram.
See also
--------
Image
"""
self._configure_2d()
# XXX once we have axes, we should use "fft_freqs", too
spec = scene.Spectrogram(x, n_fft, step, fs, window,
color_scale, cmap, clim)
self.view.add(spec)
self.view.camera.set_range()
return spec
|
Calculate and show a spectrogram
Parameters
----------
x : array-like
1D signal to operate on. If ``len(x) < n_fft``, x will be
zero-padded to length ``n_fft``.
n_fft : int
Number of FFT points. Much faster for powers of two.
step : int | None
Step size between calculations. If None, ``n_fft // 2``
will be used.
fs : float
The sample rate of the data.
window : str | None
Window function to use. Can be ``'hann'`` for Hann window, or None
for no windowing.
color_scale : {'linear', 'log'}
Scale to apply to the result of the STFT.
``'log'`` will use ``10 * log10(power)``.
cmap : str
Colormap name.
clim : str | tuple
Colormap limits. Should be ``'auto'`` or a two-element tuple of
min and max values.
Returns
-------
spec : instance of Spectrogram
The spectrogram.
See also
--------
Image
|
entailment
|
def volume(self, vol, clim=None, method='mip', threshold=None,
cmap='grays'):
"""Show a 3D volume
Parameters
----------
vol : ndarray
Volume to render.
clim : tuple of two floats | None
The contrast limits. The values in the volume are mapped to
black and white corresponding to these values. Default maps
between min and max.
method : {'mip', 'iso', 'translucent', 'additive'}
The render style to use. See corresponding docs for details.
Default 'mip'.
threshold : float
The threshold to use for the isosurface render style. By default
the mean of the given volume is used.
cmap : str
The colormap to use.
Returns
-------
volume : instance of Volume
The volume visualization.
See also
--------
Volume
"""
self._configure_3d()
volume = scene.Volume(vol, clim, method, threshold, cmap=cmap)
self.view.add(volume)
self.view.camera.set_range()
return volume
|
Show a 3D volume
Parameters
----------
vol : ndarray
Volume to render.
clim : tuple of two floats | None
The contrast limits. The values in the volume are mapped to
black and white corresponding to these values. Default maps
between min and max.
method : {'mip', 'iso', 'translucent', 'additive'}
The render style to use. See corresponding docs for details.
Default 'mip'.
threshold : float
The threshold to use for the isosurface render style. By default
the mean of the given volume is used.
cmap : str
The colormap to use.
Returns
-------
volume : instance of Volume
The volume visualization.
See also
--------
Volume
|
entailment
|
def surface(self, zdata, **kwargs):
"""Show a 3D surface plot.
Extra keyword arguments are passed to `SurfacePlot()`.
Parameters
----------
zdata : array-like
A 2D array of the surface Z values.
"""
self._configure_3d()
surf = scene.SurfacePlot(z=zdata, **kwargs)
self.view.add(surf)
self.view.camera.set_range()
return surf
|
Show a 3D surface plot.
Extra keyword arguments are passed to `SurfacePlot()`.
Parameters
----------
zdata : array-like
A 2D array of the surface Z values.
|
entailment
|
def colorbar(self, cmap, position="right",
label="", clim=("", ""),
border_width=0.0, border_color="black",
**kwargs):
"""Show a ColorBar
Parameters
----------
cmap : str | vispy.color.ColorMap
Either the name of the ColorMap to be used from the standard
set of names (refer to `vispy.color.get_colormap`),
or a custom ColorMap object.
The ColorMap is used to apply a gradient on the colorbar.
position : {'left', 'right', 'top', 'bottom'}
The position of the colorbar with respect to the plot.
'top' and 'bottom' are placed horizontally, while
'left' and 'right' are placed vertically
label : str
The label that is to be drawn with the colorbar
that provides information about the colorbar.
clim : tuple (min, max)
the minimum and maximum values of the data that
is given to the colorbar. This is used to draw the scale
on the side of the colorbar.
border_width : float (in px)
The width of the border the colormap should have. This measurement
is given in pixels
border_color : str | vispy.color.Color
The color of the border of the colormap. This can either be a
str as the color's name or an actual instance of a vispy.color.Color
Returns
-------
colorbar : instance of ColorBarWidget
See also
--------
ColorBarWidget
"""
self._configure_2d()
cbar = scene.ColorBarWidget(orientation=position,
label_str=label,
cmap=cmap,
clim=clim,
border_width=border_width,
border_color=border_color,
**kwargs)
CBAR_LONG_DIM = 50
if cbar.orientation == "bottom":
self.grid.remove_widget(self.cbar_bottom)
self.cbar_bottom = self.grid.add_widget(cbar, row=5, col=4)
self.cbar_bottom.height_max = \
self.cbar_bottom.height_min = CBAR_LONG_DIM
elif cbar.orientation == "top":
self.grid.remove_widget(self.cbar_top)
self.cbar_top = self.grid.add_widget(cbar, row=1, col=4)
self.cbar_top.height_max = self.cbar_top.height_min = CBAR_LONG_DIM
elif cbar.orientation == "left":
self.grid.remove_widget(self.cbar_left)
self.cbar_left = self.grid.add_widget(cbar, row=2, col=1)
self.cbar_left.width_max = self.cbar_left.width_min = CBAR_LONG_DIM
else: # cbar.orientation == "right"
self.grid.remove_widget(self.cbar_right)
self.cbar_right = self.grid.add_widget(cbar, row=2, col=5)
self.cbar_right.width_max = \
self.cbar_right.width_min = CBAR_LONG_DIM
return cbar
|
Show a ColorBar
Parameters
----------
cmap : str | vispy.color.ColorMap
Either the name of the ColorMap to be used from the standard
set of names (refer to `vispy.color.get_colormap`),
or a custom ColorMap object.
The ColorMap is used to apply a gradient on the colorbar.
position : {'left', 'right', 'top', 'bottom'}
The position of the colorbar with respect to the plot.
'top' and 'bottom' are placed horizontally, while
'left' and 'right' are placed vertically
label : str
The label that is to be drawn with the colorbar
that provides information about the colorbar.
clim : tuple (min, max)
the minimum and maximum values of the data that
is given to the colorbar. This is used to draw the scale
on the side of the colorbar.
border_width : float (in px)
The width of the border the colormap should have. This measurement
is given in pixels
border_color : str | vispy.color.Color
The color of the border of the colormap. This can either be a
str as the color's name or an actual instance of a vispy.color.Color
Returns
-------
colorbar : instance of ColorBarWidget
See also
--------
ColorBarWidget
|
entailment
|
def redraw(self):
"""
Redraw the Vispy canvas
"""
if self._multiscat is not None:
self._multiscat._update()
self.vispy_widget.canvas.update()
|
Redraw the Vispy canvas
|
entailment
|
def remove(self):
"""
Remove the layer artist from the visualization
"""
if self._multiscat is None:
return
self._multiscat.deallocate(self.id)
self._multiscat = None
self._viewer_state.remove_global_callback(self._update_scatter)
self.state.remove_global_callback(self._update_scatter)
|
Remove the layer artist from the visualization
|
entailment
|
def _check_valid(key, val, valid):
"""Helper to check valid options"""
if val not in valid:
raise ValueError('%s must be one of %s, not "%s"'
% (key, valid, val))
|
Helper to check valid options
|
entailment
|
def _to_args(x):
"""Convert to args representation"""
if not isinstance(x, (list, tuple, np.ndarray)):
x = [x]
return x
|
Convert to args representation
|
entailment
|
def _check_conversion(key, valid_dict):
"""Check for existence of key in dict, return value or raise error"""
if key not in valid_dict and key not in valid_dict.values():
# Only show users the nice string values
keys = [v for v in valid_dict.keys() if isinstance(v, string_types)]
raise ValueError('value must be one of %s, not %s' % (keys, key))
return valid_dict[key] if key in valid_dict else key
|
Check for existence of key in dict, return value or raise error
|
entailment
|
def read_pixels(viewport=None, alpha=True, out_type='unsigned_byte'):
"""Read pixels from the currently selected buffer.
Under most circumstances, this function reads from the front buffer.
Unlike all other functions in vispy.gloo, this function directly executes
an OpenGL command.
Parameters
----------
viewport : array-like | None
4-element list of x, y, w, h parameters. If None (default),
the current GL viewport will be queried and used.
alpha : bool
If True (default), the returned array has 4 elements (RGBA).
If False, it has 3 (RGB).
out_type : str | dtype
Can be 'unsigned_byte' or 'float'. Note that this does not
use casting, but instead determines how values are read from
the current buffer. Can also be numpy dtypes ``np.uint8``,
``np.ubyte``, or ``np.float32``.
Returns
-------
pixels : array
3D array of pixels in np.uint8 or np.float32 format.
The array shape is (h, w, 3) or (h, w, 4), with the top-left corner
of the framebuffer at index [0, 0] in the returned array.
"""
# Check whether the GL context is direct or remote
context = get_current_canvas().context
if context.shared.parser.is_remote():
raise RuntimeError('Cannot use read_pixels() with remote GLIR parser')
finish() # noqa - finish first, also flushes GLIR commands
type_dict = {'unsigned_byte': gl.GL_UNSIGNED_BYTE,
np.uint8: gl.GL_UNSIGNED_BYTE,
'float': gl.GL_FLOAT,
np.float32: gl.GL_FLOAT}
type_ = _check_conversion(out_type, type_dict)
if viewport is None:
viewport = gl.glGetParameter(gl.GL_VIEWPORT)
viewport = np.array(viewport, int)
if viewport.ndim != 1 or viewport.size != 4:
raise ValueError('viewport should be 1D 4-element array-like, not %s'
% (viewport,))
x, y, w, h = viewport
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1) # PACK, not UNPACK
fmt = gl.GL_RGBA if alpha else gl.GL_RGB
im = gl.glReadPixels(x, y, w, h, fmt, type_)
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 4)
# reshape, flip, and return
if not isinstance(im, np.ndarray):
np_dtype = np.uint8 if type_ == gl.GL_UNSIGNED_BYTE else np.float32
im = np.frombuffer(im, np_dtype)
im.shape = h, w, (4 if alpha else 3) # RGBA vs RGB
im = im[::-1, :, :] # flip the image
return im
|
Read pixels from the currently selected buffer.
Under most circumstances, this function reads from the front buffer.
Unlike all other functions in vispy.gloo, this function directly executes
an OpenGL command.
Parameters
----------
viewport : array-like | None
4-element list of x, y, w, h parameters. If None (default),
the current GL viewport will be queried and used.
alpha : bool
If True (default), the returned array has 4 elements (RGBA).
If False, it has 3 (RGB).
out_type : str | dtype
Can be 'unsigned_byte' or 'float'. Note that this does not
use casting, but instead determines how values are read from
the current buffer. Can also be numpy dtypes ``np.uint8``,
``np.ubyte``, or ``np.float32``.
Returns
-------
pixels : array
3D array of pixels in np.uint8 or np.float32 format.
The array shape is (h, w, 3) or (h, w, 4), with the top-left corner
of the framebuffer at index [0, 0] in the returned array.
|
entailment
|
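A hedged sketch of a common use of read_pixels: grabbing the framebuffer after a draw and writing it to disk (assumes a current canvas with a rendered frame, and vispy.io.write_png for the output).
from vispy import gloo, io

def save_screenshot(fname='frame.png'):
    # (h, w, 3) uint8, top-left corner of the framebuffer at [0, 0]
    img = gloo.read_pixels(alpha=False)
    io.write_png(fname, img)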
def get_gl_configuration():
"""Read the current gl configuration
This function uses constants that are not in the OpenGL ES 2.1
namespace, so only use this on desktop systems.
Returns
-------
config : dict
The currently active OpenGL configuration.
"""
# XXX eventually maybe we can ask `gl` whether or not we can access these
gl.check_error('pre-config check')
config = dict()
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
fb_param = gl.glGetFramebufferAttachmentParameter
# copied since they aren't in ES:
GL_FRONT_LEFT = 1024
GL_DEPTH = 6145
GL_STENCIL = 6146
GL_SRGB = 35904
GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING = 33296
GL_STEREO = 3123
GL_DOUBLEBUFFER = 3122
sizes = dict(red=(GL_FRONT_LEFT, 33298),
green=(GL_FRONT_LEFT, 33299),
blue=(GL_FRONT_LEFT, 33300),
alpha=(GL_FRONT_LEFT, 33301),
depth=(GL_DEPTH, 33302),
stencil=(GL_STENCIL, 33303))
for key, val in sizes.items():
config[key + '_size'] = fb_param(gl.GL_FRAMEBUFFER, val[0], val[1])
val = fb_param(gl.GL_FRAMEBUFFER, GL_FRONT_LEFT,
GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING)
if val not in (gl.GL_LINEAR, GL_SRGB):
raise RuntimeError('unknown value for SRGB: %s' % val)
config['srgb'] = True if val == GL_SRGB else False # GL_LINEAR
config['stereo'] = True if gl.glGetParameter(GL_STEREO) else False
config['double_buffer'] = (True if gl.glGetParameter(GL_DOUBLEBUFFER)
else False)
config['samples'] = gl.glGetParameter(gl.GL_SAMPLES)
gl.check_error('post-config check')
return config
|
Read the current gl configuration
This function uses constants that are not in the OpenGL ES 2.1
namespace, so only use this on desktop systems.
Returns
-------
config : dict
The currently active OpenGL configuration.
|
entailment
|
def set_viewport(self, *args):
"""Set the OpenGL viewport
This is a wrapper for gl.glViewport.
Parameters
----------
*args : tuple
X and Y coordinates, plus width and height. Can be passed in as
individual components, or as a single tuple with four values.
"""
x, y, w, h = args[0] if len(args) == 1 else args
self.glir.command('FUNC', 'glViewport', int(x), int(y), int(w), int(h))
|
Set the OpenGL viewport
This is a wrapper for gl.glViewport.
Parameters
----------
*args : tuple
X and Y coordinates, plus width and height. Can be passed in as
individual components, or as a single tuple with four values.
|
entailment
|
def set_depth_range(self, near=0., far=1.):
"""Set depth values
Parameters
----------
near : float
Near clipping plane.
far : float
Far clipping plane.
"""
self.glir.command('FUNC', 'glDepthRange', float(near), float(far))
|
Set depth values
Parameters
----------
near : float
Near clipping plane.
far : float
Far clipping plane.
|
entailment
|
def set_line_width(self, width=1.):
"""Set line width
Parameters
----------
width : float
The line width.
"""
width = float(width)
if width < 0:
raise RuntimeError('Cannot have width < 0')
self.glir.command('FUNC', 'glLineWidth', width)
|
Set line width
Parameters
----------
width : float
The line width.
|
entailment
|
def set_polygon_offset(self, factor=0., units=0.):
"""Set the scale and units used to calculate depth values
Parameters
----------
factor : float
Scale factor used to create a variable depth offset for
each polygon.
units : float
Multiplied by an implementation-specific value to create a
constant depth offset.
"""
self.glir.command('FUNC', 'glPolygonOffset', float(factor),
float(units))
|
Set the scale and units used to calculate depth values
Parameters
----------
factor : float
Scale factor used to create a variable depth offset for
each polygon.
units : float
Multiplied by an implementation-specific value to create a
constant depth offset.
|
entailment
|
def clear(self, color=True, depth=True, stencil=True):
"""Clear the screen buffers
This is a wrapper for gl.glClear.
Parameters
----------
color : bool | str | tuple | instance of Color
Clear the color buffer bit. If not bool, ``set_clear_color`` will
be used to set the color clear value.
depth : bool | float
Clear the depth buffer bit. If float, ``set_clear_depth`` will
be used to set the depth clear value.
stencil : bool | int
Clear the stencil buffer bit. If int, ``set_clear_stencil`` will
be used to set the stencil clear index.
"""
bits = 0
if isinstance(color, np.ndarray) or bool(color):
if not isinstance(color, bool):
self.set_clear_color(color)
bits |= gl.GL_COLOR_BUFFER_BIT
if depth:
if not isinstance(depth, bool):
self.set_clear_depth(depth)
bits |= gl.GL_DEPTH_BUFFER_BIT
if stencil:
if not isinstance(stencil, bool):
self.set_clear_stencil(stencil)
bits |= gl.GL_STENCIL_BUFFER_BIT
self.glir.command('FUNC', 'glClear', bits)
|
Clear the screen buffers
This is a wrapper for gl.glClear.
Parameters
----------
color : bool | str | tuple | instance of Color
Clear the color buffer bit. If not bool, ``set_clear_color`` will
be used to set the color clear value.
depth : bool | float
Clear the depth buffer bit. If float, ``set_clear_depth`` will
be used to set the depth clear value.
stencil : bool | int
Clear the stencil buffer bit. If int, ``set_clear_stencil`` will
be used to set the stencil clear index.
|
entailment
|
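A hedged usage sketch of the accepted argument forms, placed inside a hypothetical draw handler since the GLIR queue needs a current canvas.
from vispy import gloo

def on_draw(event):                        # hypothetical draw handler
    gloo.clear(color='white')              # sets the clear colour, then clears all bits
    # or select buffers explicitly with booleans:
    # gloo.clear(color=True, depth=True, stencil=False)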
def set_clear_color(self, color='black', alpha=None):
"""Set the screen clear color
This is a wrapper for gl.glClearColor.
Parameters
----------
color : str | tuple | instance of Color
Color to use. See vispy.color.Color for options.
alpha : float | None
Alpha to use.
"""
self.glir.command('FUNC', 'glClearColor', *Color(color, alpha).rgba)
|
Set the screen clear color
This is a wrapper for gl.glClearColor.
Parameters
----------
color : str | tuple | instance of Color
Color to use. See vispy.color.Color for options.
alpha : float | None
Alpha to use.
|
entailment
|
def set_blend_func(self, srgb='one', drgb='zero',
salpha=None, dalpha=None):
"""Specify pixel arithmetic for RGB and alpha
Parameters
----------
srgb : str
Source RGB factor.
drgb : str
Destination RGB factor.
salpha : str | None
Source alpha factor. If None, ``srgb`` is used.
dalpha : str
Destination alpha factor. If None, ``drgb`` is used.
"""
salpha = srgb if salpha is None else salpha
dalpha = drgb if dalpha is None else dalpha
self.glir.command('FUNC', 'glBlendFuncSeparate',
srgb, drgb, salpha, dalpha)
|
Specify pixel arithmetic for RGB and alpha
Parameters
----------
srgb : str
Source RGB factor.
drgb : str
Destination RGB factor.
salpha : str | None
Source alpha factor. If None, ``srgb`` is used.
dalpha : str
Destination alpha factor. If None, ``drgb`` is used.
|
entailment
|
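A hedged sketch of conventional alpha blending through this wrapper (the string factor names stand for the GL blend factors; needs a current canvas).
from vispy import gloo

def enable_alpha_blending():
    gloo.set_state(blend=True)
    # source weighted by its alpha, destination by (1 - source alpha)
    gloo.set_blend_func('src_alpha', 'one_minus_src_alpha')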
def set_blend_equation(self, mode_rgb, mode_alpha=None):
"""Specify the equation for RGB and alpha blending
Parameters
----------
mode_rgb : str
Mode for RGB.
mode_alpha : str | None
Mode for Alpha. If None, ``mode_rgb`` is used.
Notes
-----
See ``set_blend_equation`` for valid modes.
"""
mode_alpha = mode_rgb if mode_alpha is None else mode_alpha
self.glir.command('FUNC', 'glBlendEquationSeparate',
mode_rgb, mode_alpha)
|
Specify the equation for RGB and alpha blending
Parameters
----------
mode_rgb : str
Mode for RGB.
mode_alpha : str | None
Mode for Alpha. If None, ``mode_rgb`` is used.
Notes
-----
See ``set_blend_equation`` for valid modes.
|
entailment
|
def set_scissor(self, x, y, w, h):
"""Define the scissor box
Parameters
----------
x : int
Left corner of the box.
y : int
Lower corner of the box.
w : int
The width of the box.
h : int
The height of the box.
"""
self.glir.command('FUNC', 'glScissor', int(x), int(y), int(w), int(h))
|
Define the scissor box
Parameters
----------
x : int
Left corner of the box.
y : int
Lower corner of the box.
w : int
The width of the box.
h : int
The height of the box.
|
entailment
|
def set_stencil_func(self, func='always', ref=0, mask=8,
face='front_and_back'):
"""Set front or back function and reference value
Parameters
----------
func : str
See set_stencil_func.
ref : int
Reference value for the stencil test.
mask : int
Mask that is ANDed with ref and stored stencil value.
face : str
Can be 'front', 'back', or 'front_and_back'.
"""
self.glir.command('FUNC', 'glStencilFuncSeparate',
face, func, int(ref), int(mask))
|
Set front or back function and reference value
Parameters
----------
func : str
See set_stencil_func.
ref : int
Reference value for the stencil test.
mask : int
Mask that is ANDed with ref and stored stencil value.
face : str
Can be 'front', 'back', or 'front_and_back'.
|
entailment
|
def set_stencil_mask(self, mask=8, face='front_and_back'):
"""Control the front or back writing of individual bits in the stencil
Parameters
----------
mask : int
Mask that is ANDed with ref and stored stencil value.
face : str
Can be 'front', 'back', or 'front_and_back'.
"""
self.glir.command('FUNC', 'glStencilMaskSeparate', face, int(mask))
|
Control the front or back writing of individual bits in the stencil
Parameters
----------
mask : int
Mask that is ANDed with ref and stored stencil value.
face : str
Can be 'front', 'back', or 'front_and_back'.
|
entailment
|
def set_stencil_op(self, sfail='keep', dpfail='keep', dppass='keep',
face='front_and_back'):
"""Set front or back stencil test actions
Parameters
----------
sfail : str
Action to take when the stencil fails. Must be one of
'keep', 'zero', 'replace', 'incr', 'incr_wrap',
'decr', 'decr_wrap', or 'invert'.
dpfail : str
Action to take when the stencil test passes but the depth test fails.
dppass : str
Action to take when both the stencil and depth tests pass,
or when the stencil test passes and either there is no depth
buffer or depth testing is not enabled.
face : str
Can be 'front', 'back', or 'front_and_back'.
"""
self.glir.command('FUNC', 'glStencilOpSeparate',
face, sfail, dpfail, dppass)
|
Set front or back stencil test actions
Parameters
----------
sfail : str
Action to take when the stencil fails. Must be one of
'keep', 'zero', 'replace', 'incr', 'incr_wrap',
'decr', 'decr_wrap', or 'invert'.
dpfail : str
Action to take when the stencil test passes but the depth test fails.
dppass : str
Action to take when both the stencil and depth tests pass,
or when the stencil test passes and either there is no depth
buffer or depth testing is not enabled.
face : str
Can be 'front', 'back', or 'front_and_back'.
|
entailment
|
def set_color_mask(self, red, green, blue, alpha):
"""Toggle writing of frame buffer color components
Parameters
----------
red : bool
Red toggle.
green : bool
Green toggle.
blue : bool
Blue toggle.
alpha : bool
Alpha toggle.
"""
self.glir.command('FUNC', 'glColorMask', bool(red), bool(green),
bool(blue), bool(alpha))
|
Toggle writing of frame buffer color components
Parameters
----------
red : bool
Red toggle.
green : bool
Green toggle.
blue : bool
Blue toggle.
alpha : bool
Alpha toggle.
|
entailment
|
def set_sample_coverage(self, value=1.0, invert=False):
"""Specify multisample coverage parameters
Parameters
----------
value : float
Sample coverage value (will be clamped between 0. and 1.).
invert : bool
Specify if the coverage masks should be inverted.
"""
self.glir.command('FUNC', 'glSampleCoverage', float(value),
bool(invert))
|
Specify multisample coverage parameters
Parameters
----------
value : float
Sample coverage value (will be clamped between 0. and 1.).
invert : bool
Specify if the coverage masks should be inverted.
|
entailment
|
def set_state(self, preset=None, **kwargs):
"""Set OpenGL rendering state, optionally using a preset
Parameters
----------
preset : str | None
Can be one of ('opaque', 'translucent', 'additive') to use
reasonable defaults for these typical use cases.
**kwargs : keyword arguments
Other supplied keyword arguments will override any preset defaults.
Options to be enabled or disabled should be supplied as booleans
(e.g., ``'depth_test=True'``, ``cull_face=False``), non-boolean
entries will be passed as arguments to ``set_*`` functions (e.g.,
``blend_func=('src_alpha', 'one')`` will call ``set_blend_func``).
Notes
-----
This serves three purposes:
1. Set GL state using reasonable presets.
2. Wrapping glEnable/glDisable functionality.
3. Convenience wrapping of other ``gloo.set_*`` functions.
For example, one could do the following:
>>> from vispy import gloo
>>> gloo.set_state('translucent', depth_test=False, clear_color=(1, 1, 1, 1)) # noqa, doctest:+SKIP
This would take the preset defaults for 'translucent', turn
depth testing off (which would normally be on for that preset),
and additionally set the glClearColor parameter to be white.
Another example to showcase glEnable/glDisable wrapping:
>>> gloo.set_state(blend=True, depth_test=True, polygon_offset_fill=False) # noqa, doctest:+SKIP
This would be equivalent to calling
>>> from vispy.gloo import gl
>>> gl.glEnable(gl.GL_BLEND)
>>> gl.glEnable(gl.GL_DEPTH_TEST)
>>> gl.glDisable(gl.GL_POLYGON_OFFSET_FILL)
Or here's another example:
>>> gloo.set_state(clear_color=(0, 0, 0, 1), blend=True, blend_func=('src_alpha', 'one')) # noqa, doctest:+SKIP
Thus arbitrary GL state components can be set directly using
``set_state``. Note that individual functions are exposed e.g.,
as ``set_clear_color``, with some more informative docstrings
about those particular functions.
"""
kwargs = deepcopy(kwargs)
# Load preset, if supplied
if preset is not None:
_check_valid('preset', preset, tuple(list(_gl_presets.keys())))
for key, val in _gl_presets[preset].items():
# only apply the preset value if the user did not supply this key
if key not in kwargs:
kwargs[key] = val
# cull_face is an exception because GL_CULL_FACE, glCullFace both exist
if 'cull_face' in kwargs:
cull_face = kwargs.pop('cull_face')
if isinstance(cull_face, bool):
funcname = 'glEnable' if cull_face else 'glDisable'
self.glir.command('FUNC', funcname, 'cull_face')
else:
self.glir.command('FUNC', 'glEnable', 'cull_face')
self.set_cull_face(*_to_args(cull_face))
# Iterate over kwargs
for key, val in kwargs.items():
if key in _setters:
# Setter
args = _to_args(val)
# these actually need tuples
if key in ('blend_color', 'clear_color') and \
not isinstance(args[0], string_types):
args = [args]
getattr(self, 'set_' + key)(*args)
else:
# Enable / disable
funcname = 'glEnable' if val else 'glDisable'
self.glir.command('FUNC', funcname, key)
|
Set OpenGL rendering state, optionally using a preset
Parameters
----------
preset : str | None
Can be one of ('opaque', 'translucent', 'additive') to use
reasonable defaults for these typical use cases.
**kwargs : keyword arguments
Other supplied keyword arguments will override any preset defaults.
Options to be enabled or disabled should be supplied as booleans
(e.g., ``'depth_test=True'``, ``cull_face=False``), non-boolean
entries will be passed as arguments to ``set_*`` functions (e.g.,
``blend_func=('src_alpha', 'one')`` will call ``set_blend_func``).
Notes
-----
This serves three purposes:
1. Set GL state using reasonable presets.
2. Wrapping glEnable/glDisable functionality.
3. Convenience wrapping of other ``gloo.set_*`` functions.
For example, one could do the following:
>>> from vispy import gloo
>>> gloo.set_state('translucent', depth_test=False, clear_color=(1, 1, 1, 1)) # noqa, doctest:+SKIP
This would take the preset defaults for 'translucent', turn
depth testing off (which would normally be on for that preset),
and additionally set the glClearColor parameter to be white.
Another example to showcase glEnable/glDisable wrapping:
>>> gloo.set_state(blend=True, depth_test=True, polygon_offset_fill=False) # noqa, doctest:+SKIP
This would be equivalent to calling
>>> from vispy.gloo import gl
>>> gl.glEnable(gl.GL_BLEND)
>>> gl.glEnable(gl.GL_DEPTH_TEST)
>>> gl.glDisable(gl.GL_POLYGON_OFFSET_FILL)
Or here's another example:
>>> gloo.set_state(clear_color=(0, 0, 0, 1), blend=True, blend_func=('src_alpha', 'one')) # noqa, doctest:+SKIP
Thus arbitrary GL state components can be set directly using
``set_state``. Note that individual functions are exposed e.g.,
as ``set_clear_color``, with some more informative docstrings
about those particular functions.
|
entailment
|
def finish(self):
"""Wait for GL commands to to finish
This creates a GLIR command for glFinish and then processes the
GLIR commands. If the GLIR interpreter is remote (e.g. WebGL), this
function will return before GL has finished processing the commands.
"""
if hasattr(self, 'flush_commands'):
context = self
else:
context = get_current_canvas().context
context.glir.command('FUNC', 'glFinish')
context.flush_commands()
|
Wait for GL commands to finish
This creates a GLIR command for glFinish and then processes the
GLIR commands. If the GLIR interpreter is remote (e.g. WebGL), this
function will return before GL has finished processing the commands.
|
entailment
|
def flush(self):
"""Flush GL commands
This is a wrapper for glFlush(). This also flushes the GLIR
command queue.
"""
if hasattr(self, 'flush_commands'):
context = self
else:
context = get_current_canvas().context
context.glir.command('FUNC', 'glFlush')
context.flush_commands()
|
Flush GL commands
This is a wrapper for glFlush(). This also flushes the GLIR
command queue.
|
entailment
|
def set_hint(self, target, mode):
"""Set OpenGL drawing hint
Parameters
----------
target : str
The target, e.g. 'fog_hint', 'line_smooth_hint',
'point_smooth_hint'.
mode : str
The mode to set (e.g., 'fastest', 'nicest', 'dont_care').
"""
if not all(isinstance(tm, string_types) for tm in (target, mode)):
raise TypeError('target and mode must both be strings')
self.glir.command('FUNC', 'glHint', target, mode)
|
Set OpenGL drawing hint
Parameters
----------
target : str
The target, e.g. 'fog_hint', 'line_smooth_hint',
'point_smooth_hint'.
mode : str
The mode to set (e.g., 'fastest', 'nicest', 'dont_care').
|
entailment
|
def glir(self):
""" The GLIR queue corresponding to the current canvas
"""
canvas = get_current_canvas()
if canvas is None:
msg = ("If you want to use gloo without vispy.app, " +
"use a gloo.context.FakeCanvas.")
raise RuntimeError('Gloo requires a Canvas to run.\n' + msg)
return canvas.context.glir
|
The GLIR queue corresponding to the current canvas
|
entailment
|
def use_gl(target='gl2'):
""" Let Vispy use the target OpenGL ES 2.0 implementation
Also see ``vispy.use()``.
Parameters
----------
target : str
The target GL backend to use.
Available backends:
* gl2 - Use ES 2.0 subset of desktop (i.e. normal) OpenGL
* gl+ - Use the desktop ES 2.0 subset plus all non-deprecated GL
functions on your system (requires PyOpenGL)
* es2 - Use the ES2 library (Angle/DirectX on Windows)
* pyopengl2 - Use ES 2.0 subset of pyopengl (for fallback and testing)
* dummy - Prevent usage of gloo.gl (for when rendering occurs elsewhere)
You can use vispy's config option "gl_debug" to check for errors
on each API call. Or, one can specify it as the target, e.g. "gl2
debug". (Debug does not apply to 'gl+', since PyOpenGL has its own
debug mechanism)
"""
target = target or 'gl2'
target = target.replace('+', 'plus')
# Get options
target, _, options = target.partition(' ')
debug = config['gl_debug'] or 'debug' in options
# Select modules to import names from
try:
mod = __import__(target, globals(), level=1)
except ImportError as err:
msg = 'Could not import gl target "%s":\n%s' % (target, str(err))
raise RuntimeError(msg)
# Apply
global current_backend
current_backend = mod
_clear_namespace()
if 'plus' in target:
# Copy PyOpenGL funcs, extra funcs, constants, no debug
_copy_gl_functions(mod._pyopengl2, globals())
_copy_gl_functions(mod, globals(), True)
elif debug:
_copy_gl_functions(_debug_proxy, globals())
else:
_copy_gl_functions(mod, globals())
|
Let Vispy use the target OpenGL ES 2.0 implementation
Also see ``vispy.use()``.
Parameters
----------
target : str
The target GL backend to use.
Available backends:
* gl2 - Use ES 2.0 subset of desktop (i.e. normal) OpenGL
* gl+ - Use the desktop ES 2.0 subset plus all non-deprecated GL
functions on your system (requires PyOpenGL)
* es2 - Use the ES2 library (Angle/DirectX on Windows)
* pyopengl2 - Use ES 2.0 subset of pyopengl (for fallback and testing)
* dummy - Prevent usage of gloo.gl (for when rendering occurs elsewhere)
You can use vispy's config option "gl_debug" to check for errors
on each API call. Or, one can specify it as the target, e.g. "gl2
debug". (Debug does not apply to 'gl+', since PyOpenGL has its own
debug mechanism)
|
entailment
|
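A hedged sketch of selecting a backend, either directly through this function or via vispy.use() before gloo is used.
from vispy.gloo import gl

gl.use_gl('gl2 debug')        # ES 2.0 subset of desktop GL, with per-call error checks

# or, equivalently, at the top of a program:
# import vispy
# vispy.use(gl='gl+')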
def _clear_namespace():
""" Clear names that are not part of the strict ES API
"""
ok_names = set(default_backend.__dict__)
ok_names.update(['gl2', 'glplus']) # don't remove the module
NS = globals()
for name in list(NS.keys()):
if name.lower().startswith('gl'):
if name not in ok_names:
del NS[name]
|
Clear names that are not part of the strict ES API
|
entailment
|
def _copy_gl_functions(source, dest, constants=False):
""" Inject all objects that start with 'gl' from the source
into the dest. source and dest can be dicts, modules or BaseGLProxy's.
"""
# Get dicts
if isinstance(source, BaseGLProxy):
s = {}
for key in dir(source):
s[key] = getattr(source, key)
source = s
elif not isinstance(source, dict):
source = source.__dict__
if not isinstance(dest, dict):
dest = dest.__dict__
# Copy names
funcnames = [name for name in source.keys() if name.startswith('gl')]
for name in funcnames:
dest[name] = source[name]
# Copy constants
if constants:
constnames = [name for name in source.keys() if name.startswith('GL_')]
for name in constnames:
dest[name] = source[name]
|
Inject all objects that start with 'gl' from the source
into the dest. source and dest can be dicts, modules or BaseGLProxy's.
|
entailment
|
def check_error(when='periodic check'):
""" Check this from time to time to detect GL errors.
Parameters
----------
when : str
Shown in the exception to help the developer determine when
this check was done.
"""
errors = []
while True:
err = glGetError()
if err == GL_NO_ERROR or (errors and err == errors[-1]):
break
errors.append(err)
if errors:
msg = ', '.join([repr(ENUM_MAP.get(e, e)) for e in errors])
err = RuntimeError('OpenGL got errors (%s): %s' % (when, msg))
err.errors = errors
err.err = errors[-1] # pyopengl compat
raise err
|
Check this from time to time to detect GL errors.
Parameters
----------
when : str
Shown in the exception to help the developer determine when
this check was done.
|
entailment
|
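A quick sketch of typical use (assuming vispy is installed and a GL context is current): call it after a batch of raw GL calls so any accumulated error is raised with a hint about where the check happened.

from vispy.gloo import gl

# ... raw gl.glBindTexture / gl.glTexImage2D / ... calls ...
gl.check_error('after uploading textures')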
def _arg_repr(self, arg):
""" Get a useful (and not too large) represetation of an argument.
"""
r = repr(arg)
max = 40
if len(r) > max:
if hasattr(arg, 'shape'):
r = 'array:' + 'x'.join([repr(s) for s in arg.shape])
else:
r = r[:max-3] + '...'
return r
|
Get a useful (and not too large) representation of an argument.
|
entailment
|
def set_data(self, data):
""" Set the scalar array data
Parameters
----------
data : ndarray
A 2D array of scalar values. The isocurve is constructed to show
all locations in the scalar field equal to ``self.levels``.
"""
self._data = data
# if using matplotlib isoline algorithm we have to check for meshgrid
# and we can setup the tracer object here
if _HAS_MPL:
if self._X is None or self._X.T.shape != data.shape:
self._X, self._Y = np.meshgrid(np.arange(data.shape[0]),
np.arange(data.shape[1]))
self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float))
if self._clim is None:
self._clim = (data.min(), data.max())
# sanity check,
# should we raise an error here, since no isolines can be drawn?
# for now, _prepare_draw returns False if no isoline can be drawn
if self._data.min() != self._data.max():
self._data_is_uniform = False
else:
self._data_is_uniform = True
self._need_recompute = True
self.update()
|
Set the scalar array data
Parameters
----------
data : ndarray
A 2D array of scalar values. The isocurve is constructed to show
all locations in the scalar field equal to ``self.levels``.
|
entailment
|
def _get_verts_and_connect(self, paths):
""" retrieve vertices and connects from given paths-list
"""
verts = np.vstack(paths)
gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1
connect = np.ones(gaps[-1], dtype=bool)
connect[gaps[:-1]] = False
return verts, connect
|
retrieve vertices and connects from given paths-list
|
entailment
|
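A standalone numpy illustration of the stacking trick above: several isoline paths collapse into one vertex array plus a boolean connect mask, with the flag cleared at each path boundary (the path lengths here are made up).

import numpy as np

paths = [np.zeros((4, 2)), np.ones((3, 2)), np.full((5, 2), 2.0)]
verts = np.vstack(paths)                               # (12, 2) stacked vertices
gaps = np.add.accumulate([len(p) for p in paths]) - 1  # last index of each path: [3, 6, 11]
connect = np.ones(gaps[-1], dtype=bool)                # one flag per segment (11 of them)
connect[gaps[:-1]] = False                             # no segment across path boundaries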
def _compute_iso_line(self):
""" compute LineVisual vertices, connects and color-index
"""
level_index = []
connects = []
verts = []
# calculate which level are within data range
# this works for now and the existing examples, but should be tested
# thoroughly also with the data-sanity check in set_data-function
choice = np.nonzero((self.levels > self._data.min()) &
(self._levels < self._data.max()))
levels_to_calc = np.array(self.levels)[choice]
# save minimum level index
self._level_min = choice[0][0]
for level in levels_to_calc:
# if we use matplotlib isoline algorithm we need to add half a
# pixel in both (x,y) dimensions because isolines are aligned to
# pixel centers
if _HAS_MPL:
nlist = self._iso.trace(level, level, 0)
paths = nlist[:len(nlist)//2]
v, c = self._get_verts_and_connect(paths)
v += np.array([0.5, 0.5])
else:
paths = isocurve(self._data.astype(float).T, level,
extend_to_edge=True, connected=True)
v, c = self._get_verts_and_connect(paths)
level_index.append(v.shape[0])
connects.append(np.hstack((c, [False])))
verts.append(v)
self._li = np.hstack(level_index)
self._connect = np.hstack(connects)
self._verts = np.vstack(verts)
|
compute LineVisual vertices, connects and color-index
|
entailment
|
def connect(self, callback, ref=False, position='first',
before=None, after=None):
"""Connect this emitter to a new callback.
Parameters
----------
callback : function | tuple
*callback* may be either a callable object or a tuple
(object, attr_name) where object.attr_name will point to a
callable object. Note that only a weak reference to ``object``
will be kept.
ref : bool | str
Reference used to identify the callback in ``before``/``after``.
If True, the callback ref will be automatically determined (see
Notes). If False, the callback cannot be referred to by a string.
If str, the given string will be used. Note that if ``ref``
is not unique in ``callback_refs``, an error will be thrown.
position : str
If ``'first'``, the first eligible position is used (that
meets the before and after criteria), ``'last'`` will use
the last position.
before : str | callback | list of str or callback | None
List of callbacks that the current callback should precede.
Can be None if no before-criteria should be used.
after : str | callback | list of str or callback | None
List of callbacks that the current callback should follow.
Can be None if no after-criteria should be used.
Notes
-----
If ``ref=True``, the callback reference will be determined from:
1. If ``callback`` is ``tuple``, the second element in the tuple.
2. The ``__name__`` attribute.
3. The ``__class__.__name__`` attribute.
The current list of callback refs can be obtained using
``event.callback_refs``. Callbacks can be referred to by either
their string reference (if given), or by the actual callback that
was attached (e.g., ``(canvas, 'swap_buffers')``).
If the specified callback is already connected, then the request is
ignored.
If before is None and after is None (default), the new callback will
be added to the beginning of the callback list. Thus the
callback that is connected _last_ will be the _first_ to receive
events from the emitter.
"""
callbacks = self.callbacks
callback_refs = self.callback_refs
callback = self._normalize_cb(callback)
if callback in callbacks:
return
# deal with the ref
if isinstance(ref, bool):
if ref:
if isinstance(callback, tuple):
ref = callback[1]
elif hasattr(callback, '__name__'): # function
ref = callback.__name__
else: # Method, or other
ref = callback.__class__.__name__
else:
ref = None
elif not isinstance(ref, string_types):
raise TypeError('ref must be a bool or string')
if ref is not None and ref in self._callback_refs:
raise ValueError('ref "%s" is not unique' % ref)
# positions
if position not in ('first', 'last'):
raise ValueError('position must be "first" or "last", not %s'
% position)
# bounds
bounds = list() # upper & lower bnds (inclusive) of possible cb locs
for ri, criteria in enumerate((before, after)):
if criteria is None or criteria == []:
bounds.append(len(callback_refs) if ri == 0 else 0)
else:
if not isinstance(criteria, list):
criteria = [criteria]
for c in criteria:
count = sum([(c == cn or c == cc) for cn, cc
in zip(callback_refs, callbacks)])
if count != 1:
raise ValueError('criteria "%s" is in the current '
'callback list %s times:\n%s\n%s'
% (criteria, count,
callback_refs, callbacks))
matches = [ci for ci, (cn, cc) in enumerate(zip(callback_refs,
callbacks))
if (cc in criteria or cn in criteria)]
bounds.append(matches[0] if ri == 0 else (matches[-1] + 1))
if bounds[0] < bounds[1]: # i.e., "place before" < "place after"
raise RuntimeError('cannot place callback before "%s" '
'and after "%s" for callbacks: %s'
% (before, after, callback_refs))
idx = bounds[1] if position == 'first' else bounds[0] # 'last'
# actually add the callback
self._callbacks.insert(idx, callback)
self._callback_refs.insert(idx, ref)
return callback
|
Connect this emitter to a new callback.
Parameters
----------
callback : function | tuple
*callback* may be either a callable object or a tuple
(object, attr_name) where object.attr_name will point to a
callable object. Note that only a weak reference to ``object``
will be kept.
ref : bool | str
Reference used to identify the callback in ``before``/``after``.
If True, the callback ref will be automatically determined (see
Notes). If False, the callback cannot be referred to by a string.
If str, the given string will be used. Note that if ``ref``
is not unique in ``callback_refs``, an error will be thrown.
position : str
If ``'first'``, the first eligible position is used (that
meets the before and after criteria), ``'last'`` will use
the last position.
before : str | callback | list of str or callback | None
List of callbacks that the current callback should precede.
Can be None if no before-criteria should be used.
after : str | callback | list of str or callback | None
List of callbacks that the current callback should follow.
Can be None if no after-criteria should be used.
Notes
-----
If ``ref=True``, the callback reference will be determined from:
1. If ``callback`` is ``tuple``, the second element in the tuple.
2. The ``__name__`` attribute.
3. The ``__class__.__name__`` attribute.
The current list of callback refs can be obtained using
``event.callback_refs``. Callbacks can be referred to by either
their string reference (if given), or by the actual callback that
was attached (e.g., ``(canvas, 'swap_buffers')``).
If the specified callback is already connected, then the request is
ignored.
If before is None and after is None (default), the new callback will
be added to the beginning of the callback list. Thus the
callback that is connected _last_ will be the _first_ to receive
events from the emitter.
|
entailment
|
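A small ordering sketch (assuming vispy is installed, where this emitter lives in vispy.util.event): refs give callbacks names so that before/after can refer to them.

from vispy.util.event import EventEmitter

emitter = EventEmitter(type='demo')

def log_a(event):
    print('a')

def log_b(event):
    print('b')

emitter.connect(log_b, ref='b')
emitter.connect(log_a, ref='a', before='b')
emitter()                      # prints 'a' then 'b'
print(emitter.callback_refs)   # ('a', 'b')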
def disconnect(self, callback=None):
"""Disconnect a callback from this emitter.
If no callback is specified, then *all* callbacks are removed.
If the callback was not already connected, then the call does nothing.
"""
if callback is None:
self._callbacks = []
self._callback_refs = []
else:
callback = self._normalize_cb(callback)
if callback in self._callbacks:
idx = self._callbacks.index(callback)
self._callbacks.pop(idx)
self._callback_refs.pop(idx)
|
Disconnect a callback from this emitter.
If no callback is specified, then *all* callbacks are removed.
If the callback was not already connected, then the call does nothing.
|
entailment
|
def block(self, callback=None):
"""Block this emitter. Any attempts to emit an event while blocked
will be silently ignored. If *callback* is given, then the emitter
is only blocked for that specific callback.
Calls to block are cumulative; the emitter must be unblocked the same
number of times as it is blocked.
"""
self._blocked[callback] = self._blocked.get(callback, 0) + 1
|
Block this emitter. Any attempts to emit an event while blocked
will be silently ignored. If *callback* is given, then the emitter
is only blocked for that specific callback.
Calls to block are cumulative; the emitter must be unblocked the same
number of times as it is blocked.
|
entailment
|
def unblock(self, callback=None):
""" Unblock this emitter. See :func:`event.EventEmitter.block`.
Note: Use of ``unblock(None)`` only reverses the effect of
``block(None)``; it does not unblock callbacks that were explicitly
blocked using ``block(callback)``.
"""
if callback not in self._blocked or self._blocked[callback] == 0:
raise RuntimeError("Cannot unblock %s for callback %s; emitter "
"was not previously blocked." %
(self, callback))
b = self._blocked[callback] - 1
if b == 0 and callback is not None:
del self._blocked[callback]
else:
self._blocked[callback] = b
|
Unblock this emitter. See :func:`event.EventEmitter.block`.
Note: Use of ``unblock(None)`` only reverses the effect of
``block(None)``; it does not unblock callbacks that were explicitly
blocked using ``block(callback)``.
|
entailment
|
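Continuing the emitter sketch above: blocking is cumulative, so each block() needs a matching unblock() before events flow again.

emitter.block()
emitter.block()
emitter()          # ignored, the emitter is blocked twice
emitter.unblock()
emitter()          # still ignored, one block remains
emitter.unblock()
emitter()          # delivered again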
def add(self, auto_connect=None, **kwargs):
""" Add one or more EventEmitter instances to this emitter group.
Each keyword argument may be specified as either an EventEmitter
instance or an Event subclass, in which case an EventEmitter will be
generated automatically::
# This statement:
group.add(mouse_press=MouseEvent,
mouse_release=MouseEvent)
# ..is equivalent to this statement:
group.add(mouse_press=EventEmitter(group.source, 'mouse_press',
MouseEvent),
mouse_release=EventEmitter(group.source, 'mouse_release',
MouseEvent))
"""
if auto_connect is None:
auto_connect = self.auto_connect
# check all names before adding anything
for name in kwargs:
if name in self._emitters:
raise ValueError(
"EmitterGroup already has an emitter named '%s'" %
name)
elif hasattr(self, name):
raise ValueError("The name '%s' cannot be used as an emitter; "
"it is already an attribute of EmitterGroup"
% name)
# add each emitter specified in the keyword arguments
for name, emitter in kwargs.items():
if emitter is None:
emitter = Event
if inspect.isclass(emitter) and issubclass(emitter, Event):
emitter = EventEmitter(
source=self.source,
type=name,
event_class=emitter)
elif not isinstance(emitter, EventEmitter):
raise Exception('Emitter must be specified as either an '
'EventEmitter instance or Event subclass. '
'(got %s=%s)' % (name, emitter))
# give this emitter the same source as the group.
emitter.source = self.source
setattr(self, name, emitter)
self._emitters[name] = emitter
if auto_connect and self.source is not None:
emitter.connect((self.source, self.auto_connect_format % name))
# If emitters are connected to the group already, then this one
# should be connected as well.
if self._emitters_connected:
emitter.connect(self)
|
Add one or more EventEmitter instances to this emitter group.
Each keyword argument may be specified as either an EventEmitter
instance or an Event subclass, in which case an EventEmitter will be
generated automatically::
# This statement:
group.add(mouse_press=MouseEvent,
mouse_release=MouseEvent)
# ..is equivalent to this statement:
group.add(mouse_press=EventEmitter(group.source, 'mouse_press',
MouseEvent),
mouse_release=EventEmitter(group.source, 'mouse_release',
MouseEvent))
|
entailment
|
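A minimal group sketch (assuming vispy is installed): with no source and auto_connect disabled, the group is simply a named bundle of emitters.

from vispy.util.event import EmitterGroup, Event

group = EmitterGroup(source=None, auto_connect=False)
group.add(started=Event, finished=Event)
group.started.connect(lambda event: print('started'))
group.started()   # emits a 'started' Event to the connected callback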
def block_all(self):
""" Block all emitters in this group.
"""
self.block()
for em in self._emitters.values():
em.block()
|
Block all emitters in this group.
|
entailment
|
def unblock_all(self):
""" Unblock all emitters in this group.
"""
self.unblock()
for em in self._emitters.values():
em.unblock()
|
Unblock all emitters in this group.
|
entailment
|
def connect(self, callback, ref=False, position='first',
before=None, after=None):
""" Connect the callback to the event group. The callback will receive
events from *all* of the emitters in the group.
See :func:`EventEmitter.connect() <vispy.event.EventEmitter.connect>`
for arguments.
"""
self._connect_emitters(True)
return EventEmitter.connect(self, callback, ref, position,
before, after)
|
Connect the callback to the event group. The callback will receive
events from *all* of the emitters in the group.
See :func:`EventEmitter.connect() <vispy.event.EventEmitter.connect>`
for arguments.
|
entailment
|
def disconnect(self, callback=None):
""" Disconnect the callback from this group. See
:func:`connect() <vispy.event.EmitterGroup.connect>` and
:func:`EventEmitter.connect() <vispy.event.EventEmitter.connect>` for
more information.
"""
ret = EventEmitter.disconnect(self, callback)
if len(self._callbacks) == 0:
self._connect_emitters(False)
return ret
|
Disconnect the callback from this group. See
:func:`connect() <vispy.event.EmitterGroup.connect>` and
:func:`EventEmitter.connect() <vispy.event.EventEmitter.connect>` for
more information.
|
entailment
|
def validate_input_format(utterance, intent):
""" TODO add handling for bad input"""
slots = {slot["name"] for slot in intent["slots"]}
split_utt = re.split("{(.*)}", utterance)
banned = set("-/\\()^%$#@~`-_=+><;:") # Banned characters
for token in split_utt:
if (banned & set(token)):
print (" - Banned character found in substring", token)
print (" - Banned character list", banned)
return False
if "|" in token:
split_token = token.split("|")
if len(split_token)!=2:
print (" - Error, token is incorrect in", token, split_token)
return False
word, slot = split_token
if slot.strip() not in slots:
print (" -", slot, "is not a valid slot for this Intent, valid slots are", slots)
return False
return True
|
TODO add handling for bad input
|
entailment
|
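A quick check, assuming validate_input_format() above is in scope and the intent schema uses the usual {"slots": [{"name": ...}, ...]} layout (the slot and utterances here are made up).

intent = {"slots": [{"name": "City"}]}
print(validate_input_format("what is the weather in {seattle|City}", intent))  # True
print(validate_input_format("what is the weather in {seattle|Town}", intent))  # False, unknown slot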
def isosurface(data, level):
"""
Generate isosurface from volumetric data using marching cubes algorithm.
See Paul Bourke, "Polygonising a Scalar Field"
(http://paulbourke.net/geometry/polygonise/)
*data* 3D numpy array of scalar values
*level* The level at which to generate an isosurface
Returns an array of vertex coordinates (Nv, 3) and an array of
per-face vertex indexes (Nf, 3)
"""
# For improvement, see:
#
# Efficient implementation of Marching Cubes' cases with topological
# guarantees.
# Thomas Lewiner, Helio Lopes, Antonio Wilson Vieira and Geovan Tavares.
# Journal of Graphics Tools 8(2): pp. 1-15 (december 2003)
(face_shift_tables, edge_shifts,
edge_table, n_table_faces) = _get_data_cache()
## mark everything below the isosurface level
mask = data < level
# Because we make use of the strides data attribute below, we have to make
# sure that the data is contiguous (which it won't be if the user did
# data.transpose() for example). Note that this doesn't copy the data if it
# is already contiguous.
data = np.ascontiguousarray(data)
### make eight sub-fields and compute indexes for grid cells
index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)
fields = np.empty((2, 2, 2), dtype=object)
slices = [slice(0, -1), slice(1, None)]
for i in [0, 1]:
for j in [0, 1]:
for k in [0, 1]:
fields[i, j, k] = mask[slices[i], slices[j], slices[k]]
# this is just to match Bourke's vertex numbering scheme:
vertIndex = i - 2*j*i + 3*j + 4*k
index += (fields[i, j, k] * 2**vertIndex).astype(np.ubyte)
### Generate table of edges that have been cut
cut_edges = np.zeros([x+1 for x in index.shape]+[3], dtype=np.uint32)
edges = edge_table[index]
for i, shift in enumerate(edge_shifts[:12]):
slices = [slice(shift[j], cut_edges.shape[j]+(shift[j]-1))
for j in range(3)]
cut_edges[slices[0], slices[1], slices[2], shift[3]] += edges & 2**i
# for each cut edge, interpolate to see where exactly the edge is cut and
# generate vertex positions
m = cut_edges > 0
vertex_inds = np.argwhere(m) # argwhere is slow!
vertexes = vertex_inds[:, :3].astype(np.float32).copy()
dataFlat = data.reshape(data.shape[0]*data.shape[1]*data.shape[2])
## re-use the cut_edges array as a lookup table for vertex IDs
cut_edges[vertex_inds[:, 0],
vertex_inds[:, 1],
vertex_inds[:, 2],
vertex_inds[:, 3]] = np.arange(vertex_inds.shape[0])
for i in [0, 1, 2]:
vim = vertex_inds[:, 3] == i
vi = vertex_inds[vim, :3]
vi_flat = (vi * (np.array(data.strides[:3]) //
data.itemsize)[np.newaxis, :]).sum(axis=1)
v1 = dataFlat[vi_flat]
v2 = dataFlat[vi_flat + data.strides[i]//data.itemsize]
vertexes[vim, i] += (level-v1) / (v2-v1)
### compute the set of vertex indexes for each face.
## This works, but runs a bit slower.
## all cells with at least one face:
#cells = np.argwhere((index != 0) & (index != 255))
#cellInds = index[cells[:, 0], cells[:, 1], cells[:, 2]]
#verts = faceTable[cellInds]
#mask = verts[..., 0, 0] != 9
## we now have indexes into cut_edges:
#verts[...,:3] += cells[:, np.newaxis, np.newaxis,:]
#verts = verts[mask]
## and these are the vertex indexes we want:
#faces = cut_edges[verts[..., 0], verts[..., 1], verts[..., 2],
# verts[..., 3]]
# To allow this to be vectorized efficiently, we count the number of faces
# in each grid cell and handle each group of cells with the same number
# together.
# determine how many faces to assign to each grid cell
n_faces = n_table_faces[index]
tot_faces = n_faces.sum()
faces = np.empty((tot_faces, 3), dtype=np.uint32)
ptr = 0
## this helps speed up an indexing operation later on
cs = np.array(cut_edges.strides)//cut_edges.itemsize
cut_edges = cut_edges.flatten()
## this, strangely, does not seem to help.
#ins = np.array(index.strides)/index.itemsize
#index = index.flatten()
for i in range(1, 6):
# expensive:
# all cells which require i faces (argwhere is expensive)
cells = np.argwhere(n_faces == i)
if cells.shape[0] == 0:
continue
# index values of cells to process for this round:
cellInds = index[cells[:, 0], cells[:, 1], cells[:, 2]]
# expensive:
verts = face_shift_tables[i][cellInds]
# we now have indexes into cut_edges:
verts[..., :3] += (cells[:, np.newaxis,
np.newaxis, :]).astype(np.uint16)
verts = verts.reshape((verts.shape[0]*i,)+verts.shape[2:])
# expensive:
verts = (verts * cs[np.newaxis, np.newaxis, :]).sum(axis=2)
vert_inds = cut_edges[verts]
nv = vert_inds.shape[0]
faces[ptr:ptr+nv] = vert_inds # .reshape((nv, 3))
ptr += nv
return vertexes, faces
|
Generate isosurface from volumetric data using marching cubes algorithm.
See Paul Bourke, "Polygonising a Scalar Field"
(http://paulbourke.net/geometry/polygonise/)
*data* 3D numpy array of scalar values
*level* The level at which to generate an isosurface
Returns an array of vertex coordinates (Nv, 3) and an array of
per-face vertex indexes (Nf, 3)
|
entailment
|
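A minimal sketch (assuming vispy is installed, where this function is exposed as vispy.geometry.isosurface): extract the 0.5 level of a radial field, roughly a sphere of radius 0.5 sampled on a 32^3 grid.

import numpy as np
from vispy.geometry import isosurface

x, y, z = np.mgrid[-1:1:32j, -1:1:32j, -1:1:32j]
data = np.sqrt(x ** 2 + y ** 2 + z ** 2)
verts, faces = isosurface(data, level=0.5)
print(verts.shape, faces.shape)   # (Nv, 3) vertex coordinates, (Nf, 3) triangle indices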
def _extract_buffers(commands):
"""Extract all data buffers from the list of GLIR commands, and replace
them by buffer pointers {buffer: <buffer_index>}. Return the modified list
# of GILR commands and the list of buffers as well."""
# First, filter all DATA commands.
data_commands = [command for command in commands if command[0] == 'DATA']
# Extract the arrays.
buffers = [data_command[3] for data_command in data_commands]
# Modify the commands by replacing the array buffers with pointers.
commands_modified = list(commands)
buffer_index = 0
for i, command in enumerate(commands_modified):
if command[0] == 'DATA':
commands_modified[i] = command[:3] + \
({'buffer_index': buffer_index},)
buffer_index += 1
return commands_modified, buffers
|
Extract all data buffers from the list of GLIR commands, and replace
them by buffer pointers {buffer: <buffer_index>}. Return the modified list
of GLIR commands and the list of buffers as well.
|
entailment
|
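A small sketch, assuming _extract_buffers() above is in scope and that commands follow the ('DATA', id, offset, array) tuple convention of the GLIR queue.

import numpy as np

commands = [
    ('CREATE', 4, 'VertexBuffer'),
    ('DATA', 4, 0, np.zeros(3, dtype=np.float32)),
]
modified, buffers = _extract_buffers(commands)
# modified[1] == ('DATA', 4, 0, {'buffer_index': 0})
# buffers[0] is the original float32 array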
def _serialize_item(item):
"""Internal function: serialize native types."""
# Recursively serialize lists, tuples, and dicts.
if isinstance(item, (list, tuple)):
return [_serialize_item(subitem) for subitem in item]
elif isinstance(item, dict):
return dict([(key, _serialize_item(value))
for (key, value) in iteritems(item)])
# Serialize strings.
elif isinstance(item, string_types):
# Replace glSomething by something (needed for WebGL commands).
if item.startswith('gl'):
return re.sub(r'^gl([A-Z])', lambda m: m.group(1).lower(), item)
else:
return item
# Process NumPy arrays that are not buffers (typically, uniform values).
elif isinstance(item, np.ndarray):
return _serialize_item(item.ravel().tolist())
# Serialize numbers.
else:
try:
return np.asscalar(item)
except Exception:
return item
|
Internal function: serialize native types.
|
entailment
|
def create_glir_message(commands, array_serialization=None):
"""Create a JSON-serializable message of GLIR commands. NumPy arrays
are serialized according to the specified method.
Arguments
---------
commands : list
List of GLIR commands.
array_serialization : string or None
Serialization method for NumPy arrays. Possible values are:
'binary' (default) : use a binary string
'base64' : base64 encoded string of the array
"""
# Default serialization method for NumPy arrays.
if array_serialization is None:
array_serialization = 'binary'
# Extract the buffers.
commands_modified, buffers = _extract_buffers(commands)
# Serialize the modified commands (with buffer pointers) and the buffers.
commands_serialized = [_serialize_command(command_modified)
for command_modified in commands_modified]
buffers_serialized = [_serialize_buffer(buffer, array_serialization)
for buffer in buffers]
# Create the final message.
msg = {
'msg_type': 'glir_commands',
'commands': commands_serialized,
'buffers': buffers_serialized,
}
return msg
|
Create a JSON-serializable message of GLIR commands. NumPy arrays
are serialized according to the specified method.
Arguments
---------
commands : list
List of GLIR commands.
array_serialization : string or None
Serialization method for NumPy arrays. Possible values are:
'binary' (default) : use a binary string
'base64' : base64 encoded string of the array
|
entailment
|
def start_session(self):
""" Start Session """
response = self.request("hello")
bits = response.split(" ")
self.server_info.update({
"server_version": bits[2],
"protocol_version": bits[4],
"screen_width": int(bits[7]),
"screen_height": int(bits[9]),
"cell_width": int(bits[11]),
"cell_height": int(bits[13])
})
return response
|
Start Session
|
entailment
|
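The index arithmetic above expects an LCDd "hello" reply laid out roughly as below; the exact wording is an assumption here, only the field positions matter.

reply = "connect LCDproc 0.5.9 protocol 0.3 lcd wid 20 hgt 4 cellwid 5 cellhgt 8"
bits = reply.split(" ")
print(bits[2], bits[4], bits[7], bits[9], bits[11], bits[13])
# 0.5.9 0.3 20 4 5 8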
def request(self, command_string):
""" Request """
self.send(command_string)
if self.debug:
print("Telnet Request: %s" % (command_string))
while True:
response = urllib.parse.unquote(self.tn.read_until(b"\n").decode())
if "success" in response: # Normal successful reply
break
if "huh" in response: # Something went wrong
break
if "connect" in response: # Special reply to "hello"
break
# TODO Keep track of which screen is displayed
# Try again if response was key, menu or visibility notification.
if "huh" in response or self.debug:
print("Telnet Response: %s" % (response[:-1]))
return response
|
Request
|
entailment
|
def add_screen(self, ref):
""" Add Screen """
if ref not in self.screens:
screen = Screen(self, ref)
screen.clear() # TODO Check this is needed, new screens should be clear.
self.screens[ref] = screen
return self.screens[ref]
|
Add Screen
|
entailment
|
def add_key(self, ref, mode="shared"):
"""
Add a key.
(ref)
Return key name or None on error
"""
if ref not in self.keys:
response = self.request("client_add_key %s -%s" % (ref, mode))
if "success" not in response:
return None
self.keys.append(ref)
return ref
|
Add a key.
(ref)
Return key name or None on error
|
entailment
|
def del_key(self, ref):
"""
Delete a key.
(ref)
Return None or LCDd response on error
"""
if ref in self.keys:
response = self.request("client_del_key %s" % (ref))
self.keys.remove(ref)
if "success" in response:
return None
else:
return response
|
Delete a key.
(ref)
Return None or LCDd response on error
|
entailment
|
def launch(self):
"""launch browser and virtual display, first of all to be launched"""
try:
# init virtual Display
self.vbro = Display()
self.vbro.start()
logger.debug("virtual display launched")
except Exception:
raise exceptions.VBroException()
try:
self.browser = Browser(self.brow_name)
logger.debug(f"browser {self.brow_name} launched")
except Exception:
raise exceptions.BrowserException(
self.brow_name, "failed to launch")
return True
|
launch browser and virtual display, first of all to be launched
|
entailment
|
def css(self, css_path, dom=None):
"""css find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_css, args=[css_path])
|
css find function abbreviation
|
entailment
|
def css1(self, css_path, dom=None):
"""return the first value of self.css"""
if dom is None:
dom = self.browser
def _css1(path, domm):
"""virtual local func"""
return self.css(path, domm)[0]
return expect(_css1, args=[css_path, dom])
|
return the first value of self.css
|
entailment
|
def search_name(self, name, dom=None):
"""name find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_name, args=[name])
|
name find function abbreviation
|
entailment
|
def xpath(self, xpath, dom=None):
"""xpath find function abbreviation"""
if dom is None:
dom = self.browser
return expect(dom.find_by_xpath, args=[xpath])
|
xpath find function abbreviation
|
entailment
|
def elCss(self, css_path, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_css, args=[css_path])
|
check if element is present by css
|
entailment
|
def elXpath(self, xpath, dom=None):
"""check if element is present by css"""
if dom is None:
dom = self.browser
return expect(dom.is_element_present_by_xpath, args=[xpath])
|
check if element is present by css
|
entailment
|
def login(self, username, password, mode="demo"):
"""login function"""
url = "https://trading212.com/it/login"
try:
logger.debug(f"visiting %s" % url)
self.browser.visit(url)
logger.debug(f"connected to %s" % url)
except selenium.common.exceptions.WebDriverException:
logger.critical("connection timed out")
raise
try:
self.search_name("login[username]").fill(username)
self.search_name("login[password]").fill(password)
self.css1(path['log']).click()
# define a timeout for logging in
timeout = time.time() + 30
while not self.elCss(path['logo']):
if time.time() > timeout:
logger.critical("login failed")
raise CredentialsException(username)
time.sleep(1)
logger.info(f"logged in as {username}")
# check if it's a weekend
if mode == "demo" and datetime.now().isoweekday() in range(5, 8):
timeout = time.time() + 10
while not self.elCss(path['alert-box']):
if time.time() > timeout:
logger.warning("weekend trading alert-box not closed")
break
if self.elCss(path['alert-box']):
self.css1(path['alert-box']).click()
logger.debug("weekend trading alert-box closed")
except Exception as e:
logger.critical("login failed")
raise exceptions.BaseExc(e)
return True
|
login function
|
entailment
|
def logout(self):
"""logout func (quit browser)"""
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
self.vbro.stop()
logger.info("logged out")
return True
|
logout func (quit browser)
|
entailment
|
def new_pos(self, html_div):
"""factory method pattern"""
pos = self.Position(self, html_div)
pos.bind_mov()
self.positions.append(pos)
return pos
|
factory method pattern
|
entailment
|
def _make_hostport(conn, default_host, default_port, default_user='', default_password=None):
"""Convert a '[user[:pass]@]host:port' string to a Connection tuple.
If the given connection is empty, use defaults.
If no port is given, use the default.
Args:
conn (str): the string describing the target host/port
default_host (str): the host to use if ``conn`` is empty
default_port (int): the port to use if not given in ``conn``.
Returns:
Connection: a (host, port, user, password) tuple.
"""
parsed = urllib.parse.urlparse('//%s' % conn)
return Connection(
parsed.hostname or default_host,
parsed.port or default_port,
parsed.username if parsed.username is not None else default_user,
parsed.password if parsed.password is not None else default_password,
)
|
Convert a '[user[:pass]@]host:port' string to a Connection tuple.
If the given connection is empty, use defaults.
If no port is given, use the default.
Args:
conn (str): the string describing the target host/port
default_host (str): the host to use if ``conn`` is empty
default_port (int): the port to use if not given in ``conn``.
Returns:
Connection: a (host, port, user, password) tuple.
|
entailment
|
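A quick sketch, assuming _make_hostport() and its Connection tuple above are in scope.

conn = _make_hostport('music:secret@mpd.local:6601', 'localhost', 6600)
# -> host 'mpd.local', port 6601, user 'music', password 'secret'
conn = _make_hostport('', 'localhost', 6600)
# -> falls back to the defaults: 'localhost', 6600, '', None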
def _make_lcdproc(
lcd_host, lcd_port, retry_config,
charset=DEFAULT_LCDPROC_CHARSET, lcdd_debug=False):
"""Create and connect to the LCDd server.
Args:
lcd_host (str): the hostname to connect to
lcd_port (int): the port to connect to
charset (str): the charset to use when sending messages to lcdproc
lcdd_debug (bool): whether to enable full LCDd debug
retry_config (utils.AutoRetryConfig): connection retry settings (attempts, wait, backoff)
Returns:
lcdproc.server.Server
"""
class ServerSpawner(utils.AutoRetryCandidate):
"""Spawn the server, using auto-retry."""
@utils.auto_retry
def connect(self):
return lcdrunner.LcdProcServer(
lcd_host, lcd_port, charset=charset, debug=lcdd_debug)
spawner = ServerSpawner(retry_config=retry_config, logger=logger)
try:
return spawner.connect()
except socket.error as e:
logger.error('Unable to connect to lcdproc %s:%s : %r', lcd_host, lcd_port, e)
raise SystemExit(1)
|
Create and connect to the LCDd server.
Args:
lcd_host (str): the hostname to connect to
lcd_port (int): the port to connect to
charset (str): the charset to use when sending messages to lcdproc
lcdd_debug (bool): whether to enable full LCDd debug
retry_config (utils.AutoRetryConfig): connection retry settings (attempts, wait, backoff)
Returns:
lcdproc.server.Server
|
entailment
|
def _make_patterns(patterns):
"""Create a ScreenPatternList from a given pattern text.
Args:
patterns (str list): the patterns
Returns:
mpdlcd.display_pattern.ScreenPatternList: a list of patterns from the
given entries.
"""
field_registry = display_fields.FieldRegistry()
pattern_list = display_pattern.ScreenPatternList(
field_registry=field_registry,
)
for pattern in patterns:
pattern_list.add(pattern.split('\n'))
return pattern_list
|
Create a ScreenPatternList from a given pattern text.
Args:
patterns (str list): the patterns
Returns:
mpdlcd.display_pattern.ScreenPatternList: a list of patterns from the
given entries.
|
entailment
|
def run_forever(
lcdproc='', mpd='', lcdproc_screen=DEFAULT_LCD_SCREEN_NAME,
lcdproc_charset=DEFAULT_LCDPROC_CHARSET,
lcdd_debug=False,
pattern='', patterns=[],
refresh=DEFAULT_REFRESH,
backlight_on=DEFAULT_BACKLIGHT_ON,
priority_playing=DEFAULT_PRIORITY,
priority_not_playing=DEFAULT_PRIORITY,
retry_attempts=DEFAULT_RETRY_ATTEMPTS,
retry_wait=DEFAULT_RETRY_WAIT,
retry_backoff=DEFAULT_RETRY_BACKOFF):
"""Run the server.
Args:
lcdproc (str): the target connection (host:port) for lcdproc
mpd (str): the target connection ([pwd@]host:port) for mpd
lcdproc_screen (str): the name of the screen to use for lcdproc
lcdproc_charset (str): the charset to use with lcdproc
lcdd_debug (bool): whether to enable full LCDd debug
pattern (str): the pattern to use
patterns (str list): the patterns to use
refresh (float): how often to refresh the display
backlight_on (str): the rules for activating backlight
retry_attempts (int): number of connection attempts
retry_wait (int): time between connection attempts
retry_backoff (int): increase to between-attempts delay
"""
# Compute host/ports
lcd_conn = _make_hostport(lcdproc, 'localhost', 13666)
mpd_conn = _make_hostport(mpd, 'localhost', 6600)
# Prepare auto-retry
retry_config = utils.AutoRetryConfig(
retry_attempts=retry_attempts,
retry_backoff=retry_backoff,
retry_wait=retry_wait)
# Setup MPD client
mpd_client = mpdwrapper.MPDClient(
host=mpd_conn.hostname,
port=mpd_conn.port,
password=mpd_conn.username,
retry_config=retry_config,
)
# Setup LCDd client
lcd = _make_lcdproc(
lcd_conn.hostname, lcd_conn.port,
lcdd_debug=lcdd_debug,
charset=lcdproc_charset,
retry_config=retry_config,
)
# Setup connector
runner = lcdrunner.MpdRunner(
mpd_client, lcd,
lcdproc_screen=lcdproc_screen,
refresh_rate=refresh,
retry_config=retry_config,
backlight_on=backlight_on,
priority_playing=priority_playing,
priority_not_playing=priority_not_playing,
)
# Fill pattern
if pattern:
# If a specific pattern was given, use it
patterns = [pattern]
elif not patterns:
# If no patterns were given, use the defaults
patterns = DEFAULT_PATTERNS
pattern_list = _make_patterns(patterns)
mpd_hook_registry = mpdhooks.HookRegistry()
runner.setup_pattern(pattern_list, hook_registry=mpd_hook_registry)
# Launch
mpd_client.connect()
runner.run()
# Exit
logging.shutdown()
|
Run the server.
Args:
lcdproc (str): the target connection (host:port) for lcdproc
mpd (str): the target connection ([pwd@]host:port) for mpd
lcdproc_screen (str): the name of the screen to use for lcdproc
lcdproc_charset (str): the charset to use with lcdproc
lcdd_debug (bool): whether to enable full LCDd debug
pattern (str): the pattern to use
patterns (str list): the patterns to use
refresh (float): how often to refresh the display
backlight_on (str): the rules for activating backlight
retry_attempts (int): number of connection attempts
retry_wait (int): time between connection attempts
retry_backoff (int): increase to between-attempts delay
|
entailment
|
def _read_config(filename):
"""Read configuration from the given file.
Parsing is performed through the configparser library.
Returns:
dict: a flattened dict of (option_name, value), using defaults.
"""
parser = configparser.RawConfigParser()
if filename and not parser.read(filename):
sys.stderr.write("Unable to open configuration file %s. Use --config='' to disable this warning.\n" % filename)
config = {}
for section, defaults in BASE_CONFIG.items():
# Patterns are handled separately
if section == 'patterns':
continue
for name, descr in defaults.items():
kind, default = descr
if section in parser.sections() and name in parser.options(section):
if kind == 'int':
value = parser.getint(section, name)
elif kind == 'float':
value = parser.getfloat(section, name)
elif kind == 'bool':
value = parser.getboolean(section, name)
else:
value = parser.get(section, name)
else:
value = default
config[name] = value
if 'patterns' in parser.sections():
patterns = [parser.get('patterns', opt) for opt in parser.options('patterns')]
else:
patterns = DEFAULT_PATTERNS
config['patterns'] = patterns
return config
|
Read configuration from the given file.
Parsing is performed through the configparser library.
Returns:
dict: a flattened dict of (option_name, value), using defaults.
|
entailment
|
def _extract_options(config, options, *args):
"""Extract options values from a configparser, optparse pair.
Options given on command line take precedence over options read in the
configuration file.
Args:
config (dict): option values read from a config file through
configparser
options (optparse.Options): optparse 'options' object containing options
values from the command line
*args (str tuple): name of the options to extract
"""
extract = {}
for key in args:
if key not in config:
continue
extract[key] = config[key]
option = getattr(options, key, None)
if option is not None:
extract[key] = option
return extract
|
Extract options values from a configparser, optparse pair.
Options given on command line take precedence over options read in the
configuration file.
Args:
config (dict): option values read from a config file through
configparser
options (optparse.Options): optparse 'options' object containing options
values from the command line
*args (str tuple): name of the options to extract
|
entailment
|
def resize(self, shape, format=None):
""" Set the render-buffer size and format
Parameters
----------
shape : tuple of integers
New shape in yx order. A render buffer is always 2D. For
symmetry with the texture class, a 3-element tuple can also
be given, in which case the last dimension is ignored.
format : {None, 'color', 'depth', 'stencil'}
The buffer format. If None, the current format is maintained.
If that is also None, the format will be set upon attaching
it to a framebuffer. One can also specify the explicit enum:
GL_RGB565, GL_RGBA4, GL_RGB5_A1, GL_DEPTH_COMPONENT16, or
GL_STENCIL_INDEX8
"""
if not self._resizeable:
raise RuntimeError("RenderBuffer is not resizeable")
# Check shape
if not (isinstance(shape, tuple) and len(shape) in (2, 3)):
raise ValueError('RenderBuffer shape must be a 2/3 element tuple')
# Check format
if format is None:
format = self._format # Use current format (may be None)
elif isinstance(format, int):
pass # Do not check, maybe user needs desktop GL formats
elif isinstance(format, string_types):
if format not in ('color', 'depth', 'stencil'):
raise ValueError('RenderBuffer format must be "color", "depth"'
' or "stencil", not %r' % format)
else:
raise ValueError('Invalid RenderBuffer format: %r' % format)
# Store and send GLIR command
self._shape = tuple(shape[:2])
self._format = format
if self._format is not None:
self._glir.command('SIZE', self._id, self._shape, self._format)
|
Set the render-buffer size and format
Parameters
----------
shape : tuple of integers
New shape in yx order. A render buffer is always 2D. For
symmetry with the texture class, a 3-element tuple can also
be given, in which case the last dimension is ignored.
format : {None, 'color', 'depth', 'stencil'}
The buffer format. If None, the current format is maintained.
If that is also None, the format will be set upon attaching
it to a framebuffer. One can also specify the explicit enum:
GL_RGB565, GL_RGBA4, GL_RGB5_A1, GL_DEPTH_COMPONENT16, or
GL_STENCIL_INDEX8
|
entailment
|
def activate(self):
""" Activate/use this frame buffer.
"""
# Send command
self._glir.command('FRAMEBUFFER', self._id, True)
# Associate canvas now
canvas = get_current_canvas()
if canvas is not None:
canvas.context.glir.associate(self.glir)
|
Activate/use this frame buffer.
|
entailment
|
def shape(self):
""" The shape of the Texture/RenderBuffer attached to this FrameBuffer
"""
if self.color_buffer is not None:
return self.color_buffer.shape[:2] # in case its a texture
if self.depth_buffer is not None:
return self.depth_buffer.shape[:2]
if self.stencil_buffer is not None:
return self.stencil_buffer.shape[:2]
raise RuntimeError('FrameBuffer without buffers has undefined shape')
|
The shape of the Texture/RenderBuffer attached to this FrameBuffer
|
entailment
|
def resize(self, shape):
""" Resize all attached buffers with the given shape
Parameters
----------
shape : tuple of two integers
New buffer shape (h, w), to be applied to all currently
attached buffers. For buffers that are a texture, the number
of color channels is preserved.
"""
# Check
if not (isinstance(shape, tuple) and len(shape) == 2):
raise ValueError('RenderBuffer shape must be a 2-element tuple')
# Resize our buffers
for buf in (self.color_buffer, self.depth_buffer, self.stencil_buffer):
if buf is None:
continue
shape_ = shape
if isinstance(buf, Texture2D):
shape_ = shape + (self.color_buffer.shape[-1], )
buf.resize(shape_, buf.format)
|
Resize all attached buffers with the given shape
Parameters
----------
shape : tuple of two integers
New buffer shape (h, w), to be applied to all currently
attached buffers. For buffers that are a texture, the number
of color channels is preserved.
|
entailment
|
def read(self, mode='color', alpha=True):
""" Return array of pixel values in an attached buffer
Parameters
----------
mode : str
The buffer type to read. May be 'color', 'depth', or 'stencil'.
alpha : bool
If True, returns RGBA array. Otherwise, returns RGB.
Returns
-------
buffer : array
3D array of pixels in np.uint8 format.
The array shape is (h, w, 3) or (h, w, 4), with the top-left
corner of the framebuffer at index [0, 0] in the returned array.
"""
_check_valid('mode', mode, ['color', 'depth', 'stencil'])
buffer = getattr(self, mode+'_buffer')
h, w = buffer.shape[:2]
# todo: this is ostensibly required, but not available in gloo.gl
#gl.glReadBuffer(buffer._target)
return read_pixels((0, 0, w, h), alpha=alpha)
|
Return array of pixel values in an attached buffer
Parameters
----------
mode : str
The buffer type to read. May be 'color', 'depth', or 'stencil'.
alpha : bool
If True, returns RGBA array. Otherwise, returns RGB.
Returns
-------
buffer : array
3D array of pixels in np.uint8 format.
The array shape is (h, w, 3) or (h, w, 4), with the top-left
corner of the framebuffer at index [0, 0] in the returned array.
|
entailment
|
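A sketch of the usual offscreen round trip with vispy.gloo (a current GL context, e.g. an open vispy canvas, is assumed; the buffer sizes are made up).

from vispy import gloo

color = gloo.Texture2D(shape=(480, 640, 4))
depth = gloo.RenderBuffer(shape=(480, 640))
fbo = gloo.FrameBuffer(color=color, depth=depth)
with fbo:
    # ... draw gloo programs here ...
    img = fbo.read('color')   # (480, 640, 4) uint8 array, top-left at [0, 0]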
def set_data(self, pos=None, color=None, width=None, connect=None):
""" Set the data used to draw this visual.
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
width:
The width of the line in px. Line widths < 1 px will be rounded up
to 1 px when using the 'gl' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* int numpy arrays specify the exact set of segment pairs to
connect.
* bool numpy arrays specify which _adjacent_ pairs to connect.
"""
if pos is not None:
self._bounds = None
self._pos = pos
self._changed['pos'] = True
if color is not None:
self._color = color
self._changed['color'] = True
if width is not None:
self._width = width
self._changed['width'] = True
if connect is not None:
self._connect = connect
self._changed['connect'] = True
self.update()
|
Set the data used to draw this visual.
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
width:
The width of the line in px. Line widths < 1 px will be rounded up
to 1 px when using the 'gl' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* int numpy arrays specify the exact set of segment pairs to
connect.
* bool numpy arrays specify which _adjacent_ pairs to connect.
|
entailment
|
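A minimal sketch using the scene-graph wrapper of this visual (assuming vispy is installed with a usable GUI backend).

import numpy as np
from vispy import scene

canvas = scene.SceneCanvas(keys='interactive', show=True)
view = canvas.central_widget.add_view()
pos = np.array([[0, 0], [1, 1], [2, 0]], dtype=np.float32)
line = scene.visuals.Line(pos=pos, color='red', width=2,
                          connect='strip', parent=view.scene)
# Later, update everything in place:
line.set_data(pos=pos * 2, color=(0.0, 1.0, 0.0, 1.0), width=3)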
def _compute_bounds(self, axis, view):
"""Get the bounds
Parameters
----------
mode : str
Describes the type of boundary requested. Can be "visual", "data",
or "mouse".
axis : 0, 1, 2
The axis along which to measure the bounding values, in
x-y-z order.
"""
# Can and should we calculate bounds?
if (self._bounds is None) and self._pos is not None:
pos = self._pos
self._bounds = [(pos[:, d].min(), pos[:, d].max())
for d in range(pos.shape[1])]
# Return what we can
if self._bounds is None:
return
else:
if axis < len(self._bounds):
return self._bounds[axis]
else:
return (0, 0)
|
Get the bounds
Parameters
----------
mode : str
Describes the type of boundary requested. Can be "visual", "data",
or "mouse".
axis : 0, 1, 2
The axis along which to measure the bounding values, in
x-y-z order.
|
entailment
|
def _agg_bake(cls, vertices, color, closed=False):
"""
Bake a list of 2D vertices for rendering them as thick line. Each line
segment must have its own vertices because of antialias (this means no
vertex sharing between two adjacent line segments).
"""
n = len(vertices)
P = np.array(vertices).reshape(n, 2).astype(float)
idx = np.arange(n) # used to eventually tile the color array
dx, dy = P[0] - P[-1]
d = np.sqrt(dx*dx+dy*dy)
# If closed, make sure first vertex = last vertex (+/- epsilon=1e-10)
if closed and d > 1e-10:
P = np.append(P, P[0]).reshape(n+1, 2)
idx = np.append(idx, idx[-1])
n += 1
V = np.zeros(len(P), dtype=cls._agg_vtype)
V['a_position'] = P
# Tangents & norms
T = P[1:] - P[:-1]
N = np.sqrt(T[:, 0]**2 + T[:, 1]**2)
# T /= N.reshape(len(T),1)
V['a_tangents'][+1:, :2] = T
V['a_tangents'][0, :2] = T[-1] if closed else T[0]
V['a_tangents'][:-1, 2:] = T
V['a_tangents'][-1, 2:] = T[0] if closed else T[-1]
# Angles
T1 = V['a_tangents'][:, :2]
T2 = V['a_tangents'][:, 2:]
A = np.arctan2(T1[:, 0]*T2[:, 1]-T1[:, 1]*T2[:, 0],
T1[:, 0]*T2[:, 0]+T1[:, 1]*T2[:, 1])
V['a_angles'][:-1, 0] = A[:-1]
V['a_angles'][:-1, 1] = A[+1:]
# Segment
L = np.cumsum(N)
V['a_segment'][+1:, 0] = L
V['a_segment'][:-1, 1] = L
# V['a_lengths'][:,2] = L[-1]
# Step 1: A -- B -- C => A -- B, B' -- C
V = np.repeat(V, 2, axis=0)[1:-1]
V['a_segment'][1:] = V['a_segment'][:-1]
V['a_angles'][1:] = V['a_angles'][:-1]
V['a_texcoord'][0::2] = -1
V['a_texcoord'][1::2] = +1
idx = np.repeat(idx, 2)[1:-1]
# Step 2: A -- B, B' -- C -> A0/A1 -- B0/B1, B'0/B'1 -- C0/C1
V = np.repeat(V, 2, axis=0)
V['a_texcoord'][0::2, 1] = -1
V['a_texcoord'][1::2, 1] = +1
idx = np.repeat(idx, 2)
I = np.resize(np.array([0, 1, 2, 1, 2, 3], dtype=np.uint32),
(n-1)*(2*3))
I += np.repeat(4*np.arange(n-1, dtype=np.uint32), 6)
# Length
V['alength'] = L[-1] * np.ones(len(V))
# Color
if color.ndim == 1:
color = np.tile(color, (len(V), 1))
elif color.ndim == 2 and len(color) == n:
color = color[idx]
else:
raise ValueError('Color length %s does not match number of '
'vertices %s' % (len(color), n))
V['color'] = color
return V, I
|
Bake a list of 2D vertices for rendering them as thick line. Each line
segment must have its own vertices because of antialias (this means no
vertex sharing between two adjacent line segments).
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.