| sentence1 | sentence2 | label |
|---|---|---|
def has_face_color(self):
"""Return True if this data set has face color information"""
for v in (self._face_colors, self._face_colors_indexed_by_faces,
self._face_colors_indexed_by_edges):
if v is not None:
return True
return False
|
Return True if this data set has face color information
|
entailment
|
def get_face_normals(self, indexed=None):
"""Get face normals
Parameters
----------
indexed : str | None
If None, return an array (Nf, 3) of normal vectors for each face.
If 'faces', then instead return an indexed array (Nf, 3, 3)
(this is just the same array with each vector copied three times).
Returns
-------
normals : ndarray
The normals.
"""
if self._face_normals is None:
v = self.get_vertices(indexed='faces')
self._face_normals = np.cross(v[:, 1] - v[:, 0],
v[:, 2] - v[:, 0])
if indexed is None:
return self._face_normals
elif indexed == 'faces':
if self._face_normals_indexed_by_faces is None:
norms = np.empty((self._face_normals.shape[0], 3, 3),
dtype=np.float32)
norms[:] = self._face_normals[:, np.newaxis, :]
self._face_normals_indexed_by_faces = norms
return self._face_normals_indexed_by_faces
else:
raise Exception("Invalid indexing mode. Accepts: None, 'faces'")
|
Get face normals
Parameters
----------
indexed : str | None
If None, return an array (Nf, 3) of normal vectors for each face.
If 'faces', then instead return an indexed array (Nf, 3, 3)
(this is just the same array with each vector copied three times).
Returns
-------
normals : ndarray
The normals.
|
entailment
|
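As a minimal standalone illustration of the cross-product step above (assuming only NumPy and a made-up two-triangle mesh in the (Nf, 3, 3) layout that get_vertices(indexed='faces') returns):

import numpy as np

# Two hypothetical triangles, each as three (x, y, z) corners: shape (Nf, 3, 3)
v = np.array([[[0, 0, 0], [1, 0, 0], [0, 1, 0]],
              [[0, 0, 0], [0, 1, 0], [0, 0, 1]]], dtype=np.float32)

# Same rule as above: normal = (v1 - v0) x (v2 - v0), one (unnormalized) vector per face
normals = np.cross(v[:, 1] - v[:, 0], v[:, 2] - v[:, 0])
print(normals)  # [[0. 0. 1.] [1. 0. 0.]]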
def get_vertex_normals(self, indexed=None):
"""Get vertex normals
Parameters
----------
indexed : str | None
If None, return an (N, 3) array of normal vectors with one entry
per unique vertex in the mesh. If indexed is 'faces', then the
array will contain three normal vectors per face (and some
vertices may be repeated).
Returns
-------
normals : ndarray
The normals.
"""
if self._vertex_normals is None:
faceNorms = self.get_face_normals()
vertFaces = self.get_vertex_faces()
self._vertex_normals = np.empty(self._vertices.shape,
dtype=np.float32)
for vindex in xrange(self._vertices.shape[0]):
faces = vertFaces[vindex]
if len(faces) == 0:
self._vertex_normals[vindex] = (0, 0, 0)
continue
norms = faceNorms[faces] # get all face normals
norm = norms.sum(axis=0) # sum normals
renorm = (norm**2).sum()**0.5
if renorm > 0:
norm /= renorm
self._vertex_normals[vindex] = norm
if indexed is None:
return self._vertex_normals
elif indexed == 'faces':
return self._vertex_normals[self.get_faces()]
else:
raise Exception("Invalid indexing mode. Accepts: None, 'faces'")
|
Get vertex normals
Parameters
----------
indexed : str | None
If None, return an (N, 3) array of normal vectors with one entry
per unique vertex in the mesh. If indexed is 'faces', then the
array will contain three normal vectors per face (and some
vertices may be repeated).
Returns
-------
normals : ndarray
The normals.
|
entailment
|
def get_vertex_colors(self, indexed=None):
"""Get vertex colors
Parameters
----------
indexed : str | None
If None, return an array (Nv, 4) of vertex colors.
If indexed=='faces', then instead return an indexed array
(Nf, 3, 4).
Returns
-------
colors : ndarray
The vertex colors.
"""
if indexed is None:
return self._vertex_colors
elif indexed == 'faces':
if self._vertex_colors_indexed_by_faces is None:
self._vertex_colors_indexed_by_faces = \
self._vertex_colors[self.get_faces()]
return self._vertex_colors_indexed_by_faces
else:
raise Exception("Invalid indexing mode. Accepts: None, 'faces'")
|
Get vertex colors
Parameters
----------
indexed : str | None
If None, return an array (Nv, 4) of vertex colors.
If indexed=='faces', then instead return an indexed array
(Nf, 3, 4).
Returns
-------
colors : ndarray
The vertex colors.
|
entailment
|
def set_vertex_colors(self, colors, indexed=None):
"""Set the vertex color array
Parameters
----------
colors : array
Array of colors. Must have shape (Nv, 4) (indexing by vertex)
or shape (Nf, 3, 4) (vertices indexed by face).
indexed : str | None
Should be 'faces' if colors are indexed by faces.
"""
colors = _fix_colors(np.asarray(colors))
if indexed is None:
if colors.ndim != 2:
raise ValueError('colors must be 2D if indexed is None')
if colors.shape[0] != self.n_vertices:
raise ValueError('incorrect number of colors %s, expected %s'
% (colors.shape[0], self.n_vertices))
self._vertex_colors = colors
self._vertex_colors_indexed_by_faces = None
elif indexed == 'faces':
if colors.ndim != 3:
raise ValueError('colors must be 3D if indexed is "faces"')
if colors.shape[0] != self.n_faces:
raise ValueError('incorrect number of faces')
self._vertex_colors = None
self._vertex_colors_indexed_by_faces = colors
else:
raise ValueError('indexed must be None or "faces"')
|
Set the vertex color array
Parameters
----------
colors : array
Array of colors. Must have shape (Nv, 4) (indexing by vertex)
or shape (Nf, 3, 4) (vertices indexed by face).
indexed : str | None
Should be 'faces' if colors are indexed by faces.
|
entailment
|
def get_face_colors(self, indexed=None):
"""Get the face colors
Parameters
----------
indexed : str | None
If indexed is None, return (Nf, 4) array of face colors.
If indexed=='faces', then instead return an indexed array
(Nf, 3, 4) (note this is just the same array with each color
repeated three times).
Returns
-------
colors : ndarray
The colors.
"""
if indexed is None:
return self._face_colors
elif indexed == 'faces':
if (self._face_colors_indexed_by_faces is None and
self._face_colors is not None):
Nf = self._face_colors.shape[0]
self._face_colors_indexed_by_faces = \
np.empty((Nf, 3, 4), dtype=self._face_colors.dtype)
self._face_colors_indexed_by_faces[:] = \
self._face_colors.reshape(Nf, 1, 4)
return self._face_colors_indexed_by_faces
else:
raise Exception("Invalid indexing mode. Accepts: None, 'faces'")
|
Get the face colors
Parameters
----------
indexed : str | None
If indexed is None, return (Nf, 4) array of face colors.
If indexed=='faces', then instead return an indexed array
(Nf, 3, 4) (note this is just the same array with each color
repeated three times).
Returns
-------
colors : ndarray
The colors.
|
entailment
|
def set_face_colors(self, colors, indexed=None):
"""Set the face color array
Parameters
----------
colors : array
Array of colors. Must have shape (Nf, 4) (indexed by face),
or shape (Nf, 3, 4) (face colors indexed by faces).
indexed : str | None
Should be 'faces' if colors are indexed by faces.
"""
colors = _fix_colors(colors)
if colors.shape[0] != self.n_faces:
raise ValueError('incorrect number of colors %s, expected %s'
% (colors.shape[0], self.n_faces))
if indexed is None:
if colors.ndim != 2:
raise ValueError('colors must be 2D if indexed is None')
self._face_colors = colors
self._face_colors_indexed_by_faces = None
elif indexed == 'faces':
if colors.ndim != 3:
raise ValueError('colors must be 3D if indexed is "faces"')
self._face_colors = None
self._face_colors_indexed_by_faces = colors
else:
raise ValueError('indexed must be None or "faces"')
|
Set the face color array
Parameters
----------
colors : array
Array of colors. Must have shape (Nf, 4) (indexed by face),
or shape (Nf, 3, 4) (face colors indexed by faces).
indexed : str | None
Should be 'faces' if colors are indexed by faces.
|
entailment
|
def n_faces(self):
"""The number of faces in the mesh"""
if self._faces is not None:
return self._faces.shape[0]
elif self._vertices_indexed_by_faces is not None:
return self._vertices_indexed_by_faces.shape[0]
|
The number of faces in the mesh
|
entailment
|
def get_vertex_faces(self):
"""
List mapping each vertex index to a list of face indices that use it.
"""
if self._vertex_faces is None:
self._vertex_faces = [[] for i in xrange(len(self.get_vertices()))]
for i in xrange(self._faces.shape[0]):
face = self._faces[i]
for ind in face:
self._vertex_faces[ind].append(i)
return self._vertex_faces
|
List mapping each vertex index to a list of face indices that use it.
|
entailment
|
def save(self):
"""Serialize this mesh to a string appropriate for disk storage
Returns
-------
state : dict
The state.
"""
import pickle
if self._faces is not None:
names = ['_vertices', '_faces']
else:
names = ['_vertices_indexed_by_faces']
if self._vertex_colors is not None:
names.append('_vertex_colors')
elif self._vertex_colors_indexed_by_faces is not None:
names.append('_vertex_colors_indexed_by_faces')
if self._face_colors is not None:
names.append('_face_colors')
elif self._face_colors_indexed_by_faces is not None:
names.append('_face_colors_indexed_by_faces')
state = dict([(n, getattr(self, n)) for n in names])
return pickle.dumps(state)
|
Serialize this mesh to a string appropriate for disk storage
Returns
-------
state : dict
The state.
|
entailment
|
def restore(self, state):
"""Restore the state of a mesh previously saved using save()
Parameters
----------
state : dict
The previous state.
"""
import pickle
state = pickle.loads(state)
for k in state:
if isinstance(state[k], list):
state[k] = np.array(state[k])
setattr(self, k, state[k])
|
Restore the state of a mesh previously saved using save()
Parameters
----------
state : dict
The previous state.
|
entailment
|
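A hypothetical round-trip sketch of save()/restore(), assuming the methods above belong to vispy's MeshData class (the import path is an assumption):

import numpy as np
from vispy.geometry import MeshData  # assumed home of the save()/restore() methods above

verts = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=np.float32)
faces = np.array([[0, 1, 2]], dtype=np.uint32)

mesh = MeshData(vertices=verts, faces=faces)
blob = mesh.save()       # pickled dict holding only the arrays that are actually set

restored = MeshData()
restored.restore(blob)   # reattaches '_vertices' and '_faces' on the new instance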
def cubehelix(start=0.5, rot=1, gamma=1.0, reverse=True, nlev=256.,
minSat=1.2, maxSat=1.2, minLight=0., maxLight=1.,
**kwargs):
"""
A full implementation of Dave Green's "cubehelix" for Matplotlib.
Based on the FORTRAN 77 code provided in
D.A. Green, 2011, BASI, 39, 289.
http://adsabs.harvard.edu/abs/2011arXiv1108.5083G
User can adjust all parameters of the cubehelix algorithm.
This enables much greater flexibility in choosing color maps, while
always ensuring the color map scales in intensity from black
to white. A few simple examples:
Default color map settings produce the standard "cubehelix".
Create color map in only blues by setting rot=0 and start=0.
Create reverse (white to black) backwards through the rainbow once
by setting rot=1 and reverse=True.
Parameters
----------
start : scalar, optional
Sets the starting position in the color space. 0=blue, 1=red,
2=green. Defaults to 0.5.
rot : scalar, optional
The number of rotations through the rainbow. Can be positive
or negative, indicating direction of rainbow. Negative values
correspond to Blue->Red direction. Defaults to 1
gamma : scalar, optional
The gamma correction for intensity. Defaults to 1.0
reverse : boolean, optional
Set to True to reverse the color map. Will go from black to
white. Good for density plots where shade~density. Defaults to True
nlev : scalar, optional
Defines the number of discrete levels to render colors at.
Defaults to 256.
sat : scalar, optional
The saturation intensity factor. Defaults to 1.2
NOTE: this was formerly known as "hue" parameter
minSat : scalar, optional
Sets the minimum-level saturation. Defaults to 1.2
maxSat : scalar, optional
Sets the maximum-level saturation. Defaults to 1.2
startHue : scalar, optional
Sets the starting color, ranging from [0, 360], as in
D3 version by @mbostock
NOTE: overrides values in start parameter
endHue : scalar, optional
Sets the ending color, ranging from [0, 360], as in
D3 version by @mbostock
NOTE: overrides values in rot parameter
minLight : scalar, optional
Sets the minimum lightness value. Defaults to 0.
maxLight : scalar, optional
Sets the maximum lightness value. Defaults to 1.
Returns
-------
data : ndarray, shape (N, 3)
Control points.
"""
# override start and rot if startHue and endHue are set
if kwargs is not None:
if 'startHue' in kwargs:
start = (kwargs.get('startHue') / 360. - 1.) * 3.
if 'endHue' in kwargs:
rot = kwargs.get('endHue') / 360. - start / 3. - 1.
if 'sat' in kwargs:
minSat = kwargs.get('sat')
maxSat = kwargs.get('sat')
# set up the parameters
fract = np.linspace(minLight, maxLight, nlev)
angle = 2.0 * pi * (start / 3.0 + rot * fract + 1.)
fract = fract**gamma
satar = np.linspace(minSat, maxSat, nlev)
amp = satar * fract * (1. - fract) / 2.
# compute the RGB vectors according to main equations
red = fract + amp * (-0.14861 * np.cos(angle) + 1.78277 * np.sin(angle))
grn = fract + amp * (-0.29227 * np.cos(angle) - 0.90649 * np.sin(angle))
blu = fract + amp * (1.97294 * np.cos(angle))
# find where RGB are outside the range [0, 1], clip
red[np.where((red > 1.))] = 1.
grn[np.where((grn > 1.))] = 1.
blu[np.where((blu > 1.))] = 1.
red[np.where((red < 0.))] = 0.
grn[np.where((grn < 0.))] = 0.
blu[np.where((blu < 0.))] = 0.
# optional color reverse
if reverse is True:
red = red[::-1]
blu = blu[::-1]
grn = grn[::-1]
return np.array((red, grn, blu)).T
|
A full implementation of Dave Green's "cubehelix" for Matplotlib.
Based on the FORTRAN 77 code provided in
D.A. Green, 2011, BASI, 39, 289.
http://adsabs.harvard.edu/abs/2011arXiv1108.5083G
User can adjust all parameters of the cubehelix algorithm.
This enables much greater flexibility in choosing color maps, while
always ensuring the color map scales in intensity from black
to white. A few simple examples:
Default color map settings produce the standard "cubehelix".
Create color map in only blues by setting rot=0 and start=0.
Create reverse (white to black) backwards through the rainbow once
by setting rot=1 and reverse=True.
Parameters
----------
start : scalar, optional
Sets the starting position in the color space. 0=blue, 1=red,
2=green. Defaults to 0.5.
rot : scalar, optional
The number of rotations through the rainbow. Can be positive
or negative, indicating direction of rainbow. Negative values
correspond to Blue->Red direction. Defaults to 1
gamma : scalar, optional
The gamma correction for intensity. Defaults to 1.0
reverse : boolean, optional
Set to True to reverse the color map. Will go from black to
white. Good for density plots where shade~density. Defaults to True
nlev : scalar, optional
Defines the number of discrete levels to render colors at.
Defaults to 256.
sat : scalar, optional
The saturation intensity factor. Defaults to 1.2
NOTE: this was formerly known as "hue" parameter
minSat : scalar, optional
Sets the minimum-level saturation. Defaults to 1.2
maxSat : scalar, optional
Sets the maximum-level saturation. Defaults to 1.2
startHue : scalar, optional
Sets the starting color, ranging from [0, 360], as in
D3 version by @mbostock
NOTE: overrides values in start parameter
endHue : scalar, optional
Sets the ending color, ranging from [0, 360], as in
D3 version by @mbostock
NOTE: overrides values in rot parameter
minLight : scalar, optional
Sets the minimum lightness value. Defaults to 0.
maxLight : scalar, optional
Sets the maximum lightness value. Defaults to 1.
Returns
-------
data : ndarray, shape (N, 3)
Control points.
|
entailment
|
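A minimal usage sketch of the function above, assuming numpy and a module-level pi are imported as the body requires:

import numpy as np
from math import pi  # the function body relies on a module-level `pi`

# Pass an integer nlev: recent numpy requires an integer sample count for linspace.
rgb = cubehelix(nlev=256)
print(rgb.shape)         # (256, 3) control points, clipped to [0, 1]
print(rgb[0], rgb[-1])   # runs white -> black because reverse=True by default

# A blue-only ramp, as suggested in the docstring
blues = cubehelix(start=0., rot=0, reverse=False, nlev=256)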
def color_to_hex(color):
"""Convert matplotlib color code to hex color code"""
if color is None or colorConverter.to_rgba(color)[3] == 0:
return 'none'
else:
rgb = colorConverter.to_rgb(color)
return '#{0:02X}{1:02X}{2:02X}'.format(*(int(255 * c) for c in rgb))
|
Convert matplotlib color code to hex color code
|
entailment
|
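A small usage sketch, assuming matplotlib's colorConverter is imported as the function expects:

from matplotlib.colors import colorConverter  # needed by color_to_hex above

print(color_to_hex('red'))            # '#FF0000'
print(color_to_hex((0.0, 0.0, 0.0)))  # '#000000'
print(color_to_hex(None))             # 'none' (missing or fully transparent color)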
def _many_to_one(input_dict):
"""Convert a many-to-one mapping to a one-to-one mapping"""
return dict((key, val)
for keys, val in input_dict.items()
for key in keys)
|
Convert a many-to-one mapping to a one-to-one mapping
|
entailment
|
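A tiny worked example of the flattening (the mapping below is made up for illustration):

groups = {('solid', '-'): 'none',
          ('dashed', '--'): '6,6'}

print(_many_to_one(groups))
# {'solid': 'none', '-': 'none', 'dashed': '6,6', '--': '6,6'}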
def get_dasharray(obj):
"""Get an SVG dash array for the given matplotlib linestyle
Parameters
----------
obj : matplotlib object
The matplotlib line or path object, which must have a get_linestyle()
method which returns a valid matplotlib line code
Returns
-------
dasharray : string
The HTML/SVG dasharray code associated with the object.
"""
if obj.__dict__.get('_dashSeq', None) is not None:
return ','.join(map(str, obj._dashSeq))
else:
ls = obj.get_linestyle()
dasharray = LINESTYLES.get(ls, 'not found')
if dasharray == 'not found':
warnings.warn("line style '{0}' not understood: "
"defaulting to solid line.".format(ls))
dasharray = LINESTYLES['solid']
return dasharray
|
Get an SVG dash array for the given matplotlib linestyle
Parameters
----------
obj : matplotlib object
The matplotlib line or path object, which must have a get_linestyle()
method which returns a valid matplotlib line code
Returns
-------
dasharray : string
The HTML/SVG dasharray code associated with the object.
|
entailment
|
def SVG_path(path, transform=None, simplify=False):
"""Construct the vertices and SVG codes for the path
Parameters
----------
path : matplotlib.Path object
transform : matplotlib transform (optional)
if specified, the path will be transformed before computing the output.
Returns
-------
vertices : array
The shape (M, 2) array of vertices of the Path. Note that some Path
codes require multiple vertices, so the length of these vertices may
be longer than the list of path codes.
path_codes : list
A length N list of single-character path codes, N <= M. Each code is
a single character, in ['L','M','S','C','Z']. See the standard SVG
path specification for a description of these.
"""
if transform is not None:
path = path.transformed(transform)
vc_tuples = [(vertices if path_code != Path.CLOSEPOLY else [],
PATH_DICT[path_code])
for (vertices, path_code)
in path.iter_segments(simplify=simplify)]
if not vc_tuples:
# empty path is a special case
return np.zeros((0, 2)), []
else:
vertices, codes = zip(*vc_tuples)
vertices = np.array(list(itertools.chain(*vertices))).reshape(-1, 2)
return vertices, list(codes)
|
Construct the vertices and SVG codes for the path
Parameters
----------
path : matplotlib.Path object
transform : matplotlib transform (optional)
if specified, the path will be transformed before computing the output.
Returns
-------
vertices : array
The shape (M, 2) array of vertices of the Path. Note that some Path
codes require multiple vertices, so the length of these vertices may
be longer than the list of path codes.
path_codes : list
A length N list of single-character path codes, N <= M. Each code is
a single character, in ['L','M','S','C','Z']. See the standard SVG
path specification for a description of these.
|
entailment
|
def get_path_style(path, fill=True):
"""Get the style dictionary for matplotlib path objects"""
style = {}
style['alpha'] = path.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['edgecolor'] = color_to_hex(path.get_edgecolor())
if fill:
style['facecolor'] = color_to_hex(path.get_facecolor())
else:
style['facecolor'] = 'none'
style['edgewidth'] = path.get_linewidth()
style['dasharray'] = get_dasharray(path)
style['zorder'] = path.get_zorder()
return style
|
Get the style dictionary for matplotlib path objects
|
entailment
|
def get_line_style(line):
"""Get the style dictionary for matplotlib line objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['color'] = color_to_hex(line.get_color())
style['linewidth'] = line.get_linewidth()
style['dasharray'] = get_dasharray(line)
style['zorder'] = line.get_zorder()
return style
|
Get the style dictionary for matplotlib line objects
|
entailment
|
def get_marker_style(line):
"""Get the style dictionary for matplotlib marker objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['facecolor'] = color_to_hex(line.get_markerfacecolor())
style['edgecolor'] = color_to_hex(line.get_markeredgecolor())
style['edgewidth'] = line.get_markeredgewidth()
style['marker'] = line.get_marker()
markerstyle = MarkerStyle(line.get_marker())
markersize = line.get_markersize()
markertransform = (markerstyle.get_transform() +
Affine2D().scale(markersize, -markersize))
style['markerpath'] = SVG_path(markerstyle.get_path(),
markertransform)
style['markersize'] = markersize
style['zorder'] = line.get_zorder()
return style
|
Get the style dictionary for matplotlib marker objects
|
entailment
|
def get_text_style(text):
"""Return the text style dict for a text instance"""
style = {}
style['alpha'] = text.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['fontsize'] = text.get_size()
style['color'] = color_to_hex(text.get_color())
style['halign'] = text.get_horizontalalignment() # left, center, right
style['valign'] = text.get_verticalalignment() # baseline, center, top
style['malign'] = text._multialignment # text alignment when '\n' in text
style['rotation'] = text.get_rotation()
style['zorder'] = text.get_zorder()
return style
|
Return the text style dict for a text instance
|
entailment
|
def get_axis_properties(axis):
"""Return the property dictionary for a matplotlib.Axis instance"""
props = {}
label1On = axis._major_tick_kw.get('label1On', True)
if isinstance(axis, matplotlib.axis.XAxis):
if label1On:
props['position'] = "bottom"
else:
props['position'] = "top"
elif isinstance(axis, matplotlib.axis.YAxis):
if label1On:
props['position'] = "left"
else:
props['position'] = "right"
else:
raise ValueError("{0} should be an Axis instance".format(axis))
# Use tick values if appropriate
locator = axis.get_major_locator()
props['nticks'] = len(locator())
if isinstance(locator, ticker.FixedLocator):
props['tickvalues'] = list(locator())
else:
props['tickvalues'] = None
# Find tick formats
formatter = axis.get_major_formatter()
if isinstance(formatter, ticker.NullFormatter):
props['tickformat'] = ""
elif isinstance(formatter, ticker.FixedFormatter):
props['tickformat'] = list(formatter.seq)
elif not any(label.get_visible() for label in axis.get_ticklabels()):
props['tickformat'] = ""
else:
props['tickformat'] = None
# Get axis scale
props['scale'] = axis.get_scale()
# Get major tick label size (assumes that's all we really care about!)
labels = axis.get_ticklabels()
if labels:
props['fontsize'] = labels[0].get_fontsize()
else:
props['fontsize'] = None
# Get associated grid
props['grid'] = get_grid_style(axis)
return props
|
Return the property dictionary for a matplotlib.Axis instance
|
entailment
|
def iter_all_children(obj, skipContainers=False):
"""
Returns an iterator over all children and nested children using
obj's get_children() method
If skipContainers is True, only childless objects are returned.
"""
if hasattr(obj, 'get_children') and len(obj.get_children()) > 0:
for child in obj.get_children():
if not skipContainers:
yield child
# could use `yield from` in python 3...
for grandchild in iter_all_children(child, skipContainers):
yield grandchild
else:
yield obj
|
Returns an iterator over all children and nested children using
obj's get_children() method
If skipContainers is True, only childless objects are returned.
|
entailment
|
def image_to_base64(image):
"""
Convert a matplotlib image to a base64 png representation
Parameters
----------
image : matplotlib image object
The image to be converted.
Returns
-------
image_base64 : string
The UTF8-encoded base64 string representation of the png image.
"""
ax = image.axes
binary_buffer = io.BytesIO()
# image is saved in axes coordinates: we need to temporarily
# set the correct limits to get the correct image
lim = ax.axis()
ax.axis(image.get_extent())
image.write_png(binary_buffer)
ax.axis(lim)
binary_buffer.seek(0)
return base64.b64encode(binary_buffer.read()).decode('utf-8')
|
Convert a matplotlib image to a base64 png representation
Parameters
----------
image : matplotlib image object
The image to be converted.
Returns
-------
image_base64 : string
The UTF8-encoded base64 string representation of the png image.
|
entailment
|
def set_interactive(enabled=True, app=None):
"""Activate the IPython hook for VisPy. If the app is not specified, the
default is used.
"""
if enabled:
inputhook_manager.enable_gui('vispy', app)
else:
inputhook_manager.disable_gui()
|
Activate the IPython hook for VisPy. If the app is not specified, the
default is used.
|
entailment
|
def _resize_buffers(self, font_scale):
"""Resize buffers only if necessary"""
new_sizes = (font_scale,) + self.size
if new_sizes == self._current_sizes: # don't need resize
return
self._n_rows = int(max(self.size[1] /
(self._char_height * font_scale), 1))
self._n_cols = int(max(self.size[0] /
(self._char_width * font_scale), 1))
self._bytes_012 = np.zeros((self._n_rows, self._n_cols, 3), np.float32)
self._bytes_345 = np.zeros((self._n_rows, self._n_cols, 3), np.float32)
pos = np.empty((self._n_rows, self._n_cols, 2), np.float32)
C, R = np.meshgrid(np.arange(self._n_cols), np.arange(self._n_rows))
# We are in left, top orientation
x_off = 4.
y_off = 4 - self.size[1] / font_scale
pos[..., 0] = x_off + self._char_width * C
pos[..., 1] = y_off + self._char_height * R
self._position = VertexBuffer(pos)
# Restore lines
for ii, line in enumerate(self._text_lines[:self._n_rows]):
self._insert_text_buf(line, ii)
self._current_sizes = new_sizes
|
Resize buffers only if necessary
|
entailment
|
def clear(self):
"""Clear the console"""
if hasattr(self, '_bytes_012'):
self._bytes_012.fill(0)
self._bytes_345.fill(0)
self._text_lines = [] * self._n_rows
self._pending_writes = []
|
Clear the console
|
entailment
|
def write(self, text='', wrap=True):
"""Write text and scroll
Parameters
----------
text : str
Text to write. ``''`` can be used for a blank line, as a newline
is automatically added to the end of each line.
wrap : bool
If True, long messages will be wrapped to span multiple lines.
"""
# Clear line
if not isinstance(text, string_types):
raise TypeError('text must be a string')
# ensure we only have ASCII chars
text = text.encode('utf-8').decode('ascii', errors='replace')
self._pending_writes.append((text, wrap))
self.update()
|
Write text and scroll
Parameters
----------
text : str
Text to write. ``''`` can be used for a blank line, as a newline
is automatically added to the end of each line.
wrap : bool
If True, long messages will be wrapped to span multiple lines.
|
entailment
|
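A hypothetical usage sketch, assuming write()/clear() above belong to the Console widget in vispy.scene:

from vispy import scene, app

canvas = scene.SceneCanvas(keys='interactive', size=(400, 200), show=True)
console = scene.Console(text_color='green', font_size=12.)
canvas.central_widget.add_widget(console)

console.write('hello world')   # queued, rendered on the next draw
console.write('')              # blank line
# app.run()                    # uncomment to start the event loop in a script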
def _do_pending_writes(self):
"""Do any pending text writes"""
for text, wrap in self._pending_writes:
# truncate in case of *really* long messages
text = text[-self._n_cols*self._n_rows:]
text = text.split('\n')
text = [t if len(t) > 0 else '' for t in text]
nr, nc = self._n_rows, self._n_cols
for para in text:
para = para[:nc] if not wrap else para
lines = [para[ii:(ii+nc)] for ii in range(0, len(para), nc)]
lines = [''] if len(lines) == 0 else lines
for line in lines:
# Update row and scroll if necessary
self._text_lines.insert(0, line)
self._text_lines = self._text_lines[:nr]
self._bytes_012[1:] = self._bytes_012[:-1]
self._bytes_345[1:] = self._bytes_345[:-1]
self._insert_text_buf(line, 0)
self._pending_writes = []
|
Do any pending text writes
|
entailment
|
def _insert_text_buf(self, line, idx):
"""Insert text into bytes buffers"""
self._bytes_012[idx] = 0
self._bytes_345[idx] = 0
# Crop text if necessary
I = np.array([ord(c) - 32 for c in line[:self._n_cols]])
I = np.clip(I, 0, len(__font_6x8__)-1)
if len(I) > 0:
b = __font_6x8__[I]
self._bytes_012[idx, :len(I)] = b[:, :3]
self._bytes_345[idx, :len(I)] = b[:, 3:]
|
Insert text into bytes buffers
|
entailment
|
def replace(self, str1, str2):
""" Set verbatim code replacement
It is strongly recommended to use function['$foo'] = 'bar' where
possible because template variables are less likely to change
than the code itself in future versions of vispy.
Parameters
----------
str1 : str
String to replace
str2 : str
String to replace str1 with
"""
if str2 != self._replacements.get(str1, None):
self._replacements[str1] = str2
self.changed(code_changed=True)
|
Set verbatim code replacement
It is strongly recommended to use function['$foo'] = 'bar' where
possible because template variables are less likely to change
than the code itself in future versions of vispy.
Parameters
----------
str1 : str
String to replace
str2 : str
String to replace str1 with
|
entailment
|
def _parse_template_vars(self):
""" find all template variables in self._code, excluding the
function name.
"""
template_vars = set()
for var in parsing.find_template_variables(self._code):
var = var.lstrip('$')
if var == self.name:
continue
if var in ('pre', 'post'):
raise ValueError('GLSL uses reserved template variable $%s' %
var)
template_vars.add(var)
return template_vars
|
find all template variables in self._code, excluding the
function name.
|
entailment
|
def _get_replaced_code(self, names):
""" Return code, with new name, expressions, and replacements applied.
"""
code = self._code
# Modify name
fname = names[self]
code = code.replace(" " + self.name + "(", " " + fname + "(")
# Apply string replacements first -- these may contain $placeholders
for key, val in self._replacements.items():
code = code.replace(key, val)
# Apply assignments to the end of the function
# Collect post lines
post_lines = []
for key, val in self._assignments.items():
if isinstance(key, Variable):
key = names[key]
if isinstance(val, ShaderObject):
val = val.expression(names)
line = ' %s = %s;' % (key, val)
post_lines.append(line)
# Add a default $post placeholder if needed
if 'post' in self._expressions:
post_lines.append(' $post')
# Apply placeholders for hooks
post_text = '\n'.join(post_lines)
if post_text:
post_text = '\n' + post_text + '\n'
code = code.rpartition('}')
code = code[0] + post_text + code[1] + code[2]
# Add a default $pre placeholder if needed
if 'pre' in self._expressions:
m = re.search(fname + r'\s*\([^{]*\)\s*{', code)
if m is None:
raise RuntimeError("Cound not find beginning of function '%s'"
% fname)
ind = m.span()[1]
code = code[:ind] + "\n $pre\n" + code[ind:]
# Apply template variables
for key, val in self._expressions.items():
val = val.expression(names)
search = r'\$' + key + r'($|[^a-zA-Z0-9_])'
code = re.sub(search, val+r'\1', code)
# Done
if '$' in code:
v = parsing.find_template_variables(code)
logger.warning('Unsubstituted placeholders in code: %s\n'
' replacements made: %s',
v, list(self._expressions.keys()))
return code + '\n'
|
Return code, with new name, expressions, and replacements applied.
|
entailment
|
def _clean_code(self, code):
""" Return *code* with indentation and leading/trailing blank lines
removed.
"""
lines = code.split("\n")
min_indent = 100
for line in lines:
if line.strip() != "":
indent = len(line) - len(line.lstrip())
min_indent = min(indent, min_indent)
if min_indent > 0:
lines = [line[min_indent:] for line in lines]
code = "\n".join(lines)
return code
|
Return *code* with indentation and leading/trailing blank lines
removed.
|
entailment
|
def add_chain(self, var):
"""
Create a new ChainFunction and attach to $var.
"""
chain = FunctionChain(var, [])
self._chains[var] = chain
self[var] = chain
|
Create a new ChainFunction and attach to $var.
|
entailment
|
def append(self, function, update=True):
""" Append a new function to the end of this chain.
"""
self._funcs.append(function)
self._add_dep(function)
if update:
self._update()
|
Append a new function to the end of this chain.
|
entailment
|
def insert(self, index, function, update=True):
""" Insert a new function into the chain at *index*.
"""
self._funcs.insert(index, function)
self._add_dep(function)
if update:
self._update()
|
Insert a new function into the chain at *index*.
|
entailment
|
def remove(self, function, update=True):
""" Remove a function from the chain.
"""
self._funcs.remove(function)
self._remove_dep(function)
if update:
self._update()
|
Remove a function from the chain.
|
entailment
|
def add(self, item, position=5):
"""Add an item to the list unless it is already present.
If the item is an expression, then a semicolon will be appended to it
in the final compiled code.
"""
if item in self.items:
return
self.items[item] = position
self._add_dep(item)
self.order = None
self.changed(code_changed=True)
|
Add an item to the list unless it is already present.
If the item is an expression, then a semicolon will be appended to it
in the final compiled code.
|
entailment
|
def remove(self, item):
"""Remove an item from the list.
"""
self.items.pop(item)
self._remove_dep(item)
self.order = None
self.changed(code_changed=True)
|
Remove an item from the list.
|
entailment
|
def faces(self):
"""Return an array (Nf, 3) of vertex indexes, three per triangular
face in the mesh.
If faces have not been computed for this mesh, the function
computes them.
If no vertices or faces are specified, the function returns None.
"""
if self._faces is None:
if self._vertices is None:
return None
self.triangulate()
return self._faces
|
Return an array (Nf, 3) of vertex indexes, three per triangular
face in the mesh.
If faces have not been computed for this mesh, the function
computes them.
If no vertices or faces are specified, the function returns None.
|
entailment
|
def vertices(self):
"""Return an array (Nf, 3) of vertices.
If only faces exist, the function computes the vertices and
returns them.
If no vertices or faces are specified, the function returns None.
"""
if self._faces is None:
if self._vertices is None:
return None
self.triangulate()
return self._vertices
|
Return an array (Nv, 3) of vertices.
If only faces exist, the function computes the vertices and
returns them.
If no vertices or faces are specified, the function returns None.
|
entailment
|
def convex_hull(self):
"""Return an array of vertex indexes representing the convex hull.
If faces have not been computed for this mesh, the function
computes them.
If no vertices or faces are specified, the function returns None.
"""
if self._faces is None:
if self._vertices is None:
return None
self.triangulate()
return self._convex_hull
|
Return an array of vertex indexes representing the convex hull.
If faces have not been computed for this mesh, the function
computes them.
If no vertices or faces are specified, the function returns None.
|
entailment
|
def triangulate(self):
"""
Triangulates the set of vertices and stores the triangles in faces and
the convex hull in convex_hull.
"""
npts = self._vertices.shape[0]
if np.any(self._vertices[0] != self._vertices[1]):
# start != end, so edges must wrap around to beginning.
edges = np.empty((npts, 2), dtype=np.uint32)
edges[:, 0] = np.arange(npts)
edges[:, 1] = edges[:, 0] + 1
edges[-1, 1] = 0
else:
# start == end; no wrapping required.
edges = np.empty((npts-1, 2), dtype=np.uint32)
edges[:, 0] = np.arange(npts)
edges[:, 1] = edges[:, 0] + 1
tri = Triangulation(self._vertices, edges)
tri.triangulate()
return tri.pts, tri.tris
|
Triangulates the set of vertices and stores the triangles in faces and
the convex hull in convex_hull.
|
entailment
|
def find(name):
"""Locate a filename into the shader library."""
if op.exists(name):
return name
path = op.dirname(__file__) or '.'
paths = [path] + config['include_path']
for path in paths:
filename = op.abspath(op.join(path, name))
if op.exists(filename):
return filename
for d in os.listdir(path):
fullpath = op.abspath(op.join(path, d))
if op.isdir(fullpath):
filename = op.abspath(op.join(fullpath, name))
if op.exists(filename):
return filename
return None
|
Locate a filename in the shader library.
|
entailment
|
def get(name):
"""Retrieve code from the given filename."""
filename = find(name)
if filename is None:
raise RuntimeError('Could not find %s' % name)
with open(filename) as fid:
return fid.read()
|
Retrieve code from the given filename.
|
entailment
|
def expect(func, args, times=7, sleep_t=0.5):
"""try many times as in times with sleep time"""
while times > 0:
try:
return func(*args)
except Exception as e:
times -= 1
logger.debug("expect failed - attempts left: %d" % times)
time.sleep(sleep_t)
if times == 0:
raise exceptions.BaseExc(e)
|
Call ``func(*args)`` up to ``times`` times, sleeping ``sleep_t`` seconds between failed attempts
|
entailment
|
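The helper above depends on a module-level logger and the package's exceptions.BaseExc, so it does not run on its own; a self-contained sketch of the same retry pattern (names here are illustrative, not the package's API) might look like this:

import time

def flaky(threshold, _state={'calls': 0}):
    """Stand-in that fails a couple of times, then succeeds."""
    _state['calls'] += 1
    if _state['calls'] < threshold:
        raise RuntimeError('not ready yet')
    return 'ok'

def retry(func, args, times=7, sleep_t=0.5):
    last_exc = None
    while times > 0:                 # same loop shape as expect() above
        try:
            return func(*args)
        except Exception as e:
            last_exc = e
            times -= 1
            time.sleep(sleep_t)
    raise last_exc

print(retry(flaky, (3,), times=5, sleep_t=0.1))  # 'ok' on the third attempt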
def num(string):
"""convert a string to float"""
if not isinstance(string, type('')):
raise ValueError(type(''))
try:
string = re.sub(r'[^a-zA-Z0-9\.\-]', '', string)
number = re.findall(r"[-+]?\d*\.\d+|[-+]?\d+", string)
return float(number[0])
except Exception as e:
logger = logging.getLogger('tradingAPI.utils.num')
logger.debug("number not found in %s" % string)
logger.debug(e)
return None
|
convert a string to float
|
entailment
|
def get_number_unit(number):
"""get the unit of number"""
n = str(float(number))
mult, submult = n.split('.')
if float(submult) != 0:
unit = '0.' + (len(submult)-1)*'0' + '1'
return float(unit)
else:
return float(1)
|
get the unit of number
|
entailment
|
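Worked examples of the unit extraction above:

print(get_number_unit(1.25))   # 0.01 -> '25' after the point means two decimal places
print(get_number_unit(0.5))    # 0.1
print(get_number_unit(3))      # 1.0  -> no fractional part, so the unit is 1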
def get_pip(mov=None, api=None, name=None):
"""get value of pip"""
# ~ check args
if mov is None and api is None:
logger.error("need at least one of those")
raise ValueError()
elif mov is not None and api is not None:
logger.error("mov and api are exclusive")
raise ValueError()
if api is not None:
if name is None:
logger.error("need a name")
raise ValueError()
mov = api.new_mov(name)
mov.open()
if mov is not None:
mov._check_open()
# find in the collection
try:
logger.debug(len(Glob().theCollector.collection))
pip = Glob().theCollector.collection['pip']
if name is not None:
pip_res = pip[name]
elif mov is not None:
pip_res = pip[mov.product]
logger.debug("pip found in the collection")
return pip_res
except KeyError:
logger.debug("pip not found in the collection")
# ~ vars
records = []
intervals = [10, 20, 30]
def _check_price(interval=10):
timeout = time.time() + interval
while time.time() < timeout:
records.append(mov.get_price())
time.sleep(0.5)
# find variation
for interval in intervals:
_check_price(interval)
if min(records) == max(records):
logger.debug("no variation in %d seconds" % interval)
if interval == intervals[-1]:
raise TimeoutError("no variation")
else:
break
# find longer price
for price in records:
if 'best_price' not in locals():
best_price = price
if len(str(price)) > len(str(best_price)):
logger.debug("found new best_price %f" % price)
best_price = price
# get pip
pip = get_number_unit(best_price)
Glob().pipHandler.add_val({mov.product: pip})
return pip
|
get value of pip
|
entailment
|
def itemsize(self):
""" Individual item sizes """
return self._items[:self._count, 1] - self._items[:self._count, 0]
|
Individual item sizes
|
entailment
|
def reserve(self, capacity):
""" Set current capacity of the underlying array"""
if capacity >= self._data.size:
capacity = int(2 ** np.ceil(np.log2(capacity)))
self._data = np.resize(self._data, capacity)
|
Set current capacity of the underlying array
|
entailment
|
def insert(self, index, data, itemsize=None):
""" Insert data before index
Parameters
----------
index : int
Index before which data will be inserted.
data : array_like
An array, any object exposing the array interface, an object
whose __array__ method returns an array, or any (nested) sequence.
itemsize: int or 1-D array
If `itemsize` is an integer, N, the array will be divided
into elements of size N. If such partition is not possible,
an error is raised.
If `itemsize` is 1-D array, the array will be divided into
elements whose successive sizes will be picked from itemsize.
If the sum of itemsize values is different from array size,
an error is raised.
"""
if not self._sizeable:
raise AttributeError("List is not sizeable")
if isinstance(data, (list, tuple)) and isinstance(data[0], (list, tuple)): # noqa
itemsize = [len(l) for l in data]
data = [item for sublist in data for item in sublist]
data = np.array(data, copy=False).ravel()
size = data.size
# Check item size and get item number
if itemsize is not None:
if isinstance(itemsize, int):
if (size % itemsize) != 0:
raise ValueError("Cannot partition data as requested")
_count = size // itemsize
_itemsize = np.ones(_count, dtype=int) * (size // _count)
else:
_itemsize = np.array(itemsize, copy=False)
_count = len(itemsize)
if _itemsize.sum() != size:
raise ValueError("Cannot partition data as requested")
else:
_count = 1
# Check if data array is big enough and resize it if necessary
if self._size + size >= self._data.size:
capacity = int(2 ** np.ceil(np.log2(self._size + size)))
self._data = np.resize(self._data, capacity)
# Check if item array is big enough and resize it if necessary
if self._count + _count >= len(self._items):
capacity = int(2 ** np.ceil(np.log2(self._count + _count)))
self._items = np.resize(self._items, (capacity, 2))
# Check index
if index < 0:
index += len(self)
if index < 0 or index > len(self):
raise IndexError("List insertion index out of range")
# Inserting
if index < self._count:
istart = index
dstart = self._items[istart][0]
dstop = self._items[istart][1]
# Move data
Z = self._data[dstart:self._size]
self._data[dstart + size:self._size + size] = Z
# Update moved items
I = self._items[istart:self._count] + size
self._items[istart + _count:self._count + _count] = I
# Appending
else:
dstart = self._size
istart = self._count
# Only one item (faster)
if _count == 1:
# Store data
self._data[dstart:dstart + size] = data
self._size += size
# Store data location (= item)
self._items[istart][0] = dstart
self._items[istart][1] = dstart + size
self._count += 1
# Several items
else:
# Store data
dstop = dstart + size
self._data[dstart:dstop] = data
self._size += size
# Store items
items = np.ones((_count, 2), int) * dstart
C = _itemsize.cumsum()
items[1:, 0] += C[:-1]
items[0:, 1] += C
istop = istart + _count
self._items[istart:istop] = items
self._count += _count
|
Insert data before index
Parameters
----------
index : int
Index before which data will be inserted.
data : array_like
An array, any object exposing the array interface, an object
whose __array__ method returns an array, or any (nested) sequence.
itemsize: int or 1-D array
If `itemsize` is an integer, N, the array will be divided
into elements of size N. If such partition is not possible,
an error is raised.
If `itemsize` is 1-D array, the array will be divided into
elements whose successive sizes will be picked from itemsize.
If the sum of itemsize values is different from array size,
an error is raised.
|
entailment
|
def append(self, data, itemsize=None):
"""
Append data to the end.
Parameters
----------
data : array_like
An array, any object exposing the array interface, an object
whose __array__ method returns an array, or any (nested) sequence.
itemsize: int or 1-D array
If `itemsize` is an integer, N, the array will be divided
into elements of size N. If such partition is not possible,
an error is raised.
If `itemsize` is 1-D array, the array will be divided into
elements whose successive sizes will be picked from itemsize.
If the sum of itemsize values is different from array size,
an error is raised.
"""
self.insert(len(self), data, itemsize)
|
Append data to the end.
Parameters
----------
data : array_like
An array, any object exposing the array interface, an object
whose __array__ method returns an array, or any (nested) sequence.
itemsize: int or 1-D array
If `itemsize` is an integer, N, the array will be divided
into elements of size N. If such partition is not possible,
an error is raised.
If `itemsize` is 1-D array, the array will be divided into
elements whose successive sizes will be picked from itemsize.
If the sum of itemsize values is different from array size,
an error is raised.
|
entailment
|
def minimize(func, bounds=None, nvar=None, args=(), disp=False,
eps=1e-4,
maxf=20000,
maxT=6000,
algmethod=0,
fglobal=-1e100,
fglper=0.01,
volper=-1.0,
sigmaper=-1.0,
**kwargs
):
"""
Solve an optimization problem using the DIRECT (Dividing Rectangles) algorithm.
It can be used to solve general nonlinear programming problems of the form:
.. math::
\min_ {x \in R^n} f(x)
subject to
.. math::
x_L \leq x \leq x_U
Where :math:`x` are the optimization variables (with upper and lower
bounds), :math:`f(x)` is the objective function.
Parameters
----------
func : objective function
called as `func(x, *args)`; does not need to be defined everywhere,
raise an Exception where function is not defined
bounds : array-like
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter.
nvar: integer
Dimensionality of x (only needed if `bounds` is not defined)
eps : float
Ensures sufficient decrease in function value when a new potentially
optimal interval is chosen.
maxf : integer
Approximate upper bound on objective function evaluations.
.. note::
Maximal allowed value is 90000; see the documentation of the Fortran library.
maxT : integer
Maximum number of iterations.
.. note::
Maximal allowed value is 6000; see the documentation of the Fortran library.
algmethod : integer
Whether to use the original or modified DIRECT algorithm. Possible values:
* ``algmethod=0`` - use the original DIRECT algorithm
* ``algmethod=1`` - use the modified DIRECT-l algorithm
fglobal : float
Function value of the global optimum. If this value is not known set this
to a very large negative value.
fglper : float
Terminate the optimization when the percent error satisfies:
.. math::
100*(f_{min} - f_{global})/\max(1, |f_{global}|) \leq f_{glper}
volper : float
Terminate the optimization once the volume of a hyperrectangle is less
than volper percent of the original hyperrectangle.
sigmaper : float
Terminate the optimization once the measure of the hyperrectangle is less
than sigmaper.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
"""
if bounds is None:
l = np.zeros(nvar, dtype=np.float64)
u = np.ones(nvar, dtype=np.float64)
else:
bounds = np.asarray(bounds)
l = bounds[:, 0]
u = bounds[:, 1]
def _objective_wrap(x, iidata, ddata, cdata, n, iisize, idsize, icsize):
"""
Wrap the python objective to comply with the signature required by the
Fortran library.
Returns the function value and a flag indicating whether function is defined.
If function is not defined return np.nan
"""
try:
return func(x, *args), 0
except:
return np.nan, 1
#
# Dummy values so that the python wrapper will comply with the required
# signature of the fortran library.
#
iidata = np.ones(0, dtype=np.int32)
ddata = np.ones(0, dtype=np.float64)
cdata = np.ones([0, 40], dtype=np.uint8)
#
# Call the DIRECT algorithm
#
x, fun, ierror = direct(
_objective_wrap,
eps,
maxf,
maxT,
l,
u,
algmethod,
'dummylogfile',
fglobal,
fglper,
volper,
sigmaper,
iidata,
ddata,
cdata,
disp
)
return OptimizeResult(x=x,fun=fun, status=ierror, success=ierror>0,
message=SUCCESS_MESSAGES[ierror-1] if ierror>0 else ERROR_MESSAGES[abs(ierror)-1])
|
Solve an optimization problem using the DIRECT (Dividing Rectangles) algorithm.
It can be used to solve general nonlinear programming problems of the form:
.. math::
\min_ {x \in R^n} f(x)
subject to
.. math::
x_L \leq x \leq x_U
Where :math:`x` are the optimization variables (with upper and lower
bounds), :math:`f(x)` is the objective function.
Parameters
----------
func : objective function
called as `func(x, *args)`; does not need to be defined everywhere,
raise an Exception where function is not defined
bounds : array-like
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter.
nvar: integer
Dimensionality of x (only needed if `bounds` is not defined)
eps : float
Ensures sufficient decrease in function value when a new potentially
optimal interval is chosen.
maxf : integer
Approximate upper bound on objective function evaluations.
.. note::
Maximal allowed value is 90000; see the documentation of the Fortran library.
maxT : integer
Maximum number of iterations.
.. note::
Maximal allowed value is 6000; see the documentation of the Fortran library.
algmethod : integer
Whether to use the original or modified DIRECT algorithm. Possible values:
* ``algmethod=0`` - use the original DIRECT algorithm
* ``algmethod=1`` - use the modified DIRECT-l algorithm
fglobal : float
Function value of the global optimum. If this value is not known set this
to a very large negative value.
fglper : float
Terminate the optimization when the percent error satisfies:
.. math::
100*(f_{min} - f_{global})/\max(1, |f_{global}|) \leq f_{glper}
volper : float
Terminate the optimization once the volume of a hyperrectangle is less
than volper percent of the original hyperrectangle.
sigmaper : float
Terminate the optimization once the measure of the hyperrectangle is less
than sigmaper.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
|
entailment
|
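A hypothetical usage sketch; the import path is an assumption (the signature matches the scipydirect-style wrapper around the Fortran DIRECT code):

import numpy as np
from scipydirect import minimize  # assumed packaging of the minimize() above

def sphere(x):
    # simple objective defined everywhere inside the bounds
    return float(np.sum(np.asarray(x) ** 2))

res = minimize(sphere, bounds=[(-3.0, 3.0), (-2.0, 2.0)], maxf=2000)
print(res.x, res.fun, res.message)  # optimum near the origin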
def get_dpi(raise_error=True):
"""Get screen DPI from the OS
Parameters
----------
raise_error : bool
If True, raise an error if DPI could not be determined.
Returns
-------
dpi : float
Dots per inch of the primary screen.
"""
try:
user32.SetProcessDPIAware()
except AttributeError:
pass # not present on XP
dc = user32.GetDC(0)
h_size = gdi32.GetDeviceCaps(dc, HORZSIZE)
v_size = gdi32.GetDeviceCaps(dc, VERTSIZE)
h_res = gdi32.GetDeviceCaps(dc, HORZRES)
v_res = gdi32.GetDeviceCaps(dc, VERTRES)
user32.ReleaseDC(None, dc)
return (h_res/float(h_size) + v_res/float(v_size)) * 0.5 * 25.4
|
Get screen DPI from the OS
Parameters
----------
raise_error : bool
If True, raise an error if DPI could not be determined.
Returns
-------
dpi : float
Dots per inch of the primary screen.
|
entailment
|
def build_if_needed(self):
""" Reset shader source if necesssary.
"""
if self._need_build:
self._build()
self._need_build = False
self.update_variables()
|
Reset shader source if necessary.
|
entailment
|
def nmap(a, b, c, d, curvefn=None, normfn=None):
"""
Returns a function that maps a number n from range (a, b) onto a range
(c, d). If no curvefn is given, linear mapping will be used. Optionally a
normalisation function normfn can be provided to transform output.
"""
if not curvefn:
curvefn = lambda x: x
def map(n):
r = 1.0 * (n - a) / (b - a)
out = curvefn(r) * (d - c) + c
if not normfn:
return out
return normfn(out)
return map
|
Returns a function that maps a number n from range (a, b) onto a range
(c, d). If no curvefn is given, linear mapping will be used. Optionally a
normalisation function normfn can be provided to transform output.
|
entailment
|
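Worked examples of the range mapping above:

to_percent = nmap(0, 10, 0, 100)
print(to_percent(5))                              # 50.0 (linear by default)

eased = nmap(0, 10, 0, 100, curvefn=lambda x: x ** 2)
print(eased(5))                                   # 25.0 (curvefn reshapes the ramp)

rounded = nmap(0, 10, 0, 100, normfn=round)
print(rounded(3.33))                              # 33 (output passed through normfn)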
def link_view(self, view):
"""Link this axis to a ViewBox
This makes it so that the axis's domain always matches the
visible range in the ViewBox.
Parameters
----------
view : instance of ViewBox
The ViewBox to link.
"""
if view is self._linked_view:
return
if self._linked_view is not None:
self._linked_view.scene.transform.changed.disconnect(
self._view_changed)
self._linked_view = view
view.scene.transform.changed.connect(self._view_changed)
self._view_changed()
|
Link this axis to a ViewBox
This makes it so that the axis's domain always matches the
visible range in the ViewBox.
Parameters
----------
view : instance of ViewBox
The ViewBox to link.
|
entailment
|
def _view_changed(self, event=None):
"""Linked view transform has changed; update ticks.
"""
tr = self.node_transform(self._linked_view.scene)
p1, p2 = tr.map(self._axis_ends())
if self.orientation in ('left', 'right'):
self.axis.domain = (p1[1], p2[1])
else:
self.axis.domain = (p1[0], p2[0])
|
Linked view transform has changed; update ticks.
|
entailment
|
def viewbox_mouse_event(self, event):
"""ViewBox mouse event handler
Parameters
----------
event : instance of Event
The mouse event.
"""
# When the attached ViewBox receives a mouse event, it is sent to the
# camera here.
self.mouse_pos = event.pos[:2]
if event.type == 'mouse_wheel':
# wheel rolled; adjust the magnification factor and hide the
# event from the superclass
m = self.mag_target
m *= 1.2 ** event.delta[1]
m = m if m > 1 else 1
self.mag_target = m
else:
# send everything _except_ wheel events to the superclass
super(MagnifyCamera, self).viewbox_mouse_event(event)
# start the timer to smoothly modify the transform properties.
if not self.timer.running:
self.timer.start()
self._update_transform()
|
ViewBox mouse event handler
Parameters
----------
event : instance of Event
The mouse event.
|
entailment
|
def on_timer(self, event=None):
"""Timer event handler
Parameters
----------
event : instance of Event
The timer event.
"""
# Smoothly update center and magnification properties of the transform
k = np.clip(100. / self.mag.mag, 10, 100)
s = 10**(-k * event.dt)
c = np.array(self.mag.center)
c1 = c * s + self.mouse_pos * (1-s)
m = self.mag.mag * s + self.mag_target * (1-s)
# If changes are very small, then it is safe to stop the timer.
if (np.all(np.abs((c - c1) / c1) < 1e-5) and
(np.abs(np.log(m / self.mag.mag)) < 1e-3)):
self.timer.stop()
self.mag.center = c1
self.mag.mag = m
self._update_transform()
|
Timer event handler
Parameters
----------
event : instance of Event
The timer event.
|
entailment
|
def glBufferData(target, data, usage):
""" Data can be numpy array or the size of data to allocate.
"""
if isinstance(data, int):
size = data
data = ctypes.c_voidp(0)
else:
if not data.flags['C_CONTIGUOUS'] or not data.flags['ALIGNED']:
data = data.copy('C')
data_ = data
size = data_.nbytes
data = data_.ctypes.data
try:
nativefunc = glBufferData._native
except AttributeError:
nativefunc = glBufferData._native = _get_gl_func("glBufferData", None, (ctypes.c_uint, ctypes.c_int, ctypes.c_void_p, ctypes.c_uint,))
res = nativefunc(target, size, data, usage)
|
Data can be a numpy array or the size of the data to allocate.
|
entailment
|
def next_power_of_2(n):
""" Return next power of 2 greater than or equal to n """
n -= 1 # greater than OR EQUAL TO n
shift = 1
while (n + 1) & n: # n+1 is not a power of 2 yet
n |= n >> shift
shift *= 2
return max(4, n + 1)
|
Return next power of 2 greater than or equal to n
|
entailment
|
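Worked examples (note the result is clamped to at least 4):

print(next_power_of_2(5))    # 8
print(next_power_of_2(8))    # 8   (already a power of two)
print(next_power_of_2(100))  # 128
print(next_power_of_2(1))    # 4   (clamped by the max(4, ...) at the end)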
def append(self, vertices, uniforms=None, indices=None, itemsize=None):
"""
Parameters
----------
vertices : numpy array
An array whose dtype is compatible with self.vdtype
uniforms: numpy array
An array whose dtype is compatible with self.utype
indices : numpy array
An array whose dtype is compatible with self.idtype
All index values must be between 0 and len(vertices)
itemsize: int, tuple or 1-D array
If `itemsize` is an integer, N, the array will be divided
into elements of size N. If such partition is not possible,
an error is raised.
If `itemsize` is 1-D array, the array will be divided into
elements whose successive sizes will be picked from itemsize.
If the sum of itemsize values is different from array size,
an error is raised.
"""
# Vertices
# -----------------------------
vertices = np.array(vertices).astype(self.vtype).ravel()
vsize = self._vertices_list.size
# No itemsize given
# -----------------
if itemsize is None:
index = 0
count = 1
# Uniform itemsize (int)
# ----------------------
elif isinstance(itemsize, int):
count = len(vertices) / itemsize
index = np.repeat(np.arange(count), itemsize)
# Individual itemsize (array)
# ---------------------------
elif isinstance(itemsize, (np.ndarray, list)):
count = len(itemsize)
index = np.repeat(np.arange(count), itemsize)
else:
raise ValueError("Itemsize not understood")
if self.utype:
vertices["collection_index"] = index + len(self)
self._vertices_list.append(vertices, itemsize)
# Indices
# -----------------------------
if self.itype is not None:
# No indices given (-> automatic generation)
if indices is None:
indices = vsize + np.arange(len(vertices))
self._indices_list.append(indices, itemsize)
# Indices given
# FIXME: variables indices (list of list or ArrayList)
else:
if itemsize is None:
I = np.array(indices) + vsize
elif isinstance(itemsize, int):
I = vsize + (np.tile(indices, count) +
itemsize * np.repeat(np.arange(count), len(indices))) # noqa
else:
raise ValueError("Indices not compatible with items")
self._indices_list.append(I, len(indices))
# Uniforms
# -----------------------------
if self.utype:
if uniforms is None:
uniforms = np.zeros(count, dtype=self.utype)
else:
uniforms = np.array(uniforms).astype(self.utype).ravel()
self._uniforms_list.append(uniforms, itemsize=1)
self._need_update = True
|
Parameters
----------
vertices : numpy array
An array whose dtype is compatible with self.vdtype
uniforms: numpy array
An array whose dtype is compatible with self.utype
indices : numpy array
An array whose dtype is compatible with self.idtype
All index values must be between 0 and len(vertices)
itemsize: int, tuple or 1-D array
If `itemsize` is an integer, N, the array will be divided
into elements of size N. If such partition is not possible,
an error is raised.
If `itemsize` is 1-D array, the array will be divided into
elements whose successive sizes will be picked from itemsize.
If the sum of itemsize values is different from array size,
an error is raised.
|
entailment
|
def _compute_texture_shape(self, size=1):
""" Compute uniform texture shape """
# We should use this line but we may not have a GL context yet
# linesize = gl.glGetInteger(gl.GL_MAX_TEXTURE_SIZE)
linesize = 1024
count = self._uniforms_float_count
cols = 4 * linesize // int(count)
rows = max(1, int(math.ceil(size / float(cols))))
shape = rows, cols * (count // 4), count
self._ushape = shape
return shape
|
Compute uniform texture shape
|
entailment
|
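The shape arithmetic above can be checked with concrete numbers; the sketch below assumes a hypothetical collection whose uniform block holds 16 floats (4 RGBA texels) and which stores 3000 items, with 1024 standing in for GL_MAX_TEXTURE_SIZE just as in the code.

import math

linesize = 1024                       # stand-in for GL_MAX_TEXTURE_SIZE, as above
count = 16                            # floats per uniform block (4 RGBA texels)
size = 3000                           # number of items stored

cols = 4 * linesize // int(count)                     # 256 items fit on one texture row
rows = max(1, int(math.ceil(size / float(cols))))     # 12 rows needed
shape = rows, cols * (count // 4), count
assert shape == (12, 1024, 16)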
def _update(self):
""" Update vertex buffers & texture """
if self._vertices_buffer is not None:
self._vertices_buffer.delete()
self._vertices_buffer = VertexBuffer(self._vertices_list.data)
if self.itype is not None:
if self._indices_buffer is not None:
self._indices_buffer.delete()
self._indices_buffer = IndexBuffer(self._indices_list.data)
if self.utype is not None:
if self._uniforms_texture is not None:
self._uniforms_texture.delete()
# We take the whole array (_data), not the data one
texture = self._uniforms_list._data.view(np.float32)
size = len(texture) // self._uniforms_float_count
shape = self._compute_texture_shape(size)
# shape[2] = float count is only used in vertex shader code
texture = texture.reshape(shape[0], shape[1], 4)
self._uniforms_texture = Texture2D(texture)
self._uniforms_texture.data = texture
self._uniforms_texture.interpolation = 'nearest'
if len(self._programs):
for program in self._programs:
program.bind(self._vertices_buffer)
if self._uniforms_list is not None:
program["uniforms"] = self._uniforms_texture
program["uniforms_shape"] = self._ushape
|
Update vertex buffers & texture
|
entailment
|
def get_layout(name, *args, **kwargs):
"""
Retrieve a graph layout
Some graph layouts accept extra options. Please refer to their
documentation for more information.
Parameters
----------
name : string
The name of the layout. The variable `AVAILABLE_LAYOUTS`
contains all available layouts.
*args
Positional arguments which are passed to the layout.
**kwargs
Keyword arguments which are passed to the layout.
Returns
-------
layout : callable
The callable generator which will calculate the graph layout
"""
if name not in _layout_map:
raise KeyError("Graph layout '%s' not found. Should be one of %s"
% (name, AVAILABLE_LAYOUTS))
layout = _layout_map[name]
if inspect.isclass(layout):
layout = layout(*args, **kwargs)
return layout
|
Retrieve a graph layout
Some graph layouts accept extra options. Please refer to their
documentation for more information.
Parameters
----------
name : string
The name of the layout. The variable `AVAILABLE_LAYOUTS`
contains all available layouts.
*args
Positional arguments which are passed to the layout.
**kwargs
Keyword arguments which are passed to the layout.
Returns
-------
layout : callable
The callable generator which will calculate the graph layout
|
entailment
|
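A minimal, self-contained analogue of this registry lookup is sketched below; the two layouts are hypothetical stand-ins, but they illustrate why the inspect.isclass check matters: class-based layouts are instantiated with the extra options, while plain functions are returned as-is.

import inspect

def _circular(adjacency):
    yield "positions on a circle"          # placeholder for generated coordinates

class _ForceDirected(object):
    def __init__(self, iterations=50):
        self.iterations = iterations
    def __call__(self, adjacency):
        yield "positions after %d iterations" % self.iterations

_layout_map = {'circular': _circular, 'force_directed': _ForceDirected}
AVAILABLE_LAYOUTS = tuple(_layout_map)

def get_layout(name, *args, **kwargs):
    if name not in _layout_map:
        raise KeyError("Graph layout '%s' not found. Should be one of %s"
                       % (name, AVAILABLE_LAYOUTS))
    layout = _layout_map[name]
    if inspect.isclass(layout):
        layout = layout(*args, **kwargs)   # class-based layouts take the extra options
    return layout

layout = get_layout('force_directed', iterations=10)
print(next(layout(adjacency=None)))        # -> positions after 10 iterations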
def update_viewer_state(rec, context):
"""
Given viewer session information, make sure the session information is
compatible with the current version of the viewers, and if not, update
the session information in-place.
"""
if '_protocol' not in rec:
rec.pop('properties')
rec['state'] = {}
rec['state']['values'] = rec.pop('options')
layer_states = []
for layer in rec['layers']:
state_id = str(uuid.uuid4())
state_cls = STATE_CLASS[layer['_type'].split('.')[-1]]
state = state_cls(layer=context.object(layer.pop('layer')))
properties = set(layer.keys()) - set(['_type'])
for prop in sorted(properties, key=state.update_priority, reverse=True):
value = layer.pop(prop)
value = context.object(value)
if isinstance(value, six.string_types) and value == 'fixed':
value = 'Fixed'
if isinstance(value, six.string_types) and value == 'linear':
value = 'Linear'
setattr(state, prop, value)
context.register_object(state_id, state)
layer['state'] = state_id
layer_states.append(state)
list_id = str(uuid.uuid4())
context.register_object(list_id, layer_states)
rec['state']['values']['layers'] = list_id
rec['state']['values']['visible_axes'] = rec['state']['values'].pop('visible_box')
|
Given viewer session information, make sure the session information is
compatible with the current version of the viewers, and if not, update
the session information in-place.
|
entailment
|
def remove_comments(code):
"""Remove C-style comment from GLSL code string."""
pattern = r"(\".*?\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*\n)"
# first group captures quoted strings (double or single)
# second group captures comments (//single-line or /* multi-line */)
regex = re.compile(pattern, re.MULTILINE | re.DOTALL)
def do_replace(match):
# if the 2nd group (capturing comments) is not None,
# it means we have captured a non-quoted (real) comment string.
if match.group(2) is not None:
return "" # so we will return empty to remove the comment
else: # otherwise, we will return the 1st group
return match.group(1) # captured quoted-string
return regex.sub(do_replace, code)
|
Remove C-style comment from GLSL code string.
|
entailment
|
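The behavior worth remembering here is that comment-like text inside string literals survives; the self-contained check below reuses the same regex (the GLSL-like snippet is made up).

import re

def strip_comments(code):
    # Same pattern as above: group 1 keeps quoted strings, group 2 drops comments.
    pattern = r"(\".*?\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*\n)"
    regex = re.compile(pattern, re.MULTILINE | re.DOTALL)
    return regex.sub(lambda m: "" if m.group(2) is not None else m.group(1), code)

src = 'vec4 c = vec4(1.0); // set color\n/* block\n   comment */\nstring s = "// not a comment";\n'
assert strip_comments(src) == 'vec4 c = vec4(1.0); \nstring s = "// not a comment";\n'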
def merge_includes(code):
"""Merge all includes recursively."""
pattern = r'\#\s*include\s*"(?P<filename>[a-zA-Z0-9\_\-\.\/]+)"'
regex = re.compile(pattern)
includes = []
def replace(match):
filename = match.group("filename")
if filename not in includes:
includes.append(filename)
path = glsl.find(filename)
if not path:
logger.critical('"%s" not found' % filename)
raise RuntimeError("File not found", filename)
text = '\n// --- start of "%s" ---\n' % filename
with open(path) as fh:
text += fh.read()
text += '// --- end of "%s" ---\n' % filename
return text
return ''
# Limit recursion to depth 10
for i in range(10):
if re.search(regex, code):
code = re.sub(regex, replace, code)
else:
break
return code
|
Merge all includes recursively.
|
entailment
|
def add_widget(self, widget=None, row=None, col=None, row_span=1,
col_span=1, **kwargs):
"""
Add a new widget to this grid. This will cause other widgets in the
grid to be resized to make room for the new widget. Can be used
to replace a widget as well
Parameters
----------
widget : Widget | None
The Widget to add. New widget is constructed if widget is None.
row : int
The row in which to add the widget (0 is the topmost row)
col : int
The column in which to add the widget (0 is the leftmost column)
row_span : int
The number of rows to be occupied by this widget. Default is 1.
col_span : int
The number of columns to be occupied by this widget. Default is 1.
**kwargs : dict
parameters sent to the new Widget that is constructed if
widget is None
Notes
-----
The widget's parent is automatically set to this grid, and all other
parent(s) are removed.
"""
if row is None:
row = self._next_cell[0]
if col is None:
col = self._next_cell[1]
if widget is None:
widget = Widget(**kwargs)
else:
if kwargs:
raise ValueError("cannot send kwargs if widget is given")
_row = self._cells.setdefault(row, {})
_row[col] = widget
self._grid_widgets[self._n_added] = (row, col, row_span, col_span,
widget)
self._n_added += 1
widget.parent = self
self._next_cell = [row, col+col_span]
widget._var_w = Variable("w-(row: %s | col: %s)" % (row, col))
widget._var_h = Variable("h-(row: %s | col: %s)" % (row, col))
# update stretch based on colspan/rowspan
# usually, if you make something consume more rows or columns,
# you also want it to actually *take it up*, ratio wise.
# otherwise, it will never *use* the extra rows and columns,
# thereby collapsing the extras to 0.
stretch = list(widget.stretch)
stretch[0] = col_span if stretch[0] is None else stretch[0]
stretch[1] = row_span if stretch[1] is None else stretch[1]
widget.stretch = stretch
self._need_solver_recreate = True
return widget
|
Add a new widget to this grid. This will cause other widgets in the
grid to be resized to make room for the new widget. Can be used
to replace a widget as well
Parameters
----------
widget : Widget | None
The Widget to add. New widget is constructed if widget is None.
row : int
The row in which to add the widget (0 is the topmost row)
col : int
The column in which to add the widget (0 is the leftmost column)
row_span : int
The number of rows to be occupied by this widget. Default is 1.
col_span : int
The number of columns to be occupied by this widget. Default is 1.
**kwargs : dict
parameters sent to the new Widget that is constructed if
widget is None
Notes
-----
The widget's parent is automatically set to this grid, and all other
parent(s) are removed.
|
entailment
|
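The auto-placement rule in add_widget (when row/col are omitted, use _next_cell, then advance past the occupied columns) can be illustrated without any vispy objects; the place() helper below is a hypothetical reduction of just that bookkeeping.

next_cell = [0, 0]

def place(row=None, col=None, row_span=1, col_span=1):
    # Mirror of the placement logic above, stripped of widget handling.
    global next_cell
    if row is None:
        row = next_cell[0]
    if col is None:
        col = next_cell[1]
    next_cell = [row, col + col_span]
    return row, col

assert place(row=0, col=0) == (0, 0)
assert place(col_span=2) == (0, 1)   # auto-placed at the next cell, spanning 2 columns
assert place() == (0, 3)             # the span pushed the next auto slot to column 3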
def remove_widget(self, widget):
"""Remove a widget from this grid
Parameters
----------
widget : Widget
The Widget to remove
"""
self._grid_widgets = dict((key, val)
for (key, val) in self._grid_widgets.items()
if val[-1] != widget)
self._need_solver_recreate = True
|
Remove a widget from this grid
Parameters
----------
widget : Widget
The Widget to remove
|
entailment
|
def resize_widget(self, widget, row_span, col_span):
"""Resize a widget in the grid to new dimensions.
Parameters
----------
widget : Widget
The widget to resize
row_span : int
The number of rows to be occupied by this widget.
col_span : int
The number of columns to be occupied by this widget.
"""
row = None
col = None
for (r, c, rspan, cspan, w) in self._grid_widgets.values():
if w == widget:
row = r
col = c
break
if row is None or col is None:
raise ValueError("%s not found in grid" % widget)
self.remove_widget(widget)
self.add_widget(widget, row, col, row_span, col_span)
self._need_solver_recreate = True
|
Resize a widget in the grid to new dimensions.
Parameters
----------
widget : Widget
The widget to resize
row_span : int
The number of rows to be occupied by this widget.
col_span : int
The number of columns to be occupied by this widget.
|
entailment
|
def add_grid(self, row=None, col=None, row_span=1, col_span=1,
**kwargs):
"""
Create a new Grid and add it as a child widget.
Parameters
----------
row : int
The row in which to add the widget (0 is the topmost row)
col : int
The column in which to add the widget (0 is the leftmost column)
row_span : int
The number of rows to be occupied by this widget. Default is 1.
col_span : int
The number of columns to be occupied by this widget. Default is 1.
**kwargs : dict
Keyword arguments to pass to the new `Grid`.
"""
from .grid import Grid
grid = Grid(**kwargs)
return self.add_widget(grid, row, col, row_span, col_span)
|
Create a new Grid and add it as a child widget.
Parameters
----------
row : int
The row in which to add the widget (0 is the topmost row)
col : int
The column in which to add the widget (0 is the leftmost column)
row_span : int
The number of rows to be occupied by this widget. Default is 1.
col_span : int
The number of columns to be occupied by this widget. Default is 1.
**kwargs : dict
Keyword arguments to pass to the new `Grid`.
|
entailment
|
def add_view(self, row=None, col=None, row_span=1, col_span=1,
**kwargs):
"""
Create a new ViewBox and add it as a child widget.
Parameters
----------
row : int
The row in which to add the widget (0 is the topmost row)
col : int
The column in which to add the widget (0 is the leftmost column)
row_span : int
The number of rows to be occupied by this widget. Default is 1.
col_span : int
The number of columns to be occupied by this widget. Default is 1.
**kwargs : dict
Keyword arguments to pass to `ViewBox`.
"""
from .viewbox import ViewBox
view = ViewBox(**kwargs)
return self.add_widget(view, row, col, row_span, col_span)
|
Create a new ViewBox and add it as a child widget.
Parameters
----------
row : int
The row in which to add the widget (0 is the topmost row)
col : int
The column in which to add the widget (0 is the leftmost column)
row_span : int
The number of rows to be occupied by this widget. Default is 1.
col_span : int
The number of columns to be occupied by this widget. Default is 1.
**kwargs : dict
Keyword arguments to pass to `ViewBox`.
|
entailment
|
def find_font(face, bold, italic):
"""Find font"""
bold = FC_WEIGHT_BOLD if bold else FC_WEIGHT_REGULAR
italic = FC_SLANT_ITALIC if italic else FC_SLANT_ROMAN
face = face.encode('utf8')
fontconfig.FcInit()
pattern = fontconfig.FcPatternCreate()
fontconfig.FcPatternAddInteger(pattern, FC_WEIGHT, bold)
fontconfig.FcPatternAddInteger(pattern, FC_SLANT, italic)
fontconfig.FcPatternAddString(pattern, FC_FAMILY, face)
fontconfig.FcConfigSubstitute(0, pattern, FcMatchPattern)
fontconfig.FcDefaultSubstitute(pattern)
result = FcType()
match = fontconfig.FcFontMatch(0, pattern, byref(result))
fontconfig.FcPatternDestroy(pattern)
if not match:
raise RuntimeError('Could not match font "%s"' % face)
value = FcValue()
fontconfig.FcPatternGet(match, FC_FAMILY, 0, byref(value))
if value.u.s != face:
warnings.warn('Could not find face match "%s", falling back to "%s"'
% (face, value.u.s))
result = fontconfig.FcPatternGet(match, FC_FILE, 0, byref(value))
if result != 0:
raise RuntimeError('No filename or FT face for "%s"' % face)
fname = value.u.s
return fname.decode('utf-8')
|
Find font
|
entailment
|
def _list_fonts():
"""List system fonts"""
stdout_, stderr = run_subprocess(['fc-list', ':scalable=true', 'family'])
vals = [v.split(',')[0] for v in stdout_.strip().splitlines(False)]
return vals
|
List system fonts
|
entailment
|
def _get_vispy_caller():
"""Helper to get vispy calling function from the stack"""
records = inspect.stack()
# first few records are vispy-based logging calls
for record in records[5:]:
module = record[0].f_globals['__name__']
if module.startswith('vispy'):
line = str(record[0].f_lineno)
func = record[3]
cls = record[0].f_locals.get('self', None)
clsname = "" if cls is None else cls.__class__.__name__ + '.'
caller = "{0}:{1}{2}({3}): ".format(module, clsname, func, line)
return caller
return 'unknown'
|
Helper to get vispy calling function from the stack
|
entailment
|
def set_log_level(verbose, match=None, return_old=False):
"""Convenience function for setting the logging level
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
match : str | None
String to match. Only those messages that both contain a substring
that regexp matches ``'match'`` (and the ``verbose`` level) will be
displayed.
return_old : bool
If True, return the old verbosity level and old match.
Notes
-----
If ``verbose=='debug'``, then the ``vispy`` method emitting the log
message will be prepended to each log message, which is useful for
debugging. If ``verbose=='debug'`` or ``match is not None``, then a
small performance overhead is added. Thus it is suggested to only use
these options when performance is not crucial.
See also
--------
vispy.util.use_log_level
"""
# This method is responsible for setting properties of the handler and
# formatter such that proper messages (possibly with the vispy caller
# prepended) are displayed. Storing log messages is only available
# via the context handler (use_log_level), so that configuration is
# done by the context handler itself.
if isinstance(verbose, bool):
verbose = 'info' if verbose else 'warning'
if isinstance(verbose, string_types):
verbose = verbose.lower()
if verbose not in logging_types:
raise ValueError('Invalid argument "%s"' % verbose)
verbose = logging_types[verbose]
else:
raise TypeError('verbose must be a bool or string')
logger = logging.getLogger('vispy')
old_verbose = logger.level
old_match = _lh._vispy_set_match(match)
logger.setLevel(verbose)
if verbose <= logging.DEBUG:
_lf._vispy_set_prepend(True)
else:
_lf._vispy_set_prepend(False)
out = None
if return_old:
out = (old_verbose, old_match)
return out
|
Convenience function for setting the logging level
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
match : str | None
String to match. Only those messages that both contain a substring
that regexp matches ``'match'`` (and the ``verbose`` level) will be
displayed.
return_old : bool
If True, return the old verbosity level and old match.
Notes
-----
If ``verbose=='debug'``, then the ``vispy`` method emitting the log
message will be prepended to each log message, which is useful for
debugging. If ``verbose=='debug'`` or ``match is not None``, then a
small performance overhead is added. Thus it is suggested to only use
these options when performance is not crucial.
See also
--------
vispy.util.use_log_level
|
entailment
|
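The argument normalization at the top of set_log_level is easy to mirror in isolation; the sketch below reproduces just that mapping (using plain str instead of six's string_types) so the accepted inputs are concrete.

import logging

logging_types = dict(debug=logging.DEBUG, info=logging.INFO,
                     warning=logging.WARNING, error=logging.ERROR,
                     critical=logging.CRITICAL)

def normalize_verbose(verbose):
    # Mirrors the handling above: bools map to 'info'/'warning',
    # strings are looked up case-insensitively, anything else is rejected.
    if isinstance(verbose, bool):
        verbose = 'info' if verbose else 'warning'
    if isinstance(verbose, str):
        verbose = verbose.lower()
        if verbose not in logging_types:
            raise ValueError('Invalid argument "%s"' % verbose)
        return logging_types[verbose]
    raise TypeError('verbose must be a bool or string')

assert normalize_verbose(True) == logging.INFO
assert normalize_verbose(False) == logging.WARNING
assert normalize_verbose('DEBUG') == logging.DEBUG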
def log_exception(level='warning', tb_skip=2):
"""
Send an exception and traceback to the logger.
This function is used in cases where an exception is handled safely but
nevertheless should generate a descriptive error message. An extra line
is inserted into the stack trace indicating where the exception was caught.
Parameters
----------
level : str
See ``set_log_level`` for options.
tb_skip : int
The number of traceback entries to ignore, prior to the point where
the exception was caught. The default is 2.
"""
stack = "".join(traceback.format_stack()[:-tb_skip])
tb = traceback.format_exception(*sys.exc_info())
msg = tb[0] # "Traceback (most recent call last):"
msg += stack
msg += " << caught exception here: >>\n"
msg += "".join(tb[1:]).rstrip()
logger.log(logging_types[level], msg)
|
Send an exception and traceback to the logger.
This function is used in cases where an exception is handled safely but
nevertheless should generate a descriptive error message. An extra line
is inserted into the stack trace indicating where the exception was caught.
Parameters
----------
level : str
See ``set_log_level`` for options.
tb_skip : int
The number of traceback entries to ignore, prior to the point where
the exception was caught. The default is 2.
|
entailment
|
def _handle_exception(ignore_callback_errors, print_callback_errors, obj,
cb_event=None, node=None):
"""Helper for prining errors in callbacks
See EventEmitter._invoke_callback for a use example.
"""
if not hasattr(obj, '_vispy_err_registry'):
obj._vispy_err_registry = {}
registry = obj._vispy_err_registry
if cb_event is not None:
cb, event = cb_event
exp_type = 'callback'
else:
exp_type = 'node'
type_, value, tb = sys.exc_info()
tb = tb.tb_next # Skip *this* frame
sys.last_type = type_
sys.last_value = value
sys.last_traceback = tb
del tb # Get rid of it in this namespace
# Handle
if not ignore_callback_errors:
raise
if print_callback_errors != "never":
this_print = 'full'
if print_callback_errors in ('first', 'reminders'):
# need to check to see if we've hit this yet
if exp_type == 'callback':
key = repr(cb) + repr(event)
else:
key = repr(node)
if key in registry:
registry[key] += 1
if print_callback_errors == 'first':
this_print = None
else: # reminders
ii = registry[key]
# Use logarithmic selection
# (1, 2, ..., 10, 20, ..., 100, 200, ...)
if ii == (2 ** int(np.log2(ii))):
this_print = ii
else:
this_print = None
else:
registry[key] = 1
if this_print == 'full':
logger.log_exception()
if exp_type == 'callback':
logger.error("Invoking %s for %s" % (cb, event))
else: # == 'node':
logger.error("Drawing node %s" % node)
elif this_print is not None:
if exp_type == 'callback':
logger.error("Invoking %s repeat %s"
% (cb, this_print))
else: # == 'node':
logger.error("Drawing node %s repeat %s"
% (node, this_print))
|
Helper for printing errors in callbacks
See EventEmitter._invoke_callback for a usage example.
|
entailment
|
def _serialize_buffer(buffer, array_serialization=None):
"""Serialize a NumPy array."""
if array_serialization == 'binary':
# WARNING: in NumPy 1.9, tostring() has been renamed to tobytes()
# but tostring() is still here for now for backward compatibility.
return buffer.ravel().tostring()
elif array_serialization == 'base64':
return {'storage_type': 'base64',
'buffer': base64.b64encode(buffer).decode('ascii')
}
raise ValueError("The array serialization method should be 'binary' or "
"'base64'.")
|
Serialize a NumPy array.
|
entailment
|
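A round-trip of the two serialization paths, using a small made-up float32 buffer; tobytes() is used in place of the older tostring() spelling mentioned in the comment above.

import base64
import numpy as np

buffer = np.arange(4, dtype=np.float32)

# 'binary' path: the raw bytes of the flattened array.
raw = buffer.ravel().tobytes()

# 'base64' path: a JSON-friendly dict with an ASCII payload.
encoded = {'storage_type': 'base64',
           'buffer': base64.b64encode(buffer).decode('ascii')}

# Round-trip check for the base64 variant.
decoded = np.frombuffer(base64.b64decode(encoded['buffer']), dtype=np.float32)
assert np.array_equal(decoded, buffer) and raw == decoded.tobytes()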
def _vispy_emit_match_andor_record(self, record):
"""Log message emitter that optionally matches and/or records"""
test = record.getMessage()
match = self._vispy_match
if (match is None or re.search(match, test) or
re.search(match, _get_vispy_caller())):
if self._vispy_emit_record:
fmt_rec = self._vispy_formatter.format(record)
self._vispy_emit_list.append(fmt_rec)
if self._vispy_print_msg:
return logging.StreamHandler.emit(self, record)
else:
return
|
Log message emitter that optionally matches and/or records
|
entailment
|
def create(self, obj, ref=None):
""" Convert *obj* to a new ShaderObject. If the output is a Variable
with no name, then set its name using *ref*.
"""
if isinstance(ref, Variable):
ref = ref.name
elif isinstance(ref, string_types) and ref.startswith('gl_'):
# gl_ names not allowed for variables
ref = ref[3:].lower()
# Allow any type of object to be converted to ShaderObject if it
# provides a magic method:
if hasattr(obj, '_shader_object'):
obj = obj._shader_object()
if isinstance(obj, ShaderObject):
if isinstance(obj, Variable) and obj.name is None:
obj.name = ref
elif isinstance(obj, string_types):
obj = TextExpression(obj)
else:
obj = Variable(ref, obj)
# Try prepending the name to indicate attribute, uniform, varying
if obj.vtype and obj.vtype[0] in 'auv':
obj.name = obj.vtype[0] + '_' + obj.name
return obj
|
Convert *obj* to a new ShaderObject. If the output is a Variable
with no name, then set its name using *ref*.
|
entailment
|
def dependencies(self, sort=False):
""" Return all dependencies required to use this object. The last item
in the list is *self*.
"""
alldeps = []
if sort:
def key(obj):
# sort deps such that we get functions, variables, self.
if not isinstance(obj, Variable):
return (0, 0)
else:
return (1, obj.vtype)
deps = sorted(self._deps, key=key)
else:
deps = self._deps
for dep in deps:
alldeps.extend(dep.dependencies(sort=sort))
alldeps.append(self)
return alldeps
|
Return all dependencies required to use this object. The last item
in the list is *self*.
|
entailment
|
def _add_dep(self, dep):
""" Increment the reference count for *dep*. If this is a new
dependency, then connect to its *changed* event.
"""
if dep in self._deps:
self._deps[dep] += 1
else:
self._deps[dep] = 1
dep._dependents[self] = None
|
Increment the reference count for *dep*. If this is a new
dependency, then connect to its *changed* event.
|
entailment
|
def _remove_dep(self, dep):
""" Decrement the reference count for *dep*. If the reference count
reaches 0, then the dependency is removed and its *changed* event is
disconnected.
"""
refcount = self._deps[dep]
if refcount == 1:
self._deps.pop(dep)
dep._dependents.pop(self)
else:
self._deps[dep] -= 1
|
Decrement the reference count for *dep*. If the reference count
reaches 0, then the dependency is removed and its *changed* event is
disconnected.
|
entailment
|
def _dep_changed(self, dep, code_changed=False, value_changed=False):
""" Called when a dependency's expression has changed.
"""
self.changed(code_changed, value_changed)
|
Called when a dependency's expression has changed.
|
entailment
|
def changed(self, code_changed=False, value_changed=False):
"""Inform dependents that this shaderobject has changed.
"""
for d in self._dependents:
d._dep_changed(self, code_changed=code_changed,
value_changed=value_changed)
|
Inform dependents that this shaderobject has changed.
|
entailment
|
def eq(a, b):
""" The great missing equivalence function: Guaranteed evaluation
to a single bool value.
"""
if a is b:
return True
if a is None or b is None:
return True if a is None and b is None else False
try:
e = a == b
except ValueError:
return False
except AttributeError:
return False
except Exception:
print("a:", str(type(a)), str(a))
print("b:", str(type(b)), str(b))
raise
t = type(e)
if t is bool:
return e
elif t is bool_:
return bool(e)
elif isinstance(e, ndarray):
try:
# disaster: if a is empty and b is not, then e.all() is True
if a.shape != b.shape:
return False
except Exception:
return False
if (hasattr(e, 'implements') and e.implements('MetaArray')):
return e.asarray().all()
else:
return e.all()
else:
raise Exception("== operator returned type %s" % str(type(e)))
|
The great missing equivalence function: Guaranteed evaluation
to a single bool value.
|
entailment
|
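Assuming the eq() defined above is in scope, the cases it is designed to normalize look like this (the arrays are made-up examples):

import numpy as np

a = np.array([1, 2, 3])

assert eq(a, np.array([1, 2, 3]))    # elementwise '==' collapsed via .all()
assert not eq(a, np.array([1, 2]))   # shape mismatch / broadcast failure -> False
assert eq(None, None) and not eq(None, a)
assert eq(3, 3.0)                    # ordinary scalar comparison yields a plain bool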
def zoom(self, factor, center=None):
""" Zoom in (or out) at the given center
Parameters
----------
factor : float or tuple
Fraction by which the scene should be zoomed (e.g. a factor of 2
causes the scene to appear twice as large).
center : tuple of 2-4 elements
The center of the view. If not given or None, use the
current center.
"""
assert center is None or len(center) in (2, 3, 4)
# Get scale factor, take scale ratio into account
if np.isscalar(factor):
scale = [factor, factor]
else:
if len(factor) != 2:
raise TypeError("factor must be scalar or length-2 sequence.")
scale = list(factor)
if self.aspect is not None:
scale[0] = scale[1]
# Init some variables
center = center if (center is not None) else self.center
# Make a new object (copy), so that allocation will
# trigger view_changed:
rect = Rect(self.rect)
# Get space from given center to edges
left_space = center[0] - rect.left
right_space = rect.right - center[0]
bottom_space = center[1] - rect.bottom
top_space = rect.top - center[1]
# Scale these spaces
rect.left = center[0] - left_space * scale[0]
rect.right = center[0] + right_space * scale[0]
rect.bottom = center[1] - bottom_space * scale[1]
rect.top = center[1] + top_space * scale[1]
self.rect = rect
|
Zoom in (or out) at the given center
Parameters
----------
factor : float or tuple
Fraction by which the scene should be zoomed (e.g. a factor of 2
causes the scene to appear twice as large).
center : tuple of 2-4 elements
The center of the view. If not given or None, use the
current center.
|
entailment
|
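The rect arithmetic in zoom() can be verified with plain numbers; the sketch below replaces the Rect object with a simple dict (an assumption purely for illustration) and applies a scale factor of 2 about the center of a 10x10 view.

rect = dict(left=0.0, right=10.0, bottom=0.0, top=10.0)
center = (5.0, 5.0)
scale = (2.0, 2.0)

left_space = center[0] - rect['left']        # 5.0
right_space = rect['right'] - center[0]      # 5.0
bottom_space = center[1] - rect['bottom']    # 5.0
top_space = rect['top'] - center[1]          # 5.0

rect['left'] = center[0] - left_space * scale[0]       # -5.0
rect['right'] = center[0] + right_space * scale[0]     # 15.0
rect['bottom'] = center[1] - bottom_space * scale[1]   # -5.0
rect['top'] = center[1] + top_space * scale[1]         # 15.0

# With a scale factor of 2 the visible range doubles about the chosen center.
assert (rect['right'] - rect['left'], rect['top'] - rect['bottom']) == (20.0, 20.0)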
def pan(self, *pan):
"""Pan the view.
Parameters
----------
*pan : length-2 sequence
The distance to pan the view, in the coordinate system of the
scene.
"""
if len(pan) == 1:
pan = pan[0]
self.rect = self.rect + pan
|
Pan the view.
Parameters
----------
*pan : length-2 sequence
The distance to pan the view, in the coordinate system of the
scene.
|
entailment
|
def viewbox_mouse_event(self, event):
"""
The SubScene received a mouse event; update transform
accordingly.
Parameters
----------
event : instance of Event
The event.
"""
if event.handled or not self.interactive:
return
# Scrolling
BaseCamera.viewbox_mouse_event(self, event)
if event.type == 'mouse_wheel':
center = self._scene_transform.imap(event.pos)
self.zoom((1 + self.zoom_factor) ** (-event.delta[1] * 30), center)
event.handled = True
elif event.type == 'mouse_move':
if event.press_event is None:
return
modifiers = event.mouse_event.modifiers
p1 = event.mouse_event.press_event.pos
p2 = event.mouse_event.pos
if 1 in event.buttons and not modifiers:
# Translate
p1 = np.array(event.last_event.pos)[:2]
p2 = np.array(event.pos)[:2]
p1s = self._transform.imap(p1)
p2s = self._transform.imap(p2)
self.pan(p1s-p2s)
event.handled = True
elif 2 in event.buttons and not modifiers:
# Zoom
p1c = np.array(event.last_event.pos)[:2]
p2c = np.array(event.pos)[:2]
scale = ((1 + self.zoom_factor) **
((p1c-p2c) * np.array([1, -1])))
center = self._transform.imap(event.press_event.pos[:2])
self.zoom(scale, center)
event.handled = True
else:
event.handled = False
elif event.type == 'mouse_press':
# accept the event if it is button 1 or 2.
# This is required in order to receive future events
event.handled = event.button in [1, 2]
else:
event.handled = False
|
The SubScene received a mouse event; update transform
accordingly.
Parameters
----------
event : instance of Event
The event.
|
entailment
|
def set_data(self, vol, clim=None):
""" Set the volume data.
Parameters
----------
vol : ndarray
The 3D volume.
clim : tuple | None
Colormap limits to use. None will use the min and max values.
"""
# Check volume
if not isinstance(vol, np.ndarray):
raise ValueError('Volume visual needs a numpy array.')
if not ((vol.ndim == 3) or (vol.ndim == 4 and vol.shape[-1] <= 4)):
raise ValueError('Volume visual needs a 3D image.')
# Handle clim
if clim is not None:
clim = np.array(clim, float)
if not (clim.ndim == 1 and clim.size == 2):
raise ValueError('clim must be a 2-element array-like')
self._clim = tuple(clim)
if self._clim is None:
self._clim = vol.min(), vol.max()
# Apply clim
vol = np.array(vol, dtype='float32', copy=False)
if self._clim[1] == self._clim[0]:
if self._clim[0] != 0.:
vol *= 1.0 / self._clim[0]
else:
vol -= self._clim[0]
vol /= self._clim[1] - self._clim[0]
# Apply to texture
self._tex.set_data(vol) # will be efficient if vol is same shape
self.shared_program['u_shape'] = (vol.shape[2], vol.shape[1],
vol.shape[0])
shape = vol.shape[:3]
if self._vol_shape != shape:
self._vol_shape = shape
self._need_vertex_update = True
self._vol_shape = shape
# Get some stats
self._kb_for_texture = np.prod(self._vol_shape) / 1024
|
Set the volume data.
Parameters
----------
vol : ndarray
The 3D volume.
clim : tuple | None
Colormap limits to use. None will use the min and max values.
|
entailment
|
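The clim normalization applied above maps clim[0] to 0 and clim[1] to 1; a tiny made-up volume makes that concrete:

import numpy as np

vol = np.array([[[10., 20., 30.]]], dtype=np.float32)   # tiny made-up 3D "volume"
clim = (10.0, 30.0)

vol = vol - clim[0]
vol /= clim[1] - clim[0]
assert np.allclose(vol, [[[0.0, 0.5, 1.0]]])   # clim[0] -> 0, clim[1] -> 1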
def _create_vertex_data(self):
""" Create and set positions and texture coords from the given shape
We have six faces with 1 quad (2 triangles) each, resulting in
6*2*3 = 36 vertices in total.
"""
shape = self._vol_shape
# Get corner coordinates. The -0.5 offset is to center
# pixels/voxels. This works correctly for anisotropic data.
x0, x1 = -0.5, shape[2] - 0.5
y0, y1 = -0.5, shape[1] - 0.5
z0, z1 = -0.5, shape[0] - 0.5
pos = np.array([
[x0, y0, z0],
[x1, y0, z0],
[x0, y1, z0],
[x1, y1, z0],
[x0, y0, z1],
[x1, y0, z1],
[x0, y1, z1],
[x1, y1, z1],
], dtype=np.float32)
"""
6-------7
/| /|
4-------5 |
| | | |
| 2-----|-3
|/ |/
0-------1
"""
# Order is chosen such that normals face outward; front faces will be
# culled.
indices = np.array([2, 6, 0, 4, 5, 6, 7, 2, 3, 0, 1, 5, 3, 7],
dtype=np.uint32)
# Apply
self._vertices.set_data(pos)
self._index_buffer.set_data(indices)
|
Create and set positions and texture coords from the given shape
We have six faces with 1 quad (2 triangles) each, resulting in
6*2*3 = 36 vertices in total.
|
entailment
|
def set_data(self, pos=None, color=None):
"""Set the data
Parameters
----------
pos : list, tuple or numpy array
Bounds of the region along the axis. len(pos) must be >=2.
color : list, tuple, or array
The color to use when drawing the line. It must have a shape of
(1, 4) for a single color region or (len(pos), 4) for a multicolor
region.
"""
new_pos = self._pos
new_color = self._color
if pos is not None:
num_elements = len(pos)
pos = np.array(pos, dtype=np.float32)
if pos.ndim != 1:
raise ValueError('Expected 1D array')
vertex = np.empty((num_elements * 2, 2), dtype=np.float32)
if self._is_vertical:
vertex[:, 0] = np.repeat(pos, 2)
vertex[:, 1] = np.tile([-1, 1], num_elements)
else:
vertex[:, 1] = np.repeat(pos, 2)
vertex[:, 0] = np.tile([1, -1], num_elements)
new_pos = vertex
self._changed['pos'] = True
if color is not None:
color = np.array(color, dtype=np.float32)
num_elements = new_pos.shape[0] // 2
if color.ndim == 2:
if color.shape[0] != num_elements:
raise ValueError('Expected a color for each pos')
if color.shape[1] != 4:
raise ValueError('Each color must be a RGBA array')
color = np.repeat(color, 2, axis=0).astype(np.float32)
elif color.ndim == 1:
if color.shape[0] != 4:
raise ValueError('Each color must be a RGBA array')
color = np.repeat([color], new_pos.shape[0], axis=0)
color = color.astype(np.float32)
else:
raise ValueError('Expected a numpy array of shape '
'(%d, 4) or (1, 4)' % num_elements)
new_color = color
self._changed['color'] = True
# Ensure pos and color have the same size
if new_pos.shape[0] != new_color.shape[0]:
raise ValueError('pos and color must have the same size')
self._color = new_color
self._pos = new_pos
|
Set the data
Parameters
----------
pos : list, tuple or numpy array
Bounds of the region along the axis. len(pos) must be >=2.
color : list, tuple, or array
The color to use when drawing the line. It must have a shape of
(1, 4) for a single color region or (len(pos), 4) for a multicolor
region.
|
entailment
|
def _prepare_draw(self, view=None):
"""This method is called immediately before each draw.
The *view* argument indicates which view is about to be drawn.
"""
if self._changed['pos']:
self.pos_buf.set_data(self._pos)
self._changed['pos'] = False
if self._changed['color']:
self.color_buf.set_data(self._color)
self._program.vert['color'] = self.color_buf
self._changed['color'] = False
return True
|
This method is called immediately before each draw.
The *view* argument indicates which view is about to be drawn.
|
entailment
|
def refresh_cache(self, cat_id):
'''
Repopulate cache
'''
self.cache[cat_id] = most_recent_25_posts_by_category(cat_id)
self.last_refresh[cat_id] = datetime.now()
print ('Cache refresh at...', str(self.last_refresh[cat_id]))
|
Repopulate cache
|
entailment
|
def _merge_intervals(self, min_depth):
"""
Merge overlapping intervals.
This method is called only once in the constructor.
"""
def add_interval(ret, start, stop):
if min_depth is not None:
shift = 2 * (29 - min_depth)
mask = (int(1) << shift) - 1
if stop - start < mask:
ret.append((start, stop))
else:
ofs = start & mask
st = start
if ofs > 0:
st = (start - ofs) + (mask + 1)
ret.append((start, st))
while st + mask + 1 < stop:
ret.append((st, st + mask + 1))
st = st + mask + 1
ret.append((st, stop))
else:
ret.append((start, stop))
ret = []
start = stop = None
# Use numpy sort method
self._intervals.sort(axis=0)
for itv in self._intervals:
if start is None:
start, stop = itv
continue
# gap between intervals
if itv[0] > stop:
add_interval(ret, start, stop)
start, stop = itv
else:
# merge intervals
if itv[1] > stop:
stop = itv[1]
if start is not None and stop is not None:
add_interval(ret, start, stop)
self._intervals = np.asarray(ret)
|
Merge overlapping intervals.
This method is called only once in the constructor.
|
entailment
|
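Without the min_depth splitting, the merging pass above is a single sweep over sorted intervals; a compact standalone version of that sweep, with a small worked example, is sketched below.

def merge_intervals(intervals):
    # One pass over the sorted intervals, extending 'stop' while they overlap.
    merged = []
    start = stop = None
    for lo, hi in sorted(intervals):
        if start is None:
            start, stop = lo, hi
        elif lo > stop:              # gap between intervals: flush the current one
            merged.append((start, stop))
            start, stop = lo, hi
        elif hi > stop:              # overlap: extend the current interval
            stop = hi
    if start is not None:
        merged.append((start, stop))
    return merged

assert merge_intervals([(5, 9), (0, 2), (1, 4), (8, 12)]) == [(0, 4), (5, 12)]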
def union(self, another_is):
"""
Return the union between self and ``another_is``.
Parameters
----------
another_is : `IntervalSet`
an IntervalSet object.
Returns
-------
interval : `IntervalSet`
the union of self with ``another_is``.
"""
result = IntervalSet()
if another_is.empty():
result._intervals = self._intervals
elif self.empty():
result._intervals = another_is._intervals
else:
# res has no overlapping intervals
result._intervals = IntervalSet.merge(self._intervals,
another_is._intervals,
lambda in_a, in_b: in_a or in_b)
return result
|
Return the union between self and ``another_is``.
Parameters
----------
another_is : `IntervalSet`
an IntervalSet object.
Returns
-------
interval : `IntervalSet`
the union of self with ``another_is``.
|
entailment
|