sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def _assign_uid(self, sid):
    """
    Assign a uid to this Pipeline, namespaced by the session id ``sid``,
    then recursively assign uids to every child Stage and propagate the
    Pipeline's uid/name down the hierarchy.
    """
    # Namespaced, counter-based id, e.g. 'pipeline.0001' within session `sid`.
    uid = ru.generate_id(
        'pipeline.%(item_counter)04d', ru.ID_CUSTOM, namespace=sid)
    self._uid = uid
    for child_stage in self._stages:
        child_stage._assign_uid(sid)
    self._pass_uid()
|
Purpose: Assign a uid to the current object based on the sid passed. Pass the current uid to children of
current object
|
entailment
|
def _pass_uid(self):
    """
    Propagate this Pipeline's uid and name to every child Stage, then ask
    each Stage to propagate them further down to its own children.
    :argument: List of Stage objects (optional)
    """
    for child in self._stages:
        for key, value in (('uid', self._uid), ('name', self._name)):
            child.parent_pipeline[key] = value
        child._pass_uid()
|
Purpose: Pass current Pipeline's uid to all Stages.
:argument: List of Stage objects (optional)
|
entailment
|
def hexdump(src, length=16, sep='.'):
    """
    Print a canonical hex + ASCII dump of *src* to stdout.
    Hexdump function by sbz and 7h3rAm on Github:
    (https://gist.github.com/7h3rAm/5603718).
    :param src: Source, the string to be shown in hexadecimal format
    :param length: Number of hex characters to print in one row
    :param sep: Unprintable characters representation
    :return: None
    """
    # Map each byte value to its printable character, or `sep` when the
    # character is unprintable (repr of a printable char is 3 chars: 'x').
    filtr = ''.join([(len(repr(chr(x))) == 3) and chr(x) or sep for x in range(256)])
    lines = []
    # BUGFIX: use `range` instead of the Python-2-only `xrange`, which
    # raises NameError under Python 3 (the rest of the file already uses
    # Python-3 print() calls).
    for c in range(0, len(src), length):
        chars = src[c:c+length]
        hexstring = ' '.join(["%02x" % ord(x) for x in chars])
        if len(hexstring) > 24:
            hexstring = "%s %s" % (hexstring[:24], hexstring[24:])
        printable = ''.join(["%s" % ((ord(x) <= 127 and filtr[ord(x)]) or sep) for x in chars])
        lines.append(" %02x: %-*s |%s|\n" % (c, length*3, hexstring, printable))
    print(''.join(lines))
|
Hexdump function by sbz and 7h3rAm on Github:
(https://gist.github.com/7h3rAm/5603718).
:param src: Source, the string to be shown in hexadecimal format
:param length: Number of hex characters to print in one row
:param sep: Unprintable characters representation
:return:
|
entailment
|
def xym(source_id, srcdir, dstdir, strict=False, strict_examples=False, debug_level=0, add_line_refs=False,
        force_revision_pyang=False, force_revision_regexp=False):
    """
    Extracts YANG model from an IETF RFC or draft text file.
    This is the main (external) API entry for the module.
    :param source_id: identifier (file name or URL) of a draft or RFC file containing
        one or more YANG models
    :param srcdir: If source_id points to a file, the optional parameter identifies
        the directory where the file is located
    :param dstdir: Directory where to put the extracted YANG models
    :param strict: Strict syntax enforcement
    :param strict_examples: Only output valid examples when in strict mode
    :param debug_level: Determines how much debug output is printed to the console
    :param add_line_refs: Whether each extracted model line should be annotated
        with a reference to its source line in the draft/RFC
    :param force_revision_regexp: Whether it should create a <filename>@<revision>.yang even on error using regexp
    :param force_revision_pyang: Whether it should create a <filename>@<revision>.yang even on error using pyang
    :return: whatever YangModuleExtractor.get_extracted_models() returns
        (the original ':return: None' was inaccurate -- see the final
        return statement)
    """
    # The two "force revision" strategies are mutually exclusive; prefer regexp.
    if force_revision_regexp and force_revision_pyang:
        print('Can not use both methods for parsing name and revision - using regular expression method only')
        force_revision_pyang = False
    url = re.compile(r'^(?:http|ftp)s?://' # http:// or https://
                     r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain
                     r'localhost|' # localhost...
                     r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
                     r'(?::\d+)?' # optional port
                     r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    rqst_hdrs = {'Accept': 'text/plain', 'Accept-Charset': 'utf-8'}
    ye = YangModuleExtractor(source_id, dstdir, strict, strict_examples, add_line_refs, debug_level)
    is_url = url.match(source_id)
    if is_url:
        r = requests.get(source_id, headers=rqst_hdrs)
        if r.status_code == 200:
            # Split into lines, keeping line endings, to mirror the
            # readlines() shape of the file-based branch below.
            # NOTE(review): .encode('utf8') yields bytes under Python 3,
            # so these are bytes lines -- confirm the extractor handles that.
            content = r.text.encode('utf8').splitlines(True)
            ye.extract_yang_model(content)
        else:
            print("Failed to fetch file from URL '%s', error '%d'" % (source_id, r.status_code), file=sys.stderr)
    else:
        try:
            with open(os.path.join(srcdir, source_id)) as sf:
                ye.extract_yang_model(sf.readlines())
        except IOError as ioe:
            # Best-effort: report the failure and still return whatever
            # (if anything) was extracted.
            print(ioe)
    return ye.get_extracted_models(force_revision_pyang, force_revision_regexp)
|
Extracts YANG model from an IETF RFC or draft text file.
This is the main (external) API entry for the module.
:param add_line_refs:
:param source_id: identifier (file name or URL) of a draft or RFC file containing
one or more YANG models
:param srcdir: If source_id points to a file, the optional parameter identifies
the directory where the file is located
:param dstdir: Directory where to put the extracted YANG models
:param strict: Strict syntax enforcement
:param strict_examples: Only output valid examples when in strict mode
:param debug_level: Determines how much debug output is printed to the console
:param force_revision_regexp: Whether it should create a <filename>@<revision>.yang even on error using regexp
:param force_revision_pyang: Whether it should create a <filename>@<revision>.yang even on error using pyang
:return: None
|
entailment
|
def warning(self, s):
    """
    Emit a warning message for the current source to stderr.
    :param s: The warning string to print
    :return: None
    """
    message = " WARNING: '%s', %s" % (self.src_id, s)
    print(message, file=sys.stderr)
|
Prints out a warning message to stderr.
:param s: The warning string to print
:return: None
|
entailment
|
def error(self, s):
    """
    Emit an error message for the current source to stderr.
    :param s: The error string to print
    :return: None
    """
    message = " ERROR: '%s', %s" % (self.src_id, s)
    print(message, file=sys.stderr)
|
Prints out an error message to stderr.
:param s: The error string to print
:return: None
|
entailment
|
def remove_leading_spaces(self, input_model):
    """
    Model post-processing step: strip the common leading indentation
    from an extracted module. Depending on the formatting of the
    draft/RFC text, each line may have multiple spaces prepended. As a
    side effect, tracks the longest resulting line in
    ``self.max_line_len`` for use by later pipeline stages.
    :param input_model: The YANG model, as [line, source_line_no] pairs
    :return: YANG model lines with leading spaces removed
    """
    indent = 1024  # shrinks to the smallest indent seen so far
    result = []
    for entry in input_model:
        text, lineno = entry[0], entry[1]
        if text.rstrip(' \r\n') == '':
            # Blank lines are normalized to a bare newline and do not
            # influence the indent calculation.
            result.append(['\n', lineno])
            continue
        indent = min(indent, len(text) - len(text.lstrip(' ')))
        stripped = text[indent:]
        result.append([stripped, lineno])
        # Track the longest line (newline included) for later stages.
        self.max_line_len = max(self.max_line_len, len(stripped))
    return result
|
This function is a part of the model post-processing pipeline. It
removes leading spaces from an extracted module; depending on the
formatting of the draft/rfc text, may have multiple spaces prepended
to each line. The function also determines the length of the longest
line in the module - this value can be used by later stages of the
model post-processing pipeline.
:param input_model: The YANG model to be processed
:return: YANG model lines with leading spaces removed
|
entailment
|
def add_line_references(self, input_model):
    """
    Model post-processing step: append to every module line a comment
    that references the line number in the original draft/RFC from
    which the line was extracted.
    :param input_model: The YANG model, as [line, source_line_no] pairs
    :return: Modified YANG model with '// <line>' comments right-aligned
             past the longest line in the model
    """
    referenced = []
    for text, lineno in input_model:
        # Right-align the reference comment just past the longest line.
        pad = self.max_line_len - len(text) + 7
        ref = ('// %4d' % lineno).rjust(pad, ' ')
        referenced.append(['%s %s\n' % (text.rstrip(' \r\n\t\f'), ref), lineno])
    return referenced
|
This function is a part of the model post-processing pipeline. For
each line in the module, it adds a reference to the line number in
the original draft/RFC from where the module line was extracted.
:param input_model: The YANG model to be processed
:return: Modified YANG model, where line numbers from the RFC/Draft
text file are added as comments at the end of each line in
the modified model
|
entailment
|
def remove_extra_empty_lines(self, input_model):
    """
    Removes superfluous newlines from a YANG model that was extracted
    from a draft or RFC text. Newlines are removed whenever 2 or more
    consecutive empty lines are found in the model. This function is a
    part of the model post-processing pipeline.
    :param input_model: The YANG model to be processed
    :return: YANG model with superfluous newlines removed
    """
    ncnt = 0
    output_model = []
    for ln in input_model:
        # BUGFIX: compare with '==' (value equality) instead of 'is'
        # (identity); 'is' against '' / 0 relies on CPython interning and
        # emits SyntaxWarning on Python >= 3.8.
        if ln[0].strip(' \n\r') == '':
            if ncnt == 0:
                # Keep the first empty line of a run; drop the rest.
                output_model.append(ln)
            elif self.debug_level > 1:
                self.debug_print_strip_msg(ln[1] - 1, ln[0])
            ncnt += 1
        else:
            output_model.append(ln)
            ncnt = 0
    if self.debug_level > 0:
        print(' Removed %d empty lines' % (len(input_model) - len(output_model)))
    return output_model
|
Removes superfluous newlines from a YANG model that was extracted
from a draft or RFC text. Newlines are removed whenever 2 or more
consecutive empty lines are found in the model. This function is a
part of the model post-processing pipeline.
:param input_model: The YANG model to be processed
:return: YANG model with superfluous newlines removed
|
entailment
|
def post_process_model(self, input_model, add_line_refs):
    """
    Run the model post-processing pipeline in its fixed order:
    strip common leading indentation, collapse runs of empty lines,
    optionally annotate lines with source references, then finalize.
    :param input_model: The YANG model to be processed in the pipeline
    :param add_line_refs: Flag that controls whether line number
        references should be added to the model.
    :return: List of strings that constitute the final YANG model to
        be written to its module file.
    """
    model = self.remove_leading_spaces(input_model)
    model = self.remove_extra_empty_lines(model)
    if add_line_refs:
        model = self.add_line_references(model)
    return finalize_model(model)
|
This function defines the order and execution logic for actions
that are performed in the model post-processing pipeline.
:param input_model: The YANG model to be processed in the pipeline
:param add_line_refs: Flag that controls whether line number
references should be added to the model.
:return: List of strings that constitute the final YANG model to
be written to its module file.
|
entailment
|
def write_model_to_file(self, mdl, fn):
    """
    Write a YANG model that was extracted from a source identifier
    (URL or source .txt file) to a .yang destination file. Refuses to
    overwrite an existing file; failures are reported via self.error().
    :param mdl: YANG model, as a list of [line, source_line_no] pairs
    :param fn: Name of the YANG model file
    :return: None
    """
    # Run the post-processing pipeline and join the result for writing.
    output = ''.join(self.post_process_model(mdl, self.add_line_refs))
    if fn:
        fqfn = self.dst_dir + '/' + fn
        if os.path.isfile(fqfn):
            self.error("File '%s' exists" % fqfn)
            return
        # The 'with' block closes the file on exit; the original's
        # explicit of.close() inside the block was redundant and removed.
        with open(fqfn, 'w') as of:
            of.write(output)
        self.extracted_models.append(fn)
    else:
        self.error("Output file name can not be determined; YANG file not created")
|
Write a YANG model that was extracted from a source identifier
(URL or source .txt file) to a .yang destination file
:param mdl: YANG model, as a list of lines
:param fn: Name of the YANG model file
:return:
|
entailment
|
def debug_print_line(self, i, level, line):
    """
    Debug print of the currently parsed line; at debug level 2 a
    one-line summary, above 2 a full hexdump of the line.
    :param i: The line number of the line that is being currently parsed
    :param level: Parser level
    :param line: the line that is currently being parsed
    :return: None
    """
    if self.debug_level < 2:
        return
    if self.debug_level == 2:
        print("Line %d (%d): '%s'" % (i + 1, level, line.rstrip(' \r\n\t\f')))
    else:
        print("Line %d (%d):" % (i + 1, level))
        hexdump(line)
|
Debug print of the currently parsed line
:param i: The line number of the line that is being currently parsed
:param level: Parser level
:param line: the line that is currently being parsed
:return: None
|
entailment
|
def debug_print_strip_msg(self, i, line):
    """
    Debug print indicating that an empty line is being skipped; at
    debug level 2 a one-line summary, above 2 a full hexdump.
    :param i: The line number of the line that is being currently parsed
    :param line: the parsed line
    :return: None
    """
    if self.debug_level == 2:
        print(" Stripping Line %d: '%s'" % (i + 1, line.rstrip(' \r\n\t\f')))
        return
    if self.debug_level > 2:
        print(" Stripping Line %d:" % (i + 1))
        hexdump(line)
|
Debug print indicating that an empty line is being skipped
:param i: The line number of the line that is being currently parsed
:param line: the parsed line
:return: None
|
entailment
|
def strip_empty_lines_forward(self, content, i):
    """
    Advance the parse index past a run of empty lines.
    :param content: parsed text, as a list of lines
    :param i: index of the line currently being parsed
    :return: index of the first non-empty line at or after `i`
             (== len(content) if the text ends in empty lines)
    """
    while i < len(content) and content[i].strip(' \r\n\t\f') == '':
        self.debug_print_strip_msg(i, content[i])
        i += 1
    return i
|
Skip over empty lines
:param content: parsed text
:param i: current parsed line
:return: number of skipped lined
|
entailment
|
def strip_empty_lines_backward(self, model, max_lines_to_strip):
    """
    Strip trailing empty lines off the model built so far (in place).
    Called when the parser encounters a Footer.
    :param model: lines that were added to the model up to this point
    :param max_lines_to_strip: max number of lines to strip from the model
    :return: None
    """
    for _ in range(max_lines_to_strip):
        if model[-1][0].strip(' \r\n\t\f') != '':
            return
        self.debug_print_strip_msg(model[-1][1] - 1, model[-1][0])
        model.pop()
|
Strips empty lines preceding the line that is currently being parsed. This
function is called when the parser encounters a Footer.
:param model: lines that were added to the model up to this point
:param line_num: the number of the line being parsed
:param max_lines_to_strip: max number of lines to strip from the model
:return: None
|
entailment
|
def extract_yang_model(self, content):
    """
    Extracts one or more YANG models from an RFC or draft text string in
    which the models are specified. The function skips over page
    formatting (Page Headers and Footers) and performs basic YANG module
    syntax checking. In strict mode, the function also enforces the
    <CODE BEGINS> / <CODE ENDS> tags - a model is not extracted unless
    the tags are present.
    :param content: the draft/RFC text, as a list of lines
    :return: None
    """
    model = []             # [line, source_line_no] pairs of the model being built
    output_file = None     # destination .yang file name, once known
    in_model = False       # True between <CODE BEGINS> and <CODE ENDS>
    example_match = False  # module name started with 'example-'
    i = 0                  # index of the line currently being parsed
    level = 0              # brace-nesting depth; > 0 while inside a module
    quotes = 0             # 1 while inside an unterminated double-quoted string
    while i < len(content):
        line = content[i]
        # Try to match '<CODE ENDS>'
        if self.CODE_ENDS_TAG.match(line):
            if in_model is False:
                self.warning("Line %d: misplaced <CODE ENDS>" % i)
            in_model = False
        # Track double-quote parity so braces inside string literals are
        # not counted toward the nesting level.
        if "\"" in line:
            if line.count("\"") % 2 == 0:
                quotes = 0
            else:
                if quotes == 1:
                    quotes = 0
                else:
                    quotes = 1
        # Try to match '(sub)module <module_name> {'
        match = self.MODULE_STATEMENT.match(line)
        if match:
            # We're already parsing a module
            if quotes == 0:
                if level > 0:
                    self.error("Line %d - 'module' statement within another module" % i)
                    return
                # Check if we should enforce <CODE BEGINS> / <CODE ENDS>
                # if we do enforce, we ignore models not enclosed in <CODE BEGINS> / <CODE ENDS>
                if match.groups()[1] or match.groups()[4]:
                    self.warning('Line %d - Module name should not be enclosed in quotes' % i)
                # do the module name checking, etc.
                example_match = self.EXAMPLE_TAG.match(match.groups()[2])
                if in_model is True:
                    if example_match:
                        self.error("Line %d - YANG module '%s' with <CODE BEGINS> and starting with 'example-'" %
                                   (i, match.groups()[2]))
                else:
                    if not example_match:
                        self.error("Line %d - YANG module '%s' with no <CODE BEGINS> and not starting with 'example-'" %
                                   (i, match.groups()[2]))
            # now decide if we're allowed to set the level
            # (i.e. signal that we're in a module) to 1 and if
            # we're allowed to output the module at all with the
            # strict examples flag
            # if self.strict is True:
            #     if in_model is True:
            #         level = 1
            # else:
            #     level = 1
            # always set the level to 1; we decide whether or not
            # to output at the end
            if quotes == 0:
                level = 1
            if not output_file and level == 1 and quotes == 0:
                print("\nExtracting '%s'" % match.groups()[2])
                output_file = '%s.yang' % match.groups()[2].strip('"\'')
                if self.debug_level > 0:
                    print(' Getting YANG file name from module name: %s' % output_file)
        if level > 0:
            self.debug_print_line(i, level, content[i])
            # Try to match the Footer ('[Page <page_num>]')
            # If match found, skip over page headers and footers
            if self.PAGE_TAG.match(line):
                self.strip_empty_lines_backward(model, 3)
                self.debug_print_strip_msg(i, content[i])
                i += 1  # Strip the Footer line itself
                # Strip empty lines between the Footer and the next page Header
                i = self.strip_empty_lines_forward(content, i)
                if i < len(content):
                    self.debug_print_strip_msg(i, content[i])
                    i += 1  # Strip the next page Header
                else:
                    self.error("<End of File> - EOF encountered while parsing the model")
                    return
                # Strip empty lines between the page Header and real content on the page
                # (-1 compensates for the unconditional i += 1 at loop end)
                i = self.strip_empty_lines_forward(content, i) - 1
                if i >= len(content):
                    self.error("<End of File> - EOF encountered while parsing the model")
                    return
            else:
                model.append([line, i + 1])
                counter = Counter(line)
                # Update the nesting level from the brace counts, but only
                # when we are not inside a quoted string; a line mixing '}'
                # and '"' is only counted if the '}' falls outside the quotes.
                if quotes == 0:
                    if "\"" in line and "}" in line:
                        if line.index("}") > line.rindex("\"") or line.index("}") < line.index("\""):
                            level += (counter['{'] - counter['}'])
                    else:
                        level += (counter['{'] - counter['}'])
                # level back at 1 means the module's closing brace was seen:
                # emit the model (subject to strict/example rules) and reset.
                if level == 1:
                    if self.strict:
                        if self.strict_examples:
                            if example_match and not in_model:
                                self.write_model_to_file(model, output_file)
                        elif in_model:
                            self.write_model_to_file(model, output_file)
                    else:
                        self.write_model_to_file(model, output_file)
                    self.max_line_len = 0
                    model = []
                    output_file = None
                    level = 0
        # Try to match '<CODE BEGINS>'
        match = self.CODE_BEGINS_TAG.match(line)
        if match:
            # Found the beginning of the YANG module code section; make sure we're not parsing a model already
            if level > 0:
                self.error("Line %d - <CODE BEGINS> within a model" % i)
                return
            if in_model is True:
                self.error("Line %d - Misplaced <CODE BEGINS> or missing <CODE ENDS>" % i)
            in_model = True
            mg = match.groups()
            # Get the YANG module's file name
            if mg[2]:
                print("\nExtracting '%s'" % match.groups()[2])
                output_file = mg[2].strip()
            else:
                if mg[0] and mg[1] is None:
                    self.error('Line %d - Missing file name in <CODE BEGINS>' % i)
                else:
                    self.error("Line %d - YANG file not specified in <CODE BEGINS>" % i)
        i += 1
    # End of input: a still-open module or a missing <CODE ENDS> is an error.
    if level > 0:
        self.error("<End of File> - EOF encountered while parsing the model")
        return
    if in_model is True:
        self.error("Line %d - Missing <CODE ENDS>" % i)
|
Extracts one or more YANG models from an RFC or draft text string in
which the models are specified. The function skips over page
formatting (Page Headers and Footers) and performs basic YANG module
syntax checking. In strict mode, the function also enforces the
<CODE BEGINS> / <CODE ENDS> tags - a model is not extracted unless
the tags are present.
:return: None
|
entailment
|
def auto_retry(fun):
    """Decorator for retrying method calls, based on instance parameters.

    The decorated method is retried on ``socket.error`` according to the
    instance's ``_retry_config`` (``retry_attempts``, ``retry_wait``,
    ``retry_backoff``); when all attempts fail, the last error is raised.
    """
    @functools.wraps(fun)
    def decorated(instance, *args, **kwargs):
        """Wrapper around a decorated function."""
        cfg = instance._retry_config
        remaining_tries = cfg.retry_attempts
        current_wait = cfg.retry_wait
        retry_backoff = cfg.retry_backoff
        last_error = None
        while remaining_tries >= 0:
            try:
                return fun(instance, *args, **kwargs)
            except socket.error as e:
                last_error = e
                instance._retry_logger.warning('Connection failed: %s', e)
                remaining_tries -= 1
                # BUGFIX: '<= 0' instead of '== 0' -- with retry_attempts
                # <= 0 the counter goes negative and the original slept
                # pointlessly after the final attempt.
                if remaining_tries <= 0:
                    # Last attempt failed; fall through to the raise.
                    break
                # Wait before the next attempt, growing the delay by the
                # configured backoff factor.
                time.sleep(current_wait)
                current_wait *= retry_backoff
        # All attempts failed, let's raise the last error.
        raise last_error
    return decorated
|
Decorator for retrying method calls, based on instance parameters.
|
entailment
|
def extract_pattern(fmt):
    """Extracts used strings from a %(foo)s pattern."""
    class _KeyRecorder(object):
        """Mapping stand-in that records every key it is asked for."""
        def __init__(self):
            self.seen_keys = set()
        def __getitem__(self, key):
            self.seen_keys.add(key)
            return ''
        def keys(self):
            return self.seen_keys
    recorder = _KeyRecorder()
    try:
        fmt % recorder
    except TypeError:
        # Formatting error (e.g. non-string conversion); keep the keys
        # that were seen before the failure.
        pass
    return set(recorder.keys())
|
Extracts used strings from a %(foo)s pattern.
|
entailment
|
def iso_mesh_line(vertices, tris, vertex_data, levels):
    """Generate an isocurve from vertex data in a surface mesh.
    Parameters
    ----------
    vertices : ndarray, shape (Nv, 3)
        Vertex coordinates.
    tris : ndarray, shape (Nf, 3)
        Indices of triangular element into the vertices array.
    vertex_data : ndarray, shape (Nv,)
        data at vertex.
    levels : ndarray, shape (Nl,)
        Levels at which to generate an isocurve
    Returns
    -------
    lines : ndarray, shape (Nvout, 3)
        Vertex coordinates for lines points
    connects : ndarray, shape (Ne, 2)
        Indices of line element into the vertex array.
    vertex_level: ndarray, shape (Nvout,)
        level for vertex in lines
    Notes
    -----
    Uses a marching squares algorithm to generate the isolines.
    All four outputs stay None when the input shapes are inconsistent.
    """
    lines = None
    connects = None
    vertex_level = None
    level_index = None
    if not all([isinstance(x, np.ndarray) for x in (vertices, tris,
                vertex_data, levels)]):
        raise ValueError('all inputs must be numpy arrays')
    # Accept (Nv, <=3) directly, or (Nv, 4) by dropping the last column
    # (presumably a homogeneous/extra coordinate -- TODO confirm).
    if vertices.shape[1] <= 3:
        verts = vertices
    elif vertices.shape[1] == 4:
        verts = vertices[:, :-1]
    else:
        verts = None
    if (verts is not None and tris.shape[1] == 3 and
            vertex_data.shape[0] == verts.shape[0]):
        # All directed edges of every triangle: (v0,v1), (v1,v2), (v2,v0).
        edges = np.vstack((tris.reshape((-1)),
                           np.roll(tris, -1, axis=1).reshape((-1)))).T
        edge_datas = vertex_data[edges]
        edge_coors = verts[edges].reshape(tris.shape[0]*3, 2, 3)
        for lev in levels:
            # Select edges whose endpoints straddle the level: exactly one
            # endpoint is >= lev (False-True or True-False).
            index = (edge_datas >= lev)
            index = index[:, 0] ^ index[:, 1]  # xor calculation
            # Keep only the crossed edges.
            edge_datas_Ok = edge_datas[index, :]
            xyz = edge_coors[index]
            # Linear interpolation of the crossing point along each edge.
            ratio = np.array([(lev - edge_datas_Ok[:, 0]) /
                              (edge_datas_Ok[:, 1] - edge_datas_Ok[:, 0])])
            point = xyz[:, 0, :] + ratio.T * (xyz[:, 1, :] - xyz[:, 0, :])
            nbr = point.shape[0]//2
            # Consecutive point pairs form the line segments; offset the
            # connectivity when appending after previous levels.
            if connects is not None:
                connect = np.arange(0, nbr*2).reshape((nbr, 2)) + \
                    len(lines)
                connects = np.append(connects, connect, axis=0)
                lines = np.append(lines, point, axis=0)
                vertex_level = np.append(vertex_level,
                                         np.zeros(len(point)) +
                                         lev)
                level_index = np.append(level_index, np.array(len(point)))
            else:
                lines = point
                connects = np.arange(0, nbr*2).reshape((nbr, 2))
                vertex_level = np.zeros(len(point)) + lev
                level_index = np.array(len(point))
        # Column-vector shape (Nvout, 1) for downstream consumers.
        vertex_level = vertex_level.reshape((vertex_level.size, 1))
    return lines, connects, vertex_level, level_index
|
Generate an isocurve from vertex data in a surface mesh.
Parameters
----------
vertices : ndarray, shape (Nv, 3)
Vertex coordinates.
tris : ndarray, shape (Nf, 3)
Indices of triangular element into the vertices array.
vertex_data : ndarray, shape (Nv,)
data at vertex.
levels : ndarray, shape (Nl,)
Levels at which to generate an isocurve
Returns
-------
lines : ndarray, shape (Nvout, 3)
Vertex coordinates for lines points
connects : ndarray, shape (Ne, 2)
Indices of line element into the vertex array.
vertex_level: ndarray, shape (Nvout,)
level for vertex in lines
Notes
-----
Uses a marching squares algorithm to generate the isolines.
|
entailment
|
def set_data(self, vertices=None, tris=None, data=None):
    """Set the data
    Parameters
    ----------
    vertices : ndarray, shape (Nv, 3) | None
        Vertex coordinates.
    tris : ndarray, shape (Nf, 3) | None
        Indices into the vertex array.
    data : ndarray, shape (Nv,) | None
        scalar at vertices
    """
    # Overwrite only what the caller supplied; any change invalidates
    # the cached isoline computation.
    for attr, value in (('_data', data),
                        ('_vertices', vertices),
                        ('_tris', tris)):
        if value is not None:
            setattr(self, attr, value)
            self._need_recompute = True
    self.update()
|
Set the data
Parameters
----------
vertices : ndarray, shape (Nv, 3) | None
Vertex coordinates.
tris : ndarray, shape (Nf, 3) | None
Indices into the vertex array.
data : ndarray, shape (Nv,) | None
scalar at vertices
|
entailment
|
def set_color(self, color):
    """Set the color
    Parameters
    ----------
    color : instance of Color
        The color to use.
    """
    # A None color is a no-op: nothing is stored and no redraw happens.
    if color is None:
        return
    self._color_lev = color
    self._need_color_update = True
    self.update()
|
Set the color
Parameters
----------
color : instance of Color
The color to use.
|
entailment
|
def _compute_iso_color(self):
    """Compute the per-vertex LineVisual color array (self._cl) from the
    per-level vertex counts (self._li) and level colors (self._lc).
    """
    # One constant-color block per level, stacked in level order.
    blocks = [np.zeros((count, 4)) + color
              for count, color in zip(self._li, self._lc)]
    self._cl = np.vstack(blocks)
|
compute LineVisual color from level index and corresponding level
color
|
entailment
|
def remove(self):
    """
    Remove the layer artist for good: release the multi-volume slot and
    drop any cached entries for this layer id.
    """
    self._multivol.deallocate(self.id)
    # Either cache may or may not hold an entry for this id.
    for cache in (ARRAY_CACHE, PIXEL_CACHE):
        cache.pop(self.id, None)
|
Remove the layer artist for good
|
entailment
|
def _inject():
    """ Inject functions and constants from PyOpenGL but leave out the
    names that are deprecated or that we provide in our API.
    """
    # Target namespace (this module) and the PyOpenGL source namespace.
    namespace = globals()
    gl_namespace = _GL.__dict__
    # Names our own API already provides; these must not be shadowed.
    api_names = [pair[0] for pair in _pyopengl2._functions_to_import]
    api_names += [name for name in _pyopengl2._used_functions]
    namespace['_used_names'] = api_names
    our_constants = set(_constants.__dict__)
    injected_constants = 0
    injected_functions = 0
    for name in dir(_GL):
        if name.startswith('GL_'):
            # todo: find list of deprecated constants
            if name not in our_constants:
                namespace[name] = gl_namespace[name]
                injected_constants += 1
        elif name.startswith('gl'):
            if (name + ',') in _deprecated_functions:
                pass  # Function is deprecated
            elif name in api_names:
                pass  # Function is in our GL ES 2.0 API
            else:
                namespace[name] = gl_namespace[name]
                injected_functions += 1
|
Inject functions and constants from PyOpenGL but leave out the
names that are deprecated or that we provide in our API.
|
entailment
|
def _find_module(name, path=None):
    """
    Alternative to `imp.find_module` that can also search in subpackages.
    Walks the dotted `name` one component at a time, descending into
    each package's directory.
    :param name: dotted module name, e.g. 'pkg.sub.mod'
    :param path: optional directory to start the search from
    :return: (file_handle, path, description) for the final component,
             as returned by `imp.find_module`
    """
    parts = name.split('.')
    last = len(parts) - 1
    for idx, part in enumerate(parts):
        if path is not None:
            path = [path]
        fh, path, descr = imp.find_module(part, path)
        # Close intermediate packages' file handles; only the final
        # component's handle belongs to the caller.
        # BUGFIX: compare by position, not by name -- the original's
        # `part != parts[-1]` leaked the handle of any intermediate
        # component whose name equals the last one (e.g. 'a.b.a').
        if fh is not None and idx != last:
            fh.close()
    return fh, path, descr
|
Alternative to `imp.find_module` that can also search in subpackages.
|
entailment
|
def triangulate(vertices):
    """Triangulate a set of vertices
    Parameters
    ----------
    vertices : array-like
        The vertices.
    Returns
    -------
    vertices : array-like
        The vertices.
    tringles : array-like
        The triangles.
    """
    n = len(vertices)
    vertices = np.asarray(vertices)
    # Triangulation is done in 2D; remember the mean z to restore after.
    zmean = vertices[:, 2].mean()
    xy = vertices[:, :2]
    # Closed polygon boundary: edges (0,1), (1,2), ..., (n-1,0).
    segments = np.repeat(np.arange(n + 1), 2)[1:-1]
    segments[-2:] = n - 1, 0
    # Prefer the compiled 'triangle' backend when it is importable.
    backend = _triangulate_cpp if _TRIANGLE_AVAILABLE else _triangulate_python
    xy, triangles = backend(xy, segments)
    out = np.empty((len(xy), 3))
    out[:, :2] = xy
    out[:, 2] = zmean
    return out, triangles
|
Triangulate a set of vertices
Parameters
----------
vertices : array-like
The vertices.
Returns
-------
vertices : array-like
The vertices.
tringles : array-like
The triangles.
|
entailment
|
def triangulate(self):
    """Do the triangulation

    Sweep-line triangulation: points (assumed pre-sorted by
    self._initialize -- TODO confirm) are added one at a time, each new
    point is connected to the advancing front, the front is smoothed,
    and constraint edges are enforced via edge events. Results are
    stored in self.tris.
    """
    self._initialize()
    pts = self.pts
    front = self._front
    ## Begin sweep (sec. 3.4)
    for i in range(3, pts.shape[0]):
        pi = pts[i]
        #debug("========== New point %d: %s ==========" % (i, pi))
        # First, triangulate from front to new point
        # This applies to both "point events" (3.4.1)
        # and "edge events" (3.4.2).
        # get index along front that intersects pts[i]
        l = 0
        while pts[front[l+1], 0] <= pi[0]:
            l += 1
        pl = pts[front[l]]
        # "(i) middle case"
        if pi[0] > pl[0]:
            #debug(" mid case")
            # Add a single triangle connecting pi,pl,pr
            self._add_tri(front[l], front[l+1], i)
            front.insert(l+1, i)
        # "(ii) left case"
        else:
            #debug(" left case")
            # Add triangles connecting pi,pl,ps and pi,pl,pr
            self._add_tri(front[l], front[l+1], i)
            self._add_tri(front[l-1], front[l], i)
            front[l] = i
        #debug(front)
        # Continue adding triangles to smooth out front
        # (heuristics shown in figs. 9, 10)
        #debug("Smoothing front...")
        for direction in -1, 1:
            while True:
                # Find point connected to pi
                ind0 = front.index(i)
                ind1 = ind0 + direction
                ind2 = ind1 + direction
                if ind2 < 0 or ind2 >= len(front):
                    break
                # measure angle made with front
                p1 = pts[front[ind1]]
                p2 = pts[front[ind2]]
                # Silence invalid-value warnings from arccos on values
                # marginally outside [-1, 1]; NaN is handled below.
                err = np.geterr()
                np.seterr(invalid='ignore')
                try:
                    angle = np.arccos(self._cosine(pi, p1, p2))
                finally:
                    np.seterr(**err)
                # if angle is < pi/2, make new triangle
                #debug("Smooth angle:", pi, p1, p2, angle)
                if angle > np.pi/2. or np.isnan(angle):
                    break
                assert (i != front[ind1] and
                        front[ind1] != front[ind2] and
                        front[ind2] != i)
                self._add_tri(i, front[ind1], front[ind2],
                              source='smooth1')
                front.pop(ind1)
        #debug("Finished smoothing front.")
        # "edge event" (sec. 3.4.2)
        # remove any triangles cut by completed edges and re-fill
        # the holes.
        if i in self._tops:
            for j in self._bottoms[self._tops == i]:
                # Make sure edge (j, i) is present in mesh
                # because edge event may have created a new front list
                self._edge_event(i, j)
                front = self._front
    self._finalize()
    # Export the accumulated triangle set as an (Nt, 3) int array.
    self.tris = np.array(list(self.tris.keys()), dtype=int)
|
Do the triangulation
|
entailment
|
def _edge_event(self, i, j):
"""
Force edge (i, j) to be present in mesh.
This works by removing intersected triangles and filling holes up to
the cutting edge.
"""
front_index = self._front.index(i)
#debug(" == edge event ==")
front = self._front
# First just see whether this edge is already present
# (this is not in the published algorithm)
if (i, j) in self._edges_lookup or (j, i) in self._edges_lookup:
#debug(" already added.")
return
#debug(" Edge (%d,%d) not added yet. Do edge event. (%s - %s)" %
# (i, j, pts[i], pts[j]))
# traverse in two different modes:
# 1. If cutting edge is below front, traverse through triangles. These
# must be removed and the resulting hole re-filled. (fig. 12)
# 2. If cutting edge is above the front, then follow the front until
# crossing under again. (fig. 13)
# We must be able to switch back and forth between these
# modes (fig. 14)
# Collect points that draw the open polygons on either side of the
# cutting edge. Note that our use of 'upper' and 'lower' is not strict;
# in some cases the two may be swapped.
upper_polygon = [i]
lower_polygon = [i]
# Keep track of which section of the front must be replaced
# and with what it should be replaced
front_holes = [] # contains indexes for sections of front to remove
next_tri = None # next triangle to cut (already set if in mode 1)
last_edge = None # or last triangle edge crossed (if in mode 1)
# Which direction to traverse front
front_dir = 1 if self.pts[j][0] > self.pts[i][0] else -1
# Initialize search state
if self._edge_below_front((i, j), front_index):
mode = 1 # follow triangles
tri = self._find_cut_triangle((i, j))
last_edge = self._edge_opposite_point(tri, i)
next_tri = self._adjacent_tri(last_edge, i)
assert next_tri is not None
self._remove_tri(*tri)
# todo: does this work? can we count on last_edge to be clockwise
# around point i?
lower_polygon.append(last_edge[1])
upper_polygon.append(last_edge[0])
else:
mode = 2 # follow front
# Loop until we reach point j
while True:
#debug(" == edge_event loop: mode %d ==" % mode)
#debug(" front_holes:", front_holes, front)
#debug(" front_index:", front_index)
#debug(" next_tri:", next_tri)
#debug(" last_edge:", last_edge)
#debug(" upper_polygon:", upper_polygon)
#debug(" lower_polygon:", lower_polygon)
#debug(" =====")
if mode == 1:
# crossing from one triangle into another
if j in next_tri:
#debug(" -> hit endpoint!")
# reached endpoint!
# update front / polygons
upper_polygon.append(j)
lower_polygon.append(j)
#debug(" Appended to upper_polygon:", upper_polygon)
#debug(" Appended to lower_polygon:", lower_polygon)
self._remove_tri(*next_tri)
break
else:
# next triangle does not contain the end point; we will
# cut one of the two far edges.
tri_edges = self._edges_in_tri_except(next_tri, last_edge)
# select the edge that is cut
last_edge = self._intersected_edge(tri_edges, (i, j))
#debug(" set last_edge to intersected edge:", last_edge)
last_tri = next_tri
next_tri = self._adjacent_tri(last_edge, last_tri)
#debug(" set next_tri:", next_tri)
self._remove_tri(*last_tri)
# Crossing an edge adds one point to one of the polygons
if lower_polygon[-1] == last_edge[0]:
upper_polygon.append(last_edge[1])
#debug(" Appended to upper_polygon:", upper_polygon)
elif lower_polygon[-1] == last_edge[1]:
upper_polygon.append(last_edge[0])
#debug(" Appended to upper_polygon:", upper_polygon)
elif upper_polygon[-1] == last_edge[0]:
lower_polygon.append(last_edge[1])
#debug(" Appended to lower_polygon:", lower_polygon)
elif upper_polygon[-1] == last_edge[1]:
lower_polygon.append(last_edge[0])
#debug(" Appended to lower_polygon:", lower_polygon)
else:
raise RuntimeError("Something went wrong..")
# If we crossed the front, go to mode 2
x = self._edge_in_front(last_edge)
if x >= 0: # crossing over front
#debug(" -> crossed over front, prepare for mode 2")
mode = 2
next_tri = None
#debug(" set next_tri: None")
# where did we cross the front?
# nearest to new point
front_index = x + (1 if front_dir == -1 else 0)
#debug(" set front_index:", front_index)
# Select the correct polygon to be lower_polygon
# (because mode 2 requires this).
# We know that last_edge is in the front, and
# front[front_index] is the point _above_ the front.
# So if this point is currently the last element in
# lower_polygon, then the polys must be swapped.
if lower_polygon[-1] == front[front_index]:
tmp = lower_polygon, upper_polygon
upper_polygon, lower_polygon = tmp
#debug(' Swap upper/lower polygons')
else:
assert upper_polygon[-1] == front[front_index]
else:
assert next_tri is not None
else: # mode == 2
# At each iteration, we require:
# * front_index is the starting index of the edge _preceding_
# the edge that will be handled in this iteration
# * lower_polygon is the polygon to which points should be
# added while traversing the front
front_index += front_dir
#debug(" Increment front_index: %d" % front_index)
next_edge = (front[front_index], front[front_index+front_dir])
#debug(" Set next_edge: %s" % repr(next_edge))
assert front_index >= 0
if front[front_index] == j:
# found endpoint!
#debug(" -> hit endpoint!")
lower_polygon.append(j)
upper_polygon.append(j)
#debug(" Appended to upper_polygon:", upper_polygon)
#debug(" Appended to lower_polygon:", lower_polygon)
break
# Add point to lower_polygon.
# The conditional is because there are cases where the
# point was already added if we just crossed from mode 1.
if lower_polygon[-1] != front[front_index]:
lower_polygon.append(front[front_index])
#debug(" Appended to lower_polygon:", lower_polygon)
front_holes.append(front_index)
#debug(" Append to front_holes:", front_holes)
if self._edges_intersect((i, j), next_edge):
# crossing over front into triangle
#debug(" -> crossed over front, prepare for mode 1")
mode = 1
last_edge = next_edge
#debug(" Set last_edge:", last_edge)
# we are crossing the front, so this edge only has one
# triangle.
next_tri = self._tri_from_edge(last_edge)
#debug(" Set next_tri:", next_tri)
upper_polygon.append(front[front_index+front_dir])
#debug(" Appended to upper_polygon:", upper_polygon)
#else:
#debug(" -> did not cross front..")
#debug("Finished edge_event:")
#debug(" front_holes:", front_holes)
#debug(" upper_polygon:", upper_polygon)
#debug(" lower_polygon:", lower_polygon)
# (iii) triangluate empty areas
#debug("Filling edge_event polygons...")
for polygon in [lower_polygon, upper_polygon]:
dist = self._distances_from_line((i, j), polygon)
#debug("Distances:", dist)
while len(polygon) > 2:
ind = np.argmax(dist)
#debug("Next index: %d" % ind)
self._add_tri(polygon[ind], polygon[ind-1],
polygon[ind+1], legal=False,
source='edge_event')
polygon.pop(ind)
dist.pop(ind)
#debug("Finished filling edge_event polygons.")
# update front by removing points in the holes (places where front
# passes below the cut edge)
front_holes.sort(reverse=True)
for i in front_holes:
front.pop(i)
|
Force edge (i, j) to be present in mesh.
This works by removing intersected triangles and filling holes up to
the cutting edge.
|
entailment
|
def _find_cut_triangle(self, edge):
    """Return the triangle that has edge[0] as one of its vertices and
    is bisected by *edge*; return None if no such triangle exists.
    """
    start = edge[0]
    # For every triangle touching *start*, collect the edge opposite it.
    opposite = [self._edge_opposite_point(tri, start)
                for tri in self.tris if start in tri]
    for a, b in opposite:
        # The cut line bisects the triangle when the two far vertices
        # lie on opposite sides of it.
        if self._orientation(edge, a) != self._orientation(edge, b):
            return (start, a, b)
    return None
|
Return the triangle that has edge[0] as one of its vertices and is
bisected by edge.
Return None if no triangle is found.
|
entailment
|
def _edge_in_front(self, edge):
""" Return the index where *edge* appears in the current front.
If the edge is not in the front, return -1
"""
e = (list(edge), list(edge)[::-1])
for i in range(len(self._front)-1):
if self._front[i:i+2] in e:
return i
return -1
|
Return the index where *edge* appears in the current front.
If the edge is not in the front, return -1
|
entailment
|
def _edge_opposite_point(self, tri, i):
""" Given a triangle, return the edge that is opposite point i.
Vertexes are returned in the same orientation as in tri.
"""
ind = tri.index(i)
return (tri[(ind+1) % 3], tri[(ind+2) % 3])
|
Given a triangle, return the edge that is opposite point i.
Vertexes are returned in the same orientation as in tri.
|
entailment
|
def _adjacent_tri(self, edge, i):
"""
Given a triangle formed by edge and i, return the triangle that shares
edge. *i* may be either a point or the entire triangle.
"""
if not np.isscalar(i):
i = [x for x in i if x not in edge][0]
try:
pt1 = self._edges_lookup[edge]
pt2 = self._edges_lookup[(edge[1], edge[0])]
except KeyError:
return None
if pt1 == i:
return (edge[1], edge[0], pt2)
elif pt2 == i:
return (edge[1], edge[0], pt1)
else:
raise RuntimeError("Edge %s and point %d do not form a triangle "
"in this mesh." % (edge, i))
|
Given a triangle formed by edge and i, return the triangle that shares
edge. *i* may be either a point or the entire triangle.
|
entailment
|
def _tri_from_edge(self, edge):
"""Return the only tri that contains *edge*. If two tris share this
edge, raise an exception.
"""
edge = tuple(edge)
p1 = self._edges_lookup.get(edge, None)
p2 = self._edges_lookup.get(edge[::-1], None)
if p1 is None:
if p2 is None:
raise RuntimeError("No tris connected to edge %r" % (edge,))
return edge + (p2,)
elif p2 is None:
return edge + (p1,)
else:
raise RuntimeError("Two triangles connected to edge %r" % (edge,))
|
Return the only tri that contains *edge*. If two tris share this
edge, raise an exception.
|
entailment
|
def _edges_in_tri_except(self, tri, edge):
"""Return the edges in *tri*, excluding *edge*.
"""
edges = [(tri[i], tri[(i+1) % 3]) for i in range(3)]
try:
edges.remove(tuple(edge))
except ValueError:
edges.remove(tuple(edge[::-1]))
return edges
|
Return the edges in *tri*, excluding *edge*.
|
entailment
|
def _edge_below_front(self, edge, front_index):
"""Return True if *edge* is below the current front.
One of the points in *edge* must be _on_ the front, at *front_index*.
"""
f0 = self._front[front_index-1]
f1 = self._front[front_index+1]
return (self._orientation(edge, f0) > 0 and
self._orientation(edge, f1) < 0)
|
Return True if *edge* is below the current front.
One of the points in *edge* must be _on_ the front, at *front_index*.
|
entailment
|
def _intersected_edge(self, edges, cut_edge):
""" Given a list of *edges*, return the first that is intersected by
*cut_edge*.
"""
for edge in edges:
if self._edges_intersect(edge, cut_edge):
return edge
|
Given a list of *edges*, return the first that is intersected by
*cut_edge*.
|
entailment
|
def _find_edge_intersections(self):
    """
    Return a dictionary containing, for each edge in self.edges, a list
    of the positions at which the edge should be split.

    Keys are indexes into self.edges; values are lists of
    (intercept, point) pairs, sorted by intercept, where intercept is
    the fractional position (0..1) along the edge and point is the xy
    location of the crossing.
    """
    edges = self.pts[self.edges]
    cuts = {}  # { edge: [(intercept, point), ...], ... }
    for i in range(edges.shape[0]-1):
        # intersection of edge i onto all others
        int1 = self._intersect_edge_arrays(edges[i:i+1], edges[i+1:])
        # intersection of all edges onto edge i
        int2 = self._intersect_edge_arrays(edges[i+1:], edges[i:i+1])
        # select for pairs that intersect
        # (silence divide-by-zero / NaN warnings from parallel edges)
        err = np.geterr()
        np.seterr(divide='ignore', invalid='ignore')
        try:
            mask1 = (int1 >= 0) & (int1 <= 1)
            mask2 = (int2 >= 0) & (int2 <= 1)
            mask3 = mask1 & mask2  # all intersections
        finally:
            np.seterr(**err)
        # compute points of intersection
        inds = np.argwhere(mask3)[:, 0]
        if len(inds) == 0:
            continue
        # interpolate along edge i using the intercepts in int2
        h = int2[inds][:, np.newaxis]
        pts = (edges[i, 0][np.newaxis, :] * (1.0 - h) +
               edges[i, 1][np.newaxis, :] * h)
        # record for all edges the location of cut points
        edge_cuts = cuts.setdefault(i, [])
        for j, ind in enumerate(inds):
            # intercepts exactly at 0 or 1 are shared endpoints, not
            # true crossings, so they are excluded
            if 0 < int2[ind] < 1:
                edge_cuts.append((int2[ind], pts[j]))
            if 0 < int1[ind] < 1:
                # *ind* is relative to edges[i+1:]; shift back to the
                # absolute edge index
                other_cuts = cuts.setdefault(ind+i+1, [])
                other_cuts.append((int1[ind], pts[j]))
    # sort all cut lists by intercept, remove duplicates
    for k, v in cuts.items():
        v.sort(key=lambda x: x[0])
        for i in range(len(v)-2, -1, -1):
            if v[i][0] == v[i+1][0]:
                v.pop(i+1)
    return cuts
|
Return a dictionary containing, for each edge in self.edges, a list
of the positions at which the edge should be split.
|
entailment
|
def _projection(self, a, b, c):
"""Return projection of (a,b) onto (a,c)
Arguments are point locations, not indexes.
"""
ab = b - a
ac = c - a
return a + ((ab*ac).sum() / (ac*ac).sum()) * ac
|
Return projection of (a,b) onto (a,c)
Arguments are point locations, not indexes.
|
entailment
|
def _edges_intersect(self, edge1, edge2):
    """Return True when the two edges cross strictly between both
    pairs of endpoints (endpoints excluded).
    """
    seg1 = self.pts[np.array(edge1)]
    seg2 = self.pts[np.array(edge2)]
    h12 = self._intersect_edge_arrays(seg1, seg2)
    h21 = self._intersect_edge_arrays(seg2, seg1)
    old = np.geterr()
    np.seterr(divide='ignore', invalid='ignore')
    try:
        # Both intercepts must fall strictly inside their segments.
        crossed = (0 < h12 < 1) and (0 < h21 < 1)
    finally:
        np.seterr(**old)
    return crossed
|
Return 1 if edges intersect completely (endpoints excluded)
|
entailment
|
def _intersection_matrix(self, lines):
    """
    Return a 2D array of intercepts such that intercepts[i, j] is the
    intercept of lines[i] onto lines[j].

    *lines* is an array of point locations with shape (N, 2, 2), axes
    being (lines, points_per_line, xy_per_point). The intercept value is
    described in _intersect_edge_arrays().
    """
    # Broadcast the N lines against themselves to form an (N, N) grid.
    rows = lines[:, np.newaxis, ...]
    cols = lines[np.newaxis, ...]
    return self._intersect_edge_arrays(rows, cols)
|
Return a 2D array of intercepts such that
intercepts[i, j] is the intercept of lines[i] onto lines[j].
*lines* must be an array of point locations with shape (N, 2, 2), where
the axes are (lines, points_per_line, xy_per_point).
The intercept is described in intersect_edge_arrays().
|
entailment
|
def _intersect_edge_arrays(self, lines1, lines2):
"""Return the intercepts of all lines defined in *lines1* as they
intersect all lines in *lines2*.
Arguments are of shape (..., 2, 2), where axes are:
0: number of lines
1: two points per line
2: x,y pair per point
Lines are compared elementwise across the arrays (lines1[i] is compared
against lines2[i]). If one of the arrays has N=1, then that line is
compared against all lines in the other array.
Returns an array of shape (N,) where each value indicates the intercept
relative to the defined line segment. A value of 0 indicates
intersection at the first endpoint, and a value of 1 indicates
intersection at the second endpoint. Values between 1 and 0 are on the
segment, whereas values outside 1 and 0 are off of the segment.
"""
# vector for each line in lines1
l1 = lines1[..., 1, :] - lines1[..., 0, :]
# vector for each line in lines2
l2 = lines2[..., 1, :] - lines2[..., 0, :]
# vector between first point of each line
diff = lines1[..., 0, :] - lines2[..., 0, :]
p = l1.copy()[..., ::-1] # vectors perpendicular to l1
p[..., 0] *= -1
f = (l2 * p).sum(axis=-1) # l2 dot p
# tempting, but bad idea!
#f = np.where(f==0, 1, f)
err = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
try:
h = (diff * p).sum(axis=-1) / f # diff dot p / f
finally:
np.seterr(**err)
return h
|
Return the intercepts of all lines defined in *lines1* as they
intersect all lines in *lines2*.
Arguments are of shape (..., 2, 2), where axes are:
0: number of lines
1: two points per line
2: x,y pair per point
Lines are compared elementwise across the arrays (lines1[i] is compared
against lines2[i]). If one of the arrays has N=1, then that line is
compared against all lines in the other array.
Returns an array of shape (N,) where each value indicates the intercept
relative to the defined line segment. A value of 0 indicates
intersection at the first endpoint, and a value of 1 indicates
intersection at the second endpoint. Values between 1 and 0 are on the
segment, whereas values outside 1 and 0 are off of the segment.
|
entailment
|
def _orientation(self, edge, point):
""" Returns +1 if edge[0]->point is clockwise from edge[0]->edge[1],
-1 if counterclockwise, and 0 if parallel.
"""
v1 = self.pts[point] - self.pts[edge[0]]
v2 = self.pts[edge[1]] - self.pts[edge[0]]
c = np.cross(v1, v2) # positive if v1 is CW from v2
return 1 if c > 0 else (-1 if c < 0 else 0)
|
Returns +1 if edge[0]->point is clockwise from edge[0]->edge[1],
-1 if counterclockwise, and 0 if parallel.
|
entailment
|
def load_ipython_extension(ipython):
    """Entry point of the IPython extension.

    Parameters
    ----------
    ipython : IPython interpreter
        The IPython interpreter instance that is handed over to the
        extension.
    """
    import IPython
    # The webgl backend requires IPython >= 3.0; bail out otherwise.
    if LooseVersion(IPython.__version__) < LooseVersion("3.0.0"):
        ipython.write_err("Your IPython version is older than "
                          "version 3.0.0, the minimum for Vispy's"
                          "IPython backend. Please upgrade your IPython"
                          "version.")
        return
    _load_webgl_backend(ipython)
|
Entry point of the IPython extension
Parameters
----------
IPython : IPython interpreter
An instance of the IPython interpreter that is handed
over to the extension
|
entailment
|
def _load_webgl_backend(ipython):
    """ Load the webgl backend for the IPython notebook"""
    from .. import app
    backend = app.use_app("ipynb_webgl")
    if backend.backend_name != "ipynb_webgl":
        # TODO: Improve this error message
        ipython.write_err("Unable to load webgl backend of Vispy")
    else:
        ipython.write("Vispy IPython module has loaded successfully")
|
Load the webgl backend for the IPython notebook
|
entailment
|
def draw(self, mode=None):
    """Draw the collection.

    Parameters
    ----------
    mode : str | None
        GL draw mode; falls back to the collection's default mode when
        not given (or falsy).
    """
    if self._need_update:
        self._update()
    mode = mode or self._mode
    program = self._programs[0]
    if self._indices_list is None:
        program.draw(mode)
    else:
        program.draw(mode, self._indices_buffer)
|
Draw collection
|
entailment
|
def gaussian_filter(data, sigma):
    """
    Drop-in replacement for scipy.ndimage.gaussian_filter.

    (note: results are only approximately equal to the output of
    gaussian_filter)

    Parameters
    ----------
    data : ndarray
        The array to smooth.
    sigma : float | sequence of float
        Standard deviation of the gaussian kernel; a single value is
        applied to every axis, otherwise one value per axis.

    Returns
    -------
    filtered : ndarray
        Smoothed array with the same shape as *data*.
    """
    if np.isscalar(sigma):
        sigma = (sigma,) * data.ndim
    # Filter the deviation from the mean so the implicit zero-padding of
    # the FFT convolution does not darken the borders.
    baseline = data.mean()
    filtered = data - baseline
    for ax in range(data.ndim):
        s = float(sigma[ax])
        if s == 0:
            continue
        # generate 1D gaussian kernel
        ksize = int(s * 6)
        x = np.arange(-ksize, ksize)
        kernel = np.exp(-x**2 / (2*s**2))
        kshape = [1, ] * data.ndim
        kshape[ax] = len(kernel)
        kernel = kernel.reshape(kshape)
        # convolve as product of FFTs
        shape = data.shape[ax] + ksize
        scale = 1.0 / (abs(s) * (2*np.pi)**0.5)
        filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) *
                                        np.fft.rfft(kernel, shape, axis=ax),
                                        axis=ax)
        # clip off extra data. BUGFIX: multidimensional indexing requires
        # a tuple; indexing with a list of slices is an error in modern
        # numpy.
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(filtered.shape[ax]-data.shape[ax], None, None)
        filtered = filtered[tuple(sl)]
    return filtered + baseline
|
Drop-in replacement for scipy.ndimage.gaussian_filter.
(note: results are only approximately equal to the output of
gaussian_filter)
|
entailment
|
def translate(offset, dtype=None):
    """Translate by an offset (x, y, z).

    Parameters
    ----------
    offset : array-like, shape (3,)
        Translation in x, y, z.
    dtype : dtype | None
        Output type (if None, don't cast).

    Returns
    -------
    M : ndarray
        Transformation matrix describing the translation (translation
        stored in the last row).
    """
    assert len(offset) == 3
    x, y, z = offset
    rows = [[1., 0., 0., 0.],
            [0., 1., 0., 0.],
            [0., 0., 1., 0.],
            [x, y, z, 1.0]]
    return np.array(rows, dtype)
|
Translate by an offset (x, y, z) .
Parameters
----------
offset : array-like, shape (3,)
Translation in x, y, z.
dtype : dtype | None
Output type (if None, don't cast).
Returns
-------
M : ndarray
Transformation matrix describing the translation.
|
entailment
|
def scale(s, dtype=None):
    """Non-uniform scaling along the x, y, and z axes.

    Parameters
    ----------
    s : array-like, shape (3,)
        Scaling in x, y, z.
    dtype : dtype | None
        Output type (if None, don't cast).

    Returns
    -------
    M : ndarray
        Transformation matrix describing the scaling.
    """
    assert len(s) == 3
    sx, sy, sz = s
    # Diagonal matrix with homogeneous 1 in the last slot.
    return np.array(np.diag([sx, sy, sz, 1.0]), dtype)
|
Non-uniform scaling along the x, y, and z axes
Parameters
----------
s : array-like, shape (3,)
Scaling in x, y, z.
dtype : dtype | None
Output type (if None, don't cast).
Returns
-------
M : ndarray
Transformation matrix describing the scaling.
|
entailment
|
def rotate(angle, axis, dtype=None):
    """Return the 4x4 homogeneous rotation matrix for rotation about a
    vector.

    Parameters
    ----------
    angle : float
        The angle of rotation, in degrees.
    axis : ndarray
        The x, y, z coordinates of the axis direction vector.
    dtype : dtype | None
        Output type (if None, don't cast).
    """
    angle = np.radians(angle)
    assert len(axis) == 3
    x, y, z = axis / np.linalg.norm(axis)
    c = math.cos(angle)
    s = math.sin(angle)
    t = 1 - c  # versine term of the Rodrigues formula
    M = np.array([[t*x*x + c,   t*x*y - z*s, t*x*z + y*s, 0.],
                  [t*y*x + z*s, t*y*y + c,   t*y*z - x*s, 0.],
                  [t*z*x - y*s, t*z*y + x*s, t*z*z + c,   0.],
                  [0., 0., 0., 1.]], dtype).T
    return M
|
The 3x3 rotation matrix for rotation about a vector.
Parameters
----------
angle : float
The angle of rotation, in degrees.
axis : ndarray
The x, y, z coordinates of the axis direction vector.
|
entailment
|
def perspective(fovy, aspect, znear, zfar):
    """Create perspective projection matrix.

    Parameters
    ----------
    fovy : float
        The field of view along the y axis.
    aspect : float
        Aspect ratio of the view.
    znear : float
        Near coordinate of the field of view.
    zfar : float
        Far coordinate of the field of view.

    Returns
    -------
    M : ndarray
        Perspective projection matrix (4x4).
    """
    assert znear != zfar
    # Half-extent of the near plane from the vertical field of view.
    half_height = znear * math.tan(math.radians(fovy) / 2.0)
    half_width = aspect * half_height
    return frustum(-half_width, half_width, -half_height, half_height,
                   znear, zfar)
|
Create perspective projection matrix
Parameters
----------
fovy : float
The field of view along the y axis.
aspect : float
Aspect ratio of the view.
znear : float
Near coordinate of the field of view.
zfar : float
Far coordinate of the field of view.
Returns
-------
M : ndarray
Perspective projection matrix (4x4).
|
entailment
|
def affine_map(points1, points2):
    """Find a 3D transformation matrix that maps *points1* onto
    *points2*. Arguments are arrays of four 3D coordinates, shape (4, 3).
    """
    # Homogeneous coordinates for both point sets.
    src = np.hstack([points1, np.ones((4, 1))])
    dst = np.hstack([points2, np.ones((4, 1))])
    matrix = np.eye(4)
    # Each of the first three matrix rows solves src @ row = dst[:, axis]
    for axis in range(3):
        matrix[axis] = np.linalg.solve(src, dst[:, axis])
    return matrix
|
Find a 3D transformation matrix that maps points1 onto points2.
Arguments are specified as arrays of four 3D coordinates, shape (4, 3).
|
entailment
|
def finish(self, msg=None):
    """Add a final message; flush the message list if no parent profiler.

    Parameters
    ----------
    msg : str | None
        Optional final message recorded before the exit line.
    """
    # Ignore repeated calls and disabled profilers.
    if self._finished or self.disable:
        return
    self._finished = True
    if msg is not None:
        self(msg)
    self._new_msg("< Exiting %s, total time: %0.4f ms",
                  self._name, (ptime.time() - self._firstTime) * 1000)
    # _depth is a class attribute shared by nested profilers; only the
    # outermost profiler (depth < 1) flushes the accumulated messages.
    type(self)._depth -= 1
    if self._depth < 1:
        self.flush()
|
Add a final message; flush the message list if no parent profiler.
|
entailment
|
def _init():
    """Create the global Config object and parse command-line flags.

    Populates the module-level ``config``, ``_data_path`` and
    ``_allowed_config_keys`` globals, loads the user's config file and
    applies vispy-specific command-line arguments.
    """
    global config, _data_path, _allowed_config_keys
    app_dir = _get_vispy_app_dir()
    if app_dir is not None:
        _data_path = op.join(app_dir, 'data')
        _test_data_path = op.join(app_dir, 'test_data')
    else:
        _data_path = _test_data_path = None
    # All allowed config keys and the types they may have
    _allowed_config_keys = {
        'data_path': string_types,
        'default_backend': string_types,
        'gl_backend': string_types,
        'gl_debug': (bool,),
        'glir_file': string_types+file_types,
        'include_path': list,
        'logging_level': string_types,
        'qt_lib': string_types,
        'dpi': (int, type(None)),
        'profile': string_types + (type(None),),
        'audit_tests': (bool,),
        'test_data_path': string_types + (type(None),),
    }
    # Default values for all config options
    default_config_options = {
        'data_path': _data_path,
        'default_backend': '',
        'gl_backend': 'gl2',
        'gl_debug': False,
        'glir_file': '',
        'include_path': [],
        'logging_level': 'info',
        'qt_lib': 'any',
        'dpi': None,
        'profile': None,
        'audit_tests': False,
        'test_data_path': _test_data_path,
    }
    config = Config(**default_config_options)
    try:
        config.update(**_load_config())
    except Exception as err:
        # BUGFIX: Exception.message does not exist on Python 3 (it would
        # raise AttributeError here, masking the real error); use
        # str(err) instead.
        raise Exception('Error while reading vispy config file "%s":\n %s' %
                        (_get_config_fname(), str(err)))
    set_log_level(config['logging_level'])
    _parse_command_line_arguments()
|
Create global Config object, parse command flags
|
entailment
|
def _parse_command_line_arguments():
    """ Transform vispy specific command line args to vispy config.
    Put into a function so that any variables dont leak in the vispy namespace.
    """
    global config
    # Get command line args for vispy
    argnames = ['vispy-backend=', 'vispy-gl-debug', 'vispy-glir-file=',
                'vispy-log=', 'vispy-help', 'vispy-profile=', 'vispy-cprofile',
                'vispy-dpi=', 'vispy-audit-tests']
    try:
        opts, args = getopt.getopt(sys.argv[1:], '', argnames)
    except getopt.GetoptError:
        opts = []
    # Use them to set the config values
    for o, a in opts:
        if o.startswith('--vispy'):
            if o == '--vispy-backend':
                config['default_backend'] = a
                logger.info('vispy backend: %s', a)
            elif o == '--vispy-gl-debug':
                config['gl_debug'] = True
            elif o == '--vispy-glir-file':
                config['glir_file'] = a
            elif o == '--vispy-log':
                # The argument may be "level" or "level,match".
                if ',' in a:
                    verbose, match = a.split(',')
                else:
                    verbose = a
                    match = None
                # BUGFIX: store only the parsed level in the config; the
                # raw "level,match" string is not a valid logging level.
                config['logging_level'] = verbose
                set_log_level(verbose, match)
            elif o == '--vispy-profile':
                config['profile'] = a
            elif o == '--vispy-cprofile':
                _enable_profiling()
            elif o == '--vispy-help':
                print(VISPY_HELP)
            elif o == '--vispy-dpi':
                config['dpi'] = int(a)
            elif o == '--vispy-audit-tests':
                config['audit_tests'] = True
            else:
                logger.warning("Unsupported vispy flag: %s" % o)
|
Transform vispy specific command line args to vispy config.
Put into a function so that any variables dont leak in the vispy namespace.
|
entailment
|
def _get_vispy_app_dir():
"""Helper to get the default directory for storing vispy data"""
# Define default user directory
user_dir = os.path.expanduser('~')
# Get system app data dir
path = None
if sys.platform.startswith('win'):
path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
path = path1 or path2
elif sys.platform.startswith('darwin'):
path = os.path.join(user_dir, 'Library', 'Application Support')
# On Linux and as fallback
if not (path and os.path.isdir(path)):
path = user_dir
# Maybe we should store things local to the executable (in case of a
# portable distro or a frozen application that wants to be portable)
prefix = sys.prefix
if getattr(sys, 'frozen', None): # See application_dir() function
prefix = os.path.abspath(os.path.dirname(sys.path[0]))
for reldir in ('settings', '../settings'):
localpath = os.path.abspath(os.path.join(prefix, reldir))
if os.path.isdir(localpath):
try:
open(os.path.join(localpath, 'test.write'), 'wb').close()
os.remove(os.path.join(localpath, 'test.write'))
except IOError:
pass # We cannot write in this directory
else:
path = localpath
break
# Get path specific for this app
appname = '.vispy' if path == user_dir else 'vispy'
path = os.path.join(path, appname)
return path
|
Helper to get the default directory for storing vispy data
|
entailment
|
def _get_config_fname():
    """Helper for the vispy config file"""
    directory = _get_vispy_app_dir()
    if directory is None:
        return None
    # Under test, redirect the config file to a temporary directory.
    if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:
        return op.join(_TempDir(), 'vispy.json')
    return op.join(directory, 'vispy.json')
|
Helper for the vispy config file
|
entailment
|
def _load_config():
    """Helper to load prefs from ~/.vispy/vispy.json"""
    fname = _get_config_fname()
    if fname and op.isfile(fname):
        with open(fname, 'r') as fid:
            return json.load(fid)
    # No config file: start from an empty configuration.
    return dict()
|
Helper to load prefs from ~/.vispy/vispy.json
|
entailment
|
def save_config(**kwargs):
    """Save configuration keys to vispy config file.

    Parameters
    ----------
    **kwargs : keyword arguments
        Key/value pairs to save to the config file. When empty, the
        whole in-memory configuration is written out.
    """
    settings = kwargs if kwargs != {} else config._config
    # Merge the new settings over whatever is already on disk.
    merged = _load_config()
    merged.update(**settings)
    fname = _get_config_fname()
    if fname is None:
        raise RuntimeError('config filename could not be determined')
    if not op.isdir(op.dirname(fname)):
        os.mkdir(op.dirname(fname))
    with open(fname, 'w') as fid:
        json.dump(merged, fid, sort_keys=True, indent=0)
|
Save configuration keys to vispy config file
Parameters
----------
**kwargs : keyword arguments
Key/value pairs to save to the config file.
|
entailment
|
def set_data_dir(directory=None, create=False, save=False):
    """Set vispy data download directory.

    Parameters
    ----------
    directory : str | None
        The directory to use (None selects the default path).
    create : bool
        If True, create directory if it doesn't exist.
    save : bool
        If True, save the configuration to the vispy config.
    """
    if directory is None:
        if _data_path is None:
            raise IOError('default path cannot be determined, please '
                          'set it manually (directory != None)')
        directory = _data_path
    if not op.isdir(directory):
        if not create:
            raise IOError('directory "%s" does not exist, perhaps try '
                          'create=True to create it?' % directory)
        os.mkdir(directory)
    config.update(data_path=directory)
    if save:
        save_config(data_path=directory)
|
Set vispy data download directory
Parameters
----------
directory : str | None
The directory to use.
create : bool
If True, create directory if it doesn't exist.
save : bool
If True, save the configuration to the vispy config.
|
entailment
|
def _enable_profiling():
    """Start cProfile-based profiling and register an atexit callback
    that prints the collected stats when the program exits.
    """
    import atexit
    import cProfile
    global _profiler
    profiler = cProfile.Profile()
    profiler.enable()
    _profiler = profiler
    atexit.register(_profile_atexit)
|
Start profiling and register callback to print stats when the program
exits.
|
entailment
|
def sys_info(fname=None, overwrite=False):
    """Get relevant system and debugging information

    Parameters
    ----------
    fname : str | None
        Filename to dump info to. Use None to simply print.
    overwrite : bool
        If True, overwrite file (if it exists).

    Returns
    -------
    out : str
        The system information as a string.
    """
    if fname is not None and op.isfile(fname) and not overwrite:
        raise IOError('file exists, use overwrite=True to overwrite')
    out = ''
    try:
        # Nest all imports here to avoid any circular imports
        from ..app import use_app, Canvas
        from ..app.backends import BACKEND_NAMES
        from ..gloo import gl
        from ..testing import has_backend
        # get default app
        with use_log_level('warning'):
            app = use_app(call_reuse=False)  # suppress messages
        out += 'Platform: %s\n' % platform.platform()
        out += 'Python: %s\n' % str(sys.version).replace('\n', ' ')
        out += 'Backend: %s\n' % app.backend_name
        # Probe availability of every known backend (ipynb_* skipped;
        # they cannot be checked outside a notebook context)
        for backend in BACKEND_NAMES:
            if backend.startswith('ipynb_'):
                continue
            with use_log_level('warning', print_msg=False):
                which = has_backend(backend, out=['which'])[1]
            out += '{0:<9} {1}\n'.format(backend + ':', which)
        out += '\n'
        # We need an OpenGL context to get GL info
        canvas = Canvas('Test', (10, 10), show=False, app=app)
        canvas._backend._vispy_set_current()
        out += 'GL version: %r\n' % (gl.glGetParameter(gl.GL_VERSION),)
        x_ = gl.GL_MAX_TEXTURE_SIZE
        out += 'MAX_TEXTURE_SIZE: %r\n' % (gl.glGetParameter(x_),)
        out += 'Extensions: %r\n' % (gl.glGetParameter(gl.GL_EXTENSIONS),)
        canvas.close()
    except Exception:  # don't stop printing info
        # Record the traceback in the report rather than failing
        out += '\nInfo-gathering error:\n%s' % traceback.format_exc()
        pass
    if fname is not None:
        with open(fname, 'w') as fid:
            fid.write(out)
    return out
|
Get relevant system and debugging information
Parameters
----------
fname : str | None
Filename to dump info to. Use None to simply print.
overwrite : bool
If True, overwrite file (if it exists).
Returns
-------
out : str
The system information as a string.
|
entailment
|
def compact(vertices, indices, tolerance=1e-3):
    """Compact vertices and indices within given tolerance.

    Parameters
    ----------
    vertices : ndarray (n, 3)
        Vertex positions.
    indices : ndarray (p, 3)
        Triangle indices into *vertices*.
    tolerance : float
        Per-coordinate rounding tolerance; vertices that coincide after
        rounding are merged.

    Returns
    -------
    vertices : ndarray (m, 3)
        The unique vertices (m <= n).
    indices : ndarray (p, 3)
        Triangle indices re-expressed against the reduced vertex set.
    mapping : ndarray (n,)
        Index array mapping each original vertex to its unique vertex.
    """
    # Transform vertices into a structured array for np.unique to work
    n = len(vertices)
    V = np.zeros(n, dtype=[("pos", np.float32, 3)])
    V["pos"][:, 0] = vertices[:, 0]
    V["pos"][:, 1] = vertices[:, 1]
    V["pos"][:, 2] = vertices[:, 2]
    # BUGFIX: honor the tolerance argument (was hard-coded to 1e-3).
    epsilon = tolerance
    decimals = int(np.log(epsilon) / np.log(1 / 10.))
    # Round all vertices within given decimals; snap near-zero values to
    # exactly zero so -0.0 and +0.0 compare equal.
    V_ = np.zeros_like(V)
    for axis in range(3):
        C = V["pos"][:, axis].round(decimals=decimals)
        C[np.where(abs(C) < epsilon)] = 0
        V_["pos"][:, axis] = C
    # Find the unique vertices AND the mapping
    U, RI = np.unique(V_, return_inverse=True)
    # Translate indices from original vertices into the reduced set (U).
    indices = np.asarray(indices).ravel()
    # BUGFIX: integer division is required here; float division made
    # reshape() fail under Python 3.
    I_ = RI[indices].reshape(len(indices) // 3, 3)
    # Return reduced vertices set, translated indices and mapping that
    # allows to go from U to V
    return U.view(np.float32).reshape(len(U), 3), I_, RI
|
Compact vertices and indices within given tolerance
|
entailment
|
def normals(vertices, indices):
    """
    Compute per-vertex normals over a triangulated surface.

    Parameters
    ----------
    vertices : ndarray (n,3)
        triangles vertices
    indices : ndarray (p,3)
        triangles indices
    """
    # Merge coincident vertices so shared corners combine their normals.
    vertices, indices, mapping = compact(vertices, indices)
    corners = vertices[indices]
    face_n = np.cross(corners[:, 1] - corners[:, 0],
                      corners[:, 2] - corners[:, 0])
    lengths = np.sqrt((face_n * face_n).sum(axis=1))
    lengths[lengths == 0] = 1.0  # prevent divide-by-zero
    face_n /= lengths[:, np.newaxis]
    # Spread face normals onto each of their three corner vertices.
    vert_n = np.zeros_like(vertices)
    for corner in range(3):
        vert_n[indices[:, corner]] += face_n
    lengths = np.sqrt((vert_n * vert_n).sum(axis=1))
    lengths[lengths == 0] = 1.0
    vert_n /= lengths[:, np.newaxis]
    # Expand back to the original (uncompacted) vertex ordering.
    return vert_n[mapping]
|
Compute normals over a triangulated surface
Parameters
----------
vertices : ndarray (n,3)
triangles vertices
indices : ndarray (p,3)
triangles indices
|
entailment
|
def create_native(self):
    """ Create the native widget if not already done so. If the widget
    is already created, this function does nothing.
    """
    if self._backend is not None:
        return  # already instantiated
    # Make sure that the app is active
    assert self._app.native
    # Instantiate the backend with the right class; the backend assigns
    # itself to self._backend (see BaseCanvasBackend).
    self._app.backend_module.CanvasBackend(self, **self._backend_kwargs)
    self._backend_kwargs = None  # Clean up
    # Process GLIR commands at each paint event (append to the end).
    self.events.draw.connect(self.context.flush_commands, position='last')
    if self._autoswap:
        self.events.draw.connect((self, 'swap_buffers'),
                                 ref=True, position='last')
|
Create the native widget if not already done so. If the widget
is already created, this function does nothing.
|
entailment
|
def connect(self, fun):
    """Connect a function to an event by its ``on_<event>`` name.

    Typically used as a decorator on an event-handler definition: a
    function named ``on_draw`` is attached to the ``draw`` event.

    Parameters
    ----------
    fun : callable
        The handler; its name must start with ``'on_'``.
    """
    handler_name = fun.__name__
    if not handler_name.startswith('on_'):
        raise ValueError('When connecting a function based on its name, '
                         'the name should start with "on_"')
    event_name = handler_name[3:]
    # Look up the emitter; unknown names become a friendly ValueError.
    try:
        emitter = self.events[event_name]
    except KeyError:
        raise ValueError(
            'Event "%s" not available on this canvas.' %
            event_name)
    emitter.connect(fun)
|
Connect a function to an event
The name of the function
should be on_X, with X the name of the event (e.g. 'on_draw').
This method is typically used as a decorator on a function
definition for an event handler.
Parameters
----------
fun : callable
The function.
|
entailment
|
def size(self):
    """ The size of canvas/window in logical pixels (physical / scale). """
    raw = self._backend._vispy_get_size()
    scale = self._px_scale
    return (raw[0] // scale, raw[1] // scale)
|
The size of canvas/window
|
entailment
|
def show(self, visible=True, run=False):
    """Show or hide the canvas.

    Parameters
    ----------
    visible : bool
        Make the canvas visible.
    run : bool
        Run the backend event loop.
    """
    self._backend._vispy_set_visible(visible)
    if not run:
        return
    self.app.run()
|
Show or hide the canvas
Parameters
----------
visible : bool
Make the canvas visible.
run : bool
Run the backend event loop.
|
entailment
|
def close(self):
    """Close the canvas.

    Notes
    -----
    This will usually destroy the GL context. For Qt, the context
    (and widget) will be destroyed only if the widget is top-level.
    To avoid having the widget destroyed (more like standard Qt
    behavior), consider making the widget a sub-widget.
    """
    # Nothing to do if never created or already closed.
    if self._backend is None or self._closed:
        return
    self._closed = True
    self.events.close()
    self._backend._vispy_close()
    forget_canvas(self)
|
Close the canvas
Notes
-----
This will usually destroy the GL context. For Qt, the context
(and widget) will be destroyed only if the widget is top-level.
To avoid having the widget destroyed (more like standard Qt
behavior), consider making the widget a sub-widget.
|
entailment
|
def _update_fps(self, event):
"""Update the fps after every window"""
self._frame_count += 1
diff = time() - self._basetime
if (diff > self._fps_window):
self._fps = self._frame_count / diff
self._basetime = time()
self._frame_count = 0
self._fps_callback(self.fps)
|
Update the fps after every window
|
entailment
|
def measure_fps(self, window=1, callback='%1.1f FPS'):
    """Measure the current FPS

    Sets the update window, connects the draw event to update_fps
    and sets the callback function.

    Parameters
    ----------
    window : float
        The time-window (in seconds) to calculate FPS. Default 1.0.
    callback : function | str
        The function to call with the float FPS value, or the string
        to be formatted with the fps value and then printed. The
        default is ``'%1.1f FPS'``. If callback evaluates to False, the
        FPS measurement is stopped.
    """
    # Always detach first so repeated calls do not stack handlers.
    self.events.draw.disconnect(self._update_fps)
    if not callback:
        self._fps_callback = None
        return
    if isinstance(callback, string_types):
        fmt = callback  # keep a reference; `callback` is rebound below

        def callback(x):
            print(fmt % x)
    self._fps_window = window
    self.events.draw.connect(self._update_fps)
    self._fps_callback = callback
|
Measure the current FPS
Sets the update window, connects the draw event to update_fps
and sets the callback function.
Parameters
----------
window : float
The time-window (in seconds) to calculate FPS. Default 1.0.
callback : function | str
The function to call with the float FPS value, or the string
to be formatted with the fps value and then printed. The
default is ``'%1.1f FPS'``. If callback evaluates to False, the
FPS measurement is stopped.
|
entailment
|
def render(self):
    """ Render the canvas to an offscreen buffer and return the image
    array.

    Returns
    -------
    image : array
        Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
        upper-left corner of the rendered region.
    """
    self.set_current()
    w_h = self.physical_size
    # Buffers take (h, w), hence the reversed size.
    offscreen = FrameBuffer(color=RenderBuffer(w_h[::-1]),
                            depth=RenderBuffer(w_h[::-1]))
    try:
        offscreen.activate()
        self.events.draw()
        return offscreen.read()
    finally:
        # Always restore the default framebuffer, even on draw errors.
        offscreen.deactivate()
|
Render the canvas to an offscreen buffer and return the image
array.
Returns
-------
image : array
Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
upper-left corner of the rendered region.
|
entailment
|
def drag_events(self):
    """ Return a list of all mouse events in the current drag operation.

    Returns None if there is no current drag operation.
    """
    if not self.is_dragging:
        return None
    # Walk the ``last_event`` chain backwards until the trail starts.
    # mouse_press events can only be the start of a trail.
    chain = []
    ev = self
    while ev is not None and ev.type != 'mouse_press':
        chain.append(ev)
        ev = ev.last_event
    chain.reverse()
    return chain
|
Return a list of all mouse events in the current drag operation.
Returns None if there is no current drag operation.
|
entailment
|
def trail(self):
    """ Return an (N, 2) array of mouse coordinates for every event in the
    current mouse drag operation.

    Returns None if there is no current drag operation.
    """
    drag = self.drag_events()
    if drag is None:
        return None
    # Stack per-event positions into a single integer array.
    coords = np.array([ev.pos for ev in drag], dtype=int)
    return coords.reshape(len(drag), 2)
|
Return an (N, 2) array of mouse coordinates for every event in the
current mouse drag operation.
Returns None if there is no current drag operation.
|
entailment
|
def width_min(self, width_min):
    """Set the minimum width of the widget.

    (Docstring fixed: it previously said "height", copy-pasted from
    the height_min setter.)

    Parameters
    ----------
    width_min : None | float
        The minimum width of the widget. If None, the minimum width
        is reset to 0 (unconstrained).
    """
    if width_min is None:
        self._width_limits[0] = 0
        return
    width_min = float(width_min)
    assert(0 <= width_min)
    self._width_limits[0] = width_min
    self._update_layout()
|
Set the minimum width of the widget
Parameters
----------
width_min: float
the minimum width of the widget
|
entailment
|
def width_max(self, width_max):
    """Set the maximum width of the widget.

    Parameters
    ----------
    width_max : None | float
        The maximum width of the widget. If None, maximum width
        is unbounded.
    """
    if width_max is None:
        self._width_limits[1] = None
        return
    limit = float(width_max)
    # The maximum can never be smaller than the current minimum.
    assert(self.width_min <= limit)
    self._width_limits[1] = limit
    self._update_layout()
|
Set the maximum width of the widget.
Parameters
----------
width_max: None | float
the maximum width of the widget. if None, maximum width
is unbounded
|
entailment
|
def height_min(self, height_min):
    """Set the minimum height of the widget.

    Parameters
    ----------
    height_min : None | float
        The minimum height of the widget. If None, the minimum height
        is reset to 0 (unconstrained).
    """
    if height_min is None:
        self._height_limits[0] = 0
        return
    value = float(height_min)
    assert(value >= 0)
    self._height_limits[0] = value
    self._update_layout()
|
Set the minimum height of the widget
Parameters
----------
height_min: float
the minimum height of the widget
|
entailment
|
def height_max(self, height_max):
    """Set the maximum height of the widget.

    Parameters
    ----------
    height_max : None | float
        The maximum height of the widget. If None, maximum height
        is unbounded.
    """
    if height_max is None:
        self._height_limits[1] = None
        return
    value = float(height_max)
    # The maximum can never be smaller than the current minimum.
    assert(0 <= self.height_min <= value)
    self._height_limits[1] = value
    self._update_layout()
|
Set the maximum height of the widget.
Parameters
----------
height_max: None | float
the maximum height of the widget. if None, maximum height
is unbounded
|
entailment
|
def inner_rect(self):
    """The rectangular area inside the margin, border, and padding.

    Generally widgets should avoid drawing or placing sub-widgets outside
    this rectangle.
    """
    inset = self.margin + self._border_width + self.padding
    if not self.border_color.is_blank:
        inset += 1  # leave room for the visible border line
    w = self.size[0] - 2 * inset
    h = self.size[1] - 2 * inset
    return Rect((inset, inset), (w, h))
|
The rectangular area inside the margin, border, and padding.
Generally widgets should avoid drawing or placing sub-widgets outside
this rectangle.
|
entailment
|
def _update_clipper(self):
"""Called whenever the clipper for this widget may need to be updated.
"""
if self.clip_children and self._clipper is None:
self._clipper = Clipper()
elif not self.clip_children:
self._clipper = None
if self._clipper is None:
return
self._clipper.rect = self.inner_rect
self._clipper.transform = self.get_transform('framebuffer', 'visual')
|
Called whenever the clipper for this widget may need to be updated.
|
entailment
|
def _update_line(self):
    """Rebuild the border/background mesh after a geometry change.

    The eight vertices form two nested rectangles (outer and inner edge
    of the border). The first eight faces triangulate the ring between
    them (the border); the last two faces span the inner rectangle
    (the background fill).
    """
    w = self._border_width
    m = self.margin
    # border is drawn within the boundaries of the widget:
    #
    # size = (8, 7) margin=2
    # internal rect = (3, 3, 2, 1)
    # ........
    # ........
    # ..BBBB..
    # ..B B..
    # ..BBBB..
    # ........
    # ........
    #
    l = b = m
    r = self.size[0] - m
    t = self.size[1] - m
    # Corner pairs, one (outer, inner) pair per corner, counter-clockwise.
    pos = np.array([
        [l, b], [l+w, b+w],
        [r, b], [r-w, b+w],
        [r, t], [r-w, t-w],
        [l, t], [l+w, t-w],
    ], dtype=np.float32)
    faces = np.array([
        [0, 2, 1],
        [1, 2, 3],
        [2, 4, 3],
        [3, 5, 4],
        [4, 5, 6],
        [5, 7, 6],
        [6, 0, 7],
        [7, 0, 1],
        [5, 3, 1],
        [1, 5, 7],
    ], dtype=np.int32)
    # Slice away border faces (first 8) if the border color is blank, and
    # background faces (last 2) if the background color is blank.
    start = 8 if self._border_color.is_blank else 0
    stop = 8 if self._bgcolor.is_blank else 10
    face_colors = None
    if self._face_colors is not None:
        face_colors = self._face_colors[start:stop]
    self._mesh.set_data(vertices=pos, faces=faces[start:stop],
                        face_colors=face_colors)
    # picking mesh covers the entire area (outer corners only)
    self._picking_mesh.set_data(vertices=pos[::2])
|
Update border line to match new shape
|
entailment
|
def add_widget(self, widget):
    """Add a Widget as a managed child of this Widget.

    The child will be automatically positioned and sized to fill the
    entire space inside this Widget (unless _update_child_widgets is
    redefined).

    Parameters
    ----------
    widget : instance of Widget
        The widget to add.

    Returns
    -------
    widget : instance of Widget
        The same widget, to allow call chaining.
    """
    self._widgets.append(widget)
    widget.parent = self
    # Re-run layout so the new child gets its geometry.
    self._update_child_widgets()
    return widget
|
Add a Widget as a managed child of this Widget.
The child will be
automatically positioned and sized to fill the entire space inside
this Widget (unless _update_child_widgets is redefined).
Parameters
----------
widget : instance of Widget
The widget to add.
Returns
-------
widget : instance of Widget
The widget.
|
entailment
|
def add_grid(self, *args, **kwargs):
    """Create a new Grid and add it as a child widget.

    All positional and keyword arguments are forwarded to Grid().
    """
    from .grid import Grid
    return self.add_widget(Grid(*args, **kwargs))
|
Create a new Grid and add it as a child widget.
All arguments are given to Grid().
|
entailment
|
def add_view(self, *args, **kwargs):
    """Create a new ViewBox and add it as a child widget.

    All positional and keyword arguments are forwarded to ViewBox().
    """
    from .viewbox import ViewBox
    return self.add_widget(ViewBox(*args, **kwargs))
|
Create a new ViewBox and add it as a child widget.
All arguments are given to ViewBox().
|
entailment
|
def remove_widget(self, widget):
    """Remove a Widget as a managed child of this Widget.

    Parameters
    ----------
    widget : instance of Widget
        The widget to remove. Raises ValueError (from list.remove) if
        it is not currently a child.
    """
    self._widgets.remove(widget)
    widget.parent = None
    # Re-run layout for the remaining children.
    self._update_child_widgets()
|
Remove a Widget as a managed child of this Widget.
Parameters
----------
widget : instance of Widget
The widget to remove.
|
entailment
|
def pack_unit(value):
    """Pack floats in [0, 1] into 4 unsigned int8 (base-256 digits).

    Returns
    -------
    pack : array
        Packed interpolation kernel, shape ``value.shape + (4,)``.
    """
    packed = np.zeros(value.shape + (4,), dtype=np.ubyte)
    frac = value
    for digit in range(4):
        # Peel off one base-256 digit per iteration.
        frac, whole = np.modf(frac * 256.)
        packed[..., digit] = whole
    return packed
|
Packs float values between [0,1] into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
|
entailment
|
def pack_ieee(value):
    """Pack the IEEE-754 binary representation into 4 unsigned int8.

    Assumes a 4-byte element dtype (e.g. float32) so each element maps
    to exactly 4 bytes.

    Returns
    -------
    pack : array
        Packed interpolation kernel, shape ``value.shape + (4,)``.
    """
    # np.fromstring/tostring are deprecated (removed in modern NumPy);
    # frombuffer/tobytes produce identical bytes.
    data = np.frombuffer(value.tobytes(), np.ubyte)
    # Copy so the result is writable (frombuffer returns a read-only view).
    return data.reshape(value.shape + (4,)).copy()
|
Packs float ieee binary representation into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
|
entailment
|
def load_spatial_filters(packed=True):
    """Load the spatial-filters interpolation kernels.

    Parameters
    ----------
    packed : bool
        Whether or not the data should be in "packed" representation
        for use in GLSL code.

    Returns
    -------
    kernel : array
        16x1024x4 (packed float in rgba) or 16x1024 (unpacked float);
        16 interpolation kernels of length 1024 each.
    names : tuple of strings
        Respective interpolation names, plus "Nearest" which does
        not require a filter but can still be used.
    """
    names = ("Bilinear", "Hanning", "Hamming", "Hermite",
             "Kaiser", "Quadric", "Bicubic", "CatRom",
             "Mitchell", "Spline16", "Spline36", "Gaussian",
             "Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")
    raw = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
    # The packed (base-256 digit) form is what the GLSL code samples.
    kernel = pack_unit(raw) if packed else raw
    return kernel, names
|
Load spatial-filters kernel
Parameters
----------
packed : bool
Whether or not the data should be in "packed" representation
for use in GLSL code.
Returns
-------
kernel : array
16x1024x4 (packed float in rgba) or
16x1024 (unpacked float)
16 interpolation kernel with length 1024 each.
names : tuple of strings
Respective interpolation names, plus "Nearest" which does
not require a filter but can still be used
|
entailment
|
def list_fonts():
    """List system fonts.

    Returns
    -------
    fonts : list of str
        System fonts plus the bundled vispy fonts, sorted
        case-insensitively.
    """
    found = _list_fonts()
    # Append bundled fonts that the system scan did not report.
    for font in _vispy_fonts:
        if font not in found:
            found.append(font)
    return sorted(found, key=lambda name: name.lower())
|
List system fonts
Returns
-------
fonts : list of str
List of system fonts.
|
entailment
|
def timeout(limit, handler):
    """A decorator ensuring that the decorated function's run time does
    not exceed the argument limit.

    Uses SIGALRM, so this only works on the main thread of POSIX systems.

    :args limit: the time limit in seconds
    :type limit: int
    :args handler: the handler function called when the decorated
        function times out; receives ``(limit, f, args, kwargs)``.
    :type handler: callable

    Example:

    >>>def timeout_handler(limit, f, args, kwargs):
    ...    print "{func} call timed out after {lim}s.".format(
    ...        func=f.__name__, lim=limit)
    ...
    >>>@timeout(limit=5, handler=timeout_handler)
    ... def work(foo, bar, baz="spam"):
    ...     time.sleep(10)
    >>>work("foo", "bar", "baz")
    # time passes...
    work call timed out after 5s.
    >>>
    """
    from functools import wraps

    def wrapper(f):
        @wraps(f)  # preserve the wrapped function's name/docstring
        def wrapped_f(*args, **kwargs):
            # Remember the previous SIGALRM handler so other users of
            # the alarm are restored afterwards.
            old_handler = signal.getsignal(signal.SIGALRM)
            signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(limit)
            try:
                res = f(*args, **kwargs)
            except Timeout:
                # Timed out: delegate to the user-supplied handler and
                # (implicitly) return None.
                handler(limit, f, args, kwargs)
            else:
                return res
            finally:
                # Restore handler and cancel any pending alarm.
                signal.signal(signal.SIGALRM, old_handler)
                signal.alarm(0)
        return wrapped_f
    return wrapper
|
A decorator ensuring that the decorated function's run time does not
exceed the argument limit.
:args limit: the time limit
:type limit: int
:args handler: the handler function called when the decorated
function times out.
:type handler: callable
Example:
>>>def timeout_handler(limit, f, *args, **kwargs):
... print "{func} call timed out after {lim}s.".format(
... func=f.__name__, lim=limit)
...
>>>@timeout(limit=5, handler=timeout_handler)
... def work(foo, bar, baz="spam")
... time.sleep(10)
>>>work("foo", "bar", "baz")
# time passes...
work call timed out after 5s.
>>>
|
entailment
|
def _process_backend_kwargs(self, kwargs):
""" Simple utility to retrieve kwargs in predetermined order.
Also checks whether the values of the backend arguments do not
violate the backend capabilities.
"""
# Verify given argument with capability of the backend
app = self._vispy_canvas.app
capability = app.backend_module.capability
if kwargs['context'].shared.name: # name already assigned: shared
if not capability['context']:
raise RuntimeError('Cannot share context with this backend')
for key in [key for (key, val) in capability.items() if not val]:
if key in ['context', 'multi_window', 'scroll']:
continue
invert = key in ['resizable', 'decorate']
if bool(kwargs[key]) - invert:
raise RuntimeError('Config %s is not supported by backend %s'
% (key, app.backend_name))
# Return items in sequence
out = SimpleBunch()
keys = ['title', 'size', 'position', 'show', 'vsync', 'resizable',
'decorate', 'fullscreen', 'parent', 'context', 'always_on_top',
]
for key in keys:
out[key] = kwargs[key]
return out
|
Simple utility to retrieve kwargs in predetermined order.
Also checks whether the values of the backend arguments do not
violate the backend capabilities.
|
entailment
|
def _set_range(self, init):
    """Reset the view.

    Stops current motion, estimates the scene scale from the data
    limits, places the camera near a corner of the bounding box, and
    orients it toward the scene.

    Parameters
    ----------
    init : bool
        Not referenced in this body; presumably part of the camera
        ``_set_range`` interface — TODO confirm against the base class.
    """
    #PerspectiveCamera._set_range(self, init)
    # Stop moving
    self._speed *= 0.0
    # Get window size (and store factor now to sync with resizing)
    w, h = self._viewbox.size
    w, h = float(w), float(h)
    # Get range and translation for x and y
    x1, y1, z1 = self._xlim[0], self._ylim[0], self._zlim[0]
    x2, y2, z2 = self._xlim[1], self._ylim[1], self._zlim[1]
    rx, ry, rz = (x2 - x1), (y2 - y1), (z2 - z1)
    # Correct ranges for window size. Note that the window width
    # influences the x and y data range, while the height influences
    # the z data range.
    if w / h > 1:
        rx /= w / h
        ry /= w / h
    else:
        rz /= h / w
    # Do not convert to screen coordinates. This camera does not need
    # to fit everything on screen, but we need to estimate the scale
    # of the data in the scene.
    # Set scale, depending on data range. Initial speed is such that
    # the scene can be traversed in about three seconds.
    self._scale_factor = max(rx, ry, rz) / 3.0
    # Set initial position to a corner of the scene
    margin = np.mean([rx, ry, rz]) * 0.1
    self._center = x1 - margin, y1 - margin, z1 + margin
    # Determine initial view direction based on flip axis
    yaw = 45 * self._flip_factors[0]
    pitch = -90 - 20 * self._flip_factors[2]
    if self._flip_factors[1] < 0:
        yaw += 90 * np.sign(self._flip_factors[0])
    # Compose orientation from pitch (about x) and yaw (about z).
    q1 = Quaternion.create_from_axis_angle(pitch*math.pi/180, 1, 0, 0)
    q2 = Quaternion.create_from_axis_angle(0*math.pi/180, 0, 1, 0)
    q3 = Quaternion.create_from_axis_angle(yaw*math.pi/180, 0, 0, 1)
    #
    self._rotation1 = (q1 * q2 * q3).normalize()
    self._rotation2 = Quaternion()  # reset the transient (in-drag) rotation
    # Update
    self.view_changed()
|
Reset the view.
|
entailment
|
def on_timer(self, event):
    """Timer event handler: advance the fly-camera simulation one tick.

    Applies acceleration and drag to the current speed, translates the
    camera center, applies manual roll and (optionally) automatic
    leveling roll, then triggers a view update when anything changed.

    Parameters
    ----------
    event : instance of Event
        The event.
    """
    # Set relative speed and acceleration
    rel_speed = event.dt
    rel_acc = 0.1
    # Get what's forward
    pf, pr, pl, pu = self._get_directions()
    # Increase speed through acceleration
    # Note that self._speed is relative. We can balance rel_acc and
    # rel_speed to get a nice smooth or direct control
    self._speed += self._acc * rel_acc
    # Reduce speed. Simulate resistance. Using brakes slows down faster.
    # Note that the way that we reduce speed, allows for higher
    # speeds if keys are bound to higher acc values (i.e. turbo)
    reduce = np.array([0.05, 0.05, 0.05, 0.1, 0.1, 0.1])
    reduce[self._brake > 0] = 0.2
    self._speed -= self._speed * reduce
    # Snap small residual speeds to zero to avoid endless drifting.
    if np.abs(self._speed).max() < 0.05:
        self._speed *= 0.0
    # --- Determine new position from translation speed
    if self._speed[:3].any():
        # Create speed vectors, use scale_factor as a reference
        dv = np.array([1.0/d for d in self._flip_factors])
        #
        vf = pf * dv * rel_speed * self._scale_factor
        vr = pr * dv * rel_speed * self._scale_factor
        vu = pu * dv * rel_speed * self._scale_factor
        direction = vf, vr, vu
        # Set position
        center_loc = np.array(self._center, dtype='float32')
        center_loc += (self._speed[0] * direction[0] +
                       self._speed[1] * direction[1] +
                       self._speed[2] * direction[2])
        self._center = tuple(center_loc)
    # --- Determine new orientation from rotation speed
    roll_angle = 0
    # Calculate manual roll (from speed)
    if self._speed[3:].any():
        angleGain = np.array([1.0, 1.5, 1.0]) * 3 * math.pi / 180
        angles = self._speed[3:] * angleGain
        q1 = Quaternion.create_from_axis_angle(angles[0], -1, 0, 0)
        q2 = Quaternion.create_from_axis_angle(angles[1], 0, 1, 0)
        q3 = Quaternion.create_from_axis_angle(angles[2], 0, 0, -1)
        q = q1 * q2 * q3
        self._rotation1 = (q * self._rotation1).normalize()
    # Calculate auto-roll (gently level the camera toward the up axis)
    if self.auto_roll:
        up = {'x': (1, 0, 0), 'y': (0, 1, 0), 'z': (0, 0, 1)}[self.up[1]]
        up = np.array(up) * {'+': +1, '-': -1}[self.up[0]]

        def angle(p1, p2):
            # Angle between two (unit) vectors
            return np.arccos(p1.dot(p2))
        #au = angle(pu, (0, 0, 1))
        ar = angle(pr, up)
        al = angle(pl, up)
        af = angle(pf, up)
        # Roll angle that's off from being leveled (in unit strength)
        roll_angle = math.sin(0.5*(al - ar))
        # Correct for pitch
        roll_angle *= abs(math.sin(af))  # abs(math.sin(au))
        # Dead zone so a nearly-level camera stays put.
        if abs(roll_angle) < 0.05:
            roll_angle = 0
        if roll_angle:
            # Correct to soften the force at 90 degree angle
            roll_angle = np.sign(roll_angle) * np.abs(roll_angle)**0.5
            # Get correction for this iteration and apply
            angle_correction = 1.0 * roll_angle * math.pi / 180
            q = Quaternion.create_from_axis_angle(angle_correction,
                                                  0, 0, 1)
            self._rotation1 = (q * self._rotation1).normalize()
    # Update only when something actually changed this tick.
    if self._speed.any() or roll_angle or self._update_from_mouse:
        self._update_from_mouse = False
        self.view_changed()
|
Timer event handler
Parameters
----------
event : instance of Event
The event.
|
entailment
|
def viewbox_key_event(self, event):
    """ViewBox key event handler.

    Translates key presses/releases from the keymap into acceleration
    (or brake) contributions picked up by the timer handler.

    Parameters
    ----------
    event : instance of Event
        The event.
    """
    PerspectiveCamera.viewbox_key_event(self, event)
    if event.handled or not self.interactive:
        return
    # Ensure the timer runs
    if not self._timer.running:
        self._timer.start()
    if event.key not in self._keymap:
        return
    val_dims = self._keymap[event.key]
    val = val_dims[0]
    # A zero value marks the brake key; anything else is acceleration.
    if val == 0:
        vec = self._brake
        val = 1
    else:
        vec = self._acc
    # Releasing the key clears its contribution again.
    if event.type == 'key_release':
        val = 0
    factor = 1.0
    for dim in val_dims[1:]:
        vec[dim - 1] = val * factor
|
ViewBox key event handler
Parameters
----------
event : instance of Event
The event.
|
entailment
|
def viewbox_mouse_event(self, event):
    """ViewBox mouse event handler.

    Wheel: move forward/backward (no modifier) or change speed (SHIFT).
    Left-button drag: look around (azimuth/elevation).
    Right-button drag + CTRL: zoom by changing the field of view.

    Parameters
    ----------
    event : instance of Event
        The event.
    """
    PerspectiveCamera.viewbox_mouse_event(self, event)
    if event.handled or not self.interactive:
        return
    if event.type == 'mouse_wheel':
        if not event.mouse_event.modifiers:
            # Move forward / backward
            self._speed[0] += 0.5 * event.delta[1]
        elif keys.SHIFT in event.mouse_event.modifiers:
            # Speed
            s = 1.1 ** - event.delta[1]
            self.scale_factor /= s  # divide instead of multiply
            print('scale factor: %1.1f units/s' % self.scale_factor)
        return
    if event.type == 'mouse_press':
        event.handled = True
    if event.type == 'mouse_release':
        # Reset
        self._event_value = None
        # Fold the transient drag rotation into the persistent one.
        self._rotation1 = (self._rotation2 * self._rotation1).normalize()
        self._rotation2 = Quaternion()
    elif not self._timer.running:
        # Ensure the timer runs
        self._timer.start()
    if event.type == 'mouse_move':
        if event.press_event is None:
            return
        if not event.buttons:
            return
        # Prepare
        modifiers = event.mouse_event.modifiers
        pos1 = event.mouse_event.press_event.pos
        pos2 = event.mouse_event.pos
        w, h = self._viewbox.size
        if 1 in event.buttons and not modifiers:
            # rotate: get delta values normalized by viewport size
            d_az = -float(pos2[0] - pos1[0]) / w
            d_el = +float(pos2[1] - pos1[1]) / h
            # Apply gain
            d_az *= - 0.5 * math.pi  # * self._speed_rot
            d_el *= + 0.5 * math.pi  # * self._speed_rot
            # Create temporary quaternions
            q_az = Quaternion.create_from_axis_angle(d_az, 0, 1, 0)
            q_el = Quaternion.create_from_axis_angle(d_el, 1, 0, 0)
            # Apply to the transient drag rotation (merged on release)
            self._rotation2 = (q_el.normalize() * q_az).normalize()
        elif 2 in event.buttons and keys.CONTROL in modifiers:
            # zoom --> fov; remember the fov at drag start
            if self._event_value is None:
                self._event_value = self._fov
            p1 = np.array(event.press_event.pos)[:2]
            p2 = np.array(event.pos)[:2]
            p1c = event.map_to_canvas(p1)[:2]
            p2c = event.map_to_canvas(p2)[:2]
            d = p2c - p1c
            # Exponential mapping keeps zoom smooth; clamp to [10, 90] deg.
            fov = self._event_value * math.exp(-0.01*d[1])
            self._fov = min(90.0, max(10, fov))
    # Make transform be updated on the next timer tick.
    # By doing it at timer tick, we avoid shaky behavior
    self._update_from_mouse = True
|
ViewBox mouse event handler
Parameters
----------
event : instance of Event
The event.
|
entailment
|
def _stdin_ready_posix():
"""Return True if there's something to read on stdin (posix version)."""
infds, outfds, erfds = select.select([sys.stdin],[],[],0)
return bool(infds)
|
Return True if there's something to read on stdin (posix version).
|
entailment
|
def set_inputhook(self, callback):
    """Set PyOS_InputHook to callback and return the previous one.

    Parameters
    ----------
    callback : callable
        The hook the interpreter should call while idle/waiting for
        input (signature dictated by ``self.PYFUNC``).

    Returns
    -------
    The previous PyOS_InputHook as a ctypes function object.
    """
    # On platforms with 'readline' support, it's all too likely to
    # have a KeyboardInterrupt signal delivered *even before* an
    # initial ``try:`` clause in the callback can be executed, so
    # we need to disable CTRL+C in this situation.
    ignore_CTRL_C()
    self._callback = callback
    # Keep a reference to the ctypes wrapper: if it were garbage
    # collected, the interpreter would call into freed memory.
    self._callback_pyfunctype = self.PYFUNC(callback)
    pyos_inputhook_ptr = self.get_pyos_inputhook()
    original = self.get_pyos_inputhook_as_func()
    # Patch the interpreter-level pointer to point at our wrapper.
    pyos_inputhook_ptr.value = \
        ctypes.cast(self._callback_pyfunctype, ctypes.c_void_p).value
    self._installed = True
    return original
|
Set PyOS_InputHook to callback and return the previous one.
|
entailment
|
def clear_inputhook(self, app=None):
    """Set PyOS_InputHook to NULL and return the previous one.

    Parameters
    ----------
    app : optional, ignored
        This parameter is allowed only so that clear_inputhook() can be
        called with a similar interface as all the ``enable_*`` methods. But
        the actual value of the parameter is ignored. This uniform interface
        makes it easier to have user-level entry points in the main IPython
        app like :meth:`enable_gui`."""
    pyos_inputhook_ptr = self.get_pyos_inputhook()
    original = self.get_pyos_inputhook_as_func()
    # NULL out the hook so the interpreter stops invoking it.
    pyos_inputhook_ptr.value = ctypes.c_void_p(None).value
    # CTRL+C was disabled while a hook was installed (see set_inputhook);
    # re-enable it now that the hook is gone.
    allow_CTRL_C()
    self._reset()
    return original
|
Set PyOS_InputHook to NULL and return the previous one.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`.
|
entailment
|
def clear_app_refs(self, gui=None):
    """Clear IPython's internal reference to an application instance.

    Whenever we create an app for a user on qt4 or wx, we hold a
    reference to the app. This is needed because in some cases bad things
    can happen if a user doesn't hold a reference themselves. This
    method is provided to clear the references we are holding.

    Parameters
    ----------
    gui : None or str
        If None, clear all app references. If ('wx', 'qt4') clear
        the app for that toolkit. References are not held for gtk or tk
        as those toolkits don't have the notion of an app.
    """
    if gui is None:
        # Drop everything at once.
        self.apps = {}
    else:
        # Drop just that toolkit's app, if we are holding one.
        self.apps.pop(gui, None)
|
Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
If None, clear all app references. If ('wx', 'qt4') clear
the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
|
entailment
|
def register(self, toolkitname, *aliases):
    """Register a class to provide the event loop for a given GUI.

    Intended as a class decorator; pass the names under which to
    register this GUI integration. The decorated class should subclass
    :class:`InputHookBase`.
    ::

        @inputhook_manager.register('qt')
        class QtInputHook(InputHookBase):
            def enable(self, app=None):
                ...
    """
    def decorator(cls):
        # Instantiate immediately and store under the canonical name;
        # every alias maps back to that name.
        self.guihooks[toolkitname] = cls(self)
        for alias in aliases:
            self.aliases[alias] = toolkitname
        return cls
    return decorator
|
Register a class to provide the event loop for a given GUI.
This is intended to be used as a class decorator. It should be passed
the names with which to register this GUI integration. The classes
themselves should subclass :class:`InputHookBase`.
::
@inputhook_manager.register('qt')
class QtInputHook(InputHookBase):
def enable(self, app=None):
...
|
entailment
|
def enable_gui(self, gui=None, app=None):
    """Switch amongst GUI input hooks by name.

    A higher-level method than :meth:`set_inputhook`: the GUI name is
    looked up in the registry and the matching hook object is enabled.

    Parameters
    ----------
    gui : optional, string or None
        If None (or 'none'), clears input hook, otherwise it must be one
        of the recognized GUI names (see ``GUI_*`` constants in module).
    app : optional, existing application object.
        For toolkits that have the concept of a global app, you can supply
        an existing one. If not given, the toolkit will be probed for one,
        and if none is found, a new one will be created. Note that GTK does
        not have this concept, and passing an app if ``gui=="GTK"`` will
        raise an error.

    Returns
    -------
    The output of the underlying gui switch routine, typically the actual
    PyOS_InputHook wrapper object or the GUI toolkit app created, if there
    was one.
    """
    if gui in (None, GUI_NONE):
        return self.disable_gui()
    # Resolve aliases (e.g. 'qt4' -> 'qt') by recursing once.
    if gui in self.aliases:
        return self.enable_gui(self.aliases[gui], app)
    try:
        gui_hook = self.guihooks[gui]
    except KeyError:
        e = "Invalid GUI request {!r}, valid ones are: {}"
        raise ValueError(e.format(gui, ', '.join(self.guihooks)))
    self._current_gui = gui
    app = gui_hook.enable(app)
    if app is not None:
        # Hold a reference so the toolkit app is not garbage collected.
        app._in_event_loop = True
        self.apps[gui] = app
    return app
|
Switch amongst GUI input hooks by name.
This is a higher level method than :meth:`set_inputhook` - it uses the
GUI name to look up a registered object which enables the input hook
for that GUI.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.