| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q22900
|
unbool
|
train
|
def unbool(element, true=object(), false=object()):
"""
A hack to make True and 1 and False and 0 unique for ``uniq``.
"""
if element is True:
return true
elif element is False:
return false
return element
|
python
|
{
"resource": ""
}
|
q22901
|
uniq
|
train
|
def uniq(container):
"""
Check if all of a container's elements are unique.
First tries to rely on the elements being hashable, then
falls back on them being sortable, and finally falls back on brute
force.
"""
try:
return len(set(unbool(i) for i in container)) == len(container)
except TypeError:
try:
sort = sorted(unbool(i) for i in container)
sliced = itertools.islice(sort, 1, None)
for i, j in zip(sort, sliced):
if i == j:
return False
except (NotImplementedError, TypeError):
seen = []
for e in container:
e = unbool(e)
if e in seen:
return False
seen.append(e)
return True
|
python
|
{
"resource": ""
}
|
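Because ``True == 1`` and ``False == 0`` in Python, a plain ``set`` collapses them; the sentinel-object trick above keeps them distinct. A minimal, self-contained sketch of the idea (re-declaring a stripped-down ``unbool`` rather than importing the one above):

```python
_TRUE, _FALSE = object(), object()

def unbool(element, true=_TRUE, false=_FALSE):
    # map the booleans onto unique sentinel objects so they no
    # longer compare equal to 1 and 0
    if element is True:
        return true
    if element is False:
        return false
    return element

print(len({True, 1, False, 0}))                       # 2 -- True/1 and False/0 collide
print(len({unbool(e) for e in (True, 1, False, 0)}))  # 4 -- all kept distinct
```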
q22902
|
FormatChecker.checks
|
train
|
def checks(self, format, raises=()):
"""
Register a decorated function as validating a new format.
Arguments:
format (str):
The format that the decorated function will check.
raises (Exception):
The exception(s) raised by the decorated function when an
invalid instance is found.
The exception object will be accessible as the
`jsonschema.exceptions.ValidationError.cause` attribute of the
resulting validation error.
"""
def _checks(func):
self.checkers[format] = (func, raises)
return func
return _checks
|
python
|
{
"resource": ""
}
|
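A usage sketch of the decorator above, registering a hypothetical "even" format on a jsonschema ``FormatChecker`` and passing it to ``validate`` (format checks only run when a checker is supplied):

```python
from jsonschema import FormatChecker, ValidationError, validate

checker = FormatChecker()

@checker.checks("even", raises=ValueError)
def is_even(value):
    # returning False (or raising ValueError) marks the instance invalid
    return isinstance(value, int) and value % 2 == 0

schema = {"type": "integer", "format": "even"}
validate(4, schema, format_checker=checker)      # passes silently
try:
    validate(3, schema, format_checker=checker)
except ValidationError as error:
    print(error.message)                         # reports the failed "even" format
```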
q22903
|
_generate_legacy_type_checks
|
train
|
def _generate_legacy_type_checks(types=()):
"""
Generate newer-style type checks out of JSON-type-name-to-type mappings.
Arguments:
types (dict):
A mapping of type names to their Python types
Returns:
A dictionary of definitions to pass to `TypeChecker`
"""
types = dict(types)
def gen_type_check(pytypes):
pytypes = _utils.flatten(pytypes)
def type_check(checker, instance):
if isinstance(instance, bool):
if bool not in pytypes:
return False
return isinstance(instance, pytypes)
return type_check
definitions = {}
for typename, pytypes in iteritems(types):
definitions[typename] = gen_type_check(pytypes)
return definitions
|
python
|
{
"resource": ""
}
|
q22904
|
extend
|
train
|
def extend(validator, validators=(), version=None, type_checker=None):
"""
Create a new validator class by extending an existing one.
Arguments:
validator (jsonschema.IValidator):
an existing validator class
validators (collections.Mapping):
a mapping of new validator callables to extend with, whose
structure is as in `create`.
.. note::
Any validator callables with the same name as an existing one
will (silently) replace the old validator callable entirely,
effectively overriding any validation done in the "parent"
validator class.
If you wish to instead extend the behavior of a parent's
validator callable, delegate and call it directly in the new
validator function by retrieving it using
``OldValidator.VALIDATORS["validator_name"]``.
version (str):
a version for the new validator class
type_checker (jsonschema.TypeChecker):
a type checker, used when applying the :validator:`type` validator.
If unprovided, the type checker of the extended
`jsonschema.IValidator` will be carried along.
Returns:
a new `jsonschema.IValidator` class extending the one provided
.. note:: Meta Schemas
The new validator class will have its parent's meta schema.
If you wish to change or extend the meta schema in the new
validator class, modify ``META_SCHEMA`` directly on the returned
class. Note that no implicit copying is done, so a copy should
likely be made before modifying it, in order to not affect the
old validator.
"""
all_validators = dict(validator.VALIDATORS)
all_validators.update(validators)
if type_checker is None:
type_checker = validator.TYPE_CHECKER
elif validator._CREATED_WITH_DEFAULT_TYPES:
raise TypeError(
"Cannot extend a validator created with default_types "
"with a type_checker. Update the validator to use a "
"type_checker when created."
)
return create(
meta_schema=validator.META_SCHEMA,
validators=all_validators,
version=version,
type_checker=type_checker,
id_of=validator.ID_OF,
)
|
python
|
{
"resource": ""
}
|
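For illustration, a sketch that uses ``extend`` to add a hypothetical ``multipleOfThree`` keyword to ``Draft7Validator``; validator callables receive ``(validator, value, instance, schema)`` and yield ``ValidationError``s:

```python
from jsonschema import Draft7Validator, validators
from jsonschema.exceptions import ValidationError

def multiple_of_three(validator, value, instance, schema):
    # only complain when the keyword is truthy and the instance is an int
    if value and isinstance(instance, int) and instance % 3 != 0:
        yield ValidationError("%r is not a multiple of three" % instance)

MyValidator = validators.extend(
    Draft7Validator, validators={"multipleOfThree": multiple_of_three})

v = MyValidator({"multipleOfThree": True})
print([e.message for e in v.iter_errors(10)])   # one error
print(list(v.iter_errors(9)))                   # []
```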
q22905
|
validator_for
|
train
|
def validator_for(schema, default=_LATEST_VERSION):
"""
Retrieve the validator class appropriate for validating the given schema.
Uses the :validator:`$schema` property that should be present in the given
schema to look up the appropriate validator class.
Arguments:
schema (collections.Mapping or bool):
the schema to look at
default:
the default to return if the appropriate validator class cannot be
determined.
If unprovided, the default is to return
the latest supported draft.
"""
if schema is True or schema is False or u"$schema" not in schema:
return default
if schema[u"$schema"] not in meta_schemas:
warn(
(
"The metaschema specified by $schema was not found. "
"Using the latest draft to validate, but this will raise "
"an error in the future."
),
DeprecationWarning,
stacklevel=2,
)
return meta_schemas.get(schema[u"$schema"], _LATEST_VERSION)
|
python
|
{
"resource": ""
}
|
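A quick sketch of ``validator_for`` dispatching on ``$schema`` (here draft-04), then validating with the returned class:

```python
from jsonschema import Draft4Validator
from jsonschema.validators import validator_for

schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "type": "object",
}
cls = validator_for(schema)
print(cls is Draft4Validator)   # True
cls.check_schema(schema)        # validate the schema itself
cls(schema).validate({})        # then validate an instance
```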
q22906
|
RefResolver.resolving
|
train
|
def resolving(self, ref):
"""
Resolve the given ``ref`` and enter its resolution scope.
Exits the scope on exit of this context manager.
Arguments:
ref (str):
The reference to resolve
"""
url, resolved = self.resolve(ref)
self.push_scope(url)
try:
yield resolved
finally:
self.pop_scope()
|
python
|
{
"resource": ""
}
|
q22907
|
ErrorTree.total_errors
|
train
|
def total_errors(self):
"""
The total number of errors in the entire tree, including children.
"""
child_errors = sum(len(tree) for _, tree in iteritems(self._contents))
return len(self.errors) + child_errors
|
python
|
{
"resource": ""
}
|
q22908
|
setup
|
train
|
def setup(app):
"""
Install the plugin.
Arguments:
app (sphinx.application.Sphinx):
the Sphinx application context
"""
app.add_config_value("cache_path", "_cache", "")
try:
os.makedirs(app.config.cache_path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
path = os.path.join(app.config.cache_path, "spec.html")
spec = fetch_or_load(path)
app.add_role("validator", docutils_sucks(spec))
|
python
|
{
"resource": ""
}
|
q22909
|
fetch_or_load
|
train
|
def fetch_or_load(spec_path):
"""
Fetch a new specification or use the cache if it's current.
Arguments:
spec_path:
the path to a cached specification
"""
headers = {}
try:
modified = datetime.utcfromtimestamp(os.path.getmtime(spec_path))
date = modified.strftime("%a, %d %b %Y %I:%M:%S UTC")
headers["If-Modified-Since"] = date
except OSError as error:
if error.errno != errno.ENOENT:
raise
request = urllib.Request(VALIDATION_SPEC, headers=headers)
response = urllib.urlopen(request, cafile=certifi.where())
if response.code == 200:
with open(spec_path, "w+b") as spec:
spec.writelines(response)
spec.seek(0)
return html.parse(spec)
with open(spec_path) as spec:
return html.parse(spec)
|
python
|
{
"resource": ""
}
|
q22910
|
namedAny
|
train
|
def namedAny(name):
"""
Retrieve a Python object by its fully qualified name from the global Python
module namespace. The first part of the name, that describes a module,
will be discovered and imported. Each subsequent part of the name is
treated as the name of an attribute of the object specified by all of the
name which came before it. For example, the fully-qualified name of this
object is 'twisted.python.reflect.namedAny'.
@type name: L{str}
@param name: The name of the object to return.
@raise InvalidName: If the name is an empty string, starts or ends with
a '.', or is otherwise syntactically incorrect.
@raise ModuleNotFound: If the name is syntactically correct but the
module it specifies cannot be imported because it does not appear to
exist.
@raise ObjectNotFound: If the name is syntactically correct, includes at
least one '.', but the module it specifies cannot be imported because
it does not appear to exist.
@raise AttributeError: If an attribute of an object along the way cannot be
accessed, or a module along the way is not found.
@return: the Python object identified by 'name'.
"""
if not name:
raise InvalidName('Empty module name')
names = name.split('.')
# if the name starts or ends with a '.' or contains '..', the __import__
# will raise an 'Empty module name' error. This will provide a better error
# message.
if '' in names:
raise InvalidName(
"name must be a string giving a '.'-separated list of Python "
"identifiers, not %r" % (name,))
topLevelPackage = None
moduleNames = names[:]
while not topLevelPackage:
if moduleNames:
trialname = '.'.join(moduleNames)
try:
topLevelPackage = _importAndCheckStack(trialname)
except _NoModuleFound:
moduleNames.pop()
else:
if len(names) == 1:
raise ModuleNotFound("No module named %r" % (name,))
else:
raise ObjectNotFound('%r does not name an object' % (name,))
obj = topLevelPackage
for n in names[1:]:
obj = getattr(obj, n)
return obj
|
python
|
{
"resource": ""
}
|
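Usage sketch (requires Twisted, where this function ships as ``twisted.python.reflect.namedAny``):

```python
from twisted.python.reflect import namedAny

join = namedAny("os.path.join")   # imports ``os.path``, then walks attributes
print(join("a", "b"))             # 'a/b' on POSIX systems
print(namedAny("math.pi"))        # 3.141592653589793
```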
q22911
|
face_adjacency_unshared
|
train
|
def face_adjacency_unshared(mesh):
"""
Return the vertex index of the two vertices not in the shared
edge between two adjacent faces
Parameters
----------
mesh : Trimesh object
Returns
-----------
vid_unshared : (len(mesh.face_adjacency), 2) int
Indexes of mesh.vertices
"""
# the non-shared vertex index is the same shape as face_adjacency
# just holding vertex indices rather than face indices
vid_unshared = np.zeros_like(mesh.face_adjacency,
dtype=np.int64)
# loop through both columns of face adjacency
for i, adjacency in enumerate(mesh.face_adjacency.T):
# faces from the current column of face adjacency
faces = mesh.faces[adjacency]
shared = np.logical_or(
faces == mesh.face_adjacency_edges[:, 0].reshape((-1, 1)),
faces == mesh.face_adjacency_edges[:, 1].reshape((-1, 1)))
vid_unshared[:, i] = faces[np.logical_not(shared)]
return vid_unshared
|
python
|
{
"resource": ""
}
|
q22912
|
face_adjacency_radius
|
train
|
def face_adjacency_radius(mesh):
"""
Compute an approximate radius between adjacent faces.
Parameters
--------------
mesh : trimesh.Trimesh
Returns
-------------
radii : (len(self.face_adjacency),) float
Approximate radius between faces
Parallel faces will have a value of np.inf
span : (len(self.face_adjacency),) float
Perpendicular projection distance of two
unshared vertices onto the shared edge
"""
# solve for the radius of the adjacent faces
# distance
# R = ------------------
# 2 * sin(theta / 2)
nonzero = mesh.face_adjacency_angles > np.radians(.01)
denominator = np.abs(
2.0 * np.sin(mesh.face_adjacency_angles[nonzero] / 1.0))
# consider the distance between the non-shared vertices of the
# face adjacency pair as the key distance
point_pairs = mesh.vertices[mesh.face_adjacency_unshared]
vectors = np.diff(point_pairs,
axis=1).reshape((-1, 3))
# the vertex indices of the shared edge for the adjacency pair
edges = mesh.face_adjacency_edges
# unit vector along the shared edge
edges_vec = util.unitize(np.diff(mesh.vertices[edges],
axis=1).reshape((-1, 3)))
# the vector of the perpendicular projection to the shared edge
perp = np.subtract(
vectors, (util.diagonal_dot(
vectors, edges_vec).reshape(
(-1, 1)) * edges_vec))
# the length of the perpendicular projection
span = np.linalg.norm(perp, axis=1)
# complete the values for non-infinite radii
radii = np.ones(len(mesh.face_adjacency)) * np.inf
radii[nonzero] = span[nonzero] / denominator
return radii, span
|
python
|
{
"resource": ""
}
|
q22913
|
vertex_adjacency_graph
|
train
|
def vertex_adjacency_graph(mesh):
"""
Returns a networkx graph representing the vertices and
their connections in the mesh.
Parameters
----------
mesh : Trimesh object
Returns
---------
graph : networkx.Graph
Graph representing vertices and edges between
them where vertices are nodes and edges are edges
Examples
----------
This is useful for getting nearby vertices for a given vertex,
potentially for some simple smoothing techniques.
>>> graph = mesh.vertex_adjacency_graph
>>> graph.neighbors(0)
> [1,3,4]
"""
g = nx.Graph()
g.add_edges_from(mesh.edges_unique)
return g
|
python
|
{
"resource": ""
}
|
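The docstring example above, reproduced as a self-contained sketch with ``mesh.edges_unique`` replaced by a small hand-written edge list:

```python
import networkx as nx

# stand-in for mesh.edges_unique: one entry per undirected edge
edges_unique = [(0, 1), (0, 3), (0, 4), (1, 2)]
g = nx.Graph()
g.add_edges_from(edges_unique)
print(sorted(g.neighbors(0)))   # [1, 3, 4]
```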
q22914
|
shared_edges
|
train
|
def shared_edges(faces_a, faces_b):
"""
Given two sets of faces, find the edges which are in both sets.
Parameters
---------
faces_a: (n,3) int, set of faces
faces_b: (m,3) int, set of faces
Returns
---------
shared: (p, 2) int, set of edges
"""
e_a = np.sort(faces_to_edges(faces_a), axis=1)
e_b = np.sort(faces_to_edges(faces_b), axis=1)
shared = grouping.boolean_rows(e_a, e_b, operation=np.intersect1d)
return shared
|
python
|
{
"resource": ""
}
|
q22915
|
connected_edges
|
train
|
def connected_edges(G, nodes):
"""
Given graph G and list of nodes, return the list of edges that
are connected to nodes
"""
nodes_in_G = collections.deque()
for node in nodes:
if not G.has_node(node):
continue
nodes_in_G.extend(nx.node_connected_component(G, node))
edges = G.subgraph(nodes_in_G).edges()
return edges
|
python
|
{
"resource": ""
}
|
q22916
|
facets
|
train
|
def facets(mesh, engine=None):
"""
Find the list of parallel adjacent faces.
Parameters
---------
mesh : trimesh.Trimesh
engine : str
Which graph engine to use:
('scipy', 'networkx', 'graphtool')
Returns
---------
facets : sequence of (n,) int
Groups of face indexes of
parallel adjacent faces.
"""
# what is the radius of a circle that passes through the perpendicular
# projection of the vector between the two non-shared vertices
# onto the shared edge, with the face normal from the two adjacent faces
radii = mesh.face_adjacency_radius
# what is the span perpendicular to the shared edge
span = mesh.face_adjacency_span
# a very arbitrary formula for declaring two adjacent faces
# parallel in a way that is hopefully (and anecdotally) robust
# to numeric error
# a common failure mode is two faces that are very narrow with a slight
# angle between them, so here we divide by the perpendicular span
# to penalize very narrow faces, and then square it just for fun
parallel = np.ones(len(radii), dtype=np.bool)
# if span is zero we know faces are small/parallel
nonzero = np.abs(span) > tol.zero
# faces with a radii/span ratio larger than a threshold pass
parallel[nonzero] = (radii[nonzero] /
span[nonzero]) ** 2 > tol.facet_threshold
# run connected components on the parallel faces to group them
components = connected_components(mesh.face_adjacency[parallel],
nodes=np.arange(len(mesh.faces)),
min_len=2,
engine=engine)
return components
|
python
|
{
"resource": ""
}
|
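A sanity check of the idea using trimesh itself: each square side of a box is two coplanar triangles, so ``mesh.facets`` (which is backed by a function like the one above) should report six groups of two faces.

```python
import trimesh

mesh = trimesh.creation.box()
facets = mesh.facets
print(len(facets))                 # 6
print([len(f) for f in facets])    # [2, 2, 2, 2, 2, 2]
```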
q22917
|
split
|
train
|
def split(mesh,
only_watertight=True,
adjacency=None,
engine=None):
"""
Split a mesh into multiple meshes from face connectivity.
If only_watertight is true, it will only return watertight meshes
and will attempt single triangle/quad repairs.
Parameters
----------
mesh: Trimesh
only_watertight: if True, only return watertight components
adjacency: (n,2) list of face adjacency to override using the plain
adjacency calculated automatically.
engine: str, which engine to use. ('networkx', 'scipy', or 'graphtool')
Returns
----------
meshes: list of Trimesh objects
"""
if adjacency is None:
adjacency = mesh.face_adjacency
# if only watertight the shortest thing we can split has 3 triangles
if only_watertight:
min_len = 3
else:
min_len = 1
components = connected_components(edges=adjacency,
nodes=np.arange(len(mesh.faces)),
min_len=min_len,
engine=engine)
meshes = mesh.submesh(components,
only_watertight=only_watertight)
return meshes
|
python
|
{
"resource": ""
}
|
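A usage sketch: two disjoint spheres concatenated into one mesh split back into two watertight pieces.

```python
import trimesh

a = trimesh.creation.icosphere()
b = trimesh.creation.icosphere()
b.apply_translation([5.0, 0.0, 0.0])
combined = a + b                       # concatenate into a single Trimesh
pieces = combined.split(only_watertight=True)
print(len(pieces))                     # 2
print(all(p.is_watertight for p in pieces))
```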
q22918
|
connected_component_labels
|
train
|
def connected_component_labels(edges, node_count=None):
"""
Label graph nodes from an edge list, using scipy.sparse.csgraph
Parameters
----------
edges : (n, 2) int
Edges of a graph
node_count : int, or None
The largest node in the graph.
Returns
---------
labels : (node_count,) int
Component labels for each node
"""
matrix = edges_to_coo(edges, node_count)
body_count, labels = csgraph.connected_components(
matrix, directed=False)
assert len(labels) == node_count
return labels
|
python
|
{
"resource": ""
}
|
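A self-contained sketch of the same ``scipy.sparse.csgraph`` approach, building the boolean COO adjacency by hand (the role ``edges_to_coo`` plays further down this table):

```python
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components

# a graph with two components: {0, 1, 2} and {3, 4}
edges = np.array([[0, 1], [1, 2], [3, 4]])
count = edges.max() + 1
data = np.ones(len(edges), dtype=bool)
matrix = coo_matrix((data, (edges[:, 0], edges[:, 1])), shape=(count, count))
n_components, labels = connected_components(matrix, directed=False)
print(n_components, labels)   # 2 [0 0 0 1 1]
```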
q22919
|
split_traversal
|
train
|
def split_traversal(traversal,
edges,
edges_hash=None):
"""
Given a traversal as a list of nodes, split the traversal
if a sequential index pair is not in the given edges.
Parameters
--------------
edges : (n, 2) int
Graph edge indexes
traversal : (m,) int
Traversal through edges
edges_hash : (n,)
Edges sorted on axis=1 and
passed to grouping.hashable_rows
Returns
---------------
split : sequence of (p,) int
"""
traversal = np.asanyarray(traversal,
dtype=np.int64)
# hash edge rows for contains checks
if edges_hash is None:
edges_hash = grouping.hashable_rows(
np.sort(edges, axis=1))
# turn the (n,) traversal into (n-1,2) edges
trav_edge = np.column_stack((traversal[:-1],
traversal[1:]))
# hash each edge so we can compare to edge set
trav_hash = grouping.hashable_rows(
np.sort(trav_edge, axis=1))
# check if each edge is contained in edge set
contained = np.in1d(trav_hash, edges_hash)
# exit early if every edge of traversal exists
if contained.all():
# just reshape one traversal
split = [traversal]
else:
# find contiguous groups of contained edges
blocks = grouping.blocks(contained,
min_len=1,
only_nonzero=True)
# turn edges back in to sequence of traversals
split = [np.append(trav_edge[b][:, 0],
trav_edge[b[-1]][1])
for b in blocks]
# close traversals if necessary
for i, t in enumerate(split):
# make sure elements of sequence are numpy arrays
split[i] = np.asanyarray(split[i], dtype=np.int64)
# don't close if its a single edge
if len(t) <= 2:
continue
# make sure it's not already closed
edge = np.sort([t[0], t[-1]])
if edge.ptp() == 0:
continue
close = grouping.hashable_rows(edge.reshape((1, 2)))[0]
# if we need the edge add it
if close in edges_hash:
split[i] = np.append(t, t[0]).astype(np.int64)
result = np.array(split)
return result
|
python
|
{
"resource": ""
}
|
q22920
|
fill_traversals
|
train
|
def fill_traversals(traversals, edges, edges_hash=None):
"""
Convert a traversal of a list of edges into a sequence of
traversals where every pair of consecutive node indexes
is an edge in a passed edge list
Parameters
-------------
traversals : sequence of (m,) int
Node indexes of traversals of a graph
edges : (n, 2) int
Pairs of connected node indexes
edges_hash : None, or (n,) int
Edges sorted along axis 1 then hashed
using grouping.hashable_rows
Returns
--------------
splits : sequence of (p,) int
Node indexes of connected traversals
"""
# make sure edges are correct type
edges = np.asanyarray(edges, dtype=np.int64)
# make sure edges are sorted
edges.sort(axis=1)
# if there are no traversals just return edges
if len(traversals) == 0:
return edges.copy()
# hash edges for contains checks
if edges_hash is None:
edges_hash = grouping.hashable_rows(edges)
splits = []
for nodes in traversals:
# split traversals to remove edges
# that don't actually exist
splits.extend(split_traversal(
traversal=nodes,
edges=edges,
edges_hash=edges_hash))
# turn the split traversals back into (n,2) edges
included = util.vstack_empty([np.column_stack((i[:-1], i[1:]))
for i in splits])
if len(included) > 0:
# sort included edges in place
included.sort(axis=1)
# make sure any edges not included in split traversals
# are just added as a length 2 traversal
splits.extend(grouping.boolean_rows(
edges,
included,
operation=np.setdiff1d))
else:
# no edges were included, so our filled traversal
# is just the original edges copied over
splits = edges.copy()
return splits
|
python
|
{
"resource": ""
}
|
q22921
|
traversals
|
train
|
def traversals(edges, mode='bfs'):
"""
Given an edge list, generate a sequence of ordered
depth first search traversals, using scipy.csgraph routines.
Parameters
------------
edges : (n,2) int, undirected edges of a graph
mode : str, 'bfs', or 'dfs'
Returns
-----------
traversals: (m,) sequence of (p,) int,
ordered DFS or BFS traversals of the graph.
"""
edges = np.asanyarray(edges, dtype=np.int64)
if len(edges) == 0:
return []
elif not util.is_shape(edges, (-1, 2)):
raise ValueError('edges are not (n,2)!')
# pick the traversal method
mode = str(mode).lower().strip()
if mode == 'bfs':
func = csgraph.breadth_first_order
elif mode == 'dfs':
func = csgraph.depth_first_order
else:
raise ValueError('traversal mode must be either dfs or bfs')
# make sure edges are sorted so we can query
# an ordered pair later
edges.sort(axis=1)
# set of nodes to make sure we get every node
nodes = set(edges.reshape(-1))
# coo_matrix for csgraph routines
graph = edges_to_coo(edges)
# we're going to make a sequence of traversals
traversals = []
while len(nodes) > 0:
# starting at any node
start = nodes.pop()
# get an (n,) ordered traversal
ordered = func(graph,
i_start=start,
return_predecessors=False,
directed=False).astype(np.int64)
traversals.append(ordered)
# remove the nodes we've consumed
nodes.difference_update(ordered)
return traversals
|
python
|
{
"resource": ""
}
|
q22922
|
edges_to_coo
|
train
|
def edges_to_coo(edges, count=None, data=None):
"""
Given an edge list, return a boolean scipy.sparse.coo_matrix
representing the edges in matrix form.
Parameters
------------
edges : (n,2) int
Edges of a graph
count : int
The total number of nodes in the graph
if None: count = edges.max() + 1
data : (n,) any
Assign data to each edge, if None will
be bool True for each specified edge
Returns
------------
matrix: (count, count) scipy.sparse.coo_matrix
Sparse COO
"""
edges = np.asanyarray(edges, dtype=np.int64)
if not (len(edges) == 0 or
util.is_shape(edges, (-1, 2))):
raise ValueError('edges must be (n,2)!')
# if count isn't specified just set it to largest
# value referenced in edges
if count is None:
count = edges.max() + 1
count = int(count)
# if no data is specified set every specified edge
# to True
if data is None:
data = np.ones(len(edges), dtype=np.bool)
matrix = coo_matrix((data, edges.T),
dtype=data.dtype,
shape=(count, count))
return matrix
|
python
|
{
"resource": ""
}
|
q22923
|
smoothed
|
train
|
def smoothed(mesh, angle):
"""
Return a non-watertight version of the mesh which will
render nicely with smooth shading by disconnecting faces
at sharp angles to each other.
Parameters
---------
mesh : trimesh.Trimesh
Source geometry
angle : float
Angle in radians, adjacent faces which have normals
below this angle will be smoothed
Returns
---------
smooth : trimesh.Trimesh
Geometry with disconnected face patches
"""
# if the mesh has no adjacent faces return a copy
if len(mesh.face_adjacency) == 0:
return mesh.copy()
# face pairs below angle threshold
angle_ok = mesh.face_adjacency_angles <= angle
# subset of face adjacency
adjacency = mesh.face_adjacency[angle_ok]
# list of connected groups of faces
components = connected_components(adjacency,
min_len=1,
nodes=np.arange(len(mesh.faces)))
# get a submesh as a single appended Trimesh
smooth = mesh.submesh(components,
only_watertight=False,
append=True)
return smooth
|
python
|
{
"resource": ""
}
|
q22924
|
graph_to_svg
|
train
|
def graph_to_svg(graph):
"""
Turn a networkx graph into an SVG string, using graphviz dot.
Parameters
----------
graph: networkx graph
Returns
---------
svg: string, pictoral layout in SVG format
"""
import tempfile
import subprocess
with tempfile.NamedTemporaryFile() as dot_file:
nx.drawing.nx_agraph.write_dot(graph, dot_file.name)
svg = subprocess.check_output(['dot', dot_file.name, '-Tsvg'])
return svg
|
python
|
{
"resource": ""
}
|
q22925
|
multigraph_paths
|
train
|
def multigraph_paths(G, source, cutoff=None):
"""
For a networkx MultiDiGraph, find all paths from a source node
to leaf nodes. This function returns edge instance numbers
in addition to nodes, unlike networkx.all_simple_paths.
Parameters
---------------
G : networkx.MultiDiGraph
Graph to evaluate
source : hashable
Node to start traversal at
cutoff : int
Number of nodes to visit
If None will visit all nodes
Returns
----------
traversals : (n,) list of [(node, edge instance index), ] paths
Traversals of the multigraph
"""
if cutoff is None:
cutoff = (len(G.edges()) * len(G.nodes())) + 1
# the path starts at the node specified
current = [(source, 0)]
# traversals we need to go back and do
queue = []
# completed paths
traversals = []
for i in range(cutoff):
# paths are stored as (node, instance) so
# get the node of the last place visited
current_node = current[-1][0]
# get all the children of the current node
child = G[current_node]
if len(child) == 0:
# we have no children, so we are at the end of this path
# save the path as a completed traversal
traversals.append(current)
# if there is nothing on the queue, we are done
if len(queue) == 0:
break
# otherwise continue traversing with the next path
# on the queue
current = queue.pop()
else:
# oh no, we have multiple edges from current -> child
start = True
# iterate through child nodes and edge instances
for node in child.keys():
for instance in child[node].keys():
if start:
# if this is the first edge, keep it on the
# current traversal and save the others for later
current.append((node, instance))
start = False
else:
# this child has multiple instances
# so we will need to traverse them multiple times
# we appended a node to current, so only take the
# first n-1 visits
queue.append(current[:-1] + [(node, instance)])
return traversals
|
python
|
{
"resource": ""
}
|
q22926
|
multigraph_collect
|
train
|
def multigraph_collect(G, traversal, attrib=None):
"""
Given a MultiDiGraph traversal, collect attributes along it.
Parameters
-------------
G: networkx.MultiDiGraph
traversal: (n) list of (node, instance) tuples
attrib: dict key, name to collect. If None, will return all
Returns
-------------
collected: (len(traversal) - 1) list of attributes
"""
collected = []
for u, v in util.pairwise(traversal):
attribs = G[u[0]][v[0]][v[1]]
if attrib is None:
collected.append(attribs)
else:
collected.append(attribs[attrib])
return collected
|
python
|
{
"resource": ""
}
|
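A small MultiDiGraph with a doubled edge, traced with the two helpers above; this sketch assumes they are importable from ``trimesh.graph`` (they are module-level functions in that file). The second traversal takes edge instance 1 between 'a' and 'b':

```python
import networkx as nx
from trimesh.graph import multigraph_paths, multigraph_collect

G = nx.MultiDiGraph()
G.add_edge('a', 'b', weight=1)   # edge instance 0
G.add_edge('a', 'b', weight=2)   # edge instance 1
G.add_edge('b', 'c', weight=3)

paths = multigraph_paths(G, 'a')
print(paths)
# [[('a', 0), ('b', 0), ('c', 0)], [('a', 0), ('b', 1), ('c', 0)]]
print(multigraph_collect(G, paths[1], attrib='weight'))   # [2, 3]
```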
q22927
|
kwargs_to_matrix
|
train
|
def kwargs_to_matrix(**kwargs):
"""
Turn a set of keyword arguments into a transformation matrix.
"""
matrix = np.eye(4)
if 'matrix' in kwargs:
# a matrix takes precedence over other options
matrix = kwargs['matrix']
elif 'quaternion' in kwargs:
matrix = transformations.quaternion_matrix(kwargs['quaternion'])
elif ('axis' in kwargs) and ('angle' in kwargs):
matrix = transformations.rotation_matrix(kwargs['angle'],
kwargs['axis'])
else:
raise ValueError('Couldn\'t update transform!')
if 'translation' in kwargs:
# translation can be used in conjunction with any of the methods of
# specifying transforms. In the case a matrix and translation are passed,
# we add the translations together rather than picking one.
matrix[0:3, 3] += kwargs['translation']
return matrix
|
python
|
{
"resource": ""
}
|
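The helpers it relies on come from ``trimesh.transformations``; a sketch of building the equivalent matrix by hand, rotating 90 degrees about Z and then translating:

```python
import numpy as np
import trimesh.transformations as tf

matrix = tf.rotation_matrix(np.radians(90), [0, 0, 1])   # axis/angle form
matrix[:3, 3] += [1.0, 0.0, 0.0]                         # add a translation
point = np.array([1.0, 0.0, 0.0, 1.0])                   # homogeneous coordinates
print(np.round(matrix.dot(point), 6))                    # [1. 1. 0. 1.]
```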
q22928
|
TransformForest.update
|
train
|
def update(self, frame_to, frame_from=None, **kwargs):
"""
Update a transform in the tree.
Parameters
---------
frame_from : hashable object
Usually a string (eg 'world').
If left as None it will be set to self.base_frame
frame_to : hashable object
Usually a string (eg 'mesh_0')
matrix : (4,4) float
Homogeneous transformation matrix
quaternion : (4,) float
Quaternion ordered [w, x, y, z]
axis : (3,) float
Axis of rotation
angle : float
Angle of rotation, in radians
translation : (3,) float
Distance to translate
geometry : hashable
Geometry object name, e.g. 'mesh_0'
"""
# save a random number for this update
self._updated = np.random.random()
# if no frame specified, use base frame
if frame_from is None:
frame_from = self.base_frame
# convert various kwargs to a single matrix
matrix = kwargs_to_matrix(**kwargs)
# create the edge attributes
attr = {'matrix': matrix, 'time': time.time()}
# pass through geometry to edge attribute
if 'geometry' in kwargs:
attr['geometry'] = kwargs['geometry']
# add the edges
changed = self.transforms.add_edge(frame_from,
frame_to,
**attr)
# set the node attribute with the geometry information
if 'geometry' in kwargs:
nx.set_node_attributes(
self.transforms,
name='geometry',
values={frame_to: kwargs['geometry']})
# if the edge update changed our structure
# dump our cache of shortest paths
if changed:
self._paths = {}
|
python
|
{
"resource": ""
}
|
q22929
|
TransformForest.md5
|
train
|
def md5(self):
"""
"Hash" of transforms
Returns
-----------
md5 : str
Approximate hash of transforms
"""
result = str(self._updated) + str(self.base_frame)
return result
|
python
|
{
"resource": ""
}
|
q22930
|
TransformForest.copy
|
train
|
def copy(self):
"""
Return a copy of the current TransformForest
Returns
------------
copied: TransformForest
"""
copied = TransformForest()
copied.base_frame = copy.deepcopy(self.base_frame)
copied.transforms = copy.deepcopy(self.transforms)
return copied
|
python
|
{
"resource": ""
}
|
q22931
|
TransformForest.to_flattened
|
train
|
def to_flattened(self, base_frame=None):
"""
Export the current transform graph as a flattened dict.
"""
if base_frame is None:
base_frame = self.base_frame
flat = {}
for node in self.nodes:
if node == base_frame:
continue
transform, geometry = self.get(
frame_to=node, frame_from=base_frame)
flat[node] = {
'transform': transform.tolist(),
'geometry': geometry
}
return flat
|
python
|
{
"resource": ""
}
|
q22932
|
TransformForest.to_gltf
|
train
|
def to_gltf(self, scene):
"""
Export the transforms as the 'nodes' section of a GLTF dict.
Flattens tree.
Returns
--------
gltf : dict
with keys:
'nodes': list of dicts
"""
# geometry is an OrderedDict
# {geometry key : index}
mesh_index = {name: i for i, name
in enumerate(scene.geometry.keys())}
# save the output
gltf = collections.deque([])
for node in self.nodes:
# don't include edge for base frame
if node == self.base_frame:
continue
# get the transform and geometry from the graph
transform, geometry = self.get(
frame_to=node, frame_from=self.base_frame)
gltf.append({
'matrix': transform.T.reshape(-1).tolist(),
'name': node})
# assign geometry if it exists
if geometry is not None:
gltf[-1]['mesh'] = mesh_index[geometry]
if node == scene.camera.name:
gltf[-1]['camera'] = 0
# we have flattened tree, so all nodes will be child of world
gltf.appendleft({
'name': self.base_frame,
'children': list(range(1, 1 + len(gltf)))
})
result = {'nodes': list(gltf)}
return result
|
python
|
{
"resource": ""
}
|
q22933
|
TransformForest.from_edgelist
|
train
|
def from_edgelist(self, edges, strict=True):
"""
Load transform data from an edge list into the current
scene graph.
Parameters
-------------
edges : (n,) tuples
(node_a, node_b, {key: value})
strict : bool
If true, raise a ValueError when a
malformed edge is passed in a tuple.
"""
# loop through each edge
for edge in edges:
# edge contains attributes
if len(edge) == 3:
self.update(edge[1], edge[0], **edge[2])
# edge just contains nodes
elif len(edge) == 2:
self.update(edge[1], edge[0])
# edge is broken
elif strict:
raise ValueError('edge incorrect shape: {}'.format(str(edge)))
|
python
|
{
"resource": ""
}
|
q22934
|
TransformForest.nodes
|
train
|
def nodes(self):
"""
A list of every node in the graph.
Returns
-------------
nodes: (n,) array, of node names
"""
nodes = np.array(list(self.transforms.nodes()))
return nodes
|
python
|
{
"resource": ""
}
|
q22935
|
TransformForest.nodes_geometry
|
train
|
def nodes_geometry(self):
"""
The nodes in the scene graph with geometry attached.
Returns
------------
nodes_geometry: (m,) array, of node names
"""
nodes = np.array([
n for n in self.transforms.nodes()
if 'geometry' in self.transforms.node[n]
])
return nodes
|
python
|
{
"resource": ""
}
|
q22936
|
TransformForest.get
|
train
|
def get(self, frame_to, frame_from=None):
"""
Get the transform from one frame to another, assuming they are connected
in the transform tree.
If the frames are not connected a NetworkXNoPath error will be raised.
Parameters
---------
frame_from: hashable object, usually a string (eg 'world').
If left as None it will be set to self.base_frame
frame_to: hashable object, usually a string (eg 'mesh_0')
Returns
---------
transform: (4,4) float, homogeneous transformation matrix
geometry: str or None, name of any geometry attached to frame_to
"""
if frame_from is None:
frame_from = self.base_frame
cache_key = str(frame_from) + ':' + str(frame_to)
cached = self._cache[cache_key]
if cached is not None:
return cached
transform = np.eye(4)
path = self._get_path(frame_from, frame_to)
for i in range(len(path) - 1):
data, direction = self.transforms.get_edge_data_direction(
path[i], path[i + 1])
matrix = data['matrix']
if direction < 0:
matrix = np.linalg.inv(matrix)
transform = np.dot(transform, matrix)
geometry = None
if 'geometry' in self.transforms.node[frame_to]:
geometry = self.transforms.node[frame_to]['geometry']
self._cache[cache_key] = (transform, geometry)
return transform, geometry
|
python
|
{
"resource": ""
}
|
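In practice this graph is reached through a ``trimesh.Scene``; a rough usage sketch (node and geometry naming details may vary between trimesh versions):

```python
import numpy as np
import trimesh

scene = trimesh.Scene()
matrix = np.eye(4)
matrix[:3, 3] = [1.0, 2.0, 3.0]
# adding geometry with a transform calls graph.update() under the hood
scene.add_geometry(trimesh.creation.box(),
                   node_name='box',
                   transform=matrix)
transform, geometry = scene.graph.get('box')
print(transform[:3, 3])   # [1. 2. 3.]
print(geometry)           # name of the geometry attached to the 'box' node
```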
q22937
|
TransformForest.show
|
train
|
def show(self):
"""
Plot the graph layout of the scene.
"""
import matplotlib.pyplot as plt
nx.draw(self.transforms, with_labels=True)
plt.show()
|
python
|
{
"resource": ""
}
|
q22938
|
TransformForest._get_path
|
train
|
def _get_path(self, frame_from, frame_to):
"""
Find a path between two frames, either from cached paths or
from the transform graph.
Parameters
---------
frame_from: a frame key, usually a string
eg, 'world'
frame_to: a frame key, usually a string
eg, 'mesh_0'
Returns
----------
path: (n) list of frame keys
eg, ['mesh_finger', 'mesh_hand', 'world']
"""
key = (frame_from, frame_to)
if not (key in self._paths):
path = self.transforms.shortest_path_undirected(
frame_from, frame_to)
self._paths[key] = path
return self._paths[key]
|
python
|
{
"resource": ""
}
|
q22939
|
is_ccw
|
train
|
def is_ccw(points):
"""
Check if connected planar points are counterclockwise.
Parameters
-----------
points: (n,2) float, connected points on a plane
Returns
----------
ccw: bool, True if points are counterclockwise
"""
points = np.asanyarray(points, dtype=np.float64)
if (len(points.shape) != 2 or
points.shape[1] != 2):
raise ValueError('CCW is only defined for 2D')
xd = np.diff(points[:, 0])
yd = np.column_stack((
points[:, 1],
points[:, 1])).reshape(-1)[1:-1].reshape((-1, 2)).sum(axis=1)
area = np.sum(xd * yd) * .5
ccw = area < 0
return ccw
|
python
|
{
"resource": ""
}
|
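A quick check of the signed-area logic, assuming the ``is_ccw`` above is in scope: a closed unit square traversed both ways.

```python
import numpy as np

# closed unit square, counterclockwise then reversed
square = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]], dtype=float)
print(is_ccw(square))         # True  (counterclockwise winding)
print(is_ccw(square[::-1]))   # False
```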
q22940
|
concatenate
|
train
|
def concatenate(paths):
"""
Concatenate multiple paths into a single path.
Parameters
-------------
paths: list of Path, Path2D, or Path3D objects
Returns
-------------
concat: Path, Path2D, or Path3D object
"""
# if only one path object just return copy
if len(paths) == 1:
return paths[0].copy()
# length of vertex arrays
vert_len = np.array([len(i.vertices) for i in paths])
# how much to offset each paths vertex indices by
offsets = np.append(0.0, np.cumsum(vert_len))[:-1].astype(np.int64)
# resulting entities
entities = []
# resulting vertices
vertices = []
# resulting metadata
metadata = {}
for path, offset in zip(paths, offsets):
# update metadata
metadata.update(path.metadata)
# copy vertices, we will stack later
vertices.append(path.vertices.copy())
# copy entity then reindex points
for entity in path.entities:
entities.append(entity.copy())
entities[-1].points += offset
# generate the single new concatenated path
# use input types so we don't have circular imports
concat = type(path)(metadata=metadata,
entities=entities,
vertices=np.vstack(vertices))
return concat
|
python
|
{
"resource": ""
}
|
q22941
|
filter_humphrey
|
train
|
def filter_humphrey(mesh,
alpha=0.1,
beta=0.5,
iterations=10,
laplacian_operator=None):
"""
Smooth a mesh in-place using laplacian smoothing
and Humphrey filtering.
Articles
"Improved Laplacian Smoothing of Noisy Surface Meshes"
J. Vollmer, R. Mencl, and H. Muller
Parameters
------------
mesh : trimesh.Trimesh
Mesh to be smoothed in place
alpha : float
Controls shrinkage, range is 0.0 - 1.0
If 0.0, not considered
If 1.0, no smoothing
beta : float
Controls how aggressive smoothing is
If 0.0, no smoothing
If 1.0, full aggressiveness
iterations : int
Number of passes to run filter
laplacian_operator : None or scipy.sparse.coo.coo_matrix
Sparse matrix laplacian operator
Will be autogenerated if None
"""
# if the laplacian operator was not passed create it here
if laplacian_operator is None:
laplacian_operator = laplacian_calculation(mesh)
# get mesh vertices as vanilla numpy array
vertices = mesh.vertices.copy().view(np.ndarray)
# save original unmodified vertices
original = vertices.copy()
# run through iterations of filter
for _index in range(iterations):
vert_q = vertices.copy()
vertices = laplacian_operator.dot(vertices)
vert_b = vertices - (alpha * original + (1.0 - alpha) * vert_q)
vertices -= (beta * vert_b + (1.0 - beta) *
laplacian_operator.dot(vert_b))
# assign modified vertices back to mesh
mesh.vertices = vertices
return mesh
|
python
|
{
"resource": ""
}
|
q22942
|
filter_taubin
|
train
|
def filter_taubin(mesh,
lamb=0.5,
nu=0.5,
iterations=10,
laplacian_operator=None):
"""
Smooth a mesh in-place using laplacian smoothing
and taubin filtering.
Articles
"Improved Laplacian Smoothing of Noisy Surface Meshes"
J. Vollmer, R. Mencl, and H. Muller
Parameters
------------
mesh : trimesh.Trimesh
Mesh to be smoothed in place.
lamb : float
Controls shrinkage, range is 0.0 - 1.0
nu : float
Controls dilation, range is 0.0 - 1.0
lamb and nu should satisfy 0.0 < 1.0/lamb - 1.0/nu < 0.1
iterations : int
Number of passes to run the filter
laplacian_operator : None or scipy.sparse.coo.coo_matrix
Sparse matrix laplacian operator
Will be autogenerated if None
"""
# if the laplacian operator was not passed create it here
if laplacian_operator is None:
laplacian_operator = laplacian_calculation(mesh)
# get mesh vertices as vanilla numpy array
vertices = mesh.vertices.copy().view(np.ndarray)
# run through multiple passes of the filter
for index in range(iterations):
# do a sparse dot product on the vertices
dot = laplacian_operator.dot(vertices) - vertices
# alternate shrinkage and dilation
if index % 2 == 0:
vertices += lamb * dot
else:
vertices -= nu * dot
# assign updated vertices back to mesh
mesh.vertices = vertices
return mesh
|
python
|
{
"resource": ""
}
|
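A usage sketch (these filters live in ``trimesh.smoothing``): perturb a sphere along its vertex normals, then smooth it back in place.

```python
import numpy as np
import trimesh
from trimesh import smoothing

mesh = trimesh.creation.icosphere(subdivisions=3)
noise = (np.random.random((len(mesh.vertices), 1)) - 0.5) * 0.02
mesh.vertices = mesh.vertices + mesh.vertex_normals * noise
smoothing.filter_taubin(mesh, lamb=0.5, nu=0.5, iterations=10)
print(mesh.is_watertight)   # True: smoothing moves vertices, not topology
```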
q22943
|
laplacian_calculation
|
train
|
def laplacian_calculation(mesh, equal_weight=True):
"""
Calculate a sparse matrix for laplacian operations.
Parameters
-------------
mesh : trimesh.Trimesh
Input geometry
equal_weight : bool
If True, all neighbors will be considered equally
If False, all neighbors will be weighted by inverse distance
Returns
----------
laplacian : scipy.sparse.coo.coo_matrix
Laplacian operator
"""
# get the vertex neighbors from the cache
neighbors = mesh.vertex_neighbors
# avoid hitting crc checks in loops
vertices = mesh.vertices.view(np.ndarray)
# stack neighbors to 1D arrays
col = np.concatenate(neighbors)
row = np.concatenate([[i] * len(n)
for i, n in enumerate(neighbors)])
if equal_weight:
# equal weights for each neighbor
data = np.concatenate([[1.0 / len(n)] * len(n)
for n in neighbors])
else:
# umbrella weights, distance-weighted
# use dot product of ones to replace array.sum(axis=1)
ones = np.ones(3)
# the inverse distance from each vertex to its neighbors
norms = [1.0 / np.sqrt(np.dot((vertices[i] - vertices[n]) ** 2, ones))
for i, n in enumerate(neighbors)]
# normalize group and stack into single array
data = np.concatenate([i / i.sum() for i in norms])
# create the sparse matrix
matrix = coo_matrix((data, (row, col)),
shape=[len(vertices)] * 2)
return matrix
|
python
|
{
"resource": ""
}
|
q22944
|
is_circle
|
train
|
def is_circle(points, scale, verbose=False):
"""
Given a set of points, quickly determine if they represent
a circle or not.
Parameters
-------------
points: (n,2) float, points in space
scale: float, scale of overall drawing
verbose: bool, print all fit messages or not
Returns
-------------
control: (3,2) float, points in space, OR
None, if not a circle
"""
# make sure input is a numpy array
points = np.asanyarray(points)
scale = float(scale)
# can only be a circle if the first and last point are the
# same (AKA is a closed path)
if np.linalg.norm(points[0] - points[-1]) > tol.merge:
return None
box = points.ptp(axis=0)
# the bounding box size of the points
# check aspect ratio as an early exit if the path is not a circle
aspect = np.divide(*box)
if np.abs(aspect - 1.0) > tol.aspect_frac:
return None
# fit a circle with tolerance checks
CR = fit_circle_check(points, scale=scale)
if CR is None:
return None
# return the circle as three control points
control = arc.to_threepoint(**CR)
return control
|
python
|
{
"resource": ""
}
|
q22945
|
merge_colinear
|
train
|
def merge_colinear(points, scale):
"""
Given a set of points representing a path in space,
merge points which are colinear.
Parameters
----------
points: (n, 2) float, points in 2D space
scale: float, scale of drawing
Returns
----------
merged: (j, d) set of points with colinear and duplicate
points merged, where (j < n)
"""
points = np.asanyarray(points, dtype=np.float64)
scale = float(scale)
if len(points.shape) != 2 or points.shape[1] != 2:
raise ValueError('only for 2D points!')
# if there's less than 3 points nothing to merge
if len(points) < 3:
return points.copy()
# the vector from one point to the next
direction = points[1:] - points[:-1]
# the length of the direction vector
direction_norm = np.linalg.norm(direction, axis=1)
# make sure points don't have zero length
direction_ok = direction_norm > tol.merge
# remove duplicate points
points = np.vstack((points[0], points[1:][direction_ok]))
direction = direction[direction_ok]
direction_norm = direction_norm[direction_ok]
# create a vector between every other point, then turn it perpendicular
# if we have points A B C D
# and direction vectors A-B, B-C, etc
# these will be perpendicular to the vectors A-C, B-D, etc
perp = (points[2:] - points[:-2]).T[::-1].T
perp_norm = np.linalg.norm(perp, axis=1)
perp_nonzero = perp_norm > tol.merge
perp[perp_nonzero] /= perp_norm[perp_nonzero].reshape((-1, 1))
# find the projection of each direction vector
# onto the perpendicular vector
projection = np.abs(diagonal_dot(perp,
direction[:-1]))
projection_ratio = np.max((projection / direction_norm[1:],
projection / direction_norm[:-1]), axis=0)
mask = np.ones(len(points), dtype=np.bool)
# since we took diff, we need to offset by one
mask[1:-1][projection_ratio < 1e-4 * scale] = False
merged = points[mask]
return merged
|
python
|
{
"resource": ""
}
|
q22946
|
resample_spline
|
train
|
def resample_spline(points, smooth=.001, count=None, degree=3):
"""
Resample a path in space, smoothing along a b-spline.
Parameters
-----------
points: (n, dimension) float, points in space
smooth: float, smoothing amount
count: number of samples in output
degree: int, degree of spline polynomial
Returns
---------
resampled: (count, dimension) float, points in space
"""
from scipy.interpolate import splprep, splev
if count is None:
count = len(points)
points = np.asanyarray(points)
closed = np.linalg.norm(points[0] - points[-1]) < tol.merge
tpl = splprep(points.T, s=smooth, k=degree)[0]
i = np.linspace(0.0, 1.0, count)
resampled = np.column_stack(splev(i, tpl))
if closed:
shared = resampled[[0, -1]].mean(axis=0)
resampled[0] = shared
resampled[-1] = shared
return resampled
|
python
|
{
"resource": ""
}
|
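The scipy calls it wraps, shown standalone: fit a parametric B-spline through some 2D points and sample it at 100 evenly spaced parameters.

```python
import numpy as np
from scipy.interpolate import splprep, splev

theta = np.linspace(0.0, np.pi, 20)
points = np.column_stack((np.cos(theta), np.sin(theta)))   # half circle
tck, _ = splprep(points.T, s=0.001, k=3)
resampled = np.column_stack(splev(np.linspace(0.0, 1.0, 100), tck))
print(resampled.shape)   # (100, 2)
```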
q22947
|
points_to_spline_entity
|
train
|
def points_to_spline_entity(points, smooth=None, count=None):
"""
Create a spline entity from a curve in space
Parameters
-----------
points: (n, dimension) float, points in space
smooth: float, smoothing amount
count: int, number of samples in result
Returns
---------
entity: entities.BSpline object with points indexed at zero
control: (m, dimension) float, new vertices for entity
"""
from scipy.interpolate import splprep
if count is None:
count = len(points)
if smooth is None:
smooth = 0.002
points = np.asanyarray(points, dtype=np.float64)
closed = np.linalg.norm(points[0] - points[-1]) < tol.merge
knots, control, degree = splprep(points.T, s=smooth)[0]
control = np.transpose(control)
index = np.arange(len(control))
if closed:
control[0] = control[[0, -1]].mean(axis=0)
control = control[:-1]
index[-1] = index[0]
entity = entities.BSpline(points=index,
knots=knots,
closed=closed)
return entity, control
|
python
|
{
"resource": ""
}
|
q22948
|
simplify_basic
|
train
|
def simplify_basic(drawing, process=False, **kwargs):
"""
Merge colinear segments and fit circles.
Parameters
-----------
drawing: Path2D object, will not be modified.
Returns
-----------
simplified: Path2D with circles.
"""
if any(i.__class__.__name__ != 'Line'
for i in drawing.entities):
log.debug('Path contains non-linear entities, skipping')
return drawing
# we are going to do some bookkeeping to avoid having
# to recompute everything when simplification is run
cache = copy.deepcopy(drawing._cache)
# store new values
vertices_new = collections.deque()
entities_new = collections.deque()
# avoid thrashing cache in loop
scale = drawing.scale
# loop through (n, 2) closed paths
for discrete in drawing.discrete:
# check to see if the closed entity is a circle
circle = is_circle(discrete,
scale=scale)
if circle is not None:
# the points are circular enough for our high standards
# so replace them with a closed Arc entity
entities_new.append(entities.Arc(points=np.arange(3) +
len(vertices_new),
closed=True))
vertices_new.extend(circle)
else:
# not a circle, so clean up colinear segments
# then save it as a single line entity
points = merge_colinear(discrete, scale=scale)
# references for new vertices
indexes = np.arange(len(points)) + len(vertices_new)
# discrete curves are always closed
indexes[-1] = indexes[0]
# append new vertices and entity
entities_new.append(entities.Line(points=indexes))
vertices_new.extend(points)
# create the new drawing object
simplified = type(drawing)(
entities=entities_new,
vertices=vertices_new,
metadata=copy.deepcopy(drawing.metadata),
process=process)
# we have changed every path to a single closed entity
# either a closed arc, or a closed line
# so all closed paths are now represented by a single entity
cache.cache.update({
'paths': np.arange(len(entities_new)).reshape((-1, 1)),
'path_valid': np.ones(len(entities_new), dtype=np.bool),
'dangling': np.array([])})
# force recompute of exact bounds
if 'bounds' in cache.cache:
cache.cache.pop('bounds')
simplified._cache = cache
# set the cache ID so it won't dump when a value is requested
simplified._cache.id_set()
return simplified
|
python
|
{
"resource": ""
}
|
q22949
|
simplify_spline
|
train
|
def simplify_spline(path, smooth=None, verbose=False):
"""
Replace discrete curves with b-spline or Arc and
return the result as a new Path2D object.
Parameters
------------
path : trimesh.path.Path2D
Input geometry
smooth : float
Distance to smooth
Returns
------------
simplified : Path2D
Consists of Arc and BSpline entities
"""
new_vertices = []
new_entities = []
scale = path.scale
for discrete in path.discrete:
circle = is_circle(discrete,
scale=scale,
verbose=verbose)
if circle is not None:
# the points are circular enough for our high standards
# so replace them with a closed Arc entity
new_entities.append(entities.Arc(points=np.arange(3) +
len(new_vertices),
closed=True))
new_vertices.extend(circle)
continue
# entities for this path
entity, vertices = points_to_spline_entity(discrete, smooth=smooth)
# reindex returned control points
entity.points += len(new_vertices)
# save entity and vertices
new_vertices.extend(vertices)
new_entities.append(entity)
# create the Path2D object for the result
simplified = type(path)(entities=new_entities,
vertices=new_vertices)
return simplified
|
python
|
{
"resource": ""
}
|
q22950
|
boolean
|
train
|
def boolean(meshes, operation='difference'):
"""
Run an operation on a set of meshes
"""
script = operation + '(){'
for i in range(len(meshes)):
script += 'import(\"$mesh_' + str(i) + '\");'
script += '}'
return interface_scad(meshes, script)
|
python
|
{
"resource": ""
}
|
q22951
|
parse_mtl
|
train
|
def parse_mtl(mtl):
"""
Parse a loaded MTL file.
Parameters
-------------
mtl : str or bytes
Data from an MTL file
Returns
------------
mtllibs : list of dict
Each dict has keys: newmtl, map_Kd, Kd
"""
# decode bytes if necessary
if hasattr(mtl, 'decode'):
mtl = mtl.decode('utf-8')
mtllib = None
mtllibs = []
# use universal newline splitting
for line in str.splitlines(str(mtl).strip()):
# clean leading/trailing whitespace and split
line_split = line.strip().split()
# needs to be at least two values
if len(line_split) <= 1:
continue
# store the keys
key = line_split[0]
if key == 'newmtl':
if mtllib:
mtllibs.append(mtllib)
mtllib = {'newmtl': line_split[1],
'map_Kd': None,
'Kd': None}
elif key == 'map_Kd':
mtllib[key] = line_split[1]
elif key == 'Kd':
mtllib[key] = [float(x) for x in line_split[1:]]
if mtllib:
mtllibs.append(mtllib)
return mtllibs
|
python
|
{
"resource": ""
}
|
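Fed a minimal material definition, the parser above returns one dict per ``newmtl`` block (sketch assumes ``parse_mtl`` is in scope):

```python
mtl_text = """
newmtl material_0
Kd 0.8 0.2 0.2
map_Kd texture.png
"""
print(parse_mtl(mtl_text))
# [{'newmtl': 'material_0', 'map_Kd': 'texture.png', 'Kd': [0.8, 0.2, 0.2]}]
```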
q22952
|
export_wavefront
|
train
|
def export_wavefront(mesh,
include_normals=True,
include_texture=True):
"""
Export a mesh as a Wavefront OBJ file
Parameters
-----------
mesh: Trimesh object
Returns
-----------
export: str, string of OBJ format output
"""
# store the multiple options for formatting
# a vertex index for a face
face_formats = {('v',): '{}',
('v', 'vn'): '{}//{}',
('v', 'vt'): '{}/{}',
('v', 'vn', 'vt'): '{}/{}/{}'}
# we are going to reference face_formats with this
face_type = ['v']
export = 'v '
export += util.array_to_string(mesh.vertices,
col_delim=' ',
row_delim='\nv ',
digits=8) + '\n'
if include_normals and 'vertex_normals' in mesh._cache:
# if vertex normals are stored in cache export them
# these will have been autogenerated if they have ever been called
face_type.append('vn')
export += 'vn '
export += util.array_to_string(mesh.vertex_normals,
col_delim=' ',
row_delim='\nvn ',
digits=8) + '\n'
if (include_texture and
'vertex_texture' in mesh.metadata and
len(mesh.metadata['vertex_texture']) == len(mesh.vertices)):
# if vertex texture exists and is the right shape export here
face_type.append('vt')
export += 'vt '
export += util.array_to_string(mesh.metadata['vertex_texture'],
col_delim=' ',
row_delim='\nvt ',
digits=8) + '\n'
# the format for a single vertex reference of a face
face_format = face_formats[tuple(face_type)]
faces = 'f ' + util.array_to_string(mesh.faces + 1,
col_delim=' ',
row_delim='\nf ',
value_format=face_format)
# add the exported faces to the export
export += faces
return export
|
python
|
{
"resource": ""
}
|
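In trimesh this exporter is reached through ``Trimesh.export``; a minimal check that the OBJ text comes back with vertex and face records:

```python
import trimesh

mesh = trimesh.creation.box()
obj_text = mesh.export(file_type='obj')
# the OBJ text contains vertex ('v ') and face ('f ') records
print('v ' in obj_text and 'f ' in obj_text)   # True
```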
q22953
|
RayMeshIntersector._scale
|
train
|
def _scale(self):
"""
Scaling factor for precision.
"""
if self._scale_to_box:
# scale vertices to approximately a cube to help with
# numerical issues at very large/small scales
scale = 100.0 / self.mesh.scale
else:
scale = 1.0
return scale
|
python
|
{
"resource": ""
}
|
q22954
|
RayMeshIntersector._scene
|
train
|
def _scene(self):
"""
A cached version of the pyembree scene.
"""
return _EmbreeWrap(vertices=self.mesh.vertices,
faces=self.mesh.faces,
scale=self._scale)
|
python
|
{
"resource": ""
}
|
q22955
|
RayMeshIntersector.intersects_location
|
train
|
def intersects_location(self,
ray_origins,
ray_directions,
multiple_hits=True):
"""
Return the location of where a ray hits a surface.
Parameters
----------
ray_origins: (n,3) float, origins of rays
ray_directions: (n,3) float, direction (vector) of rays
Returns
---------
locations: (n) sequence of (m,3) intersection points
index_ray: (n,) int, list of ray index
index_tri: (n,) int, list of triangle (face) indexes
"""
(index_tri,
index_ray,
locations) = self.intersects_id(
ray_origins=ray_origins,
ray_directions=ray_directions,
multiple_hits=multiple_hits,
return_locations=True)
return locations, index_ray, index_tri
|
python
|
{
"resource": ""
}
|
q22956
|
RayMeshIntersector.intersects_id
|
train
|
def intersects_id(self,
ray_origins,
ray_directions,
multiple_hits=True,
max_hits=20,
return_locations=False):
"""
Find the triangles hit by a list of rays, including
optionally multiple hits along a single ray.
Parameters
----------
ray_origins: (n,3) float, origins of rays
ray_directions: (n,3) float, direction (vector) of rays
multiple_hits: bool, if True will return every hit along the ray
if False will only return first hit
return_locations: bool, should we return hit locations or not
Returns
----------
index_tri: (m,) int, index of triangle the ray hit
index_ray: (m,) int, index of ray
locations: (m,3) float, locations in space
"""
# make sure input is _dtype for embree
ray_origins = np.asanyarray(
deepcopy(ray_origins),
dtype=np.float64)
ray_directions = np.asanyarray(ray_directions,
dtype=np.float64)
ray_directions = util.unitize(ray_directions)
# since we are constructing all hits, save them to a deque then
# stack into (depth, len(rays)) at the end
result_triangle = deque()
result_ray_idx = deque()
result_locations = deque()
# the mask for which rays are still active
current = np.ones(len(ray_origins), dtype=np.bool)
if multiple_hits or return_locations:
# how much to offset ray to transport to the other side of face
distance = np.clip(_ray_offset_factor * self._scale,
_ray_offset_floor,
np.inf)
ray_offsets = ray_directions * distance
# grab the planes from triangles
plane_origins = self.mesh.triangles[:, 0, :]
plane_normals = self.mesh.face_normals
# use a for loop rather than a while to ensure this exits
# if a ray is offset from a triangle and then is reported
# hitting itself this could get stuck on that one triangle
for query_depth in range(max_hits):
# run the pyembree query
# if you set output=1 it will calculate distance along
# ray, which is bizarrely slower than our calculation
query = self._scene.run(
ray_origins[current],
ray_directions[current])
# basically we need to reduce the rays to the ones that hit
# something
hit = query != -1
# which triangle indexes were hit
hit_triangle = query[hit]
# eliminate rays that didn't hit anything from future queries
current_index = np.nonzero(current)[0]
current_index_no_hit = current_index[np.logical_not(hit)]
current_index_hit = current_index[hit]
current[current_index_no_hit] = False
# append the triangle and ray index to the results
result_triangle.append(hit_triangle)
result_ray_idx.append(current_index_hit)
# if we don't need all of the hits, return the first one
if ((not multiple_hits and
not return_locations) or
not hit.any()):
break
# find the location of where the ray hit the triangle plane
new_origins, valid = intersections.planes_lines(
plane_origins=plane_origins[hit_triangle],
plane_normals=plane_normals[hit_triangle],
line_origins=ray_origins[current],
line_directions=ray_directions[current])
if not valid.all():
# since a plane intersection was invalid we have to go back and
# fix some stuff, we pop the ray index and triangle index,
# apply the valid mask then append it right back to keep our
# indexes intact
result_ray_idx.append(result_ray_idx.pop()[valid])
result_triangle.append(result_triangle.pop()[valid])
# update the current rays to reflect that we couldn't find a
# new origin
current[current_index_hit[np.logical_not(valid)]] = False
# since we had to find the intersection point anyway we save it
# even if we're not going to return it
result_locations.extend(new_origins)
if multiple_hits:
# move the ray origin to the other side of the triangle
ray_origins[current] = new_origins + ray_offsets[current]
else:
break
# stack the deques into nice 1D numpy arrays
index_tri = np.hstack(result_triangle)
index_ray = np.hstack(result_ray_idx)
if return_locations:
locations = np.array(result_locations)
return index_tri, index_ray, locations
return index_tri, index_ray
|
python
|
{
"resource": ""
}
|
q22957
|
RayMeshIntersector.intersects_first
|
train
|
def intersects_first(self,
ray_origins,
ray_directions):
"""
Find the index of the first triangle a ray hits.
Parameters
----------
ray_origins: (n,3) float, origins of rays
ray_directions: (n,3) float, direction (vector) of rays
Returns
----------
triangle_index: (n,) int, index of triangle ray hit, or -1 if not hit
"""
ray_origins = np.asanyarray(deepcopy(ray_origins))
ray_directions = np.asanyarray(ray_directions)
triangle_index = self._scene.run(ray_origins,
ray_directions)
return triangle_index
|
python
|
{
"resource": ""
}
|
q22958
|
RayMeshIntersector.intersects_any
|
train
|
def intersects_any(self,
ray_origins,
ray_directions):
"""
Check if a list of rays hits the surface.
Parameters
----------
ray_origins: (n,3) float, origins of rays
ray_directions: (n,3) float, direction (vector) of rays
Returns
----------
hit: (n,) bool, did each ray hit the surface
"""
first = self.intersects_first(ray_origins=ray_origins,
ray_directions=ray_directions)
hit = first != -1
return hit
|
python
|
{
"resource": ""
}
|
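A usage sketch of the intersector API above through ``mesh.ray`` (pyembree-backed when available, otherwise a pure-Python fallback with the same interface):

```python
import numpy as np
import trimesh

mesh = trimesh.creation.icosphere()           # unit sphere at the origin
origins = np.array([[0.0, 0.0, -5.0],
                    [10.0, 10.0, -5.0]])
directions = np.array([[0.0, 0.0, 1.0],
                       [0.0, 0.0, 1.0]])
print(mesh.ray.intersects_any(origins, directions))   # [ True False]
locations, index_ray, index_tri = mesh.ray.intersects_location(
    origins, directions)
print(index_ray)   # the hits (entry and exit) all belong to ray 0
```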
q22959
|
_attrib_to_transform
|
train
|
def _attrib_to_transform(attrib):
"""
Extract a homogeneous transform from a dictionary.
Parameters
------------
attrib: dict, optionally containing 'transform'
Returns
------------
    transform: (4, 4) float, homogeneous transformation
"""
transform = np.eye(4, dtype=np.float64)
if 'transform' in attrib:
# wangle their transform format
values = np.array(
attrib['transform'].split(),
dtype=np.float64).reshape((4, 3)).T
transform[:3, :4] = values
return transform
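
# Usage sketch, not part of the source: the attribute string is assumed to
# hold twelve space-separated floats, nine rotation terms followed by a
# translation, in the 3MF-style column order this parser expects.
import numpy as np

attrib = {'transform': '1 0 0 0 1 0 0 0 1 10 20 30'}
matrix = _attrib_to_transform(attrib)
# identity rotation with the translation (10, 20, 30) in the last column
assert np.allclose(matrix[:3, :3], np.eye(3))
assert np.allclose(matrix[:3, 3], [10.0, 20.0, 30.0])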
|
python
|
{
"resource": ""
}
|
q22960
|
check
|
train
|
def check(a, b, digits):
"""
Check input ranges, convert them to vector form,
and get a fixed precision integer version of them.
Parameters
--------------
    a : (2, ) or (n, 2) float
Start and end of a 1D interval
    b : (2, ) or (n, 2) float
Start and end of a 1D interval
digits : int
How many digits to consider
Returns
--------------
    a : (n, 2) float
Ranges as vector
    b : (n, 2) float
Ranges as vector
    a_int : (n, 2) int64
Ranges rounded to digits, as vector
    b_int : (n, 2) int64
Ranges rounded to digits, as vector
is_1D : bool
If True, input was single pair of ranges
"""
a = np.array(a, dtype=np.float64)
b = np.array(b, dtype=np.float64)
if a.shape != b.shape or a.shape[-1] != 2:
        raise ValueError('ranges must match and have shape (2,) or (n, 2)!')
# if input was single interval reshape it here
is_1D = False
if len(a.shape) == 1:
a = a.reshape((-1, 2))
b = b.reshape((-1, 2))
is_1D = True
# make sure ranges are sorted
a.sort(axis=1)
b.sort(axis=1)
# compare in fixed point as integers
a_int = (a * 10**digits).round().astype(np.int64)
b_int = (b * 10**digits).round().astype(np.int64)
return a, b, a_int, b_int, is_1D
|
python
|
{
"resource": ""
}
|
q22961
|
intersection
|
train
|
def intersection(a, b, digits=8):
"""
    Given a pair of ranges (or arrays of ranges), find
    the overlapping region of each pair if they overlap at all
Parameters
--------------
    a : (2, ) or (n, 2) float
Start and end of a 1D interval
    b : (2, ) or (n, 2) float
Start and end of a 1D interval
digits : int
How many digits to consider
Returns
--------------
intersects : bool or (n,) bool
Indicates if the ranges overlap at all
    new_range : (2, ) or (n, 2) float
      The overlapping portion of the two input ranges,
      or zeros where the ranges do not overlap
"""
# check shape and convert
a, b, a_int, b_int, is_1D = check(a, b, digits)
# what are the starting and ending points of the overlap
overlap = np.zeros(a.shape, dtype=np.float64)
# A fully overlaps B
current = np.logical_and(a_int[:, 0] <= b_int[:, 0],
a_int[:, 1] >= b_int[:, 1])
overlap[current] = b[current]
# B fully overlaps A
current = np.logical_and(a_int[:, 0] >= b_int[:, 0],
a_int[:, 1] <= b_int[:, 1])
overlap[current] = a[current]
# A starts B ends
# A:, 0 B:, 0 A:, 1 B:, 1
current = np.logical_and(
np.logical_and(a_int[:, 0] <= b_int[:, 0],
b_int[:, 0] < a_int[:, 1]),
a_int[:, 1] < b_int[:, 1])
overlap[current] = np.column_stack([b[current][:, 0],
a[current][:, 1]])
# B starts A ends
# B:, 0 A:, 0 B:, 1 A:, 1
current = np.logical_and(
np.logical_and(b_int[:, 0] <= a_int[:, 0],
a_int[:, 0] < b_int[:, 1]),
b_int[:, 1] < a_int[:, 1])
overlap[current] = np.column_stack([a[current][:, 0],
b[current][:, 1]])
# is range overlapping at all
intersects = overlap.ptp(axis=1) > 10**-digits
if is_1D:
return intersects[0], overlap[0]
return intersects, overlap
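
# Usage sketch, not part of the source: two ranges that share the interval
# [1, 2]; non-overlapping inputs return False and a zero-width range.
import numpy as np

hit, new_range = intersection([0.0, 2.0], [1.0, 3.0])
assert hit
assert np.allclose(new_range, [1.0, 2.0])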
|
python
|
{
"resource": ""
}
|
q22962
|
geometry_hash
|
train
|
def geometry_hash(geometry):
"""
Get an MD5 for a geometry object
Parameters
------------
geometry : object
Returns
------------
MD5 : str
"""
if hasattr(geometry, 'md5'):
# for most of our trimesh objects
md5 = geometry.md5()
elif hasattr(geometry, 'tostring'):
# for unwrapped ndarray objects
md5 = str(hash(geometry.tostring()))
if hasattr(geometry, 'visual'):
# if visual properties are defined
md5 += str(geometry.visual.crc())
return md5
|
python
|
{
"resource": ""
}
|
q22963
|
render_scene
|
train
|
def render_scene(scene,
resolution=(1080, 1080),
visible=True,
**kwargs):
"""
Render a preview of a scene to a PNG.
Parameters
------------
scene : trimesh.Scene
Geometry to be rendered
resolution : (2,) int
Resolution in pixels
kwargs : **
Passed to SceneViewer
Returns
---------
render : bytes
Image in PNG format
"""
    # if visible isn't specified, only show a window
    # on platforms other than Linux (which is more
    # likely to be running headless)
    if visible is None:
        visible = platform.system() != 'Linux'
    window = SceneViewer(scene,
                         start_loop=False,
                         visible=visible,
                         resolution=resolution,
                         **kwargs)
# need to run loop twice to display anything
for i in range(2):
pyglet.clock.tick()
window.switch_to()
window.dispatch_events()
window.dispatch_event('on_draw')
window.flip()
from ..util import BytesIO
# save the color buffer data to memory
file_obj = BytesIO()
window.save_image(file_obj)
file_obj.seek(0)
render = file_obj.read()
window.close()
return render
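
# Usage sketch, not part of the source: render a simple scene to PNG bytes.
# Assumes pyglet and a working OpenGL context; on a headless machine this is
# expected to fail.
import trimesh

scene = trimesh.Scene(trimesh.creation.box())
png = render_scene(scene, resolution=(640, 480))
with open('preview.png', 'wb') as handle:
    handle.write(png)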
|
python
|
{
"resource": ""
}
|
q22964
|
SceneViewer.add_geometry
|
train
|
def add_geometry(self, name, geometry, **kwargs):
"""
Add a geometry to the viewer.
Parameters
--------------
name : hashable
Name that references geometry
geometry : Trimesh, Path2D, Path3D, PointCloud
Geometry to display in the viewer window
kwargs **
Passed to rendering.convert_to_vertexlist
"""
# convert geometry to constructor args
args = rendering.convert_to_vertexlist(geometry, **kwargs)
# create the indexed vertex list
self.vertex_list[name] = self.batch.add_indexed(*args)
# save the MD5 of the geometry
self.vertex_list_hash[name] = geometry_hash(geometry)
# save the rendering mode from the constructor args
self.vertex_list_mode[name] = args[1]
# if a geometry has a texture defined convert it to opengl form and
# save
if hasattr(geometry, 'visual') and hasattr(
geometry.visual, 'material'):
tex = rendering.material_to_texture(geometry.visual.material)
if tex is not None:
self.textures[name] = tex
|
python
|
{
"resource": ""
}
|
q22965
|
SceneViewer.reset_view
|
train
|
def reset_view(self, flags=None):
"""
Set view to the default view.
Parameters
--------------
flags : None or dict
If any view key passed override the default
e.g. {'cull': False}
"""
self.view = {
'cull': True,
'axis': False,
'fullscreen': False,
'wireframe': False,
'ball': Trackball(
pose=self._initial_camera_transform,
size=self.scene.camera.resolution,
scale=self.scene.scale,
target=self.scene.centroid,
),
}
try:
# if any flags are passed override defaults
if isinstance(flags, dict):
for k, v in flags.items():
if k in self.view:
self.view[k] = v
self.update_flags()
except BaseException:
pass
|
python
|
{
"resource": ""
}
|
q22966
|
SceneViewer.init_gl
|
train
|
def init_gl(self):
"""
Perform the magic incantations to create an
OpenGL scene using pyglet.
"""
# default background color is white-ish
background = [.99, .99, .99, 1.0]
# if user passed a background color use it
if 'background' in self.kwargs:
try:
# convert to (4,) uint8 RGBA
background = to_rgba(self.kwargs['background'])
# convert to 0.0 - 1.0 float
background = background.astype(np.float64) / 255.0
except BaseException:
log.error('background color set but wrong!',
exc_info=True)
self._gl_set_background(background)
self._gl_enable_depth(self.scene)
self._gl_enable_color_material()
self._gl_enable_blending()
self._gl_enable_smooth_lines()
self._gl_enable_lighting(self.scene)
|
python
|
{
"resource": ""
}
|
q22967
|
SceneViewer._gl_enable_lighting
|
train
|
def _gl_enable_lighting(scene):
"""
Take the lights defined in scene.lights and
apply them as openGL lights.
"""
gl.glEnable(gl.GL_LIGHTING)
# opengl only supports 7 lights?
for i, light in enumerate(scene.lights[:7]):
# the index of which light we have
            lightN = getattr(gl, 'GL_LIGHT{}'.format(i))
# get the transform for the light by name
matrix = scene.graph[light.name][0]
# convert light object to glLightfv calls
multiargs = rendering.light_to_gl(
light=light,
transform=matrix,
lightN=lightN)
# enable the light in question
gl.glEnable(lightN)
# run the glLightfv calls
for args in multiargs:
gl.glLightfv(*args)
|
python
|
{
"resource": ""
}
|
q22968
|
SceneViewer.update_flags
|
train
|
def update_flags(self):
"""
Check the view flags, and call required GL functions.
"""
        # view mode, filled vs wireframe
if self.view['wireframe']:
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
else:
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
# set fullscreen or windowed
self.set_fullscreen(fullscreen=self.view['fullscreen'])
# backface culling on or off
if self.view['cull']:
gl.glEnable(gl.GL_CULL_FACE)
else:
gl.glDisable(gl.GL_CULL_FACE)
# case where we WANT an axis and NO vertexlist
# is stored internally
if self.view['axis'] and self._axis is None:
from .. import creation
# create an axis marker sized relative to the scene
axis = creation.axis(origin_size=self.scene.scale / 100)
# create ordered args for a vertex list
args = rendering.mesh_to_vertexlist(axis)
# store the axis as a reference
self._axis = self.batch.add_indexed(*args)
# case where we DON'T want an axis but a vertexlist
# IS stored internally
elif not self.view['axis'] and self._axis is not None:
# remove the axis from the rendering batch
self._axis.delete()
# set the reference to None
self._axis = None
|
python
|
{
"resource": ""
}
|
q22969
|
SceneViewer.on_resize
|
train
|
def on_resize(self, width, height):
"""
Handle resized windows.
"""
width, height = self._update_perspective(width, height)
self.scene.camera.resolution = (width, height)
self.view['ball'].resize(self.scene.camera.resolution)
self.scene.camera.transform = self.view['ball'].pose
|
python
|
{
"resource": ""
}
|
q22970
|
SceneViewer.on_mouse_press
|
train
|
def on_mouse_press(self, x, y, buttons, modifiers):
"""
Set the start point of the drag.
"""
self.view['ball'].set_state(Trackball.STATE_ROTATE)
if (buttons == pyglet.window.mouse.LEFT):
ctrl = (modifiers & pyglet.window.key.MOD_CTRL)
shift = (modifiers & pyglet.window.key.MOD_SHIFT)
if (ctrl and shift):
self.view['ball'].set_state(Trackball.STATE_ZOOM)
elif shift:
self.view['ball'].set_state(Trackball.STATE_ROLL)
elif ctrl:
self.view['ball'].set_state(Trackball.STATE_PAN)
elif (buttons == pyglet.window.mouse.MIDDLE):
self.view['ball'].set_state(Trackball.STATE_PAN)
elif (buttons == pyglet.window.mouse.RIGHT):
self.view['ball'].set_state(Trackball.STATE_ZOOM)
self.view['ball'].down(np.array([x, y]))
self.scene.camera.transform = self.view['ball'].pose
|
python
|
{
"resource": ""
}
|
q22971
|
SceneViewer.on_mouse_drag
|
train
|
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
"""
Pan or rotate the view.
"""
self.view['ball'].drag(np.array([x, y]))
self.scene.camera.transform = self.view['ball'].pose
|
python
|
{
"resource": ""
}
|
q22972
|
SceneViewer.on_mouse_scroll
|
train
|
def on_mouse_scroll(self, x, y, dx, dy):
"""
Zoom the view.
"""
self.view['ball'].scroll(dy)
self.scene.camera.transform = self.view['ball'].pose
|
python
|
{
"resource": ""
}
|
q22973
|
SceneViewer.on_key_press
|
train
|
def on_key_press(self, symbol, modifiers):
"""
Call appropriate functions given key presses.
"""
magnitude = 10
if symbol == pyglet.window.key.W:
self.toggle_wireframe()
elif symbol == pyglet.window.key.Z:
self.reset_view()
elif symbol == pyglet.window.key.C:
self.toggle_culling()
elif symbol == pyglet.window.key.A:
self.toggle_axis()
elif symbol == pyglet.window.key.Q:
self.on_close()
elif symbol == pyglet.window.key.M:
self.maximize()
elif symbol == pyglet.window.key.F:
self.toggle_fullscreen()
if symbol in [
pyglet.window.key.LEFT,
pyglet.window.key.RIGHT,
pyglet.window.key.DOWN,
pyglet.window.key.UP,
]:
self.view['ball'].down([0, 0])
if symbol == pyglet.window.key.LEFT:
self.view['ball'].drag([-magnitude, 0])
elif symbol == pyglet.window.key.RIGHT:
self.view['ball'].drag([magnitude, 0])
elif symbol == pyglet.window.key.DOWN:
self.view['ball'].drag([0, -magnitude])
elif symbol == pyglet.window.key.UP:
self.view['ball'].drag([0, magnitude])
self.scene.camera.transform = self.view['ball'].pose
|
python
|
{
"resource": ""
}
|
q22974
|
SceneViewer.on_draw
|
train
|
def on_draw(self):
"""
Run the actual draw calls.
"""
self._update_meshes()
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glLoadIdentity()
# pull the new camera transform from the scene
transform_camera = self.scene.graph.get(
frame_to='world',
frame_from=self.scene.camera.name)[0]
# apply the camera transform to the matrix stack
gl.glMultMatrixf(rendering.matrix_to_gl(transform_camera))
# we want to render fully opaque objects first,
# followed by objects which have transparency
node_names = collections.deque(self.scene.graph.nodes_geometry)
# how many nodes did we start with
count_original = len(node_names)
count = -1
# if we are rendering an axis marker at the world
if self._axis:
# we stored it as a vertex list
self._axis.draw(mode=gl.GL_TRIANGLES)
while len(node_names) > 0:
count += 1
current_node = node_names.popleft()
# get the transform from world to geometry and mesh name
transform, geometry_name = self.scene.graph[current_node]
# if no geometry at this frame continue without rendering
if geometry_name is None:
continue
# if a geometry is marked as fixed apply the inverse view transform
if self.fixed is not None and geometry_name in self.fixed:
# remove altered camera transform from fixed geometry
transform_fix = np.linalg.inv(
np.dot(self._initial_camera_transform, transform_camera))
# apply the transform so the fixed geometry doesn't move
transform = np.dot(transform, transform_fix)
# get a reference to the mesh so we can check transparency
mesh = self.scene.geometry[geometry_name]
if mesh.is_empty:
continue
# add a new matrix to the model stack
gl.glPushMatrix()
# transform by the nodes transform
gl.glMultMatrixf(rendering.matrix_to_gl(transform))
# draw an axis marker for each mesh frame
if self.view['axis'] == 'all':
self._axis.draw(mode=gl.GL_TRIANGLES)
# transparent things must be drawn last
if (hasattr(mesh, 'visual') and
hasattr(mesh.visual, 'transparency')
and mesh.visual.transparency):
# put the current item onto the back of the queue
if count < count_original:
# add the node to be drawn last
node_names.append(current_node)
# pop the matrix stack for now
gl.glPopMatrix()
# come back to this mesh later
continue
# if we have texture enable the target texture
texture = None
if geometry_name in self.textures:
texture = self.textures[geometry_name]
gl.glEnable(texture.target)
gl.glBindTexture(texture.target, texture.id)
# get the mode of the current geometry
mode = self.vertex_list_mode[geometry_name]
# draw the mesh with its transform applied
self.vertex_list[geometry_name].draw(mode=mode)
# pop the matrix stack as we drew what we needed to draw
gl.glPopMatrix()
# disable texture after using
if texture is not None:
gl.glDisable(texture.target)
|
python
|
{
"resource": ""
}
|
q22975
|
SceneViewer.save_image
|
train
|
def save_image(self, file_obj):
"""
Save the current color buffer to a file object
in PNG format.
Parameters
-------------
        file_obj: file name, or file-like object
"""
manager = pyglet.image.get_buffer_manager()
colorbuffer = manager.get_color_buffer()
        # if passed an object with a write method use it,
        # otherwise treat the input as a filename
if hasattr(file_obj, 'write'):
colorbuffer.save(file=file_obj)
else:
colorbuffer.save(filename=file_obj)
|
python
|
{
"resource": ""
}
|
q22976
|
unit_conversion
|
train
|
def unit_conversion(current, desired):
"""
Calculate the conversion from one set of units to another.
Parameters
---------
current : str
Unit system values are in now (eg 'millimeters')
desired : str
Unit system we'd like values in (eg 'inches')
Returns
---------
conversion : float
Number to multiply by to put values into desired units
"""
current = str(current).strip().lower()
desired = str(desired).strip().lower()
conversion = TO_INCH[current] / TO_INCH[desired]
return conversion
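
# Usage sketch, not part of the source: converting millimeters to inches
# should give a factor of 1 / 25.4, so multiplying millimeter values by the
# result yields inches.
import numpy as np

factor = unit_conversion('millimeters', 'inches')
assert np.isclose(factor, 1.0 / 25.4)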
|
python
|
{
"resource": ""
}
|
q22977
|
units_from_metadata
|
train
|
def units_from_metadata(obj, guess=True):
"""
Try to extract hints from metadata and if that fails
guess based on the object scale.
Parameters
------------
obj: object
Has attributes 'metadata' (dict) and 'scale' (float)
guess : bool
If metadata doesn't indicate units, guess from scale
Returns
------------
units: str
A guess of what the units might be
"""
# try to guess from metadata
for key in ['file_name', 'name']:
if key not in obj.metadata:
continue
# get the string which might contain unit hints
hints = obj.metadata[key].lower()
if 'unit' in hints:
# replace all delimiter options with white space
for delim in '_-.':
hints = hints.replace(delim, ' ')
# loop through each hint
for hint in hints.strip().split():
# key word is "unit" or "units"
if 'unit' not in hint:
continue
# get rid of keyword and whitespace
hint = hint.replace(
'units', '').replace(
'unit', '').strip()
# if the hint is a valid unit return it
if hint in TO_INCH:
return hint
if not guess:
raise ValueError('no units and not allowed to guess')
# we made it to the wild ass guess section
# if the scale is larger than 100 mystery units
# declare the model to be millimeters, otherwise inches
log.warning('no units: guessing from scale')
if float(obj.scale) > 100.0:
return 'millimeters'
else:
return 'inches'
|
python
|
{
"resource": ""
}
|
q22978
|
_convert_units
|
train
|
def _convert_units(obj, desired, guess=False):
"""
Given an object with scale and units try to scale
to different units via the object's `apply_scale`.
Parameters
---------
obj : object
With apply_scale method (i.e. Trimesh, Path2D, etc)
desired : str
Units desired (eg 'inches')
guess: bool
Whether we are allowed to guess the units
if they are not specified.
"""
if obj.units is None:
# try to extract units from metadata
# if nothing specified in metadata and not allowed
# to guess will raise a ValueError
obj.units = units_from_metadata(obj, guess=guess)
log.info('converting units from %s to %s', obj.units, desired)
# float, conversion factor
conversion = unit_conversion(obj.units, desired)
# apply scale uses transforms which preserve
# cached properties (rather than just multiplying vertices)
obj.apply_scale(conversion)
# units are now desired units
obj.units = desired
|
python
|
{
"resource": ""
}
|
q22979
|
export_path
|
train
|
def export_path(path,
file_type=None,
file_obj=None,
**kwargs):
"""
    Export a Path object to a file-like object, or to a filename
Parameters
---------
file_obj: None, str, or file object
A filename string or a file-like object
file_type: None or str
File type, e.g.: 'svg', 'dxf'
kwargs : passed to loader
Returns
---------
exported : str or bytes
Data exported
"""
# if file object is a string it is probably a file path
# so we can split the extension to set the file type
if util.is_string(file_obj):
file_type = util.split_extension(file_obj)
# run the export
export = _path_exporters[file_type](path, **kwargs)
# if we've been passed files write the data
_write_export(export=export, file_obj=file_obj)
return export
|
python
|
{
"resource": ""
}
|
q22980
|
export_dict
|
train
|
def export_dict(path):
"""
Export a path as a dict of kwargs for the Path constructor.
"""
export_entities = [e.to_dict() for e in path.entities]
export_object = {'entities': export_entities,
'vertices': path.vertices.tolist()}
return export_object
|
python
|
{
"resource": ""
}
|
q22981
|
_write_export
|
train
|
def _write_export(export, file_obj=None):
"""
Write a string to a file.
If file_obj isn't specified, return the string
Parameters
---------
export: a string of the export data
file_obj: a file-like object or a filename
"""
if file_obj is None:
return export
if hasattr(file_obj, 'write'):
out_file = file_obj
else:
out_file = open(file_obj, 'wb')
try:
out_file.write(export)
except TypeError:
out_file.write(export.encode('utf-8'))
out_file.close()
return export
|
python
|
{
"resource": ""
}
|
q22982
|
sample_surface
|
train
|
def sample_surface(mesh, count):
"""
Sample the surface of a mesh, returning the specified
number of points
For individual triangle sampling uses this method:
http://mathworld.wolfram.com/TrianglePointPicking.html
Parameters
---------
mesh: Trimesh object
count: number of points to return
Returns
---------
samples: (count,3) points in space on the surface of mesh
face_index: (count,) indices of faces for each sampled point
"""
# len(mesh.faces) float, array of the areas
# of each face of the mesh
area = mesh.area_faces
# total area (float)
area_sum = np.sum(area)
# cumulative area (len(mesh.faces))
area_cum = np.cumsum(area)
face_pick = np.random.random(count) * area_sum
face_index = np.searchsorted(area_cum, face_pick)
# pull triangles into the form of an origin + 2 vectors
tri_origins = mesh.triangles[:, 0]
tri_vectors = mesh.triangles[:, 1:].copy()
tri_vectors -= np.tile(tri_origins, (1, 2)).reshape((-1, 2, 3))
# pull the vectors for the faces we are going to sample from
tri_origins = tri_origins[face_index]
tri_vectors = tri_vectors[face_index]
# randomly generate two 0-1 scalar components to multiply edge vectors by
random_lengths = np.random.random((len(tri_vectors), 2, 1))
# points will be distributed on a quadrilateral if we use 2 0-1 samples
# if the two scalar components sum less than 1.0 the point will be
# inside the triangle, so we find vectors longer than 1.0 and
# transform them to be inside the triangle
random_test = random_lengths.sum(axis=1).reshape(-1) > 1.0
random_lengths[random_test] -= 1.0
random_lengths = np.abs(random_lengths)
# multiply triangle edge vectors by the random lengths and sum
sample_vector = (tri_vectors * random_lengths).sum(axis=1)
# finally, offset by the origin to generate
# (n,3) points in space on the triangle
samples = sample_vector + tri_origins
return samples, face_index
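
# Usage sketch, not part of the source: sample points on a unit box and check
# the returned shapes; trimesh.creation.box is an assumption about the
# surrounding library.
import trimesh

box = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
points, face_index = sample_surface(box, 100)
assert points.shape == (100, 3)
assert face_index.shape == (100,)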
|
python
|
{
"resource": ""
}
|
q22983
|
volume_mesh
|
train
|
def volume_mesh(mesh, count):
"""
Use rejection sampling to produce points randomly distributed
in the volume of a mesh.
Parameters
----------
mesh: Trimesh object
count: int, number of samples desired
Returns
----------
samples: (n,3) float, points in the volume of the mesh.
where: n <= count
"""
points = (np.random.random((count, 3)) * mesh.extents) + mesh.bounds[0]
contained = mesh.contains(points)
samples = points[contained][:count]
return samples
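
# Usage sketch, not part of the source: rejection sampling inside a unit box;
# misses are discarded so fewer than `count` points may come back.
import numpy as np
import trimesh

box = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
samples = volume_mesh(box, 500)
assert len(samples) <= 500
assert np.all(np.abs(samples) <= 0.5 + 1e-8)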
|
python
|
{
"resource": ""
}
|
q22984
|
volume_rectangular
|
train
|
def volume_rectangular(extents,
count,
transform=None):
"""
Return random samples inside a rectangular volume.
Parameters
----------
extents: (3,) float, side lengths of rectangular solid
count: int, number of points to return
transform: (4,4) float, transformation matrix
Returns
---------
samples: (count, 3) float, points in volume
"""
samples = np.random.random((count, 3)) - .5
samples *= extents
if transform is not None:
samples = transformations.transform_points(samples,
transform)
return samples
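
# Usage sketch, not part of the source: sample 1000 points inside a 2 x 3 x 4
# box centered at the origin; every point should fall within the half-extents.
import numpy as np

samples = volume_rectangular((2.0, 3.0, 4.0), 1000)
assert samples.shape == (1000, 3)
assert np.all(np.abs(samples) <= np.array([1.0, 1.5, 2.0]))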
|
python
|
{
"resource": ""
}
|
q22985
|
sample_surface_even
|
train
|
def sample_surface_even(mesh, count):
"""
Sample the surface of a mesh, returning samples which are
approximately evenly spaced.
Parameters
---------
mesh: Trimesh object
count: number of points to return
Returns
---------
samples: (count,3) points in space on the surface of mesh
face_index: (count,) indices of faces for each sampled point
"""
from .points import remove_close
radius = np.sqrt(mesh.area / (2 * count))
samples, ids = sample_surface(mesh, count * 5)
result, mask = remove_close(samples, radius)
return result, ids[mask]
|
python
|
{
"resource": ""
}
|
q22986
|
sample_surface_sphere
|
train
|
def sample_surface_sphere(count):
"""
Correctly pick random points on the surface of a unit sphere
Uses this method:
http://mathworld.wolfram.com/SpherePointPicking.html
Parameters
----------
count: int, number of points to return
Returns
----------
points: (count,3) float, list of random points on a unit sphere
"""
u, v = np.random.random((2, count))
theta = np.pi * 2 * u
phi = np.arccos((2 * v) - 1)
points = util.spherical_to_vector(np.column_stack((theta, phi)))
return points
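
# Usage sketch, not part of the source: every sampled point should lie on the
# unit sphere, i.e. have a vector norm of one.
import numpy as np

points = sample_surface_sphere(500)
assert points.shape == (500, 3)
assert np.allclose(np.linalg.norm(points, axis=1), 1.0)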
|
python
|
{
"resource": ""
}
|
q22987
|
parameters_to_segments
|
train
|
def parameters_to_segments(origins, vectors, parameters):
"""
Convert a parametric line segment representation to
a two point line segment representation
Parameters
------------
origins : (n, 3) float
Line origin point
vectors : (n, 3) float
Unit line directions
parameters : (n, 2) float
Start and end distance pairs for each line
Returns
--------------
segments : (n, 2, 3) float
Line segments defined by start and end points
"""
# don't copy input
origins = np.asanyarray(origins, dtype=np.float64)
vectors = np.asanyarray(vectors, dtype=np.float64)
parameters = np.asanyarray(parameters, dtype=np.float64)
# turn the segments into a reshapable 2D array
segments = np.hstack((origins + vectors * parameters[:, :1],
origins + vectors * parameters[:, 1:]))
return segments.reshape((-1, 2, origins.shape[1]))
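
# Usage sketch, not part of the source: a single line along +X starting at the
# origin, clipped to the parameter range [1, 3].
import numpy as np

segments = parameters_to_segments(
    origins=np.array([[0.0, 0.0, 0.0]]),
    vectors=np.array([[1.0, 0.0, 0.0]]),
    parameters=np.array([[1.0, 3.0]]))
assert segments.shape == (1, 2, 3)
assert np.allclose(segments[0], [[1.0, 0.0, 0.0], [3.0, 0.0, 0.0]])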
|
python
|
{
"resource": ""
}
|
q22988
|
colinear_pairs
|
train
|
def colinear_pairs(segments,
radius=.01,
angle=.01,
length=None):
"""
Find pairs of segments which are colinear.
Parameters
-------------
segments : (n, 2, (2, 3)) float
Two or three dimensional line segments
radius : float
Maximum radius line origins can differ
and be considered colinear
angle : float
Maximum angle in radians segments can
differ and still be considered colinear
length : None or float
If specified, will additionally require
that pairs have a mean vertex distance less
than this value from each other to qualify.
Returns
------------
pairs : (m, 2) int
Indexes of segments which are colinear
"""
from scipy import spatial
# convert segments to parameterized origins
# which are the closest point on the line to
    # the actual zero origin
origins, vectors, param = segments_to_parameters(segments)
# create a kdtree for origins
tree = spatial.cKDTree(origins)
# find origins closer than specified radius
pairs = tree.query_pairs(r=radius, output_type='ndarray')
# calculate angles between pairs
angles = geometry.vector_angle(vectors[pairs])
# angles can be within tolerance of 180 degrees or 0.0 degrees
angle_ok = np.logical_or(
util.isclose(angles, np.pi, atol=angle),
util.isclose(angles, 0.0, atol=angle))
# apply angle threshold
colinear = pairs[angle_ok]
# if length is specified check endpoint proximity
if length is not None:
# make sure parameter pairs are ordered
param.sort(axis=1)
# calculate the mean parameter distance for each colinear pair
distance = param[colinear].ptp(axis=1).mean(axis=1)
# if the MEAN distance is less than specified length consider
        # the segment to be identical: worst case single-vertex
# distance is 2*length
identical = distance < length
        # remove non-identical pairs
colinear = colinear[identical]
return colinear
|
python
|
{
"resource": ""
}
|
q22989
|
unique
|
train
|
def unique(segments, digits=5):
"""
Find unique line segments.
Parameters
------------
segments : (n, 2, (2|3)) float
Line segments in space
digits : int
How many digits to consider when merging vertices
Returns
-----------
unique : (m, 2, (2|3)) float
Segments with duplicates merged
"""
segments = np.asanyarray(segments, dtype=np.float64)
# find segments as unique indexes so we can find duplicates
inverse = grouping.unique_rows(
segments.reshape((-1, segments.shape[2])),
digits=digits)[1].reshape((-1, 2))
# make sure rows are sorted
inverse.sort(axis=1)
# find rows that occur once
index = grouping.unique_rows(inverse)
# apply the unique mask
unique = segments[index[0]]
return unique
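
# Usage sketch, not part of the source: a segment, its reversed duplicate and
# one distinct segment should collapse to two unique segments.
import numpy as np

segments = np.array([[[0, 0, 0], [1, 0, 0]],
                     [[1, 0, 0], [0, 0, 0]],
                     [[0, 0, 0], [0, 1, 0]]], dtype=np.float64)
assert len(unique(segments)) == 2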
|
python
|
{
"resource": ""
}
|
q22990
|
overlap
|
train
|
def overlap(origins, vectors, params):
"""
Find the overlap of two parallel line segments.
Parameters
------------
origins : (2, 3) float
Origin points of lines in space
vectors : (2, 3) float
Unit direction vectors of lines
params : (2, 2) float
Two (start, end) distance pairs
Returns
------------
length : float
Overlapping length
overlap : (n, 2, 3) float
Line segments for overlapping distance
"""
# copy inputs and make sure shape is correct
origins = np.array(origins).reshape((2, 3))
vectors = np.array(vectors).reshape((2, 3))
params = np.array(params).reshape((2, 2))
if tol.strict:
# convert input to parameters before flipping
# to make sure we didn't screw it up
truth = parameters_to_segments(origins,
vectors,
params)
# this function only works on parallel lines
dot = np.dot(*vectors)
assert np.isclose(np.abs(dot), 1.0, atol=.01)
# if two vectors are reversed
if dot < 0.0:
# reverse direction vector
vectors[1] *= -1.0
# negate parameters
params[1] *= -1.0
if tol.strict:
# do a check to make sure our reversal didn't
# inadvertently give us incorrect segments
assert np.allclose(truth,
parameters_to_segments(origins,
vectors,
params))
# merge the parameter ranges
ok, new_range = interval.intersection(*params)
if not ok:
return 0.0, np.array([])
# create the overlapping segment pairs (2, 2, 3)
segments = np.array([o + v * new_range.reshape((-1, 1))
for o, v in zip(origins, vectors)])
# get the length of the new range
length = new_range.ptp()
return length, segments
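
# Usage sketch, not part of the source: two collinear lines along +X whose
# parameter ranges share the interval [1, 2] overlap over a length of one.
import numpy as np

length, shared = overlap(
    origins=np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
    vectors=np.array([[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]),
    params=np.array([[0.0, 2.0], [1.0, 3.0]]))
assert np.isclose(length, 1.0)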
|
python
|
{
"resource": ""
}
|
q22991
|
_load_texture
|
train
|
def _load_texture(file_name, resolver):
"""
Load a texture from a file into a PIL image.
"""
file_data = resolver.get(file_name)
image = PIL.Image.open(util.wrap_as_stream(file_data))
return image
|
python
|
{
"resource": ""
}
|
q22992
|
_parse_material
|
train
|
def _parse_material(effect, resolver):
"""
Turn a COLLADA effect into a trimesh material.
"""
# Compute base color
baseColorFactor = np.ones(4)
baseColorTexture = None
if isinstance(effect.diffuse, collada.material.Map):
try:
baseColorTexture = _load_texture(
effect.diffuse.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load base texture',
exc_info=True)
elif effect.diffuse is not None:
baseColorFactor = effect.diffuse
# Compute emission color
emissiveFactor = np.zeros(3)
emissiveTexture = None
if isinstance(effect.emission, collada.material.Map):
try:
emissiveTexture = _load_texture(
effect.diffuse.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load emissive texture',
exc_info=True)
elif effect.emission is not None:
emissiveFactor = effect.emission[:3]
# Compute roughness
roughnessFactor = 1.0
if (not isinstance(effect.shininess, collada.material.Map)
and effect.shininess is not None):
roughnessFactor = np.sqrt(2.0 / (2.0 + effect.shininess))
# Compute metallic factor
metallicFactor = 0.0
# Compute normal texture
normalTexture = None
if effect.bumpmap is not None:
try:
normalTexture = _load_texture(
effect.bumpmap.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load bumpmap',
exc_info=True)
return visual.texture.PBRMaterial(
emissiveFactor=emissiveFactor,
emissiveTexture=emissiveTexture,
normalTexture=normalTexture,
baseColorTexture=baseColorTexture,
baseColorFactor=baseColorFactor,
metallicFactor=metallicFactor,
roughnessFactor=roughnessFactor
)
|
python
|
{
"resource": ""
}
|
q22993
|
_unparse_material
|
train
|
def _unparse_material(material):
"""
Turn a trimesh material into a COLLADA material.
"""
# TODO EXPORT TEXTURES
if isinstance(material, visual.texture.PBRMaterial):
diffuse = material.baseColorFactor
if diffuse is not None:
diffuse = list(diffuse)
emission = material.emissiveFactor
if emission is not None:
emission = [float(emission[0]), float(emission[1]),
float(emission[2]), 1.0]
shininess = material.roughnessFactor
if shininess is not None:
shininess = 2.0 / shininess**2 - 2.0
effect = collada.material.Effect(
uuid.uuid4().hex, params=[], shadingtype='phong',
diffuse=diffuse, emission=emission,
specular=[1.0, 1.0, 1.0], shininess=float(shininess)
)
material = collada.material.Material(
uuid.uuid4().hex, 'pbrmaterial', effect
)
else:
effect = collada.material.Effect(
uuid.uuid4().hex, params=[], shadingtype='phong'
)
material = collada.material.Material(
uuid.uuid4().hex, 'defaultmaterial', effect
)
return material
|
python
|
{
"resource": ""
}
|
q22994
|
load_zae
|
train
|
def load_zae(file_obj, resolver=None, **kwargs):
"""
Load a ZAE file, which is just a zipped DAE file.
Parameters
-------------
file_obj : file object
Contains ZAE data
resolver : trimesh.visual.Resolver
Resolver to load additional assets
kwargs : dict
Passed to load_collada
Returns
------------
loaded : dict
Results of loading
"""
# a dict, {file name : file object}
archive = util.decompress(file_obj,
file_type='zip')
# load the first file with a .dae extension
file_name = next(i for i in archive.keys()
if i.lower().endswith('.dae'))
# a resolver so the loader can load textures / etc
resolver = visual.resolvers.ZipResolver(archive)
# run the regular collada loader
loaded = load_collada(archive[file_name],
resolver=resolver,
**kwargs)
return loaded
|
python
|
{
"resource": ""
}
|
q22995
|
load_off
|
train
|
def load_off(file_obj, **kwargs):
"""
Load an OFF file into the kwargs for a Trimesh constructor
Parameters
----------
file_obj : file object
Contains an OFF file
Returns
----------
loaded : dict
kwargs for Trimesh constructor
"""
header_string = file_obj.readline()
if hasattr(header_string, 'decode'):
header_string = header_string.decode('utf-8')
header_string = header_string.strip().upper()
if not header_string == 'OFF':
raise NameError('Not an OFF file! Header was ' +
header_string)
header = np.array(
file_obj.readline().strip().split()).astype(np.int64)
vertex_count, face_count = header[:2]
# read the rest of the file
blob = np.array(file_obj.read().strip().split())
# there should be 3 points per vertex
# and 3 indexes + 1 count per face
data_ok = np.sum(header * [3, 4, 0]) == len(blob)
if not data_ok:
raise NameError('Incorrect number of vertices or faces!')
vertices = blob[:(vertex_count * 3)].astype(
np.float64).reshape((-1, 3))
    # strip the first column which is a per-face count
faces = blob[(vertex_count * 3):].astype(
np.int64).reshape((-1, 4))[:, 1:]
kwargs = {'vertices': vertices,
'faces': faces}
return kwargs
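
# Usage sketch, not part of the source: parse a minimal single-triangle OFF
# file from an in-memory text buffer.
from io import StringIO

off_text = 'OFF\n3 1 0\n0 0 0\n1 0 0\n0 1 0\n3 0 1 2\n'
kwargs = load_off(StringIO(off_text))
assert kwargs['vertices'].shape == (3, 3)
assert kwargs['faces'].shape == (1, 3)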
|
python
|
{
"resource": ""
}
|
q22996
|
load_msgpack
|
train
|
def load_msgpack(blob, **kwargs):
"""
Load a dict packed with msgpack into kwargs for
a Trimesh constructor
Parameters
----------
blob : bytes
msgpack packed dict containing
keys 'vertices' and 'faces'
Returns
----------
loaded : dict
Keyword args for Trimesh constructor, aka
mesh=trimesh.Trimesh(**loaded)
"""
import msgpack
if hasattr(blob, 'read'):
data = msgpack.load(blob)
else:
data = msgpack.loads(blob)
loaded = load_dict(data)
return loaded
|
python
|
{
"resource": ""
}
|
q22997
|
discretize_bspline
|
train
|
def discretize_bspline(control,
knots,
count=None,
scale=1.0):
"""
Given a B-Splines control points and knot vector, return
a sampled version of the curve.
Parameters
----------
control : (o, d) float
      Control points of the b-spline
knots : (j,) float
B-spline knots
count : int
Number of line segments to discretize the spline
If not specified will be calculated as something reasonable
Returns
----------
discrete : (count, dimension) float
Points on a polyline version of the B-spline
"""
# evaluate the b-spline using scipy/fitpack
from scipy.interpolate import splev
# (n, d) control points where d is the dimension of vertices
control = np.asanyarray(control, dtype=np.float64)
degree = len(knots) - len(control) - 1
if count is None:
norm = np.linalg.norm(np.diff(control, axis=0), axis=1).sum()
count = int(np.clip(norm / (res.seg_frac * scale),
res.min_sections * len(control),
res.max_sections * len(control)))
ipl = np.linspace(knots[0], knots[-1], count)
discrete = splev(ipl, [knots, control.T, degree])
discrete = np.column_stack(discrete)
return discrete
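
# Usage sketch, not part of the source: discretize a clamped quadratic
# B-spline (three 2D control points) into an explicit number of samples.
import numpy as np

control = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]])
knots = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])
discrete = discretize_bspline(control, knots, count=20)
assert discrete.shape == (20, 2)
assert np.allclose(discrete[0], [0.0, 0.0])
assert np.allclose(discrete[-1], [2.0, 0.0])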
|
python
|
{
"resource": ""
}
|
q22998
|
binomial
|
train
|
def binomial(n):
"""
Return all binomial coefficients for a given order.
For n > 5, scipy.special.binom is used, below we hardcode
to avoid the scipy.special dependency.
Parameters
--------------
n : int
Order
Returns
---------------
binom : (n + 1,) int
Binomial coefficients of a given order
"""
if n == 1:
return [1, 1]
elif n == 2:
return [1, 2, 1]
elif n == 3:
return [1, 3, 3, 1]
elif n == 4:
return [1, 4, 6, 4, 1]
elif n == 5:
return [1, 5, 10, 10, 5, 1]
else:
from scipy.special import binom
return binom(n, np.arange(n + 1))
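
# Usage sketch, not part of the source: the hardcoded row for n = 4 matches
# Pascal's triangle, and larger orders fall through to scipy.special.binom.
import numpy as np

assert list(binomial(4)) == [1, 4, 6, 4, 1]
assert np.allclose(binomial(6), [1, 6, 15, 20, 15, 6, 1])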
|
python
|
{
"resource": ""
}
|
q22999
|
Path.process
|
train
|
def process(self):
"""
        Apply basic cleaning functions to the Path object, in-place.
"""
log.debug('Processing drawing')
with self._cache:
for func in self._process_functions():
func()
return self
|
python
|
{
"resource": ""
}
|