_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q23000
|
Path.layers
|
train
|
def layers(self):
    """
    Return the layer of every entity, falling back to the
    string 'NONE' when an entity has no layer defined.

    Returns
    ---------
    layers: (len(entities), ) list of str
    """
    return [str(e.layer) if hasattr(e, 'layer') else 'NONE'
            for e in self.entities]
|
python
|
{
"resource": ""
}
|
q23001
|
Path.crc
|
train
|
def crc(self):
    """
    A CRC of the current vertices and entities.

    Returns
    ------------
    crc: int, CRC of entity points and vertices
    """
    # checksum over the concatenated byte form of every entity
    entity_bytes = bytes().join(e._bytes() for e in self.entities)
    checksum = caching.crc32(entity_bytes)
    # fold in the vertex CRC with XOR
    checksum ^= self.vertices.crc()
    return checksum
|
python
|
{
"resource": ""
}
|
q23002
|
Path.md5
|
train
|
def md5(self):
    """
    An MD5 hash of the current vertices and entities.

    Returns
    ------------
    md5: str, two appended MD5 hashes
    """
    # hash over the concatenated byte form of every entity
    entity_bytes = bytes().join(e._bytes() for e in self.entities)
    # append the entity hash and the vertex hash
    return '{}{}'.format(util.md5_object(entity_bytes),
                         self.vertices.md5())
|
python
|
{
"resource": ""
}
|
q23003
|
Path.paths
|
train
|
def paths(self):
    """
    Sequence of closed paths, encoded by entity index.

    Returns
    ---------
    paths: (n,) sequence of (*,) int referencing self.entities
    """
    return traversal.closed_paths(self.entities, self.vertices)
|
python
|
{
"resource": ""
}
|
q23004
|
Path.dangling
|
train
|
def dangling(self):
    """
    List of entities that aren't included in a closed path.

    Returns
    ----------
    dangling: (n,) int, index of self.entities
    """
    all_ids = np.arange(len(self.entities))
    if len(self.paths) == 0:
        # nothing is closed so everything dangles
        return all_ids
    # entity indexes referenced by some closed path
    referenced = np.hstack(self.paths)
    return np.setdiff1d(all_ids, referenced)
|
python
|
{
"resource": ""
}
|
q23005
|
Path.kdtree
|
train
|
def kdtree(self):
    """
    A KDTree object holding the vertices of the path.

    Returns
    ----------
    kdtree: scipy.spatial.cKDTree object holding self.vertices
    """
    # view as a plain ndarray so tracked-array hooks don't interfere
    return KDTree(self.vertices.view(np.ndarray))
|
python
|
{
"resource": ""
}
|
q23006
|
Path.scale
|
train
|
def scale(self):
    """
    A representative number reflecting the magnitude of the
    world holding the paths, for numerical comparisons.

    Returns
    ----------
    scale : float
        Approximate size of the world holding this path
    """
    # length of the AABB diagonal from vertex peak-to-peak
    extent = self.vertices.ptp(axis=0)
    return float(((extent ** 2).sum()) ** .5)
|
python
|
{
"resource": ""
}
|
q23007
|
Path.bounds
|
train
|
def bounds(self):
    """
    Return the axis aligned bounding box of the current path.

    Returns
    ----------
    bounds: (2, dimension) float, (min, max) coordinates
    """
    # collect the exact bounds of each entity; some entities
    # (e.g. a 3-point Arc) can extend past the bounding box
    # of their control vertices
    extrema = np.array([e.bounds(self.vertices)
                        for e in self.entities],
                       dtype=np.float64)
    # flatten the per-entity extrema into (n, dimension)
    extrema = extrema.reshape((-1, self.vertices.shape[1]))
    # overall min and max across every entity's bounds
    return np.array([extrema.min(axis=0),
                     extrema.max(axis=0)],
                    dtype=np.float64)
|
python
|
{
"resource": ""
}
|
q23008
|
Path.explode
|
train
|
def explode(self):
    """
    Turn every multi-segment entity into single segment
    entities, in-place.
    """
    # deque gives cheap appends while accumulating
    exploded = collections.deque()
    for original in self.entities:
        exploded.extend(original.explode())
    self.entities = np.array(exploded)
|
python
|
{
"resource": ""
}
|
q23009
|
Path.is_closed
|
train
|
def is_closed(self):
    """
    Are all entities connected to other entities.

    Returns
    -----------
    closed : bool
        Every entity is connected at its ends
    """
    # every vertex node must have exactly two connections
    degrees = dict(self.vertex_graph.degree()).values()
    return all(degree == 2 for degree in degrees)
|
python
|
{
"resource": ""
}
|
q23010
|
Path.vertex_nodes
|
train
|
def vertex_nodes(self):
    """
    Get a list of which vertex indices are nodes: either
    endpoints or points where the entity changes direction.

    Returns
    --------------
    nodes : (n, 2) int
        Indexes of self.vertices which are nodes
    """
    return np.vstack([entity.nodes for entity in self.entities])
|
python
|
{
"resource": ""
}
|
q23011
|
Path.rezero
|
train
|
def rezero(self):
    """
    Translate the path so that every vertex coordinate
    is positive.

    Returns
    -----------
    matrix : (dimension + 1, dimension + 1) float
        Homogeneous transformation that was applied
        to the current Path object.
    """
    dim = self.vertices.shape[1]
    # shift by the negated per-axis minimum
    shift = -self.vertices.min(axis=0)
    matrix = np.eye(dim + 1)
    matrix[:dim, dim] = shift
    self.apply_transform(matrix)
    return matrix
|
python
|
{
"resource": ""
}
|
q23012
|
Path.merge_vertices
|
train
|
def merge_vertices(self, digits=None):
    """
    Merge vertices which are identical and replace references.

    Parameters
    --------------
    digits : None, or int
        How many digits to consider when merging vertices

    Alters
    -----------
    self.entities : entity.points re-referenced
    self.vertices : duplicates removed
    """
    if len(self.vertices) == 0:
        return
    if digits is None:
        # derive merge precision from the overall scale of the path
        digits = util.decimal_to_digits(tol.merge * self.scale,
                                        min_digits=1)
    unique, inverse = grouping.unique_rows(self.vertices,
                                           digits=digits)
    self.vertices = self.vertices[unique]
    # np.bool was removed from numpy: use the builtin bool dtype
    entities_ok = np.ones(len(self.entities), dtype=bool)
    for index, entity in enumerate(self.entities):
        # what kind of entity are we dealing with
        kind = type(entity).__name__
        # entities that don't need runs merged
        # don't screw up control-point-knot relationship;
        # exact tuple membership avoids accidental substring
        # matches that `kind in 'BSpline Bezier Text'` allowed
        if kind in ('BSpline', 'Bezier', 'Text'):
            entity.points = inverse[entity.points]
            continue
        # if we merged duplicate vertices, the entity may
        # have multiple references to the same vertex
        points = grouping.merge_runs(inverse[entity.points])
        # if there are three points and two are identical fix it
        if kind == 'Line':
            if len(points) == 3 and points[0] == points[-1]:
                points = points[:2]
            elif len(points) < 2:
                # lines need two or more vertices
                entities_ok[index] = False
        elif kind == 'Arc' and len(points) != 3:
            # three point arcs need three points
            entities_ok[index] = False
        # store points in entity
        entity.points = points
    # remove degenerate entities
    self.entities = self.entities[entities_ok]
|
python
|
{
"resource": ""
}
|
q23013
|
Path.replace_vertex_references
|
train
|
def replace_vertex_references(self, mask):
    """
    Replace the vertex index references in every entity.

    Parameters
    ------------
    mask : (len(self.vertices), ) int
        Contains new vertex indexes

    Alters
    ------------
    entity.points in self.entities
        Replaced by mask[entity.points]
    """
    for e in self.entities:
        e.points = mask[e.points]
|
python
|
{
"resource": ""
}
|
q23014
|
Path.remove_entities
|
train
|
def remove_entities(self, entity_ids):
    """
    Remove entities by index.

    Parameters
    -----------
    entity_ids : (n,) int
        Indexes of self.entities to remove
    """
    if len(entity_ids) == 0:
        return
    # must be a boolean mask: the previous float array is
    # rejected by boolean/fancy indexing on modern numpy
    keep = np.ones(len(self.entities), dtype=bool)
    keep[entity_ids] = False
    self.entities = self.entities[keep]
|
python
|
{
"resource": ""
}
|
q23015
|
Path.remove_invalid
|
train
|
def remove_invalid(self):
    """
    Remove entities which declare themselves invalid.

    Alters
    ----------
    self.entities: shortened
    """
    # np.bool was removed from numpy: use the builtin bool dtype
    valid = np.array([i.is_valid for i in self.entities],
                     dtype=bool)
    self.entities = self.entities[valid]
|
python
|
{
"resource": ""
}
|
q23016
|
Path.remove_duplicate_entities
|
train
|
def remove_duplicate_entities(self):
    """
    Remove entities that are duplicated.

    Alters
    -------
    self.entities: length same or shorter
    """
    # group entities by their hash value
    hashes = np.array([hash(e) for e in self.entities])
    unique, inverse = grouping.unique_rows(hashes)
    # only reassign if something was actually a duplicate
    if len(unique) != len(self.entities):
        self.entities = self.entities[unique]
|
python
|
{
"resource": ""
}
|
q23017
|
Path.referenced_vertices
|
train
|
def referenced_vertices(self):
    """
    Which vertices are referenced by an entity.

    Returns
    -----------
    referenced_vertices: (n,) int, indexes of self.vertices
    """
    # with no entities there are no references
    if len(self.entities) == 0:
        return np.array([], dtype=np.int64)
    stacked = np.concatenate([e.points for e in self.entities])
    return np.unique(stacked.astype(np.int64))
|
python
|
{
"resource": ""
}
|
q23018
|
Path.remove_unreferenced_vertices
|
train
|
def remove_unreferenced_vertices(self):
    """
    Remove all vertices which aren't used by an entity.

    Alters
    ---------
    self.vertices: reordered and shortened
    self.entities: entity.points references updated
    """
    referenced = self.referenced_vertices
    # map old vertex indices to compacted ones; -1 marks unused
    mask = np.full(len(self.vertices), -1, dtype=np.int64)
    mask[referenced] = np.arange(len(referenced), dtype=np.int64)
    self.replace_vertex_references(mask=mask)
    self.vertices = self.vertices[referenced]
|
python
|
{
"resource": ""
}
|
q23019
|
Path.discretize_path
|
train
|
def discretize_path(self, path):
    """
    Given a list of entities, return a list of connected points.

    Parameters
    -----------
    path: (n,) int, indexes of self.entities

    Returns
    -----------
    discrete: (m, dimension)
    """
    return traversal.discretize_path(self.entities,
                                     self.vertices,
                                     path,
                                     scale=self.scale)
|
python
|
{
"resource": ""
}
|
q23020
|
Path.discrete
|
train
|
def discrete(self):
    """
    A sequence of connected vertices in space, corresponding
    to self.paths.

    Returns
    ---------
    discrete : (len(self.paths),)
        A sequence of (m*, dimension) float
    """
    return np.array([self.discretize_path(p)
                     for p in self.paths])
|
python
|
{
"resource": ""
}
|
q23021
|
Path.export
|
train
|
def export(self,
           file_obj=None,
           file_type=None,
           **kwargs):
    """
    Export the path to a file object or return the data.

    Parameters
    ---------------
    file_obj : None, str, or file object
        File object or string to export to
    file_type : None or str
        Type of file: dxf, dict, svg
    **kwargs : dict
        Passed through to the exporter

    Returns
    ---------------
    exported : bytes or str
        Exported as specified type
    """
    # delegate everything to the format-dispatching exporter
    return export_path(self,
                       file_type=file_type,
                       file_obj=file_obj,
                       **kwargs)
|
python
|
{
"resource": ""
}
|
q23022
|
Path.copy
|
train
|
def copy(self):
    """
    Get a copy of the current mesh

    Returns
    ---------
    copied: Path object, copy of self
    """
    # NOTE: `copy` below resolves to the stdlib `copy` module at
    # global scope, not to this method (which is `self.copy`)
    metadata = {}
    # grab all the keys into a list so if something is added
    # in another thread it probably doesn't stomp on our loop
    for key in list(self.metadata.keys()):
        try:
            metadata[key] = copy.deepcopy(self.metadata[key])
        except RuntimeError:
            # multiple threads
            log.warning('key {} changed during copy'.format(key))
    # copy the core data
    copied = type(self)(entities=copy.deepcopy(self.entities),
                        vertices=copy.deepcopy(self.vertices),
                        metadata=metadata)
    cache = {}
    # try to copy the cache over to the new object
    # copying is best-effort: a failed cache copy only costs
    # recomputation, never correctness
    try:
        # save dict keys before doing slow iteration
        keys = list(self._cache.cache.keys())
        # run through each key and copy into new cache
        for k in keys:
            cache[k] = copy.deepcopy(self._cache.cache[k])
    except RuntimeError:
        # if we have multiple threads this may error and is NBD
        log.debug('unable to copy cache')
    except BaseException:
        # catch and log errors we weren't expecting
        log.error('unable to copy cache', exc_info=True)
    copied._cache.cache = cache
    # mark the copied cache as valid for the copied geometry
    copied._cache.id_set()
    return copied
|
python
|
{
"resource": ""
}
|
q23023
|
Path3D.to_planar
|
train
|
def to_planar(self,
              to_2D=None,
              normal=None,
              check=True):
    """
    Check to see if current vectors are all coplanar.

    If they are, return a Path2D and a transform which will
    transform the 2D representation back into 3 dimensions

    Parameters
    -----------
    to_2D: (4,4) float
        Homogenous transformation matrix to apply,
        If not passed a plane will be fitted to vertices.
    normal: (3,) float, or None
        Approximate normal of direction of plane
        If to_2D is not specified sign
        will be applied to fit plane normal
    check: bool
        If True: Raise a ValueError if
        points aren't coplanar

    Returns
    -----------
    planar : trimesh.path.Path2D
        Current path transformed onto plane
    to_3D : (4,4) float
        Homeogenous transformation to move planar
        back into 3D space
    """
    # which vertices are actually referenced
    referenced = self.referenced_vertices
    # if nothing is referenced return an empty path
    if len(referenced) == 0:
        return Path2D(), np.eye(4)
    # no explicit transform passed
    if to_2D is None:
        # fit a plane to our vertices
        C, N = plane_fit(self.vertices[referenced])
        # apply the normal sign hint
        if normal is not None:
            normal = np.asanyarray(normal, dtype=np.float64)
            if normal.shape == (3,):
                N *= np.sign(np.dot(N, normal))
                # NOTE(review): the sign-corrected fitted normal is
                # immediately overwritten by the passed normal, making
                # the line above dead -- confirm this is intentional
                N = normal
            else:
                log.warning(
                    "passed normal not used: {}".format(
                        normal.shape))
        # create a transform from fit plane to XY
        to_2D = plane_transform(origin=C,
                                normal=N)
    # make sure we've extracted a transform
    to_2D = np.asanyarray(to_2D, dtype=np.float64)
    if to_2D.shape != (4, 4):
        raise ValueError('unable to create transform!')
    # transform all vertices to 2D plane
    flat = transformations.transform_points(self.vertices,
                                            to_2D)
    # Z values of vertices which are referenced
    heights = flat[referenced][:, 2]
    # points are not on a plane because Z varies
    if heights.ptp() > tol.planar:
        # since Z is inconsistent set height to zero
        height = 0.0
        if check:
            raise ValueError('points are not flat!')
    else:
        # if the points were planar store the height
        height = heights.mean()
    # the transform from 2D to 3D
    to_3D = np.linalg.inv(to_2D)
    # if the transform didn't move the path to
    # exactly Z=0 adjust it so the returned transform does
    if np.abs(height) > tol.planar:
        # adjust to_3D transform by height
        adjust = transformations.translation_matrix(
            [0, 0, height])
        # apply the height adjustment to_3D
        to_3D = np.dot(to_3D, adjust)
    # copy metadata to new object
    metadata = copy.deepcopy(self.metadata)
    # store transform we used to move it onto the plane
    metadata['to_3D'] = to_3D
    # create the Path2D with the same entities
    # and XY values of vertices projected onto the plane
    planar = Path2D(entities=copy.deepcopy(self.entities),
                    vertices=flat[:, :2],
                    metadata=metadata,
                    process=False)
    return planar, to_3D
|
python
|
{
"resource": ""
}
|
q23024
|
Path3D.plot_discrete
|
train
|
def plot_discrete(self, show=False):
    """
    Plot the closed curves of the path in 3D.

    Parameters
    ------------
    show : bool
        If False will not execute matplotlib.pyplot.show
    """
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # NOQA
    figure = plt.figure()
    axis = figure.add_subplot(111, projection='3d')
    for curve in self.discrete:
        # unpack (n, 3) points into x, y, z argument arrays
        axis.plot(*curve.T)
    if show:
        plt.show()
|
python
|
{
"resource": ""
}
|
q23025
|
Path3D.plot_entities
|
train
|
def plot_entities(self, show=False):
    """
    Plot the discrete version of every entity in 3D without
    regard for connectivity.

    Parameters
    -------------
    show : bool
        If False will not execute matplotlib.pyplot.show
    """
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # NOQA
    figure = plt.figure()
    axis = figure.add_subplot(111, projection='3d')
    for entity in self.entities:
        # discretize each entity against the shared vertices
        points = entity.discrete(self.vertices)
        axis.plot(*points.T)
    if show:
        plt.show()
|
python
|
{
"resource": ""
}
|
q23026
|
Path2D.show
|
train
|
def show(self, annotations=True):
    """
    Plot the current Path2D object using matplotlib.
    """
    # closed paths get the topology-aware plot
    plotter = (self.plot_discrete if self.is_closed
               else self.plot_entities)
    plotter(show=True, annotations=annotations)
|
python
|
{
"resource": ""
}
|
q23027
|
Path2D.apply_obb
|
train
|
def apply_obb(self):
    """
    Transform the current path so that its OBB is axis aligned
    and the OBB center is at the origin.
    """
    # only supported for single-body paths
    if len(self.root) != 1:
        raise ValueError('Not implemented for multibody geometry')
    matrix, bounds = polygons.polygon_obb(
        self.polygons_closed[self.root[0]])
    self.apply_transform(matrix)
    return matrix
|
python
|
{
"resource": ""
}
|
q23028
|
Path2D.to_3D
|
train
|
def to_3D(self, transform=None):
    """
    Convert 2D path to 3D path on the XY plane.

    Parameters
    -------------
    transform : (4, 4) float
        If passed, will transform vertices.
        If not passed and 'to_3D' is in metadata
        that transform will be used.

    Returns
    -----------
    path_3D: Path3D version of current path
    """
    # fall back to a transform stored in metadata
    if transform is None and 'to_3D' in self.metadata:
        transform = self.metadata['to_3D']
    # pad the 2D vertices with a zero Z column: (n, 2) -> (n, 3)
    vertices = np.column_stack((copy.deepcopy(self.vertices),
                                np.zeros(len(self.vertices))))
    if transform is not None:
        vertices = transformations.transform_points(vertices,
                                                    transform)
    # deep copy so the new path shares no mutable state
    return Path3D(entities=copy.deepcopy(self.entities),
                  vertices=vertices,
                  metadata=copy.deepcopy(self.metadata))
|
python
|
{
"resource": ""
}
|
q23029
|
Path2D.polygons_full
|
train
|
def polygons_full(self):
    """
    A list of shapely.geometry.Polygon objects with interiors created
    by checking which closed polygons enclose which other polygons.

    Returns
    ---------
    full : (len(self.root),) shapely.geometry.Polygon
        Polygons containing interiors
    """
    # pre- allocate the list to avoid indexing problems
    full = [None] * len(self.root)
    # store the graph to avoid cache thrashing
    enclosure = self.enclosure_directed
    # store closed polygons to avoid cache hits
    closed = self.polygons_closed
    # loop through root curves
    for i, root in enumerate(self.root):
        # a list of multiple Polygon objects that
        # are fully contained by the root curve
        children = [closed[child]
                    for child in enclosure[root].keys()]
        # all polygons_closed are CCW, so for interiors reverse them
        holes = [np.array(p.exterior.coords)[::-1]
                 for p in children]
        # a single Polygon object
        shell = closed[root].exterior
        # create a polygon with interiors
        # repair_invalid handles self-intersections and other
        # defects the shell/hole combination may produce
        full[i] = polygons.repair_invalid(Polygon(shell=shell,
                                                  holes=holes))
    # so we can use advanced indexing
    full = np.array(full)
    return full
|
python
|
{
"resource": ""
}
|
q23030
|
Path2D.area
|
train
|
def area(self):
    """
    Return the area of the polygons interior.

    Returns
    ---------
    area: float, total area of polygons minus interiors
    """
    return float(sum(polygon.area
                     for polygon in self.polygons_full))
|
python
|
{
"resource": ""
}
|
q23031
|
Path2D.length
|
train
|
def length(self):
    """
    The total discretized length of every entity.

    Returns
    --------
    length: float, summed length of every entity
    """
    return float(sum(entity.length(self.vertices)
                     for entity in self.entities))
|
python
|
{
"resource": ""
}
|
q23032
|
Path2D.extrude
|
train
|
def extrude(self, height, **kwargs):
    """
    Extrude the current 2D path into a 3D mesh.

    Parameters
    ----------
    height: float, how far to extrude the profile
    kwargs: passed directly to meshpy.triangle.build:
            triangle.build(mesh_info,
                           verbose=False,
                           refinement_func=None,
                           attributes=False,
                           volume_constraints=True,
                           max_volume=None,
                           allow_boundary_steiner=True,
                           allow_volume_steiner=True,
                           quality_meshing=True,
                           generate_edges=None,
                           generate_faces=False,
                           min_angle=None)

    Returns
    --------
    mesh: trimesh object representing extruded polygon
    """
    from ..primitives import Extrusion
    # one extrusion per full polygon region
    extrusions = [Extrusion(polygon=polygon, height=height, **kwargs)
                  for polygon in self.polygons_full]
    # a single region is returned bare rather than in a list
    if len(extrusions) == 1:
        return extrusions[0]
    return extrusions
|
python
|
{
"resource": ""
}
|
q23033
|
Path2D.triangulate
|
train
|
def triangulate(self, **kwargs):
    """
    Create a region-aware triangulation of the 2D path.

    Parameters
    -------------
    **kwargs : dict
        Passed to trimesh.creation.triangulate_polygon

    Returns
    -------------
    vertices : (n, 2) float
        2D vertices of triangulation
    faces : (n, 3) int
        Indexes of vertices for triangles
    """
    from ..creation import triangulate_polygon
    # collect the per-region triangulations
    vertex_seq = []
    face_seq = []
    for polygon in self.polygons_full:
        vertices, faces = triangulate_polygon(polygon, **kwargs)
        vertex_seq.append(vertices)
        face_seq.append(faces)
    # merge into a single (vertices, faces) pair
    return util.append_faces(vertex_seq, face_seq)
|
python
|
{
"resource": ""
}
|
q23034
|
Path2D.medial_axis
|
train
|
def medial_axis(self, resolution=None, clip=None):
    """
    Find the approximate medial axis based on a voronoi diagram
    of evenly spaced points on the boundary of the polygon.

    Parameters
    ----------
    resolution : None or float
        Distance between each sample on the polygon boundary
    clip : None, or (2,) float
        Min, max number of samples

    Returns
    ----------
    medial : Path2D object
        Contains only medial axis of Path
    """
    from .exchange.misc import edges_to_path
    if resolution is None:
        # default sampling distance relative to overall scale
        resolution = self.scale / 1000.0
    # build a Path2D of the medial axis for every region
    regions = []
    for polygon in self.polygons_full:
        edges, vertices = polygons.medial_axis(
            polygon, resolution, clip)
        regions.append(Path2D(**edges_to_path(
            edges=edges, vertices=vertices)))
    # combine the per-region paths into a single Path2D
    return concatenate(regions)
|
python
|
{
"resource": ""
}
|
q23035
|
Path2D.connected_paths
|
train
|
def connected_paths(self, path_id, include_self=False):
    """
    Given an index of self.paths find other paths which
    overlap with that path.

    Parameters
    -----------
    path_id : int
        Index of self.paths
    include_self : bool
        Should the result include path_id or not

    Returns
    -----------
    path_ids : (n, ) int
        Indexes of self.paths that overlap input path_id
    """
    if len(self.root) == 1:
        # single body: every closed polygon is connected
        ids = np.arange(len(self.polygons_closed))
    else:
        # walk the enclosure graph for the connected component
        ids = list(nx.node_connected_component(
            self.enclosure,
            path_id))
    if include_self:
        return np.array(ids)
    # drop the query id from the result
    return np.setdiff1d(ids, [path_id])
|
python
|
{
"resource": ""
}
|
q23036
|
Path2D.simplify_spline
|
train
|
def simplify_spline(self, path_indexes=None, smooth=.0002):
    """
    Convert paths into b-splines.

    Parameters
    -----------
    path_indexes : (n) int
        List of indexes of self.paths to convert
    smooth : float
        How much the spline should smooth the curve

    Returns
    ------------
    simplified: Path2D object
    """
    # delegate to the simplify module
    return simplify.simplify_spline(self,
                                    path_indexes=path_indexes,
                                    smooth=smooth)
|
python
|
{
"resource": ""
}
|
q23037
|
Path2D.plot_discrete
|
train
|
def plot_discrete(self, show=False, annotations=True):
    """
    Plot the closed curves of the path in 2D.
    """
    import matplotlib.pyplot as plt
    axis = plt.axes()
    axis.set_aspect('equal', 'datalim')
    for index, points in enumerate(self.discrete):
        # root (exterior) curves in black, others in green
        axis.plot(*points.T,
                  color=('k' if index in self.root else 'g'))
    if annotations:
        for entity in self.entities:
            # entities that know how to plot themselves
            if hasattr(entity, 'plot'):
                entity.plot(self.vertices)
    if show:
        plt.show()
    return axis
|
python
|
{
"resource": ""
}
|
q23038
|
Path2D.plot_entities
|
train
|
def plot_entities(self, show=False, annotations=True, color=None):
    """
    Plot the entities of the path with no notion of topology.

    Parameters
    -------------
    show : bool
        If True execute matplotlib.pyplot.show
    annotations : bool
        If True let entities with a plot method draw themselves
    color : None or str
        Override color for every entity
    """
    import matplotlib.pyplot as plt
    plt.axes().set_aspect('equal', 'datalim')
    # plot format keyed by entity class name plus closed flag
    eformat = {'Line0': {'color': 'g', 'linewidth': 1},
               'Line1': {'color': 'y', 'linewidth': 1},
               'Arc0': {'color': 'r', 'linewidth': 1},
               'Arc1': {'color': 'b', 'linewidth': 1},
               'Bezier0': {'color': 'k', 'linewidth': 1},
               'Bezier1': {'color': 'k', 'linewidth': 1},
               'BSpline0': {'color': 'm', 'linewidth': 1},
               'BSpline1': {'color': 'm', 'linewidth': 1}}
    for entity in self.entities:
        if annotations and hasattr(entity, 'plot'):
            # the entity knows how to plot itself
            entity.plot(self.vertices)
            continue
        points = entity.discrete(self.vertices)
        key = entity.__class__.__name__ + str(int(entity.closed))
        fmt = eformat[key]
        if color is not None:
            # passed color overrides every other option
            fmt['color'] = color
        elif hasattr(entity, 'color'):
            # fall back to the entity's own specified color
            fmt['color'] = entity.color
        plt.plot(*points.T, **fmt)
    if show:
        plt.show()
|
python
|
{
"resource": ""
}
|
q23039
|
Path2D.identifier
|
train
|
def identifier(self):
    """
    A unique identifier for the path.

    Returns
    ---------
    identifier: (5,) float, unique identifier
    """
    # the hash is only meaningful for a single region
    if len(self.polygons_full) != 1:
        raise TypeError('Identifier only valid for single body')
    return polygons.polygon_hash(self.polygons_full[0])
|
python
|
{
"resource": ""
}
|
q23040
|
Path2D.identifier_md5
|
train
|
def identifier_md5(self):
    """
    Return an MD5 hash of the identifier vector.

    Returns
    ----------
    hashed : str
        MD5 hex digest of the quantized identifier
    """
    # quantize the float identifier so small numerical noise
    # doesn't change the hash
    as_int = (self.identifier * 1e4).astype(np.int64)
    # ndarray.tostring was deprecated and removed from numpy;
    # tobytes produces identical output
    hashed = util.md5_object(as_int.tobytes(order='C'))
    return hashed
|
python
|
{
"resource": ""
}
|
q23041
|
Path2D.enclosure_directed
|
train
|
def enclosure_directed(self):
    """
    Networkx DiGraph of polygon enclosure.
    """
    root, enclosure = polygons.enclosure_tree(self.polygons_closed)
    # the root curves come out of the same computation,
    # so stash them in the cache as a side effect
    self._cache['root'] = root
    return enclosure
|
python
|
{
"resource": ""
}
|
q23042
|
Path2D.enclosure_shell
|
train
|
def enclosure_shell(self):
    """
    A dictionary of path indexes which are 'shell' paths, and
    values of 'hole' paths.

    Returns
    ----------
    corresponding: dict, {index of self.paths of shell : [indexes of holes]}
    """
    # OrderedDict preserves the order of self.root
    corresponding = collections.OrderedDict(
        (root, self.connected_paths(root, include_self=False))
        for root in self.root)
    return corresponding
|
python
|
{
"resource": ""
}
|
q23043
|
log_time
|
train
|
def log_time(method):
    """
    A decorator for methods which times the call and emits a
    log.debug message with the method name and elapsed seconds.
    """
    def timed(*args, **kwargs):
        start = time_function()
        result = method(*args, **kwargs)
        log.debug('%s executed in %.4f seconds.',
                  method.__name__,
                  time_function() - start)
        return result
    # preserve the wrapped method's identity for introspection
    timed.__name__ = method.__name__
    timed.__doc__ = method.__doc__
    return timed
|
python
|
{
"resource": ""
}
|
q23044
|
nearby_faces
|
train
|
def nearby_faces(mesh, points):
    """
    For each point find nearby faces relatively quickly.

    The closest point on the mesh to the queried point is guaranteed to be
    on one of the faces listed.

    Does this by finding the nearest vertex on the mesh to each point, and
    then returns all the faces that intersect the axis aligned bounding box
    centered at the queried point and extending to the nearest vertex.

    Parameters
    ----------
    mesh : Trimesh object
    points : (n,3) float , points in space

    Returns
    -----------
    candidates : (points,) int, sequence of indexes for mesh.faces
    """
    points = np.asanyarray(points, dtype=np.float64)
    if not util.is_shape(points, (-1, 3)):
        raise ValueError('points must be (n,3)!')
    # an r-tree containing the axis aligned bounding box for every triangle
    rtree = mesh.triangles_tree
    # a kd-tree containing every vertex of the mesh
    kdtree = mesh.kdtree
    # query the distance to the nearest vertex to get AABB of a sphere
    distance_vertex = kdtree.query(points)[0].reshape((-1, 1))
    # pad by the merge tolerance so exact-hit vertices still
    # produce a nonzero query box
    distance_vertex += tol.merge
    # axis aligned bounds: (min_xyz, max_xyz) interleaved per point
    bounds = np.column_stack((points - distance_vertex,
                              points + distance_vertex))
    # faces that intersect axis aligned bounding box
    candidates = [list(rtree.intersection(b)) for b in bounds]
    return candidates
|
python
|
{
"resource": ""
}
|
q23045
|
signed_distance
|
train
|
def signed_distance(mesh, points):
    """
    Find the signed distance from a mesh to a list of points.

    * Points OUTSIDE the mesh will have NEGATIVE distance
    * Points within tol.merge of the surface will have POSITIVE distance
    * Points INSIDE the mesh will have POSITIVE distance

    Parameters
    -----------
    mesh : Trimesh object
    points : (n,3) float, list of points in space

    Returns
    ----------
    signed_distance : (n,3) float, signed distance from point to mesh
    """
    # make sure we have a numpy array
    points = np.asanyarray(points, dtype=np.float64)
    # unsigned distance to the closest surface point
    closest, distance, triangle_id = closest_point(mesh, points)
    # points essentially on the surface keep positive distance
    nonzero = distance > tol.merge
    if not nonzero.any():
        return distance
    # containment determines sign: inside -> +1, outside -> -1
    contained = mesh.ray.contains_points(points[nonzero])
    distance[nonzero] *= (contained.astype(int) * 2) - 1
    return distance
|
python
|
{
"resource": ""
}
|
q23046
|
longest_ray
|
train
|
def longest_ray(mesh, points, directions):
    """
    Find the lengths of the longest rays which do not intersect the mesh
    cast from a list of points in the provided directions.

    Parameters
    -----------
    points : (n,3) float, list of points in space
    directions : (n,3) float, directions of rays

    Returns
    ----------
    lengths : (n,) float, length of rays (np.inf when a ray
        never hits the mesh)
    """
    points = np.asanyarray(points, dtype=np.float64)
    if not util.is_shape(points, (-1, 3)):
        raise ValueError('points must be (n,3)!')
    directions = np.asanyarray(directions, dtype=np.float64)
    if not util.is_shape(directions, (-1, 3)):
        raise ValueError('directions must be (n,3)!')
    if len(points) != len(directions):
        raise ValueError('number of points must equal number of directions!')
    # all hits along each ray, not just the first
    faces, rays, locations = mesh.ray.intersects_id(points, directions,
                                                    return_locations=True,
                                                    multiple_hits=True)
    if len(rays) > 0:
        # distance from each hit back to its source point
        distances = np.linalg.norm(locations - points[rays],
                                   axis=1)
    else:
        distances = np.array([])
    # Reject intersections at distance less than tol.planar
    rays = rays[distances > tol.planar]
    distances = distances[distances > tol.planar]
    # Add infinite length for those with no valid intersection
    no_intersections = np.setdiff1d(np.arange(len(points)), rays)
    rays = np.concatenate((rays, no_intersections))
    distances = np.concatenate((distances,
                                np.repeat(np.inf,
                                          len(no_intersections))))
    # per-ray minimum: the nearest accepted hit (or inf)
    return group_min(rays, distances)
|
python
|
{
"resource": ""
}
|
q23047
|
thickness
|
train
|
def thickness(mesh,
              points,
              exterior=False,
              normals=None,
              method='max_sphere'):
    """
    Find the thickness of the mesh at the given points.

    Parameters
    ----------
    points : (n,3) float, list of points in space
    exterior : bool, whether to compute the exterior thickness
               (a.k.a. reach)
    normals : (n,3) float, normals of the mesh at the given points
              None, compute this automatically.
    method : string, one of 'max_sphere' or 'ray'

    Returns
    ----------
    thickness : (n,) float, thickness

    Raises
    ----------
    ValueError
        If inputs have the wrong shape or method is unknown.
    """
    points = np.asanyarray(points, dtype=np.float64)
    if not util.is_shape(points, (-1, 3)):
        raise ValueError('points must be (n,3)!')
    if normals is not None:
        normals = np.asanyarray(normals, dtype=np.float64)
        if not util.is_shape(normals, (-1, 3)):
            raise ValueError('normals must be (n,3)!')
        if len(points) != len(normals):
            raise ValueError('number of points must equal number of normals!')
    else:
        # use the face normal of the triangle closest to each point
        normals = mesh.face_normals[closest_point(mesh, points)[2]]
    if method == 'max_sphere':
        # diameter of the largest sphere tangent at each point
        centers, radius = max_tangent_sphere(mesh=mesh,
                                             points=points,
                                             inwards=not exterior,
                                             normals=normals)
        thickness = radius * 2
        return thickness
    elif method == 'ray':
        # cast along (exterior) or against (interior) the normal
        if exterior:
            return longest_ray(mesh, points, normals)
        else:
            return longest_ray(mesh, points, -normals)
    else:
        raise ValueError('Invalid method, use "max_sphere" or "ray"')
|
python
|
{
"resource": ""
}
|
q23048
|
ProximityQuery.vertex
|
train
|
def vertex(self, points):
    """
    Given a set of points, return the closest vertex index
    to each point.

    Parameters
    ----------
    points : (n,3) float, list of points in space

    Returns
    ----------
    distance : (n,) float, distance from source point to vertex
    vertex_id : (n,) int, index of mesh.vertices which is closest
    """
    # the mesh's kd-tree does all the work
    return self._mesh.kdtree.query(points)
|
python
|
{
"resource": ""
}
|
q23049
|
procrustes
|
train
|
def procrustes(a,
               b,
               reflection=True,
               translation=True,
               scale=True,
               return_cost=True):
    """
    Perform Procrustes' analysis subject to constraints. Finds the
    transformation T mapping a to b which minimizes the square sum
    distances between Ta and b, also called the cost.

    Parameters
    ----------
    a : (n,3) float
        List of points in space
    b : (n,3) float
        List of points in space
    reflection : bool
        If the transformation is allowed reflections
    translation : bool
        If the transformation is allowed translations
    scale : bool
        If the transformation is allowed scaling
    return_cost : bool
        Whether to return the cost and transformed a as well

    Returns
    ----------
    matrix : (4,4) float
        The transformation matrix sending a to b
    transformed : (n,3) float
        The image of a under the transformation
        (only if return_cost is True)
    cost : float
        The cost of the transformation
        (only if return_cost is True)
    """
    a = np.asanyarray(a, dtype=np.float64)
    b = np.asanyarray(b, dtype=np.float64)
    if not util.is_shape(a, (-1, 3)) or not util.is_shape(b, (-1, 3)):
        raise ValueError('points must be (n,3)!')
    if len(a) != len(b):
        raise ValueError('a and b must contain same number of points!')
    # Remove translation component
    if translation:
        acenter = a.mean(axis=0)
        bcenter = b.mean(axis=0)
    else:
        acenter = np.zeros(a.shape[1])
        bcenter = np.zeros(b.shape[1])
    # Remove scale component
    # RMS distance from the center acts as the scale estimate
    if scale:
        ascale = np.sqrt(((a - acenter)**2).sum() / len(a))
        bscale = np.sqrt(((b - bcenter)**2).sum() / len(b))
    else:
        ascale = 1
        bscale = 1
    # Use SVD to find optimal orthogonal matrix R
    # constrained to det(R) = 1 if necessary.
    u, s, vh = np.linalg.svd(
        np.dot(((b - bcenter) / bscale).T, ((a - acenter) / ascale)))
    if reflection:
        R = np.dot(u, vh)
    else:
        # flip the last singular direction if u @ vh would
        # be a reflection, forcing det(R) = +1
        R = np.dot(np.dot(u, np.diag(
            [1, 1, np.linalg.det(np.dot(u, vh))])), vh)
    # Compute our 4D transformation matrix encoding
    # a -> (R @ (a - acenter)/ascale) * bscale + bcenter
    #    = (bscale/ascale)R @ a + (bcenter - (bscale/ascale)R @ acenter)
    translation = bcenter - (bscale / ascale) * np.dot(R, acenter)
    matrix = np.hstack((bscale / ascale * R, translation.reshape(-1, 1)))
    # append the homogeneous bottom row [0, 0, 0, 1]
    matrix = np.vstack(
        (matrix, np.array([0.] * (a.shape[1]) + [1.]).reshape(1, -1)))
    if return_cost:
        transformed = transform_points(a, matrix)
        # mean squared residual between the mapped a and b
        cost = ((b - transformed)**2).mean()
        return matrix, transformed, cost
    else:
        return matrix
|
python
|
{
"resource": ""
}
|
q23050
|
concatenate
|
train
|
def concatenate(visuals, *args):
    """
    Concatenate multiple visual objects.

    Parameters
    ----------
    visuals : ColorVisuals or (n,) ColorVisuals
      Visuals to concatenate
    *args : ColorVisuals or (n,) ColorVisuals
      More visuals to concatenate

    Returns
    ----------
    concat : ColorVisuals
      Visual object with the stacked colors
    """
    # get a flat list of ColorVisuals objects
    if len(args) > 0:
        visuals = np.append(visuals, args)
    else:
        visuals = np.array(visuals)

    # get the type of visuals (vertex or face) removing undefined
    modes = {v.kind for v in visuals}.difference({None})
    if len(modes) == 0:
        # none of the visuals have anything defined
        return ColorVisuals()

    # if we have visuals with different modes defined
    # arbitrarily get one of them
    mode = modes.pop()
    # the attribute holding the colors for this mode
    attr = mode + '_colors'
    # use getattr instead of eval: same result without
    # the overhead or code-execution hazard of eval
    stacked = np.vstack([getattr(v, attr) for v in visuals])
    # pass the stacked colors to the matching keyword argument
    concat = ColorVisuals(**{attr: stacked})
    return concat
|
python
|
{
"resource": ""
}
|
q23051
|
to_rgba
|
train
|
def to_rgba(colors, dtype=np.uint8):
    """
    Convert one or many RGB colors to RGBA colors.

    Parameters
    ----------
    colors : (n, 3), (n, 4), (3,) or (4,) float or int
      RGB or RGBA colors

    Returns
    ----------
    colors : (n, 4) or (4,) dtype
      RGBA colors
    """
    if not util.is_sequence(colors):
        return

    colors = np.asanyarray(colors)
    # the fully-opaque alpha value for the target dtype
    opaque = np.iinfo(dtype).max

    if colors.dtype.kind == 'f' and colors.max() < (1.0 + 1e-8):
        # floats on the unit interval get scaled to the integer range
        colors = (colors * opaque).astype(dtype)
    elif colors.max() <= opaque:
        # already inside the integer range: just cast
        colors = colors.astype(dtype)
    else:
        raise ValueError('colors non- convertible!')

    if util.is_shape(colors, (-1, 3)):
        # append an opaque alpha channel to (n, 3) colors
        alpha = opaque * np.ones(len(colors))
        colors = np.column_stack((colors, alpha)).astype(dtype)
    elif util.is_shape(colors, (3,)):
        # a single RGB color gets a single alpha value
        colors = np.append(colors, opaque)

    if not (util.is_shape(colors, (4,)) or
            util.is_shape(colors, (-1, 4))):
        raise ValueError('Colors not of appropriate shape!')

    return colors
|
python
|
{
"resource": ""
}
|
q23052
|
random_color
|
train
|
def random_color(dtype=np.uint8):
    """
    Return a random RGBA color using the datatype specified.

    Parameters
    ----------
    dtype : numpy dtype
      Datatype of the result

    Returns
    ----------
    color : (4,) dtype
      Random color that looks OK
    """
    # golden-ratio offset spreads successive random hues nicely
    hue = (np.random.random() + .61803) % 1.0
    color = np.array(colorsys.hsv_to_rgb(hue, .99, .99))
    if np.dtype(dtype).kind in 'iu':
        # scale the unit-interval floats to the integer range
        max_value = (2**(np.dtype(dtype).itemsize * 8)) - 1
        color *= max_value
    # append an opaque alpha and cast
    # NOTE(review): max_value is only bound for integer dtypes;
    # a float dtype would raise NameError here — confirm intent
    color = np.append(color, max_value).astype(dtype)
    return color
|
python
|
{
"resource": ""
}
|
q23053
|
vertex_to_face_color
|
train
|
def vertex_to_face_color(vertex_colors, faces):
    """
    Convert a list of vertex colors to face colors.

    Parameters
    ----------
    vertex_colors : (n, 3) or (n, 4)
      Colors per vertex
    faces : (m, 3) int
      Vertex indexes of each face

    Returns
    -----------
    face_colors : (m, 4) uint8
      Color of each face
    """
    rgba = to_rgba(vertex_colors)
    # a face's color is the mean of its three corner colors
    averaged = rgba[faces].mean(axis=1)
    return averaged.astype(np.uint8)
|
python
|
{
"resource": ""
}
|
q23054
|
face_to_vertex_color
|
train
|
def face_to_vertex_color(mesh, face_colors, dtype=np.uint8):
    """
    Convert a list of face colors into a list of vertex colors.

    Parameters
    -----------
    mesh : trimesh.Trimesh
      Mesh the colors belong to
    face_colors : (n, 3) or (n, 4) int
      Face colors
    dtype : numpy dtype
      Data type of output

    Returns
    -----------
    vertex_colors : (m, 4) dtype
      Color for each vertex
    """
    rgba = to_rgba(face_colors)
    # sum the colors of every face each vertex is used in
    summed = mesh.faces_sparse.dot(rgba.astype(np.float64))
    # normalize by the number of faces per vertex
    summed /= mesh.faces_sparse.sum(axis=1)
    return summed.astype(dtype)
|
python
|
{
"resource": ""
}
|
q23055
|
colors_to_materials
|
train
|
def colors_to_materials(colors, count=None):
    """
    Convert a list of colors into a list of unique materials
    and material indexes.

    Parameters
    -----------
    colors : (n, 3) or (n, 4) float
      RGB or RGBA colors
    count : int
      Number of entities to apply color to

    Returns
    -----------
    diffuse : (m, 4) int
      Colors
    index : (count,) int
      Index of each color
    """
    # convert RGB to RGBA
    rgba = to_rgba(colors)

    if util.is_shape(rgba, (4,)) and count is not None:
        # we were only passed a single color:
        # every entity indexes the one material
        diffuse = rgba.reshape((-1, 4))
        # use np.int64: the bare `np.int` alias was
        # deprecated in numpy 1.20 and later removed
        index = np.zeros(count, dtype=np.int64)
    elif util.is_shape(rgba, (-1, 4)):
        # we were passed multiple colors
        # find the unique colors in the list to save as materials
        unique, index = grouping.unique_rows(rgba)
        diffuse = rgba[unique]
    else:
        raise ValueError('Colors not convertible!')

    return diffuse, index
|
python
|
{
"resource": ""
}
|
q23056
|
linear_color_map
|
train
|
def linear_color_map(values, color_range=None):
    """
    Linearly interpolate between two RGBA colors.

    If no color_range is given, 0.0 maps to pure red
    and 1.0 maps to pure green.

    Parameters
    --------------
    values : (n, ) float
      Values to interpolate
    color_range : None, or (2, 4) uint8
      Colors assigned to the two extrema

    Returns
    ---------------
    colors : (n, 4) uint8
      RGBA colors for interpolated values
    """
    if color_range is None:
        # default endpoints: red at 0.0, green at 1.0
        color_range = np.array([[255, 0, 0, 255],
                                [0, 255, 0, 255]],
                               dtype=np.uint8)
    else:
        color_range = np.asanyarray(color_range,
                                    dtype=np.uint8)
        if color_range.shape != (2, 4):
            raise ValueError('color_range must be RGBA (2, 4)')

    # clamp to the unit interval and shape as a column
    # so it broadcasts against the (4,) endpoint colors
    t = np.clip(np.asanyarray(values, dtype=np.float64).ravel(),
                0.0, 1.0).reshape((-1, 1))

    # the two endpoint colors as floats
    low, high = color_range.astype(np.float64)
    # standard linear interpolation between the endpoints
    blended = (high * t) + (low * (1.0 - t))

    # round and cast back to unsigned bytes
    return np.round(blended).astype(np.uint8)
|
python
|
{
"resource": ""
}
|
q23057
|
interpolate
|
train
|
def interpolate(values, color_map=None, dtype=np.uint8):
    """
    Given a 1D list of values, return interpolated colors
    for the range.

    Parameters
    ---------------
    values : (n, ) float
      Values to be interpolated over
    color_map : None, or str
      Key to a colormap contained in:
      matplotlib.pyplot.colormaps()
      e.g: 'viridis'

    Returns
    -------------
    interpolated : (n, 4) dtype
      Interpolated RGBA colors
    """
    # get a color interpolation function
    if color_map is None:
        cmap = linear_color_map
    else:
        from matplotlib.pyplot import get_cmap
        cmap = get_cmap(color_map)

    # make input always float
    values = np.asanyarray(values, dtype=np.float64).ravel()
    # use the np.ptp function: the ndarray.ptp
    # method was removed in numpy 2.0
    ptp = np.ptp(values)
    if ptp > 0.0:
        # scale values to 0.0 - 1.0
        scaled = (values - values.min()) / ptp
    else:
        # all values are identical: avoid the divide-by-zero
        # which previously produced NaN colors
        scaled = np.zeros_like(values)
    # get colors then convert to 0-255 RGBA
    rgba = to_rgba(cmap(scaled), dtype=dtype)
    return rgba
|
python
|
{
"resource": ""
}
|
q23058
|
ColorVisuals.transparency
|
train
|
def transparency(self):
    """
    Does the current object contain any transparency.

    Returns
    ----------
    transparency : bool
      Does the current visual contain transparency
    """
    # check vertex colors first, then face colors,
    # mirroring the storage priority
    for key in ('vertex_colors', 'face_colors'):
        if key in self._data:
            # transparent if any alpha value is below fully opaque
            return bool(self._data[key][:, 3].min() < 255)
    # no colors stored at all
    return False
|
python
|
{
"resource": ""
}
|
q23059
|
ColorVisuals.kind
|
train
|
def kind(self):
    """
    What color mode has been set.

    Returns
    ----------
    mode : 'face', 'vertex', or None
      Which kind of color is defined
    """
    # migrate any user-altered cached colors into data first
    self._verify_crc()
    if 'vertex_colors' in self._data:
        return 'vertex'
    if 'face_colors' in self._data:
        return 'face'
    # nothing defined
    return None
|
python
|
{
"resource": ""
}
|
q23060
|
ColorVisuals.crc
|
train
|
def crc(self):
    """
    A checksum for the current visual object and its parent mesh.

    Returns
    ----------
    crc : int
      Checksum of data in visual object and its parent mesh
    """
    # hashing the datastore also flushes any pending transfers
    result = self._data.fast_hash()
    if hasattr(self.mesh, 'crc'):
        # fold in the parent mesh checksum; xor mixes
        # two hashes better than a sum would
        result ^= self.mesh.crc()
    return result
|
python
|
{
"resource": ""
}
|
q23061
|
ColorVisuals.copy
|
train
|
def copy(self):
    """
    Return a duplicate of the current ColorVisuals object.

    Returns
    ----------
    copied : ColorVisuals
      Contains the same information as self
    """
    fresh = ColorVisuals()
    # deep-copy so the duplicate shares no arrays with self
    fresh._data.data = copy.deepcopy(self._data.data)
    return fresh
|
python
|
{
"resource": ""
}
|
q23062
|
ColorVisuals.face_colors
|
train
|
def face_colors(self, values):
    """
    Set the colors for each face of a mesh, deleting any
    previously specified color information.

    Parameters
    ------------
    values : (len(mesh.faces), 3) or (len(mesh.faces), 4) int
      Per-face colors, or
    values : (3,) or (4,) int
      A single color for the whole mesh
    """
    if values is None:
        # None clears any stored face colors
        if 'face_colors' in self._data:
            self._data.data.pop('face_colors')
        return

    colors = to_rgba(values)
    if self.mesh is not None and colors.shape == (4,):
        # a single color: tile it to every face of the mesh
        colors = np.tile(colors, (len(self.mesh.faces), 1))

    # setting face colors replaces all other color information
    self._data.clear()
    self._data['face_colors'] = colors
    self._cache.verify()
|
python
|
{
"resource": ""
}
|
q23063
|
ColorVisuals.vertex_colors
|
train
|
def vertex_colors(self, values):
    """
    Set the colors for each vertex of a mesh, deleting any
    previously specified color information.

    Parameters
    ------------
    values : (len(mesh.vertices), 3) or (len(mesh.vertices), 4) int
      Per-vertex colors, or
    values : (3,) or (4,) int
      A single color for the whole mesh
    """
    if values is None:
        # None clears any stored vertex colors
        if 'vertex_colors' in self._data:
            self._data.data.pop('vertex_colors')
        return

    # make sure passed values are a numpy array
    values = np.asanyarray(values)

    if self.mesh is not None:
        # only accept per-vertex arrays or a single color
        acceptable = ((len(self.mesh.vertices), 3),
                      (len(self.mesh.vertices), 4),
                      (3,),
                      (4,))
        if values.shape not in acceptable:
            # silently ignore colors of the wrong shape
            return

    colors = to_rgba(values)
    if self.mesh is not None and colors.shape == (4,):
        # a single color: tile it to every vertex of the mesh
        colors = np.tile(colors, (len(self.mesh.vertices), 1))

    # setting vertex colors replaces all other color information
    self._data.clear()
    self._data['vertex_colors'] = colors
    self._cache.verify()
|
python
|
{
"resource": ""
}
|
q23064
|
ColorVisuals._get_colors
|
train
|
def _get_colors(self,
                name):
    """
    A magical function which maintains the sanity of vertex and face colors.

    * If colors have been explicitly stored or changed, they are considered
      user data, stored in self._data (DataStore), and are returned
      immediately when requested.
    * If colors have never been set, a (count, 4) tiled copy of the default
      diffuse color will be stored in the cache.

      ** The CRC on creation for these cached default colors is also stored.
      ** If the cached color array is altered (different CRC than when it
         was created) we consider that to now be user data and the array is
         moved from the cache to the DataStore.

    Parameters
    -----------
    name : str
      'face' or 'vertex'

    Returns
    -----------
    colors : (count, 4) uint8
      RGBA colors
    """
    # number of entities of the requested kind; None when
    # there is no parent mesh to count against
    try:
        counts = {'face': len(self.mesh.faces),
                  'vertex': len(self.mesh.vertices)}
        count = counts[name]
    except AttributeError:
        count = None
    # the face or vertex colors
    key_colors = str(name) + '_colors'
    # the initial crc of the
    key_crc = key_colors + '_crc'
    if key_colors in self._data:
        # if a user has explicitly stored or changed the color it
        # will be in data
        return self._data[key_colors]
    elif key_colors in self._cache:
        # if the colors have been autogenerated already they
        # will be in the cache
        colors = self._cache[key_colors]
        # if the cached colors have been changed since creation we move
        # them to data
        if colors.crc() != self._cache[key_crc]:
            # call the setter on the property using exec
            # this avoids having to pass a setter to this function
            if name == 'face':
                self.face_colors = colors
            elif name == 'vertex':
                self.vertex_colors = colors
            else:
                raise ValueError('unsupported name!!!')
        self._cache.verify()
    else:
        # colors have never been accessed
        if self.kind is None:
            # no colors are defined, so create a (count, 4) tiled
            # copy of the default color
            colors = np.tile(self.defaults['material_diffuse'],
                             (count, 1))
        elif (self.kind == 'vertex' and
              name == 'face'):
            # vertex colors exist: derive face colors from them
            colors = vertex_to_face_color(
                vertex_colors=self.vertex_colors,
                faces=self.mesh.faces)
        elif (self.kind == 'face' and
              name == 'vertex'):
            # face colors exist: derive vertex colors from them
            colors = face_to_vertex_color(
                mesh=self.mesh,
                face_colors=self.face_colors)
        else:
            raise ValueError('self.kind not accepted values!!')
        if (count is not None and
                colors.shape != (count, 4)):
            raise ValueError('face colors incorrect shape!')
        # subclass the array to track for changes using a CRC
        colors = caching.tracked_array(colors)
        # put the generated colors and their initial checksum into cache
        self._cache[key_colors] = colors
        self._cache[key_crc] = colors.crc()
    return colors
|
python
|
{
"resource": ""
}
|
q23065
|
ColorVisuals._verify_crc
|
train
|
def _verify_crc(self):
    """
    Verify the checksums of cached face and vertex colors to check
    that a user hasn't altered them since they were generated from
    defaults.

    If the colors have been altered since creation, move them into
    the DataStore at self._data, since the user action has made them
    user data.
    """
    # nothing cached means nothing could have been altered
    if not hasattr(self, '_cache') or len(self._cache) == 0:
        return
    for name in ['face', 'vertex']:
        # the face or vertex colors
        key_colors = str(name) + '_colors'
        # the initial crc of the
        key_crc = key_colors + '_crc'
        if key_colors not in self._cache:
            continue
        colors = self._cache[key_colors]
        # if the cached colors have been changed since creation
        # move them to data
        if colors.crc() != self._cache[key_crc]:
            # assigning through the property setter stores
            # the colors in self._data as user data
            if name == 'face':
                self.face_colors = colors
            elif name == 'vertex':
                self.vertex_colors = colors
            else:
                raise ValueError('unsupported name!!!')
    self._cache.verify()
|
python
|
{
"resource": ""
}
|
q23066
|
ColorVisuals.face_subset
|
train
|
def face_subset(self, face_index):
    """
    Given a mask of face indices, return a sliced version.

    Parameters
    ----------
    face_index : (n,) int or (n,) bool
      Mask for faces

    Returns
    ----------
    visual : ColorVisuals
      Contains a subset of faces
    """
    if not self.defined:
        # nothing defined: return an empty visual
        return ColorVisuals()
    # slice the face colors with the mask
    return ColorVisuals(face_colors=self.face_colors[face_index])
|
python
|
{
"resource": ""
}
|
q23067
|
ColorVisuals.main_color
|
train
|
def main_color(self):
    """
    What is the most commonly occurring color.

    Returns
    ------------
    color : (4,) uint8
      Most common color
    """
    mode = self.kind
    if mode is None:
        return DEFAULT_COLOR
    if mode == 'face':
        colors = self.face_colors
    elif mode == 'vertex':
        colors = self.vertex_colors
    else:
        raise ValueError('color kind incorrect!')

    # index of each unique color and the mapping back to rows
    unique, inverse = grouping.unique_rows(colors)
    # the most frequent value of inverse indexes `unique`,
    # not `colors` directly
    frequent = np.bincount(inverse).argmax()
    return colors[unique[frequent]]
|
python
|
{
"resource": ""
}
|
q23068
|
ColorVisuals.concatenate
|
train
|
def concatenate(self, other, *args):
    """
    Concatenate two or more ColorVisuals objects into a single object.

    Parameters
    -----------
    other : ColorVisuals
      Object to append
    *args : ColorVisuals
      More objects to append

    Returns
    -----------
    result : ColorVisuals
      Contains information from this object and the others,
      in the order they were passed
    """
    # local import avoids a circular import at module load
    from . import objects
    return objects.concatenate(self, other, *args)
|
python
|
{
"resource": ""
}
|
q23069
|
ColorVisuals._update_key
|
train
|
def _update_key(self, mask, key):
"""
Mask the value contained in the DataStore at a specified key.
Parameters
-----------
mask: (n,) int
(n,) bool
key: hashable object, in self._data
"""
mask = np.asanyarray(mask)
if key in self._data:
self._data[key] = self._data[key][mask]
|
python
|
{
"resource": ""
}
|
q23070
|
Trimesh.process
|
train
|
def process(self):
    """
    Do the bare minimum processing to make a mesh useful.

    Does this by:
        1) removing NaN and Inf values
        2) merging duplicate vertices

    If self._validate:
        3) Remove triangles which have one edge of their rectangular 2D
           oriented bounding box shorter than tol.merge
        4) remove duplicated triangles

    Returns
    ------------
    self : trimesh.Trimesh
      Current mesh
    """
    # if there are no vertices or faces exit early
    if self.is_empty:
        return self

    # avoid clearing the cache during operations:
    # the cache context manager suppresses invalidation
    # while the mutating calls below run
    with self._cache:
        self.remove_infinite_values()
        self.merge_vertices()
        # if we're cleaning remove duplicate
        # and degenerate faces
        if self._validate:
            self.remove_duplicate_faces()
            self.remove_degenerate_faces()
    # since none of our process operations moved vertices or faces,
    # we can keep face and vertex normals in the cache without recomputing
    # if faces or vertices have been removed, normals are validated before
    # being returned so there is no danger of inconsistent dimensions
    self._cache.clear(exclude=['face_normals',
                               'vertex_normals'])
    self.metadata['processed'] = True
    return self
|
python
|
{
"resource": ""
}
|
q23071
|
Trimesh.faces
|
train
|
def faces(self, values):
    """
    Set the vertex indexes that make up triangular faces.

    Parameters
    --------------
    values : (n, 3) int
      Indexes of self.vertices
    """
    # treat None as an empty face list
    values = np.asanyarray(
        [] if values is None else values,
        dtype=np.int64)
    if util.is_shape(values, (-1, 4)):
        # split each quad into two triangles
        log.info('triangulating quad faces')
        values = geometry.triangulate_quads(values)
    self._data['faces'] = values
|
python
|
{
"resource": ""
}
|
q23072
|
Trimesh.faces_sparse
|
train
|
def faces_sparse(self):
    """
    A sparse matrix representation of the faces.

    Returns
    ----------
    sparse : scipy.sparse.coo_matrix
      dtype bool with shape
      (len(self.vertices), len(self.faces))
    """
    # one boolean entry per (vertex, face) membership
    return geometry.index_sparse(
        column_count=len(self.vertices),
        indices=self.faces)
|
python
|
{
"resource": ""
}
|
q23073
|
Trimesh.face_normals
|
train
|
def face_normals(self):
    """
    Return the unit normal vector for each face.

    If a face is degenerate and a normal can't be generated
    a zero magnitude unit vector will be returned for that face.

    Returns
    -----------
    normals : (len(self.faces), 3) np.float64
      Normal vectors of each face
    """
    # check shape of cached normals
    cached = self._cache['face_normals']
    if np.shape(cached) == np.shape(self._data['faces']):
        return cached

    log.debug('generating face normals')
    # use cached triangle cross products to generate normals
    # this will always return the correct shape but some values
    # will be zero or an arbitrary vector if the inputs had
    # a cross product below machine epsilon
    normals, valid = triangles.normals(
        triangles=self.triangles,
        crosses=self.triangles_cross)

    # if all triangles are valid shape is correct
    if valid.all():
        # put calculated face normals into cache manually
        self._cache['face_normals'] = normals
        return normals

    # make a padded list of normals for correct shape;
    # degenerate faces keep the zero vector from np.zeros
    padded = np.zeros((len(self.triangles), 3),
                      dtype=np.float64)
    padded[valid] = normals

    # put calculated face normals into cache manually
    self._cache['face_normals'] = padded
    return padded
|
python
|
{
"resource": ""
}
|
q23074
|
Trimesh.face_normals
|
train
|
def face_normals(self, values):
    """
    Assign values to face normals.

    Silently ignores the assignment (with a logged warning) when
    the values are all zero or don't match the mesh triangles.

    Parameters
    -------------
    values : (len(self.faces), 3) float
      Unit face normals
    """
    if values is not None:
        # make sure face normals are C- contiguous float
        values = np.asanyarray(values,
                               order='C',
                               dtype=np.float64)

        # check if any values are larger than tol.merge
        # this check is equivalent to but 25% faster than:
        # `np.abs(values) > tol.merge`
        nonzero = np.logical_or(values > tol.merge,
                                values < -tol.merge)

        # don't set the normals if they are all zero
        if not nonzero.any():
            log.warning('face_normals all zero, ignoring!')
            return

        # make sure the first few normals match the first few triangles;
        # only a 20-face sample is checked, not the whole array
        check, valid = triangles.normals(
            self.vertices.view(np.ndarray)[self.faces[:20]])
        compare = np.zeros((len(valid), 3))
        compare[valid] = check
        if not np.allclose(compare, values[:20]):
            log.warning('face_normals didn\'t match triangles, ignoring!')
            return

        # normals are stored in the cache, not the datastore
        self._cache['face_normals'] = values
|
python
|
{
"resource": ""
}
|
q23075
|
Trimesh.vertices
|
train
|
def vertices(self, values):
    """
    Assign vertex values to the mesh.

    Parameters
    --------------
    values : (n, 3) float
      Points in space
    """
    # always store vertices as C-contiguous float64
    self._data['vertices'] = np.asanyarray(
        values, order='C', dtype=np.float64)
|
python
|
{
"resource": ""
}
|
q23076
|
Trimesh.vertex_normals
|
train
|
def vertex_normals(self):
    """
    The vertex normals of the mesh.

    If no vertex normals were loaded (or a shape mismatch was
    detected) they are computed as the mean of the normals of
    the faces each vertex is used in.

    Returns
    ----------
    vertex_normals : (len(self.vertices), 3) float
      Surface normal at each vertex
    """
    # the sparse face matrix must support a dot product
    assert hasattr(self.faces_sparse, 'dot')
    # average the face normals onto each vertex
    return geometry.mean_vertex_normals(
        vertex_count=len(self.vertices),
        faces=self.faces,
        face_normals=self.face_normals,
        sparse=self.faces_sparse)
|
python
|
{
"resource": ""
}
|
q23077
|
Trimesh.vertex_normals
|
train
|
def vertex_normals(self, values):
    """
    Assign values to vertex normals.

    Parameters
    -------------
    values : (len(self.vertices), 3) float
      Unit normal vectors for each vertex
    """
    if values is None:
        # nothing to store
        return
    values = np.asanyarray(values,
                           order='C',
                           dtype=np.float64)
    # only store normals that match the vertices exactly
    if values.shape == self.vertices.shape:
        self._cache['vertex_normals'] = values
|
python
|
{
"resource": ""
}
|
q23078
|
Trimesh.bounds
|
train
|
def bounds(self):
    """
    The axis aligned bounds of the faces of the mesh.

    Returns
    -----------
    bounds : (2, 3) float
      Bounding box as [min, max] coordinates
    """
    # only consider vertices actually referenced by a face
    referenced = self.vertices[self.referenced_vertices]
    result = np.array([referenced.min(axis=0),
                       referenced.max(axis=0)])
    # discourage mutation of the cached result
    result.flags.writeable = False
    return result
|
python
|
{
"resource": ""
}
|
q23079
|
Trimesh.extents
|
train
|
def extents(self):
    """
    The length, width, and height of the bounding box of the mesh.

    Returns
    -----------
    extents : (3,) float
      Axis aligned [length, width, height]
    """
    # use the np.ptp function: the ndarray.ptp
    # method was removed in numpy 2.0
    extents = np.ptp(self.bounds, axis=0)
    # discourage mutation of the cached result
    extents.flags.writeable = False
    return extents
|
python
|
{
"resource": ""
}
|
q23080
|
Trimesh.centroid
|
train
|
def centroid(self):
    """
    The point in space which is the average of the triangle
    centroids weighted by the area of each triangle.

    This will be valid even for non-watertight meshes,
    unlike self.center_mass.

    Returns
    ----------
    centroid : (3,) float
      The average vertex weighted by face area
    """
    # area-weighted average of the per-triangle centroids
    weighted = np.average(self.triangles_center,
                          axis=0,
                          weights=self.area_faces)
    # discourage mutation of the cached result
    weighted.flags.writeable = False
    return weighted
|
python
|
{
"resource": ""
}
|
q23081
|
Trimesh.density
|
train
|
def density(self, value):
    """
    Set the density of the mesh.

    Parameters
    -------------
    value : float
      Density used in inertia calculations
    """
    self._density = float(value)
    # mass properties depend on density so invalidate them
    self._cache.delete('mass_properties')
|
python
|
{
"resource": ""
}
|
q23082
|
Trimesh.principal_inertia_components
|
train
|
def principal_inertia_components(self):
    """
    Return the principal components of inertia.

    Ordering corresponds to mesh.principal_inertia_vectors.

    Returns
    ----------
    components : (3,) float
      Principal components of inertia
    """
    # components and their axes from the inertia matrix
    components, vectors = inertia.principal_axis(self.moment_inertia)
    # stash the matching vectors for the sibling property
    self._cache['principal_inertia_vectors'] = vectors
    return components
|
python
|
{
"resource": ""
}
|
q23083
|
Trimesh.principal_inertia_transform
|
train
|
def principal_inertia_transform(self):
    """
    A transform which moves the current mesh so the principal
    inertia vectors are on the X, Y, and Z axis, and the centroid
    is at the origin.

    Returns
    ----------
    transform : (4, 4) float
      Homogenous transformation matrix
    """
    # the two largest inertia components, largest first
    order = np.argsort(self.principal_inertia_components)[1:][::-1]
    vectors = self.principal_inertia_vectors[order]
    # complete a basis with the cross product of the two vectors
    vectors = np.vstack((vectors, np.cross(*vectors)))

    transform = np.eye(4)
    transform[:3, :3] = vectors
    transform = transformations.transform_around(
        matrix=transform,
        point=self.centroid)
    # NOTE(review): transform_around rotates about the centroid;
    # this subtraction appears to be what translates the centroid
    # to the origin afterwards — confirm against transform_around
    transform[:3, 3] -= self.centroid

    return transform
|
python
|
{
"resource": ""
}
|
q23084
|
Trimesh.edges_unique
|
train
|
def edges_unique(self):
    """
    The unique edges of the mesh.

    Returns
    ----------
    edges_unique : (n, 2) int
      Vertex indices for unique edges
    """
    unique, inverse = grouping.unique_rows(self.edges_sorted)
    result = self.edges_sorted[unique]
    # the return value is cached by the decorator; the extra
    # derived arrays have to be inserted into the cache manually
    self._cache['edges_unique_idx'] = unique
    self._cache['edges_unique_inverse'] = inverse
    return result
|
python
|
{
"resource": ""
}
|
q23085
|
Trimesh.edges_unique_length
|
train
|
def edges_unique_length(self):
    """
    How long is each unique edge.

    Returns
    ----------
    length : (len(self.edges_unique),) float
      Length of each unique edge
    """
    # vector from one end of every unique edge to the other
    vectors = np.subtract(*self.vertices[self.edges_unique.T])
    return np.linalg.norm(vectors, axis=1)
|
python
|
{
"resource": ""
}
|
q23086
|
Trimesh.edges_sparse
|
train
|
def edges_sparse(self):
    """
    Edges in sparse bool COO graph format where connected
    vertices are True.

    Returns
    ----------
    sparse : (len(self.vertices), len(self.vertices)) bool
      Sparse graph in COO format
    """
    # build the adjacency matrix from the edge list
    return graph.edges_to_coo(self.edges,
                              count=len(self.vertices))
|
python
|
{
"resource": ""
}
|
q23087
|
Trimesh.body_count
|
train
|
def body_count(self):
    """
    How many connected groups of vertices exist in this mesh.

    Note that this number may differ from the result of
    mesh.split, which is calculated from FACE rather than
    vertex adjacency.

    Returns
    -----------
    count : int
      Number of connected vertex groups
    """
    # connected components on the vertex adjacency graph;
    # labels are (len(vertices),) int
    count, labels = graph.csgraph.connected_components(
        self.edges_sparse,
        directed=False,
        return_labels=True)
    # stash the per-vertex component label for later use
    self._cache['vertices_component_label'] = labels
    return count
|
python
|
{
"resource": ""
}
|
q23088
|
Trimesh.faces_unique_edges
|
train
|
def faces_unique_edges(self):
    """
    For each face return which indexes in mesh.edges_unique
    construct that face.

    Returns
    ---------
    faces_unique_edges : (len(self.faces), 3) int
      Indexes of self.edges_unique that construct self.faces

    Examples
    ---------
    In [0]: mesh.faces[0:2]
    Out[0]:
    TrackedArray([[    1,  6946, 24224],
                  [ 6946,  1727, 24225]])

    In [1]: mesh.edges_unique[mesh.faces_unique_edges[0:2]]
    Out[1]:
    array([[[    1,  6946],
            [ 6946, 24224],
            [    1, 24224]],
           [[ 1727,  6946],
            [ 1727, 24225],
            [ 6946, 24225]]])
    """
    # accessing edges_unique populates 'edges_unique_inverse'
    _ = self.edges_unique
    # edges are stacked per-face in triplets, so the inverse
    # mapping reshapes directly to three edges per face
    return self._cache['edges_unique_inverse'].reshape((-1, 3))
|
python
|
{
"resource": ""
}
|
q23089
|
Trimesh.referenced_vertices
|
train
|
def referenced_vertices(self):
    """
    Which vertices in the current mesh are referenced by a face.

    Returns
    -------------
    referenced : (len(self.vertices),) bool
      Which vertices are referenced by a face
    """
    # use builtin bool: the bare `np.bool` alias was
    # deprecated in numpy 1.20 and later removed
    referenced = np.zeros(len(self.vertices), dtype=bool)
    # mark every vertex index that appears in a face
    referenced[self.faces] = True
    return referenced
|
python
|
{
"resource": ""
}
|
q23090
|
Trimesh.convert_units
|
train
|
def convert_units(self, desired, guess=False):
    """
    Convert the units of the mesh into a specified unit.

    Parameters
    ----------
    desired : str
      Units to convert to (eg 'inches')
    guess : bool
      If self.units are not defined, should we guess the
      current units of the document and then convert?

    Returns
    ----------
    self : trimesh.Trimesh
      Current mesh, scaled in place
    """
    # delegate the scaling to the units module
    units._convert_units(self, desired, guess)
    return self
|
python
|
{
"resource": ""
}
|
q23091
|
Trimesh.merge_vertices
|
train
|
def merge_vertices(self, digits=None, textured=True):
    """
    If the mesh has vertices closer than
    trimesh.constants.tol.merge, reindex faces to reference
    a single index for both vertices.

    Parameters
    --------------
    digits : int
      If specified, overrides tol.merge
    textured : bool
      If True, avoid merging vertices with different UV
      coordinates; no effect on untextured meshes
    """
    # delegate the merge to the grouping module
    grouping.merge_vertices(self,
                            digits=digits,
                            textured=textured)
|
python
|
{
"resource": ""
}
|
q23092
|
Trimesh.update_vertices
|
train
|
def update_vertices(self, mask, inverse=None):
    """
    Update vertices with a mask.

    Parameters
    ----------
    mask : (len(self.vertices),) bool or int
      Which vertices to keep
    inverse : (len(self.vertices),) int
      Array to reconstruct vertex references,
      such as output by np.unique
    """
    # if the mesh is already empty we can't remove anything
    if self.is_empty:
        return

    # make sure mask is a numpy array
    mask = np.asanyarray(mask)

    if ((mask.dtype.name == 'bool' and mask.all()) or
            len(mask) == 0 or self.is_empty):
        # mask doesn't remove any vertices so exit early
        return

    # create the inverse mask if not passed
    if inverse is None:
        inverse = np.zeros(len(self.vertices), dtype=np.int64)
        if mask.dtype.kind == 'b':
            inverse[mask] = np.arange(mask.sum())
        elif mask.dtype.kind == 'i':
            inverse[mask] = np.arange(len(mask))
        else:
            # unsupported mask dtype: leave faces untouched
            inverse = None

    # re- index faces from inverse
    if inverse is not None and util.is_shape(self.faces, (-1, 3)):
        self.faces = inverse[self.faces.reshape(-1)].reshape((-1, 3))

    # update the visual object with our mask
    self.visual.update_vertices(mask)
    # get the normals from cache before dumping
    cached_normals = self._cache['vertex_normals']

    # actually apply the mask
    self.vertices = self.vertices[mask]

    # if we had passed vertex normals try to save them
    if util.is_shape(cached_normals, (-1, 3)):
        try:
            self.vertex_normals = cached_normals[mask]
        except BaseException:
            # best-effort: silently drop normals that can't be masked
            pass
|
python
|
{
"resource": ""
}
|
q23093
|
Trimesh.update_faces
|
train
|
def update_faces(self, mask):
    """
    In many cases, we will want to remove specific faces.
    However, there is additional bookkeeping to do this cleanly.
    This function updates the set of faces with a validity mask,
    as well as keeping track of normals and colors.

    Parameters
    ---------
    mask : (m,) int or (len(self.faces),) bool
      Faces to keep
    """
    # if the mesh is already empty we can't remove anything
    if self.is_empty:
        return

    mask = np.asanyarray(mask)
    if mask.dtype.name == 'bool' and mask.all():
        # mask removes no faces so exit early
        return

    # try to save face normals before dumping cache
    cached_normals = self._cache['face_normals']

    faces = self._data['faces']
    # if Trimesh has been subclassed and faces have been moved from data
    # to cache, get faces from cache.
    if not util.is_shape(faces, (-1, 3)):
        faces = self._cache['faces']

    # actually apply the mask
    self.faces = faces[mask]
    # apply the mask to the visual object
    self.visual.update_faces(mask)

    # if our normals were the correct shape apply them
    if util.is_shape(cached_normals, (-1, 3)):
        self.face_normals = cached_normals[mask]
|
python
|
{
"resource": ""
}
|
q23094
|
Trimesh.remove_infinite_values
|
train
|
def remove_infinite_values(self):
    """
    Ensure that every vertex and face consists of finite numbers.

    This will remove vertices or faces containing np.nan and np.inf.

    Alters
    ----------
    self.faces : masked to remove np.inf/np.nan
    self.vertices : masked to remove np.inf/np.nan
    """
    if util.is_shape(self.faces, (-1, 3)):
        # keep faces whose three indices are all finite
        self.update_faces(np.isfinite(self.faces).all(axis=1))
    if util.is_shape(self.vertices, (-1, 3)):
        # keep vertices whose coordinates are all finite
        self.update_vertices(np.isfinite(self.vertices).all(axis=1))
|
python
|
{
"resource": ""
}
|
q23095
|
Trimesh.remove_duplicate_faces
|
train
|
def remove_duplicate_faces(self):
    """
    On the current mesh remove any faces which are duplicates.

    Faces are compared after sorting their vertex indices, so two
    faces referencing the same three vertices are considered
    duplicates regardless of winding order.

    Alters
    ----------
    self.faces : removes duplicates
    """
    # only the indices of the unique rows are needed here;
    # the inverse mapping previously unpacked was never used
    unique = grouping.unique_rows(np.sort(self.faces, axis=1))[0]
    self.update_faces(unique)
|
python
|
{
"resource": ""
}
|
q23096
|
Trimesh.split
|
train
|
def split(self, only_watertight=True, adjacency=None, **kwargs):
    """
    Returns a list of Trimesh objects, based on face connectivity.
    Splits into individual components, sometimes referred to as 'bodies'

    Parameters
    ---------
    only_watertight : bool
        Only return watertight meshes and discard remainder
    adjacency : None or (n, 2) int
        Override face adjacency with custom values

    Returns
    ---------
    meshes : (n,) trimesh.Trimesh
        Separate bodies from original mesh
    """
    # delegate the actual connected-component split to the graph module
    return graph.split(self,
                       only_watertight=only_watertight,
                       adjacency=adjacency,
                       **kwargs)
|
python
|
{
"resource": ""
}
|
q23097
|
Trimesh.face_adjacency
|
train
|
def face_adjacency(self):
    """
    Find faces that share an edge, which we call here 'adjacent'.

    Returns
    ----------
    adjacency : (n, 2) int
        Pairs of faces which share an edge

    Examples
    ---------
    In [1]: mesh = trimesh.load('models/featuretype.STL')
    In [2]: mesh.face_adjacency
    Out[2]:
    array([[ 0, 1],
    [ 2, 3],
    [ 0, 3],
    ...,
    [1112, 949],
    [3467, 3475],
    [1113, 3475]])
    In [3]: mesh.faces[mesh.face_adjacency[0]]
    Out[3]:
    TrackedArray([[ 1, 0, 408],
    [1239, 0, 1]], dtype=int64)
    In [4]: import networkx as nx
    In [5]: graph = nx.from_edgelist(mesh.face_adjacency)
    In [6]: groups = nx.connected_components(graph)
    """
    pairs, shared_edges = graph.face_adjacency(mesh=self,
                                               return_edges=True)
    # stash the edge shared by each adjacent face pair so other
    # cached properties can use it without recomputing adjacency
    self._cache['face_adjacency_edges'] = shared_edges
    return pairs
|
python
|
{
"resource": ""
}
|
q23098
|
Trimesh.face_adjacency_angles
|
train
|
def face_adjacency_angles(self):
    """
    Return the angle between adjacent faces

    Returns
    --------
    adjacency_angle : (n,) float
        Angle between adjacent faces
        Each value corresponds with self.face_adjacency
    """
    # gather the normal vectors of each adjacent face pair and
    # compute the angle between each pair of normals
    normal_pairs = self.face_normals[self.face_adjacency]
    return geometry.vector_angle(normal_pairs)
|
python
|
{
"resource": ""
}
|
q23099
|
Trimesh.face_adjacency_radius
|
train
|
def face_adjacency_radius(self):
    """
    The approximate radius of a cylinder that fits inside adjacent faces.

    Returns
    ------------
    radii : (len(self.face_adjacency),) float
        Approximate radius formed by triangle pair
    """
    approx_radii, adjacent_span = graph.face_adjacency_radius(mesh=self)
    # the span is produced as a byproduct; cache it for later use
    self._cache['face_adjacency_span'] = adjacent_span
    return approx_radii
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.