_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q23100
|
Trimesh.vertex_neighbors
|
train
|
def vertex_neighbors(self):
    """
    Immediate neighbors of every vertex of the mesh, pulled
    from the cached vertex_adjacency_graph.

    Returns
    ----------
    vertex_neighbors : (len(self.vertices),) int
      Neighboring vertex indices for each vertex,
      connected by a triangle edge

    Examples
    ----------
    Handy for finding nearby vertices, e.g. for
    simple smoothing techniques.

    >>> mesh = trimesh.primitives.Box()
    >>> mesh.vertex_neighbors[0]
    [1,2,3,4]
    """
    adjacency = self.vertex_adjacency_graph
    # collect the neighbor list for every vertex index
    collected = []
    for vertex_id in range(len(self.vertices)):
        collected.append(list(adjacency.neighbors(vertex_id)))
    return np.array(collected)
|
python
|
{
"resource": ""
}
|
q23101
|
Trimesh.is_winding_consistent
|
train
|
def is_winding_consistent(self):
    """
    Whether the mesh has consistent winding: every shared
    edge is traversed in opposite directions by the two
    faces of the pair.

    Returns
    --------
    consistent : bool
      True if winding is consistent
    """
    # an empty mesh cannot be consistently wound
    if self.is_empty:
        return False
    # is_watertight populates 'is_winding_consistent' into the
    # cache as a side effect, so poke it before reading the cache
    _ = self.is_watertight
    return self._cache['is_winding_consistent']
|
python
|
{
"resource": ""
}
|
q23102
|
Trimesh.is_watertight
|
train
|
def is_watertight(self):
    """
    Check whether the mesh is watertight by making sure
    every edge is included in exactly two faces.

    Returns
    ----------
    is_watertight : bool
      True if the mesh is watertight
    """
    # an empty mesh is not watertight
    if self.is_empty:
        return False
    # graph.is_watertight also reports winding consistency
    watertight, consistent = graph.is_watertight(
        edges=self.edges, edges_sorted=self.edges_sorted)
    # stash the winding flag for is_winding_consistent to read
    self._cache['is_winding_consistent'] = consistent
    return watertight
|
python
|
{
"resource": ""
}
|
q23103
|
Trimesh.is_volume
|
train
|
def is_volume(self):
    """
    Check whether the mesh has every property required to
    represent a valid volume rather than just a surface:
    watertight, consistently wound, with outward facing
    normals and a finite center of mass.

    Returns
    ---------
    valid : bool
      True if the mesh represents a valid volume
    """
    return bool(
        self.is_watertight
        and self.is_winding_consistent
        and np.isfinite(self.center_mass).all()
        and self.volume > 0.0)
|
python
|
{
"resource": ""
}
|
q23104
|
Trimesh.is_convex
|
train
|
def is_convex(self):
    """
    Check whether the mesh is convex.

    Returns
    ----------
    is_convex : bool
      True if the mesh is convex
    """
    # empty meshes are defined as not convex
    if self.is_empty:
        return False
    return bool(convex.is_convex(self))
|
python
|
{
"resource": ""
}
|
q23105
|
Trimesh.kdtree
|
train
|
def kdtree(self):
    """
    Construct a scipy.spatial.cKDTree holding the vertices
    of the mesh.

    Deliberately not cached: caching the tree has led to
    observed memory issues and segfaults.

    Returns
    ---------
    tree : scipy.spatial.cKDTree
      Contains mesh.vertices
    """
    from scipy.spatial import cKDTree
    return cKDTree(self.vertices.view(np.ndarray))
|
python
|
{
"resource": ""
}
|
q23106
|
Trimesh.facets_area
|
train
|
def facets_area(self):
    """
    The total area of each facet (group of connected faces).

    Returns
    ---------
    area : (len(self.facets),) float
      Summed area of the faces in each facet
    """
    # grab once to avoid thrashing the cache inside the loop
    per_face = self.area_faces
    # native python sum in this tight loop is measurably faster
    # than ndarray.sum due to lower function call overhead
    return np.array([sum(per_face[facet]) for facet in self.facets],
                    dtype=np.float64)
|
python
|
{
"resource": ""
}
|
q23107
|
Trimesh.facets_normal
|
train
|
def facets_normal(self):
    """
    Unit normal of each facet.

    Also stores a point on each facet plane into the cache
    under 'facets_origin'.

    Returns
    ---------
    normals : (len(self.facets), 3) float
      A unit normal vector for each facet
    """
    if len(self.facets) == 0:
        return np.array([])
    per_face_area = self.area_faces
    # use the largest face in each facet as the representative,
    # since its normal is the most numerically reliable
    largest = np.array([facet[per_face_area[facet].argmax()]
                        for facet in self.facets])
    # (n, 3) float, unit normal vectors of the facet planes
    normals = self.face_normals[largest]
    # (n, 3) float, a vertex lying on each facet plane
    origins = self.vertices[self.faces[:, 0][largest]]
    # save origins in cache for facets_origin
    self._cache['facets_origin'] = origins
    return normals
|
python
|
{
"resource": ""
}
|
q23108
|
Trimesh.facets_boundary
|
train
|
def facets_boundary(self):
    """
    The edges forming the boundary of each facet.

    Returns
    ---------
    edges_boundary : sequence of (n, 2) int
      Indices into self.vertices
    """
    # reshape so each row holds the three sorted edges of one face
    face_edges = self.edges_sorted.reshape((-1, 6))
    # per-facet edges flattened back into (n, 2) vertex pairs
    per_facet = [face_edges[facet].reshape((-1, 2))
                 for facet in self.facets]
    # boundary edges appear exactly once within a facet
    return np.array([edges[grouping.group_rows(edges, require_count=1)]
                     for edges in per_facet])
|
python
|
{
"resource": ""
}
|
q23109
|
Trimesh.facets_on_hull
|
train
|
def facets_on_hull(self):
    """
    Find which facets of the mesh lie on its convex hull.

    Returns
    ---------
    on_hull : (len(mesh.facets),) bool
      Is each facet on the mesh's convex hull or not
    """
    # facet planes: a unit normal and a point on each plane
    normals = self.facets_normal
    origins = self.facets_origin
    # (n, 3) convex hull vertices
    convex = self.convex_hull.vertices.view(np.ndarray).copy()
    # boolean mask for which facets are on the convex hull
    # use builtin bool: the np.bool alias was removed in NumPy 1.24
    on_hull = np.zeros(len(self.facets), dtype=bool)
    for i, (normal, origin) in enumerate(zip(normals, origins)):
        # a facet plane is on the convex hull if every vertex
        # of the convex hull is behind that plane,
        # which we check with dot products
        dot = np.dot(normal, (convex - origin).T)
        on_hull[i] = (dot < tol.merge).all()
    return on_hull
|
python
|
{
"resource": ""
}
|
q23110
|
Trimesh.fix_normals
|
train
|
def fix_normals(self, multibody=None):
    """
    Find and fix problems with self.face_normals and the
    winding direction of self.faces.

    Ensures face normal vectors point consistently outwards
    and that self.faces is wound correctly for all
    connected components.

    Parameters
    -------------
    multibody : None or bool
      Fix normals across multiple bodies;
      if None, decided automatically from body_count
    """
    # decide multibody automatically when the caller didn't
    if multibody is None:
        multibody = self.body_count > 1
    repair.fix_normals(self, multibody=multibody)
|
python
|
{
"resource": ""
}
|
q23111
|
Trimesh.subdivide
|
train
|
def subdivide(self, face_index=None):
    """
    Subdivide the mesh: each targeted face is replaced
    with four smaller faces.

    Parameters
    ----------
    face_index : (m,) int or None
      If None every face of the mesh is subdivided.
      Otherwise only the listed faces are subdivided;
      note that the result is then generally no longer
      manifold, as the midpoint vertices are not shared
      with the untouched adjacent faces, and an extra
      postprocessing step is required to make the
      resulting mesh watertight.

    Returns
    ----------
    subdivided : trimesh.Trimesh
      Mesh with subdivided faces
    """
    new_vertices, new_faces = remesh.subdivide(
        vertices=self.vertices,
        faces=self.faces,
        face_index=face_index)
    return Trimesh(vertices=new_vertices, faces=new_faces)
|
python
|
{
"resource": ""
}
|
q23112
|
Trimesh.smoothed
|
train
|
def smoothed(self, angle=.4):
    """
    Get a version of the current mesh which will render
    nicely with smooth shading, without changing the
    source mesh.

    Parameters
    -------------
    angle : float
      Angle in radians; face pairs meeting at angles
      smaller than this value will appear smoothed

    Returns
    ---------
    smoothed : trimesh.Trimesh
      Non-watertight version of the current mesh
      which will render nicely with smooth shading
    """
    # recomputed if the visuals have changed
    self.visual._verify_crc()
    hit = self.visual._cache['smoothed']
    if hit is None:
        # compute and cache the smooth-shaded version
        hit = graph.smoothed(self, angle)
        self.visual._cache['smoothed'] = hit
    return hit
|
python
|
{
"resource": ""
}
|
q23113
|
Trimesh.section
|
train
|
def section(self,
            plane_normal,
            plane_origin):
    """
    Returns a 3D cross section of the current mesh and a
    plane defined by origin and normal.

    Parameters
    ---------
    plane_normal : (3,) float
      Normal vector of section plane
    plane_origin : (3,) float
      Point on the cross section plane

    Returns
    ---------
    intersections : Path3D or None
      Curve of intersection, or None if the
      plane missed the mesh
    """
    # turn line segments into Path2D/Path3D objects
    from .exchange.load import load_path
    # run a single cross section in 3D
    segments, faces_hit = intersections.mesh_plane(
        mesh=self,
        plane_normal=plane_normal,
        plane_origin=plane_origin,
        return_faces=True)
    # the section didn't hit the mesh
    if len(segments) == 0:
        return None
    # load the line segments into a Path3D object
    result = load_path(segments)
    # record which face produced each segment
    result.metadata['face_index'] = faces_hit
    return result
|
python
|
{
"resource": ""
}
|
q23114
|
Trimesh.section_multiplane
|
train
|
def section_multiplane(self,
                       plane_origin,
                       plane_normal,
                       heights):
    """
    Return multiple parallel cross sections of the current
    mesh in 2D.

    Parameters
    ---------
    plane_origin : (3,) float
      Point on the base cross section plane
    plane_normal : (3,) float
      Normal vector of the section planes
    heights : (n,) float
      Each section is offset by a height along
      the plane normal

    Returns
    ---------
    paths : (n,) Path2D or None
      2D cross sections at the specified heights;
      path.metadata['to_3D'] contains the transform
      to return a 2D section back into 3D space
    """
    # turn line segments into Path2D/Path3D objects
    from .exchange.load import load_path
    # do a multiplane intersection
    lines, transforms, faces = intersections.mesh_multiplane(
        mesh=self,
        plane_normal=plane_normal,
        plane_origin=plane_origin,
        heights=heights)
    # turn each set of segments into a Path2D,
    # leaving None where a plane missed the mesh
    paths = [
        load_path(segments, metadata={'to_3D': matrix})
        if len(segments) > 0 else None
        for segments, matrix in zip(lines, transforms)]
    return paths
|
python
|
{
"resource": ""
}
|
q23115
|
Trimesh.slice_plane
|
train
|
def slice_plane(self,
                plane_origin,
                plane_normal,
                **kwargs):
    """
    Slice the current mesh by a plane, returning the portion
    on the positive-normal side as a new mesh.

    Parameters
    ---------
    plane_origin : (3,) float
      Point on the slicing plane
    plane_normal : (3,) float
      Normal vector of the slicing plane

    Returns
    ---------
    new_mesh : trimesh.Trimesh or None
      Subset of the current mesh sliced by the plane
    """
    return intersections.slice_mesh_plane(
        mesh=self,
        plane_normal=plane_normal,
        plane_origin=plane_origin,
        **kwargs)
|
python
|
{
"resource": ""
}
|
q23116
|
Trimesh.sample
|
train
|
def sample(self, count, return_index=False):
    """
    Return random samples distributed across the
    surface of the mesh.

    Parameters
    ---------
    count : int
      Number of points to sample
    return_index : bool
      If True also return the index of the face
      each sample was taken from

    Returns
    ---------
    samples : (count, 3) float
      Points on the surface of the mesh
    face_index : (count,) int
      Index of self.faces, only if return_index is True
    """
    points, face_index = sample.sample_surface(self, count)
    if return_index:
        return points, face_index
    return points
|
python
|
{
"resource": ""
}
|
q23117
|
Trimesh.remove_unreferenced_vertices
|
train
|
def remove_unreferenced_vertices(self):
    """
    Remove all vertices in the current mesh which are not
    referenced by any face.
    """
    # mask of vertices used by at least one face
    # use builtin bool: the np.bool alias was removed in NumPy 1.24
    referenced = np.zeros(len(self.vertices), dtype=bool)
    referenced[self.faces] = True
    # map old vertex indices to their new compacted positions
    inverse = np.zeros(len(self.vertices), dtype=np.int64)
    inverse[referenced] = np.arange(referenced.sum())
    self.update_vertices(mask=referenced, inverse=inverse)
|
python
|
{
"resource": ""
}
|
q23118
|
Trimesh.unmerge_vertices
|
train
|
def unmerge_vertices(self):
    """
    Remove all shared vertex references so that every face
    contains three unique vertex indices and no two faces
    are adjacent.
    """
    # incrementing indices guarantee every vertex is unique
    unshared = np.arange(len(self.faces) * 3,
                         dtype=np.int64).reshape((-1, 3))
    # duplicate every per-vertex property through update_vertices
    self.update_vertices(self.faces.reshape(-1))
    # point faces at the new incrementing indices
    self.faces = unshared
    # face normals haven't changed so keep them cached
    self._cache.clear(exclude=['face_normals'])
|
python
|
{
"resource": ""
}
|
q23119
|
Trimesh.apply_obb
|
train
|
def apply_obb(self):
    """
    Apply the oriented bounding box transform to the current mesh.

    Afterwards the mesh has an axis aligned bounding box
    centered at the origin with the same dimensions as the OBB.

    Returns
    ----------
    matrix : (4, 4) float
      Transformation matrix that was applied
      to the mesh to move it into OBB frame
    """
    # invert the OBB transform to move the mesh into the OBB frame
    obb_inverse = np.linalg.inv(
        self.bounding_box_oriented.primitive.transform)
    self.apply_transform(obb_inverse)
    return obb_inverse
|
python
|
{
"resource": ""
}
|
q23120
|
Trimesh.apply_transform
|
train
|
def apply_transform(self, matrix):
    """
    Transform mesh by a homogenous transformation matrix.

    Does the bookkeeping to avoid recomputing things so this function
    should be used rather than directly modifying self.vertices
    if possible.

    Parameters
    ----------
    matrix : (4, 4) float
      Homogenous transformation matrix

    Returns
    ----------
    self : Trimesh
      The mesh itself, transformed in-place
      (returns early with None on identity matrices)

    Raises
    ----------
    ValueError
      If matrix is not (4, 4)
    """
    # get c-order float64 matrix
    matrix = np.asanyarray(matrix,
                           order='C',
                           dtype=np.float64)
    # only support homogenous transformations
    if matrix.shape != (4, 4):
        raise ValueError('Transformation matrix must be (4,4)!')
    # exit early if we've been passed an identity matrix
    # np.allclose is surprisingly slow so do this test
    elif np.abs(matrix - np.eye(4)).max() < 1e-8:
        log.debug('apply_tranform passed identity matrix')
        return
    # new vertex positions
    new_vertices = transformations.transform_points(
        self.vertices,
        matrix=matrix)
    # if a center of mass was overridden it has to be
    # moved along with the rest of the geometry
    if self._center_mass is not None:
        self._center_mass = transformations.transform_points(
            np.array([self._center_mass, ]),
            matrix)[0]
    # preserve face normals if we have them stored
    new_face_normals = None
    if 'face_normals' in self._cache:
        # transform face normals by rotation component only
        # (translate=False), then re-normalize to unit length
        new_face_normals = util.unitize(
            transformations.transform_points(
                self.face_normals,
                matrix=matrix,
                translate=False))
    # preserve vertex normals if we have them stored
    new_vertex_normals = None
    if 'vertex_normals' in self._cache:
        new_vertex_normals = util.unitize(
            transformations.transform_points(
                self.vertex_normals,
                matrix=matrix,
                translate=False))
    # a few test triangles pre and post transform, used to
    # detect whether the transform flips winding direction
    # note: fancy indexing returns a copy, so the in-place
    # division below does not modify self.vertices
    triangle_pre = self.vertices[self.faces[:5]]
    # we don't care about scale so make sure they aren't tiny
    triangle_pre /= np.abs(triangle_pre).max()
    # do the same for the post-transform test triangles
    triangle_post = new_vertices[self.faces[:5]]
    triangle_post /= np.abs(triangle_post).max()
    # compute triangle normal before and after transform
    normal_pre, valid_pre = triangles.normals(triangle_pre)
    normal_post, valid_post = triangles.normals(triangle_post)
    # check the first few faces against normals to check winding
    aligned_pre = triangles.windings_aligned(triangle_pre[valid_pre],
                                             normal_pre)
    # windings aligned after applying transform
    aligned_post = triangles.windings_aligned(triangle_post[valid_post],
                                              normal_post)
    # convert multiple face checks to single bool, allowing outliers:
    # more than 60 percent of the sampled faces must agree
    pre = (aligned_pre.sum() / float(len(aligned_pre))) > .6
    post = (aligned_post.sum() / float(len(aligned_post))) > .6
    if pre != post:
        # the transform inverted the winding direction
        log.debug('transform flips winding')
        # fliplr will make array non C contiguous, which will
        # cause hashes to be more expensive than necessary
        self.faces = np.ascontiguousarray(np.fliplr(self.faces))
    # assign the new values
    self.vertices = new_vertices
    # may be None if we didn't have them previously
    self.face_normals = new_face_normals
    self.vertex_normals = new_vertex_normals
    # preserve normals and topology in cache
    # while dumping everything else
    self._cache.clear(exclude=[
        'face_normals',  # transformed by us
        'face_adjacency',  # topological
        'face_adjacency_edges',
        'face_adjacency_unshared',
        'edges',
        'edges_sorted',
        'edges_unique',
        'edges_sparse',
        'body_count',
        'faces_unique_edges',
        'euler_number',
        'vertex_normals'])
    # set the cache ID with the current hash value
    self._cache.id_set()
    log.debug('mesh transformed by matrix')
    return self
|
python
|
{
"resource": ""
}
|
q23121
|
Trimesh.voxelized
|
train
|
def voxelized(self, pitch, **kwargs):
    """
    Return a VoxelMesh object representing the current mesh
    discretized into voxels at the specified pitch.

    Parameters
    ----------
    pitch : float
      The edge length of a single voxel

    Returns
    ----------
    voxelized : Voxel object
      Representing the current mesh
    """
    return voxel.VoxelMesh(self, pitch=pitch, **kwargs)
|
python
|
{
"resource": ""
}
|
q23122
|
Trimesh.outline
|
train
|
def outline(self, face_ids=None, **kwargs):
    """
    Given a list of face indexes find the outline of those
    faces and return it as a Path3D.

    The outline is defined here as every edge which is only
    included by a single triangle. Note that this implies a
    non-watertight section: the outline of a watertight
    mesh is an empty path.

    Parameters
    ----------
    face_ids : (n,) int or None
      Indices of faces to compute the outline of;
      if None the outline of the full mesh is computed
    **kwargs : dict
      Passed through to the Path3D constructor

    Returns
    ----------
    path : Path3D
      Curve in 3D of the outline
    """
    from .path.exchange.misc import faces_to_path
    from .path.exchange.load import _create_path
    return _create_path(**faces_to_path(self, face_ids, **kwargs))
|
python
|
{
"resource": ""
}
|
q23123
|
Trimesh.area_faces
|
train
|
def area_faces(self):
    """
    The area of each face of the mesh.

    Returns
    ---------
    area_faces : (n,) float
      Area of each face
    """
    # reuse the cached triangle cross products
    return triangles.area(crosses=self.triangles_cross, sum=False)
|
python
|
{
"resource": ""
}
|
q23124
|
Trimesh.mass_properties
|
train
|
def mass_properties(self):
    """
    Returns the mass properties of the current mesh.

    Assumes uniform density; the result is probably garbage
    if the mesh isn't watertight.

    Returns
    ----------
    properties : dict
      With keys:
      'volume'      : in global units^3
      'mass'        : from specified density
      'density'     : included again for convenience
                      (same as kwarg density)
      'inertia'     : taken at the center of mass and aligned
                      with the global coordinate system
      'center_mass' : center of mass location,
                      in global coordinate system
    """
    properties = triangles.mass_properties(
        triangles=self.triangles,
        crosses=self.triangles_cross,
        density=self._density,
        center_mass=self._center_mass,
        skip_inertia=False)
    # if magical clean-up mode is enabled and the mesh is
    # watertight and consistently wound but has negative volume,
    # every triangle is probably facing inwards: invert the mesh
    # in-place without dumping the cache
    inverted = (self._validate and
                self.is_watertight and
                self.is_winding_consistent and
                np.linalg.det(properties['inertia']) < 0.0 and
                properties['mass'] < 0.0 and
                properties['volume'] < 0.0)
    if inverted:
        # negate the mass properties rather than recalculating
        for key in ('inertia', 'mass', 'volume'):
            properties[key] = -properties[key]
        # invert the faces and normals of the mesh
        self.invert()
    return properties
|
python
|
{
"resource": ""
}
|
q23125
|
Trimesh.invert
|
train
|
def invert(self):
    """
    Invert the mesh in-place by reversing the winding of every
    face and negating normals without dumping the cache.

    Alters
    ---------
    self.faces : columns reversed
    self.face_normals : negated if defined
    self.vertex_normals : negated if defined
    """
    # do the mutation inside the cache context manager so
    # these assignments don't invalidate the cached values
    with self._cache:
        # note: in-place multiply mutates the cached arrays directly
        if 'face_normals' in self._cache:
            self.face_normals *= -1.0
        if 'vertex_normals' in self._cache:
            self.vertex_normals *= -1.0
        # reverse the winding of every face
        self.faces = np.fliplr(self.faces)
    # save our freshly negated normals, dump everything else
    self._cache.clear(exclude=['face_normals',
                               'vertex_normals'])
|
python
|
{
"resource": ""
}
|
q23126
|
Trimesh.submesh
|
train
|
def submesh(self, faces_sequence, **kwargs):
    """
    Return a subset of the mesh.

    Parameters
    ----------
    faces_sequence : sequence (m,) int
      Face indices of mesh
    only_watertight : bool
      Only return submeshes which are watertight
    append : bool
      Return a single mesh which has the faces appended;
      if this flag is set, only_watertight is ignored

    Returns
    ---------
    submesh : trimesh.Trimesh or list of trimesh.Trimesh
      A single appended mesh if append is set,
      otherwise a list of Trimesh objects
    """
    return util.submesh(
        mesh=self, faces_sequence=faces_sequence, **kwargs)
|
python
|
{
"resource": ""
}
|
q23127
|
Trimesh.export
|
train
|
def export(self, file_obj=None, file_type=None, **kwargs):
    """
    Export the current mesh to a file object.

    If file_obj is a filename, the file will be written there.
    Supported formats are stl, off, ply, collada, json, dict,
    glb, dict64, msgpack.

    Parameters
    ---------
    file_obj : open writeable file object, str or None
      File name to save the mesh to, or None to have
      this function return the export blob
    file_type : str
      Which file type to export as; not required
      if a file name was passed
    """
    return export_mesh(
        mesh=self,
        file_obj=file_obj,
        file_type=file_type,
        **kwargs)
|
python
|
{
"resource": ""
}
|
q23128
|
Trimesh.intersection
|
train
|
def intersection(self, other, engine=None):
    """
    Boolean intersection between this mesh and n other meshes.

    Parameters
    ---------
    other : trimesh.Trimesh or list of trimesh.Trimesh
      Meshes to calculate intersections with

    Returns
    ---------
    intersection : trimesh.Trimesh
      Mesh of the volume contained by all passed meshes
    """
    return boolean.intersection(
        meshes=np.append(self, other), engine=engine)
|
python
|
{
"resource": ""
}
|
q23129
|
Trimesh.contains
|
train
|
def contains(self, points):
    """
    Given a set of points, determine whether or not they
    are inside the mesh.

    Note: despite what older documentation claimed this does
    NOT raise on a non-watertight mesh; it logs a warning and
    returns a result which is then not reliable.

    Parameters
    ---------
    points : (n, 3) float
      Points in cartesian space

    Returns
    ---------
    contains : (n,) bool
      Whether or not each point is inside the mesh
    """
    # containment is only meaningful on a watertight mesh,
    # so warn (but still answer) when it isn't
    if not self.is_watertight:
        log.warning('Mesh is non- watertight for contained point query!')
    return self.ray.contains_points(points)
|
python
|
{
"resource": ""
}
|
q23130
|
Trimesh.face_adjacency_tree
|
train
|
def face_adjacency_tree(self):
    """
    An R-tree of face adjacencies.

    Returns
    --------
    tree : rtree.index
      Where each edge in self.face_adjacency has
      a rectangular cell
    """
    # vertex positions of every adjacency edge
    edge_vertices = self.vertices[self.face_adjacency_edges]
    # (n, 6) interleaved AABB for every edge segment
    segment_bounds = np.column_stack((
        edge_vertices.min(axis=1),
        edge_vertices.max(axis=1)))
    return util.bounds_tree(segment_bounds)
|
python
|
{
"resource": ""
}
|
q23131
|
Trimesh.copy
|
train
|
def copy(self):
    """
    Safely get a copy of the current mesh.

    Copied objects start with emptied caches to avoid memory
    issues, so they may be slow on initial operations until
    their caches are regenerated.

    The current object will *not* have its cache cleared.

    Returns
    ---------
    copied : trimesh.Trimesh
      Copy of current mesh
    """
    duplicate = Trimesh()
    # deep-copy vertex and face data
    duplicate._data.data = copy.deepcopy(self._data.data)
    # copy visual information
    duplicate.visual = self.visual.copy()
    # deep-copy metadata
    duplicate.metadata = copy.deepcopy(self.metadata)
    # carry over an overridden center_mass and the density
    if self._center_mass is not None:
        duplicate.center_mass = self.center_mass
    duplicate._density = self._density
    # start the copy with a clean cache
    duplicate._cache.clear()
    return duplicate
|
python
|
{
"resource": ""
}
|
q23132
|
Trimesh.eval_cached
|
train
|
def eval_cached(self, statement, *args):
    """
    Evaluate a statement and cache the result before returning.

    Statements are evaluated inside the Trimesh object.

    SECURITY NOTE: this runs eval on the statement; never pass
    untrusted input here.

    Parameters
    -----------
    statement : str
      Statement of valid python code
    *args : list
      Available inside statement as args[0], etc

    Returns
    -----------
    result : result of running eval on statement with args

    Examples
    -----------
    r = mesh.eval_cached('np.dot(self.vertices, args[0])', [0,0,1])
    """
    statement = str(statement)
    # build the cache key from the statement and its arguments
    # note: the local names 'statement' and 'args' must not be
    # renamed, as the evaluated code may reference them
    key = ('eval_cached_' + statement +
           '_'.join(str(i) for i in args))
    if key not in self._cache:
        self._cache[key] = eval(statement)
    return self._cache[key]
|
python
|
{
"resource": ""
}
|
q23133
|
fix_winding
|
train
|
def fix_winding(mesh):
    """
    Traverse and change mesh faces in-place to make sure winding
    is correct, with edges on adjacent faces in
    opposite directions.

    Parameters
    -------------
    mesh : Trimesh object
      Mesh to fix in-place

    Alters
    -------------
    mesh.face : will reverse columns of certain faces
    """
    # anything we would fix is already done
    if mesh.is_winding_consistent:
        return
    # graph with one node per face, one edge per face adjacency
    graph_all = nx.from_edgelist(mesh.face_adjacency)
    # count of faces whose winding we reversed
    flipped = 0
    # work on a plain ndarray copy so the mesh (and its cache)
    # is only touched once at the end if anything changed
    faces = mesh.faces.view(np.ndarray).copy()
    # we are going to traverse the graph using BFS
    # start a traversal for every connected component
    for components in nx.connected_components(graph_all):
        # get a subgraph for this component
        g = graph_all.subgraph(components)
        # get the first node in the graph in a way that works on nx's
        # new API and their old API
        start = next(iter(g.nodes()))
        # we traverse every pair of faces in the graph
        # we modify mesh.faces and mesh.face_normals in place
        for face_pair in nx.bfs_edges(g, start):
            # for each pair of faces, we convert them into edges,
            # find the edge that both faces share and then see if edges
            # are reversed in order as you would expect
            # (2, ) int
            face_pair = np.ravel(face_pair)
            # (2, 3) int
            pair = faces[face_pair]
            # (6, 2) int
            edges = faces_to_edges(pair)
            # the shared edge appears twice when edges are sorted
            overlap = group_rows(np.sort(edges, axis=1),
                                 require_count=2)
            if len(overlap) == 0:
                # only happens on non-watertight meshes
                continue
            edge_pair = edges[overlap[0]]
            if edge_pair[0][0] == edge_pair[1][0]:
                # if the edges aren't reversed, invert the order of one face
                flipped += 1
                faces[face_pair[1]] = faces[face_pair[1]][::-1]
    # only assign back (invalidating caches) if anything changed
    if flipped > 0:
        mesh.faces = faces
    log.debug('flipped %d/%d edges', flipped, len(mesh.faces) * 3)
|
python
|
{
"resource": ""
}
|
q23134
|
fix_inversion
|
train
|
def fix_inversion(mesh, multibody=False):
    """
    Check to see if a mesh has normals pointing "out"
    and invert bodies where they do not.

    Parameters
    -------------
    mesh : Trimesh object
      Mesh to fix in-place
    multibody : bool
      If True will try to fix normals on every body

    Alters
    -------------
    mesh.face : may reverse faces
    """
    if multibody:
        groups = graph.connected_components(mesh.face_adjacency)
        # escape early for single body
        if len(groups) == 1:
            if mesh.volume < 0.0:
                mesh.invert()
            return
        # mask of faces to flip
        # use builtin bool: the np.bool alias was removed in NumPy 1.24
        flip = np.zeros(len(mesh.faces), dtype=bool)
        # save these to avoid thrashing the cache inside the loop
        tri = mesh.triangles
        cross = mesh.triangles_cross
        # groups hold indexes of mesh.faces, not actual faces
        for faces in groups:
            # calculate the volume of the submesh faces
            volume = triangles.mass_properties(
                tri[faces],
                crosses=cross[faces],
                skip_inertia=True)['volume']
            # if that volume is negative it is either
            # inverted or just total garbage
            if volume < 0.0:
                flip[faces] = True
        # one or more faces needs flipping
        if flip.any():
            with mesh._cache:
                # flip normals of necessary faces
                if 'face_normals' in mesh._cache:
                    mesh.face_normals[flip] *= -1.0
                # flip faces
                mesh.faces[flip] = np.fliplr(mesh.faces[flip])
                # save the flipped normals
                mesh._cache.clear(exclude=['face_normals'])
    elif mesh.volume < 0.0:
        # reverse every triangle and flip every normal
        mesh.invert()
|
python
|
{
"resource": ""
}
|
q23135
|
fix_normals
|
train
|
def fix_normals(mesh, multibody=False):
    """
    Fix the winding and direction of a mesh face and
    face normals in-place.

    Really only meaningful on watertight meshes, but will
    orient all faces and winding in a uniform way for
    non-watertight face patches as well.

    Parameters
    -------------
    mesh : Trimesh object
      Mesh to fix in-place
    multibody : bool
      If True try to correct normals direction
      on every body rather than the mesh as a whole

    Alters
    --------------
    mesh.faces : will flip columns on inverted faces
    """
    # first make winding consistent across adjacent faces
    fix_winding(mesh)
    # then flip bodies that are inside-out
    fix_inversion(mesh, multibody=multibody)
|
python
|
{
"resource": ""
}
|
q23136
|
broken_faces
|
train
|
def broken_faces(mesh, color=None):
    """
    Return the indexes of faces in the mesh which break
    its watertight status.

    Parameters
    --------------
    mesh : Trimesh object
      Mesh to check
    color : (4,) uint8 or None
      If set, broken faces will be painted this color;
      None leaves mesh colors untouched

    Returns
    ---------------
    broken : (n,) int
      Indexes of mesh.faces
    """
    adjacency = nx.from_edgelist(mesh.face_adjacency)
    # in a watertight mesh every face is adjacent to exactly
    # three others; any other degree breaks watertightness
    broken = np.array([face for face, degree
                       in dict(adjacency.degree()).items()
                       if degree != 3])
    if color is not None:
        color = np.array(color)
        # fall back to opaque red if someone passed a broken color
        if color.shape not in ((3,), (4,)):
            color = [255, 0, 0, 255]
        mesh.visual.face_colors[broken] = color
    return broken
|
python
|
{
"resource": ""
}
|
q23137
|
mesh_multiplane
|
train
|
def mesh_multiplane(mesh,
                    plane_origin,
                    plane_normal,
                    heights):
    """
    A utility function for slicing a mesh by multiple
    parallel planes which caches the dot product operation.

    Parameters
    -------------
    mesh : trimesh.Trimesh
      Geometry to be sliced by planes
    plane_origin : (3,) float
      Point on a plane
    plane_normal : (3,) float
      Normal vector of plane
    heights : (m,) float
      Offset distances from plane to slice at

    Returns
    --------------
    lines : (m,) sequence of (n, 2, 2) float
      Lines in space for m planes
    to_3D : (m, 4, 4) float
      Transform to move each section back to 3D
    face_index : (m,) sequence of (n,) int
      Indexes of mesh.faces for each segment
    """
    # check input plane
    plane_normal = util.unitize(plane_normal)
    plane_origin = np.asanyarray(plane_origin, dtype=np.float64)
    heights = np.asanyarray(heights, dtype=np.float64)

    # dot product of every vertex with the plane normal,
    # cached here so every slice height can reuse it
    vertex_dots = np.dot(plane_normal,
                         (mesh.vertices - plane_origin).T)

    # reconstruct transforms for each 2D section
    base_transform = geometry.plane_transform(origin=plane_origin,
                                              normal=plane_normal)
    base_transform = np.linalg.inv(base_transform)

    # translation Z is altered inside the loop
    translation = np.eye(4)

    # store results
    transforms = []
    face_index = []
    segments = []

    # loop through user specified heights
    for height in heights:
        # offset the origin by the height
        new_origin = plane_origin + (plane_normal * height)
        # offset the cached dot products by height and index by faces
        new_dots = (vertex_dots - height)[mesh.faces]
        # run the intersection with the cached dot products
        lines, index = mesh_plane(mesh=mesh,
                                  plane_origin=new_origin,
                                  plane_normal=plane_normal,
                                  return_faces=True,
                                  cached_dots=new_dots)
        # get the transforms to 3D space and back
        translation[2, 3] = height
        to_3D = np.dot(base_transform, translation)
        to_2D = np.linalg.inv(to_3D)
        transforms.append(to_3D)
        # transform points to the 2D frame
        lines_2D = transformations.transform_points(
            lines.reshape((-1, 3)),
            to_2D)
        # if we didn't screw up the transform all
        # of the Z values should be zero
        assert np.allclose(lines_2D[:, 2], 0.0)
        # reshape back in to lines and discard Z
        lines_2D = lines_2D[:, :2].reshape((-1, 2, 2))
        # store (n, 2, 2) float lines
        segments.append(lines_2D)
        # store (n,) int indexes of mesh.faces
        # BUGFIX: previously appended the accumulator list
        # `face_index` to itself instead of `index`
        face_index.append(index)

    # (m, 4, 4) transforms from 2D to 3D
    transforms = np.array(transforms, dtype=np.float64)

    return segments, transforms, face_index
|
python
|
{
"resource": ""
}
|
q23138
|
plane_lines
|
train
|
def plane_lines(plane_origin,
                plane_normal,
                endpoints,
                line_segments=True):
    """
    Calculate plane-line intersections.

    Parameters
    ---------
    plane_origin : (3,) float
      Point on plane
    plane_normal : (3,) float
      Plane normal vector
    endpoints : (2, n, 3) float
      Points defining lines to be tested
    line_segments : bool
      If True, only returns intersections as valid if
      vertices from endpoints are on different sides
      of the plane.

    Returns
    ---------
    intersections : (m, 3) float
      Cartesian intersection points
    valid : (n,) bool
      Indicates whether a valid intersection exists
      for each input line segment
      (docstring fix: shape is (n,), not (n, 3))
    """
    endpoints = np.asanyarray(endpoints)
    plane_origin = np.asanyarray(plane_origin).reshape(3)
    line_dir = util.unitize(endpoints[1] - endpoints[0])
    plane_normal = util.unitize(np.asanyarray(plane_normal).reshape(3))

    # signed offset of each first endpoint from the plane
    t = np.dot(plane_normal, (plane_origin - endpoints[0]).T)
    # component of each line direction along the plane normal
    b = np.dot(plane_normal, line_dir.T)

    # If the plane normal and line direction are perpendicular, it means
    # the vector is 'on plane', and there isn't a valid intersection.
    # We discard on-plane vectors by checking that the dot product is nonzero
    valid = np.abs(b) > tol.zero
    if line_segments:
        test = np.dot(plane_normal,
                      np.transpose(plane_origin - endpoints[1]))
        # the two endpoints must straddle the plane
        different_sides = np.sign(t) != np.sign(test)
        # reject segments with both endpoints on the plane
        nonzero = np.logical_or(np.abs(t) > tol.zero,
                                np.abs(test) > tol.zero)
        valid = np.logical_and(valid, different_sides)
        valid = np.logical_and(valid, nonzero)

    # parametric distance along each valid line
    d = np.divide(t[valid], b[valid])
    intersection = endpoints[0][valid]
    intersection = intersection + np.reshape(d, (-1, 1)) * line_dir[valid]

    return intersection, valid
|
python
|
{
"resource": ""
}
|
q23139
|
planes_lines
|
train
|
def planes_lines(plane_origins,
                 plane_normals,
                 line_origins,
                 line_directions):
    """
    Given one line per plane find the intersection point of each pair.

    Parameters
    -----------
    plane_origins : (n, 3) float
      Point on each plane
    plane_normals : (n, 3) float
      Normal vector of each plane
    line_origins : (n, 3) float
      Point at origin of each line
    line_directions : (n, 3) float
      Direction vector of each line

    Returns
    ----------
    on_plane : (n, 3) float
      Points on specified planes
    valid : (n,) bool
      Did plane intersect line or not
    """
    # coerce everything to float arrays
    plane_origins = np.asanyarray(plane_origins, dtype=np.float64)
    plane_normals = np.asanyarray(plane_normals, dtype=np.float64)
    line_origins = np.asanyarray(line_origins, dtype=np.float64)
    line_directions = np.asanyarray(line_directions, dtype=np.float64)
    # vector from each line origin to the paired plane origin
    to_plane = plane_origins - line_origins
    # distance of the plane along its own normal
    numerator = util.diagonal_dot(to_plane, plane_normals)
    # rate at which each line approaches its plane
    denominator = util.diagonal_dot(line_directions, plane_normals)
    # lines nearly parallel to their plane have no intersection
    valid = np.abs(denominator) > tol.merge
    # parametric distance along each valid line
    distance = np.divide(numerator[valid], denominator[valid])
    on_plane = (line_origins[valid] +
                line_directions[valid] * distance.reshape((-1, 1)))
    return on_plane, valid
|
python
|
{
"resource": ""
}
|
q23140
|
slice_mesh_plane
|
train
|
def slice_mesh_plane(mesh,
                     plane_normal,
                     plane_origin,
                     **kwargs):
    """
    Slice a mesh with one or more planes, returning a new mesh
    that is the portion of the original on the positive-normal
    side of every plane.

    Parameters
    ---------
    mesh : Trimesh object
      Source mesh to slice
    plane_normal : (3,) float
      Normal vector of plane to intersect with mesh
    plane_origin : (3,) float
      Point on plane to intersect with mesh
    cached_dots : (n, 3) float
      If an external function has stored dot
      products pass them here to avoid recomputing

    Returns
    ----------
    new_mesh : Trimesh object
      Sliced mesh
    """
    if mesh is None:
        return None
    # avoid circular import
    from .base import Trimesh
    plane_normal = np.asanyarray(plane_normal,
                                 dtype=np.float64)
    plane_origin = np.asanyarray(plane_origin,
                                 dtype=np.float64)
    # origins and normals must both be one (3,) vector
    # or matching stacks of (n, 3) vectors
    shape_ok = ((plane_origin.shape == (3,) or
                 util.is_shape(plane_origin, (-1, 3))) and
                (plane_normal.shape == (3,) or
                 util.is_shape(plane_normal, (-1, 3))) and
                plane_origin.shape == plane_normal.shape)
    if not shape_ok:
        raise ValueError('plane origins and normals must be (n, 3)!')
    # slice sequentially, feeding the result of each
    # plane into the next slice operation
    vertices = mesh.vertices.copy()
    faces = mesh.faces.copy()
    for origin, normal in zip(plane_origin.reshape((-1, 3)),
                              plane_normal.reshape((-1, 3))):
        vertices, faces = slice_faces_plane(vertices=vertices,
                                            faces=faces,
                                            plane_normal=normal,
                                            plane_origin=origin,
                                            **kwargs)
    # wrap the final result without reprocessing
    new_mesh = Trimesh(vertices=vertices,
                       faces=faces,
                       process=False)
    return new_mesh
|
python
|
{
"resource": ""
}
|
q23141
|
create_scene
|
train
|
def create_scene():
    """
    Create a scene with a Fuze bottle, some cubes, and an axis.

    Returns
    ----------
    scene : trimesh.Scene
      Object with geometry
    """
    scene = trimesh.Scene()

    def _randomize(geom):
        # give every face of the geometry a random RGB color
        geom.visual.face_colors = np.random.uniform(
            0, 1, (len(geom.faces), 3))
        return geom

    # ground plane, shifted down so its top surface is at Z=0
    plane = trimesh.creation.box((0.5, 0.5, 0.01))
    plane.apply_translation((0, 0, -0.005))
    plane.visual.face_colors = (.6, .6, .6)
    scene.add_geometry(plane)

    # coordinate axis marker
    scene.add_geometry(trimesh.creation.axis(0.02))

    box_size = 0.1
    # two randomly colored boxes resting on the plane
    for x in (0.1, -0.1):
        cube = _randomize(trimesh.creation.box((box_size,) * 3))
        scene.add_geometry(
            cube,
            transform=tf.translation_matrix([x, 0.1, box_size / 2]))

    # textured fuze bottle loaded from the models directory
    fuze = trimesh.load(str(here / '../models/fuze.obj'))
    scene.add_geometry(
        fuze, transform=tf.translation_matrix([-0.1, -0.1, 0]))

    # randomly colored sphere
    ball = _randomize(trimesh.creation.icosphere(radius=0.05))
    scene.add_geometry(
        ball,
        transform=tf.translation_matrix([0.1, -0.1, box_size / 2]))

    return scene
|
python
|
{
"resource": ""
}
|
q23142
|
face_angles_sparse
|
train
|
def face_angles_sparse(mesh):
    """
    Build a sparse matrix representation of the face angles.

    Returns
    ----------
    sparse : scipy.sparse.coo_matrix
      Float matrix with shape (len(mesh.vertices), len(mesh.faces))
      laid out on the same (row, col) structure as mesh.faces_sparse
    """
    structure = mesh.faces_sparse
    # one angle per face corner, in the same order as the
    # sparse structure's row/col entries
    angles = mesh.face_angles.flatten()
    return coo_matrix((angles, (structure.row, structure.col)),
                      structure.shape)
|
python
|
{
"resource": ""
}
|
q23143
|
discrete_gaussian_curvature_measure
|
train
|
def discrete_gaussian_curvature_measure(mesh, points, radius):
    """
    Return the discrete gaussian curvature measure of a sphere centered
    at a point as detailed in 'Restricted Delaunay triangulations and
    normal cycle', Cohen-Steiner and Morvan.

    Parameters
    ----------
    points : (n, 3) float
      Points in space
    radius : float
      The sphere radius

    Returns
    --------
    gaussian_curvature : (n,) float
      Discrete gaussian curvature measure
    """
    points = np.asanyarray(points, dtype=np.float64)
    if not util.is_shape(points, (-1, 3)):
        raise ValueError('points must be (n,3)!')
    # mesh vertex indexes within radius of each query point
    nearby = mesh.kdtree.query_ball_point(points, radius)
    # sum the angular defect of every vertex inside each ball
    return np.asarray([mesh.vertex_defects[idx].sum()
                       for idx in nearby])
|
python
|
{
"resource": ""
}
|
q23144
|
discrete_mean_curvature_measure
|
train
|
def discrete_mean_curvature_measure(mesh, points, radius):
    """
    Return the discrete mean curvature measure of a sphere centered
    at a point as detailed in 'Restricted Delaunay triangulations and
    normal cycle', Cohen-Steiner and Morvan.

    Parameters
    ----------
    points : (n, 3) float
      Points in space
    radius : float
      The sphere radius

    Returns
    --------
    mean_curvature : (n,) float
      Discrete mean curvature measure
    """
    points = np.asanyarray(points, dtype=np.float64)
    if not util.is_shape(points, (-1, 3)):
        raise ValueError('points must be (n,3)!')
    # axis-aligned box around each query sphere
    bounds = np.column_stack((points - radius,
                              points + radius))
    # face-adjacency edges whose AABB touches each query box
    candidates = [list(mesh.face_adjacency_tree.intersection(b))
                  for b in bounds]
    mean_curv = np.empty(len(points))
    for i, (center, near) in enumerate(zip(points, candidates)):
        # endpoints of the candidate shared edges
        endpoints = mesh.vertices[mesh.face_adjacency_edges[near]]
        # length of each edge clipped to the query ball
        lengths = line_ball_intersection(
            endpoints[:, 0],
            endpoints[:, 1],
            center=center,
            radius=radius)
        # dihedral angle at each edge, signed by convexity
        angles = mesh.face_adjacency_angles[near]
        signs = np.where(mesh.face_adjacency_convex[near], 1, -1)
        mean_curv[i] = (lengths * angles * signs).sum() / 2
    return mean_curv
|
python
|
{
"resource": ""
}
|
q23145
|
line_ball_intersection
|
train
|
def line_ball_intersection(start_points, end_points, center, radius):
    """
    Compute the length of the intersection of each line segment
    with a ball.

    Parameters
    ----------
    start_points : (n, 3) float
      Segment start points in space
    end_points : (n, 3) float
      Segment end points in space
    center : (3,) float
      The sphere center
    radius : float
      The sphere radius

    Returns
    --------
    lengths : (n,) float
      Length of each segment inside the ball
    """
    # Parameterize each segment as x(d) = start + d * (end - start)
    # and substitute into |x - c|**2 = r**2, which gives a quadratic
    # in d solved with the standard quadratic formula.
    seg = end_points - start_points
    rel = start_points - center
    # quadratic coefficients per segment
    a = np.einsum('ij, ij->i', seg, seg)
    b = np.einsum('ij, ij->i', seg, rel)
    c = np.einsum('ij, ij->i', rel, rel) - radius ** 2
    disc = b ** 2 - a * c
    # a non-positive discriminant means the infinite line
    # misses the ball entirely: zero length
    lengths = np.zeros(len(start_points))
    hit = disc > 0
    root = np.sqrt(disc[hit])
    # two parametric solutions, clamped to the segment range [0, 1]
    lo = np.clip((-b[hit] - root) / a[hit], 0, 1)
    hi = np.clip((-b[hit] + root) / a[hit], 0, 1)
    # |x(hi) - x(lo)| = (hi - lo) * |end - start|
    lengths[hit] = (hi - lo) * np.sqrt(a[hit])
    return lengths
|
python
|
{
"resource": ""
}
|
q23146
|
uv_to_color
|
train
|
def uv_to_color(uv, image):
    """
    Look up colors from a texture image at UV coordinates.

    Parameters
    -------------
    uv : (n, 2) float
      UV coordinates on texture image
    image : PIL.Image
      Texture image

    Returns
    ----------
    colors : (n, 4) float
      RGBA color at each of the UV coordinates
    """
    if image is None or uv is None:
        return None
    uv = np.asanyarray(uv, dtype=np.float64)
    # map UV onto pixel coordinates, flipping V because
    # image rows count down from the top
    col = uv[:, 0] * (image.width - 1)
    row = (1 - uv[:, 1]) * (image.height - 1)
    # round to integer pixels and wrap out-of-range values
    # in the manner of GL_REPEAT
    col = col.round().astype(np.int64) % image.width
    row = row.round().astype(np.int64) % image.height
    # sample from an RGBA version of the image so the
    # result always has four channels
    colors = np.asanyarray(image.convert('RGBA'))[row, col]
    # conversion to RGBA should have fixed the shape
    assert colors.ndim == 2 and colors.shape[1] == 4
    return colors
|
python
|
{
"resource": ""
}
|
q23147
|
TextureVisuals.uv
|
train
|
def uv(self, values):
    """
    Assign per-vertex UV texture coordinates.

    Parameters
    --------------
    values : (n, 2) float
      Pixel locations on a texture per-vertex,
      or None to clear all stored data
    """
    if values is None:
        # clearing UV wipes the stored data entirely
        self._data.clear()
        return
    self._data['uv'] = np.asanyarray(values, dtype=np.float64)
|
python
|
{
"resource": ""
}
|
q23148
|
TextureVisuals.copy
|
train
|
def copy(self):
    """
    Return a copy of the current TextureVisuals object.

    Returns
    ----------
    copied : TextureVisuals
      Contains the same information in a new object
    """
    current_uv = self.uv
    if current_uv is not None:
        current_uv = current_uv.copy()
    # materials may hold arbitrary nested state, so deep copy
    return TextureVisuals(
        uv=current_uv,
        material=copy.deepcopy(self.material))
|
python
|
{
"resource": ""
}
|
q23149
|
TextureVisuals.to_color
|
train
|
def to_color(self):
    """
    Convert textured visuals to a ColorVisuals with vertex
    colors sampled from the texture.

    Returns
    -----------
    vis : trimesh.visuals.ColorVisuals
      Contains vertex color from texture
    """
    # sample the material color at each UV coordinate
    sampled = self.material.to_color(self.uv)
    return color.ColorVisuals(vertex_colors=sampled)
|
python
|
{
"resource": ""
}
|
q23150
|
TextureVisuals.update_vertices
|
train
|
def update_vertices(self, mask):
    """
    Apply a mask to remove or duplicate per-vertex UV coordinates.

    Parameters
    --------------
    mask : (n,) int or bool
      Vertex mask or index array to apply
    """
    current = self.uv
    if current is None:
        # nothing stored, nothing to mask
        return
    self.uv = current[mask]
|
python
|
{
"resource": ""
}
|
q23151
|
load_stl
|
train
|
def load_stl(file_obj, file_type=None, **kwargs):
    """
    Load an STL file from a file object, auto-detecting
    binary versus ASCII format.

    Parameters
    ----------
    file_obj : open file-like object
    file_type : not used

    Returns
    ----------
    loaded : dict
      kwargs for a Trimesh constructor with keys:
      vertices:     (n, 3) float, vertices
      faces:        (m, 3) int, indexes of vertices
      face_normals: (m, 3) float, normal vector of each face
    """
    # remember where the file object started
    start = file_obj.tell()
    try:
        # the binary loader checks that the header matches the
        # file length; a mismatch raises HeaderError, which is
        # a strong signal the file is not binary STL
        return load_stl_binary(file_obj)
    except HeaderError:
        # rewind to where we started and fall back
        # to the ASCII parser
        file_obj.seek(start)
        return load_stl_ascii(file_obj)
|
python
|
{
"resource": ""
}
|
q23152
|
load_stl_binary
|
train
|
def load_stl_binary(file_obj):
    """
    Load a binary STL file from a file object.

    Parameters
    ----------
    file_obj : open file-like object
      Positioned at the start of the STL data

    Returns
    ----------
    loaded : dict
      kwargs for a Trimesh constructor with keys:
      vertices:     (n, 3) float, vertices
      faces:        (m, 3) int, indexes of vertices
      face_normals: (m, 3) float, normal vector of each face

    Raises
    ----------
    HeaderError
      If the file is shorter than a binary header, the header
      cannot be parsed, or the face count in the header does not
      match the actual length of the file body.
    """
    # the header is always 84 bytes long, we just reference the dtype.itemsize
    # to be explicit about where that magical number comes from
    header_length = _stl_dtype_header.itemsize
    header_data = file_obj.read(header_length)
    if len(header_data) < header_length:
        raise HeaderError('Binary STL shorter than a fixed header!')
    try:
        # parse the raw bytes into the structured header dtype
        header = np.frombuffer(header_data,
                               dtype=_stl_dtype_header)
    except BaseException:
        raise HeaderError('Binary header incorrect type')
    try:
        # save the header block as a string
        # there could be any garbage in there so wrap in try
        metadata = {'header':
                    bytes(header['header'][0]).decode('utf-8').strip()}
    except BaseException:
        metadata = {}
    # now we check the length from the header versus the length of the file
    # data_start should always be position 84, but hard coding that felt ugly
    data_start = file_obj.tell()
    # this seeks to the end of the file
    # position 0, relative to the end of the file 'whence=2'
    file_obj.seek(0, 2)
    # we save the location of the end of the file and seek back to where we
    # started from
    data_end = file_obj.tell()
    file_obj.seek(data_start)
    # the binary format has a rigidly defined structure, and if the length
    # of the file doesn't match the header, the loaded version is almost
    # certainly going to be garbage.
    len_data = data_end - data_start
    len_expected = header['face_count'] * _stl_dtype.itemsize
    # this check is to see if this really is a binary STL file.
    # if we don't do this and try to load a file that isn't structured properly
    # we will be producing garbage or crashing hard
    # so it's much better to raise an exception here.
    if len_data != len_expected:
        raise HeaderError('Binary STL has incorrect length in header!')
    # parse the entire remaining body in one shot using the
    # per-face structured dtype (normal + 3 vertices + attributes)
    blob = np.frombuffer(file_obj.read(), dtype=_stl_dtype)
    # all of our vertices will be loaded in order
    # so faces are just sequential indices reshaped.
    faces = np.arange(header['face_count'] * 3).reshape((-1, 3))
    result = {'vertices': blob['vertices'].reshape((-1, 3)),
              'face_normals': blob['normals'].reshape((-1, 3)),
              'faces': faces,
              'metadata': metadata}
    return result
|
python
|
{
"resource": ""
}
|
q23153
|
load_stl_ascii
|
train
|
def load_stl_ascii(file_obj):
    """
    Load an ASCII STL file from a file object.

    Parameters
    ----------
    file_obj : open file-like object
      Positioned at the start of the ASCII STL data

    Returns
    ----------
    loaded : dict
      kwargs for a Trimesh constructor with keys:
      vertices:     (n, 3) float, vertices
      faces:        (m, 3) int, indexes of vertices
      face_normals: (m, 3) float, normal vector of each face

    Raises
    ----------
    HeaderError
      If the whitespace-split body is not a whole
      number of 21-word facet records.
    """
    # the first line is the header
    header = file_obj.readline()
    # make sure header is a string, not bytes
    if hasattr(header, 'decode'):
        try:
            header = header.decode('utf-8')
        except BaseException:
            header = ''
    # save header to metadata
    metadata = {'header': header}
    # read all text into one string
    text = file_obj.read()
    # convert bytes to string
    if hasattr(text, 'decode'):
        text = text.decode('utf-8')
    # split by endsolid keyword
    # NOTE: only the first solid in a multi-solid file is loaded
    text = text.lower().split('endsolid')[0]
    # create array of splits
    blob = np.array(text.strip().split())
    # there are 21 'words' in each face:
    #   facet normal nx ny nz outer loop
    #   vertex x y z (three times) endloop endfacet
    face_len = 21
    # length of blob should be multiple of face_len
    if (len(blob) % face_len) != 0:
        raise HeaderError('Incorrect length STL file!')
    face_count = int(len(blob) / face_len)
    # this offset is to be added to a fixed set of tiled indices
    offset = face_len * np.arange(face_count).reshape((-1, 1))
    # words 2-4 of each record are the normal components
    normal_index = np.tile([2, 3, 4], (face_count, 1)) + offset
    # words 8-10, 12-14 and 16-18 are the xyz of the three vertices
    # (words 7, 11 and 15 are the literal 'vertex' keyword)
    vertex_index = np.tile([8, 9, 10,
                            12, 13, 14,
                            16, 17, 18], (face_count, 1)) + offset
    # faces are groups of three sequential vertices
    faces = np.arange(face_count * 3).reshape((-1, 3))
    face_normals = blob[normal_index].astype('<f8')
    vertices = blob[vertex_index.reshape((-1, 3))].astype('<f8')
    return {'vertices': vertices,
            'faces': faces,
            'metadata': metadata,
            'face_normals': face_normals}
|
python
|
{
"resource": ""
}
|
q23154
|
export_stl
|
train
|
def export_stl(mesh):
    """
    Convert a Trimesh object into a binary STL file.

    Parameters
    ---------
    mesh : Trimesh object
      Source geometry to export

    Returns
    ---------
    export : bytes
      Mesh represented in binary STL form
    """
    # header carries the total face count
    header = np.zeros(1, dtype=_stl_dtype_header)
    header['face_count'] = len(mesh.faces)
    # one structured record per face: normal + three vertices
    packed = np.zeros(len(mesh.faces), dtype=_stl_dtype)
    packed['normals'] = mesh.face_normals
    packed['vertices'] = mesh.triangles
    # ndarray.tostring() was deprecated in numpy 1.19 and removed
    # in numpy 2.0; tobytes() returns the identical raw buffer
    export = header.tobytes() + packed.tobytes()
    return export
|
python
|
{
"resource": ""
}
|
q23155
|
export_stl_ascii
|
train
|
def export_stl_ascii(mesh):
    """
    Convert a Trimesh object into an ASCII STL file.

    Parameters
    ---------
    mesh : trimesh.Trimesh
      Source geometry to export

    Returns
    ---------
    export : str
      Mesh represented as an ASCII STL file
    """
    # pack the normal and three corners of every face into a
    # single (n, 4, 3) array so one format call can fill the
    # whole data section at once
    packed = np.zeros((len(mesh.faces), 4, 3))
    packed[:, 0, :] = mesh.face_normals
    packed[:, 1:, :] = mesh.triangles
    # template for a single facet record: 12 format slots
    facet = ('facet normal {} {} {}\nouter loop\n' +
             'vertex {} {} {}\n' * 3 +
             'endloop\nendfacet\n')
    # repeat the template once per face then fill in every value
    body = (facet * len(mesh.faces)).format(*packed.reshape(-1))
    # wrap the data section with the solid header and footer
    return 'solid \n' + body + 'endsolid'
|
python
|
{
"resource": ""
}
|
q23156
|
vertex_graph
|
train
|
def vertex_graph(entities):
    """
    Given a set of entity objects generate a networkx.Graph
    that represents their vertex nodes.

    Parameters
    --------------
    entities : list
      Objects with 'closed' and 'nodes' attributes

    Returns
    -------------
    graph : networkx.Graph
      Graph where node indexes represent vertices
    closed : (n,) int
      Indexes of entities which are 'closed'
    """
    graph = nx.Graph()
    closed = []
    for index, entity in enumerate(entities):
        if entity.closed:
            # closed entities form loops on their own
            # and never enter the shared vertex graph
            closed.append(index)
            continue
        # tag each edge with the entity it came from
        graph.add_edges_from(entity.nodes,
                             entity_index=index)
    return graph, np.array(closed)
|
python
|
{
"resource": ""
}
|
q23157
|
vertex_to_entity_path
|
train
|
def vertex_to_entity_path(vertex_path,
                          graph,
                          entities,
                          vertices=None):
    """
    Convert a path of vertex indices to a path of entity indices.

    NOTE: entities along the path are reversed IN PLACE so the
    resulting entity path can be traversed without flipping any
    entity afterwards.

    Parameters
    ----------
    vertex_path : (n,) int
      Ordered list of vertex indices representing a path
    graph : nx.Graph
      Vertex connectivity, edges tagged with 'entity_index'
    entities : (m,) list
      Entity objects
    vertices : (p, dimension) float
      Vertex points in space

    Returns
    ----------
    entity_path : (q,) int
      Entity indices which make up vertex_path
    """
    def edge_direction(a, b):
        """
        Given two edges, figure out if the first needs to be
        reversed to keep the progression forward.

        [1,0] [1,2]  -1  1
        [1,0] [2,1]  -1 -1
        [0,1] [1,2]   1  1
        [0,1] [2,1]   1 -1

        NOTE: this closure reads vertex_path, entity_path, ea, eb
        from the enclosing scope for its warning message.

        Parameters
        ------------
        a : (2,) int
        b : (2,) int

        Returns
        ------------
        a_direction : int
        b_direction : int
        """
        if a[0] == b[0]:
            return -1, 1
        elif a[0] == b[1]:
            return -1, -1
        elif a[1] == b[0]:
            return 1, 1
        elif a[1] == b[1]:
            return 1, -1
        else:
            msg = 'edges not connected!'
            msg += '\nvertex_path: {}'.format(vertex_path)
            msg += '\nentity_path: {}'.format(entity_path)
            msg += '\nentity[a]: {}'.format(entities[ea].points)
            msg += '\nentity[b]: {}'.format(entities[eb].points)
            constants.log.warning(msg)
            return None, None
    # decide whether the final entity order must be flipped so 2D
    # paths come out counterclockwise; 3D paths are left as-is
    if vertices is None or vertices.shape[1] != 2:
        ccw_direction = 1
    else:
        # test winding on the closed polygon formed by the path
        ccw_check = is_ccw(vertices[np.append(vertex_path,
                                              vertex_path[0])])
        # maps True/False onto +1/-1 slice direction
        ccw_direction = (ccw_check * 2) - 1
    # make sure vertex path is correct type
    vertex_path = np.asanyarray(vertex_path, dtype=np.int64)
    # we will be saving entity indexes
    entity_path = []
    # loop through pairs of vertices
    for i in np.arange(len(vertex_path) + 1):
        # get two wrapped vertex positions
        vertex_path_pos = np.mod(np.arange(2) + i, len(vertex_path))
        vertex_index = vertex_path[vertex_path_pos]
        # the graph edge between the two vertices carries the
        # index of the entity connecting them
        entity_index = graph.get_edge_data(*vertex_index)['entity_index']
        entity_path.append(entity_index)
    # remove duplicate entities and order CCW
    entity_path = grouping.unique_ordered(entity_path)[::ccw_direction]
    # check to make sure there is more than one entity
    if len(entity_path) == 1:
        # apply CCW reverse in place if necessary
        if ccw_direction < 0:
            index = entity_path[0]
            entities[index].reverse()
        return entity_path
    # traverse the entity path and reverse entities in place to
    # align with this path ordering
    round_trip = np.append(entity_path, entity_path[0])
    round_trip = zip(round_trip[:-1], round_trip[1:])
    for ea, eb in round_trip:
        da, db = edge_direction(entities[ea].end_points,
                                entities[eb].end_points)
        if da is not None:
            entities[ea].reverse(direction=da)
            entities[eb].reverse(direction=db)
    entity_path = np.array(entity_path)
    return entity_path
|
python
|
{
"resource": ""
}
|
q23158
|
closed_paths
|
train
|
def closed_paths(entities, vertices):
    """
    Paths are lists of entity indices.
    Generate vertex paths from graph cycle algorithms, then
    convert them to entity paths.  This also reorders
    entity.points in place so each path may be traversed
    without reversing any entity.

    Parameters
    -------------
    entities : (n,) entity objects
      Entity objects
    vertices : (m, dimension) float
      Vertex points in space

    Returns
    -------------
    entity_paths : sequence of (n,) int
      Ordered traversals of entities
    """
    # build the vertex connectivity graph; entities closed on
    # their own come back separately
    graph, closed = vertex_graph(entities)
    # every self-closed entity is a single-entity path
    entity_paths = np.reshape(closed, (-1, 1)).tolist()
    # cycles in the vertex graph correspond to closed loops
    for cycle in np.array(nx.cycles.cycle_basis(graph)):
        # a loop with fewer than two vertices has no length
        if len(cycle) < 2:
            continue
        # convert the vertex cycle to entity indexes
        entity_paths.append(
            vertex_to_entity_path(cycle,
                                  graph,
                                  entities,
                                  vertices))
    return np.array(entity_paths)
|
python
|
{
"resource": ""
}
|
q23159
|
discretize_path
|
train
|
def discretize_path(entities, vertices, path, scale=1.0):
    """
    Turn a list of entity indices into a path of connected points.

    Parameters
    -----------
    entities : (j,) entity objects
      Objects like 'Line', 'Arc', etc.
    vertices : (n, dimension) float
      Vertex points in space.
    path : (m,) int
      Indexes of entities
    scale : float
      Overall scale of drawing used for
      numeric tolerances in certain cases

    Returns
    -----------
    discrete : (p, dimension) float
      Connected points in space that lie on the
      path and can be connected with line segments.
    """
    vertices = np.asanyarray(vertices)
    count = len(path)
    if count == 0:
        raise ValueError('Cannot discretize empty path!')
    if count == 1:
        # single entity: its curve is the whole path
        discrete = np.asanyarray(entities[path[0]].discrete(
            vertices,
            scale=scale))
    else:
        # drop the final point of every entity except the last one
        # so shared endpoints between entities aren't duplicated
        chunks = [entities[index].discrete(vertices, scale=scale)[:-1]
                  for index in path[:-1]]
        chunks.append(entities[path[-1]].discrete(vertices, scale=scale))
        # stack all curves into one (p, dimension) curve
        discrete = np.vstack(chunks)
    # 2D curves are oriented counterclockwise
    if vertices.shape[1] == 2 and not is_ccw(discrete):
        # reversing makes the array non-C-contiguous
        discrete = np.ascontiguousarray(discrete[::-1])
    return discrete
|
python
|
{
"resource": ""
}
|
q23160
|
split
|
train
|
def split(self):
    """
    Split a Path2D into multiple Path2D objects where each
    one has exactly one root curve.

    Parameters
    --------------
    self : trimesh.path.Path2D
      Input geometry

    Returns
    -------------
    split : list of trimesh.path.Path2D
      Original geometry as separate paths
    """
    # avoid a circular import by referencing class of self
    Path2D = type(self)
    # save the results of the split to an array
    split = []
    # get objects from cache to avoid a bajillion
    # cache checks inside the tight loop
    paths = self.paths
    discrete = self.discrete
    polygons_closed = self.polygons_closed
    enclosure_directed = self.enclosure_directed
    for root_index, root in enumerate(self.root):
        # get a list of the root curve's children
        connected = list(enclosure_directed[root].keys())
        # add the root node to the list
        connected.append(root)
        # store new paths and entities
        new_paths = []
        new_entities = []
        for index in connected:
            path = paths[index]
            # add a path which is just sequential indexes
            # into the rebuilt entity list
            new_paths.append(np.arange(len(path)) +
                             len(new_entities))
            # save the entity indexes
            new_entities.extend(path)
        # store the root index from the original drawing
        metadata = copy.deepcopy(self.metadata)
        metadata['split_2D'] = root_index
        # we made the root path the last index of connected
        new_root = np.array([len(new_paths) - 1])
        # prevents the copying from nuking our cache
        with self._cache:
            # create the Path2D with deep-copied entities so
            # the new object shares nothing with the original
            split.append(Path2D(
                entities=copy.deepcopy(self.entities[new_entities]),
                vertices=copy.deepcopy(self.vertices),
                metadata=metadata))
            # add back expensive things to the cache
            split[-1]._cache.update(
                {'paths': new_paths,
                 'polygons_closed': polygons_closed[connected],
                 'discrete': discrete[connected],
                 'root': new_root})
            # set the cache ID so the pre-seeded values are
            # considered valid for the new object
            split[-1]._cache.id_set()
    return np.array(split)
|
python
|
{
"resource": ""
}
|
q23161
|
ray_triangle_id
|
train
|
def ray_triangle_id(triangles,
                    ray_origins,
                    ray_directions,
                    triangles_normal=None,
                    tree=None,
                    multiple_hits=True):
    """
    Find the intersections between a group of triangles and rays.

    Parameters
    -------------
    triangles : (n, 3, 3) float
      Triangles in space
    ray_origins : (m, 3) float
      Ray origin points
    ray_directions : (m, 3) float
      Ray direction vectors
    triangles_normal : (n, 3) float
      Normal vector of triangles, optional
    tree : rtree.Index
      Rtree object holding triangle bounds, built
      from the triangles if not passed
    multiple_hits : bool
      If False only return the first hit along each ray

    Returns
    -----------
    index_triangle : (h,) int
      Index of triangles hit
    index_ray : (h,) int
      Index of ray that hit triangle
    locations : (h, 3) float
      Position of intersection in space
    """
    triangles = np.asanyarray(triangles, dtype=np.float64)
    ray_origins = np.asanyarray(ray_origins, dtype=np.float64)
    ray_directions = np.asanyarray(ray_directions, dtype=np.float64)
    # if we didn't get passed an r-tree for the bounds of each
    # triangle create one here
    if tree is None:
        tree = triangles_mod.bounds_tree(triangles)
    # find the list of likely triangles and which ray they
    # correspond with, via rtree queries
    ray_candidates, ray_id = ray_triangle_candidates(
        ray_origins=ray_origins,
        ray_directions=ray_directions,
        tree=tree)
    # get subsets which are corresponding rays and triangles
    # (c,3,3) triangle candidates
    triangle_candidates = triangles[ray_candidates]
    # (c,3) origins and vectors for the rays
    line_origins = ray_origins[ray_id]
    line_directions = ray_directions[ray_id]
    # get the plane origins and normals from the triangle candidates
    plane_origins = triangle_candidates[:, 0, :]
    if triangles_normal is None:
        plane_normals, triangle_ok = triangles_mod.normals(
            triangle_candidates)
        if not triangle_ok.all():
            raise ValueError('Invalid triangles!')
    else:
        plane_normals = triangles_normal[ray_candidates]
    # find the intersection location of the rays with the planes
    location, valid = intersections.planes_lines(
        plane_origins=plane_origins,
        plane_normals=plane_normals,
        line_origins=line_origins,
        line_directions=line_directions)
    if (len(triangle_candidates) == 0 or
            not valid.any()):
        return [], [], []
    # find the barycentric coordinates of each plane intersection on the
    # triangle candidates
    barycentric = triangles_mod.points_to_barycentric(
        triangle_candidates[valid], location)
    # the plane intersection is inside the triangle if all barycentric
    # coordinates are between 0.0 and 1.0
    hit = np.logical_and((barycentric > -tol.zero).all(axis=1),
                         (barycentric < (1 + tol.zero)).all(axis=1))
    # the result index of the triangle is a candidate with a valid
    # plane intersection and a triangle which contains the point
    index_tri = ray_candidates[valid][hit]
    # the ray index is a subset with a valid plane intersection and
    # contained by a triangle
    index_ray = ray_id[valid][hit]
    # locations are already valid plane intersections, just mask by hits
    location = location[hit]
    # only return points that are forward from the origin
    vector = location - ray_origins[index_ray]
    distance = util.diagonal_dot(vector, ray_directions[index_ray])
    forward = distance > -1e-6
    index_tri = index_tri[forward]
    index_ray = index_ray[forward]
    location = location[forward]
    distance = distance[forward]
    if multiple_hits:
        return index_tri, index_ray, location
    # since we are not returning multiple hits, we need to
    # figure out which hit is first
    if len(index_ray) == 0:
        return index_tri, index_ray, location
    # np.bool was removed from numpy (>= 1.24); the builtin
    # bool produces the identical boolean array dtype
    first = np.zeros(len(index_ray), dtype=bool)
    groups = grouping.group(index_ray)
    for group in groups:
        # keep only the closest hit of each ray
        index = group[distance[group].argmin()]
        first[index] = True
    return index_tri[first], index_ray[first], location[first]
|
python
|
{
"resource": ""
}
|
q23162
|
ray_triangle_candidates
|
train
|
def ray_triangle_candidates(ray_origins,
                            ray_directions,
                            tree):
    """
    Do broad-phase search for triangles that the rays
    may intersect.

    Does this by creating a bounding box for each ray as it
    passes through the volume occupied by the tree and
    querying the r-tree with it.

    Parameters
    ------------
    ray_origins : (m, 3) float
      Ray origin points
    ray_directions : (m, 3) float
      Ray direction vectors
    tree : rtree object
      Contains AABB of each triangle

    Returns
    ----------
    ray_candidates : (n,) int
      Triangle indexes
    ray_id : (n,) int
      Corresponding ray index for each triangle candidate
    """
    ray_bounding = ray_bounds(ray_origins=ray_origins,
                              ray_directions=ray_directions,
                              bounds=tree.bounds)
    ray_candidates = [[]] * len(ray_origins)
    ray_id = [[]] * len(ray_origins)
    for i, bounds in enumerate(ray_bounding):
        # np.int was removed from numpy (>= 1.24);
        # use an explicit fixed-width integer dtype
        ray_candidates[i] = np.array(list(tree.intersection(bounds)),
                                     dtype=np.int64)
        ray_id[i] = np.ones(len(ray_candidates[i]), dtype=np.int64) * i
    ray_id = np.hstack(ray_id)
    ray_candidates = np.hstack(ray_candidates)
    return ray_candidates, ray_id
|
python
|
{
"resource": ""
}
|
q23163
|
ray_bounds
|
train
|
def ray_bounds(ray_origins,
               ray_directions,
               bounds,
               buffer_dist=1e-5):
    """
    Given a set of rays and a bounding box for the volume of
    interest where the rays will be passing through, find the
    bounding boxes of the rays as they pass through the volume.

    Parameters
    ------------
    ray_origins : (m, 3) float
      Ray origin points
    ray_directions : (m, 3) float
      Ray direction vectors
    bounds : (2, 3) float
      Bounding box (min, max)
    buffer_dist : float
      Distance to pad zero width bounding boxes

    Returns
    ---------
    ray_bounding : (m, 6) float
      AABB of rays passing through volume
    """
    ray_origins = np.asanyarray(ray_origins, dtype=np.float64)
    ray_directions = np.asanyarray(ray_directions, dtype=np.float64)
    # bounding box we are testing against
    bounds = np.asanyarray(bounds)
    # index of the dominant axis of every ray direction
    axis = np.abs(ray_directions).argmax(axis=1)
    # the (min, max) interval of the volume along each
    # ray's dominant axis
    axis_bound = bounds.reshape((2, -1)).T[axis]
    # origin and direction component along the dominant axis
    row = np.arange(len(axis))
    axis_ori = ray_origins[row, axis].reshape((-1, 1))
    axis_dir = ray_directions[row, axis].reshape((-1, 1))
    # parametric equation of a line:
    #   point = direction * t + origin
    # solved for t where the ray crosses each bound plane
    t = (axis_bound - axis_ori) / axis_dir
    # prevent the bounding box from including triangles
    # behind the ray origin
    t[t < buffer_dist] = buffer_dist
    # cartesian points where each ray crosses the two planes
    on_a = ray_origins + ray_directions * t[:, 0].reshape((-1, 1))
    on_b = ray_origins + ray_directions * t[:, 1].reshape((-1, 1))
    on_plane = np.column_stack(
        (on_a, on_b)).reshape(
            (-1, 2, ray_directions.shape[1]))
    ray_bounding = np.hstack((on_plane.min(axis=1),
                              on_plane.max(axis=1)))
    # pad the box so axis-aligned rays don't produce zero-volume
    # bounding boxes, which may confuse r-tree intersection queries
    ray_bounding += np.array([-1, -1, -1, 1, 1, 1]) * buffer_dist
    return ray_bounding
|
python
|
{
"resource": ""
}
|
q23164
|
RayMeshIntersector.intersects_id
|
train
|
def intersects_id(self,
                  ray_origins,
                  ray_directions,
                  return_locations=False,
                  multiple_hits=True,
                  **kwargs):
    """
    Find the intersections between the current mesh and a list of rays.

    Parameters
    ------------
    ray_origins : (m, 3) float
      Ray origin points
    ray_directions : (m, 3) float
      Ray direction vectors
    return_locations : bool
      Return hit locations or not
    multiple_hits : bool
      Consider multiple hits of each ray or not

    Returns
    -----------
    index_triangle : (h,) int
      Index of triangles hit
    index_ray : (h,) int
      Index of ray that hit triangle
    locations : (h, 3) float
      (optional) Position of intersection in space
    """
    # run the triangle-level ray query using the mesh's r-tree
    index_tri, index_ray, locations = ray_triangle_id(
        triangles=self.mesh.triangles,
        ray_origins=ray_origins,
        ray_directions=ray_directions,
        tree=self.mesh.triangles_tree,
        multiple_hits=multiple_hits,
        triangles_normal=self.mesh.face_normals)

    if not return_locations:
        return index_tri, index_ray

    # nothing hit: pass the empty arrays through unchanged
    if len(index_tri) == 0:
        return index_tri, index_ray, locations

    # deduplicate hits that share both a location and a ray index
    stacked = np.column_stack((locations, index_ray))
    unique = grouping.unique_rows(stacked)[0]
    return index_tri[unique], index_ray[unique], locations[unique]
|
python
|
{
"resource": ""
}
|
q23165
|
RayMeshIntersector.intersects_any
|
train
|
def intersects_any(self,
                   ray_origins,
                   ray_directions,
                   **kwargs):
    """
    Find out if each ray hit any triangle on the mesh.

    Parameters
    ------------
    ray_origins : (m, 3) float
      Ray origin points
    ray_directions : (m, 3) float
      Ray direction vectors

    Returns
    ---------
    hit : (m,) bool
      Per-ray flag: did that ray hit any triangle on the mesh
    """
    index_tri, index_ray = self.intersects_id(ray_origins,
                                              ray_directions)
    # `np.bool` was removed in numpy >= 1.24; the builtin is equivalent
    hit_any = np.zeros(len(ray_origins), dtype=bool)
    hit_idx = np.unique(index_ray)
    if len(hit_idx) > 0:
        hit_any[hit_idx] = True
    return hit_any
|
python
|
{
"resource": ""
}
|
q23166
|
dict_to_path
|
train
|
def dict_to_path(as_dict):
    """
    Turn a pure dict into a dict containing entity objects that
    can be sent directly to a Path constructor.

    Parameters
    -----------
    as_dict : dict
      Has keys: 'vertices', 'entities'

    Returns
    ------------
    kwargs : dict
      Has keys: 'vertices', 'entities'
    """
    # constructors keyed by serialized entity type name
    loaders = {'Arc': Arc, 'Line': Line}
    # shallow copy so only the entity list is replaced
    result = as_dict.copy()
    # rebuild each serialized entity with its matching constructor
    result['entities'] = [loaders[e['type']](points=e['points'],
                                             closed=e['closed'])
                          for e in as_dict['entities']]
    return result
|
python
|
{
"resource": ""
}
|
q23167
|
lines_to_path
|
train
|
def lines_to_path(lines):
    """
    Turn line segments into a Path2D or Path3D object.

    Parameters
    ------------
    lines : (n, 2, dimension) or (n, dimension) float
      Line segments or connected polyline curve in 2D or 3D

    Returns
    -----------
    kwargs : dict
      kwargs for Path constructor

    Raises
    -----------
    ValueError
      If lines are neither (n, (2|3)) nor (n, 2, (2|3))
    """
    lines = np.asanyarray(lines, dtype=np.float64)

    if util.is_shape(lines, (-1, (2, 3))):
        # the case where we have a list of points
        # we are going to assume they are connected
        return {'entities': np.array([Line(np.arange(len(lines)))]),
                'vertices': lines}
    elif util.is_shape(lines, (-1, 2, (2, 3))):
        # case where we have line segments in 2D or 3D
        dimension = lines.shape[-1]
        # convert lines to even number of (n, dimension) points
        lines = lines.reshape((-1, dimension))
        # merge duplicate vertices
        unique, inverse = grouping.unique_rows(lines)
        # use edges_to_path to skip creating
        # a bajillion individual line entities which
        # will be super slow vs. fewer polyline entities
        return edges_to_path(edges=inverse.reshape((-1, 2)),
                             vertices=lines[unique])
    # removed an unreachable `return result` that followed this raise
    raise ValueError('Lines must be (n,(2|3)) or (n,2,(2|3))')
|
python
|
{
"resource": ""
}
|
q23168
|
polygon_to_path
|
train
|
def polygon_to_path(polygon):
    """
    Load shapely Polygon objects into a trimesh.path.Path2D object

    Parameters
    -------------
    polygon : shapely.geometry.Polygon
      Input geometry

    Returns
    -------------
    kwargs : dict
      Keyword arguments for Path2D constructor
    """
    # the exterior boundary becomes the first polyline entity
    vertices = [list(c) for c in polygon.exterior.coords]
    entities = [Line(points=np.arange(len(vertices)))]
    # every interior ring becomes an additional polyline entity
    # whose indices are offset past the vertices collected so far
    for ring in polygon.interiors:
        coords = list(ring.coords)
        entities.append(Line(np.arange(len(coords)) + len(vertices)))
        vertices.extend(coords)
    # make sure result arrays are numpy
    return {'entities': np.array(entities),
            'vertices': np.array(vertices)}
|
python
|
{
"resource": ""
}
|
q23169
|
linestrings_to_path
|
train
|
def linestrings_to_path(multi):
    """
    Load shapely LineString objects into a trimesh.path.Path2D object

    Parameters
    -------------
    multi : shapely.geometry.LineString or MultiLineString
      Input 2D geometry

    Returns
    -------------
    kwargs : dict
      Keyword arguments for Path2D constructor
    """
    # a single LineString is treated as a one-element sequence
    if not util.is_sequence(multi):
        multi = [multi]

    entities = []
    vertices = []
    for geometry in multi:
        # skip anything without coordinates
        if not hasattr(geometry, 'coords'):
            continue
        coords = np.array(geometry.coords)
        # degenerate lines with fewer than two points are dropped
        if len(coords) < 2:
            continue
        # index the new entity past the vertices collected so far
        entities.append(Line(np.arange(len(coords)) + len(vertices)))
        vertices.extend(coords)

    return {'entities': np.array(entities),
            'vertices': np.array(vertices)}
|
python
|
{
"resource": ""
}
|
q23170
|
faces_to_path
|
train
|
def faces_to_path(mesh, face_ids=None, **kwargs):
    """
    Given a mesh and face indices find the outline edges and
    turn them into a Path3D.

    Parameters
    ---------
    mesh : trimesh.Trimesh
      Triangulated surface in 3D
    face_ids : (n,) int
      Indexes referencing mesh.faces

    Returns
    ---------
    kwargs : dict
      Kwargs for Path3D constructor
    """
    if face_ids is None:
        # no subset requested: consider every edge of the mesh
        edges = mesh.edges_sorted
    else:
        # each face contributes three sorted edges, so reshaping to
        # (-1, 6) lets us select all the edges of a face as one row
        edges = mesh.edges_sorted.reshape(
            (-1, 6))[face_ids].reshape((-1, 2))
    # an edge which occurs only once is on the boundary
    boundary = grouping.group_rows(edges, require_count=1)
    # convert the boundary edges into Path3D constructor kwargs
    kwargs.update(edges_to_path(edges=edges[boundary],
                                vertices=mesh.vertices))
    return kwargs
|
python
|
{
"resource": ""
}
|
q23171
|
edges_to_path
|
train
|
def edges_to_path(edges, vertices, **kwargs):
    """
    Given an edge list of indices and associated vertices
    representing lines, generate kwargs for a Path object.

    Parameters
    -----------
    edges : (n, 2) int
      Vertex indices of line segments
    vertices : (m, dimension) float
      Vertex positions where dimension is 2 or 3

    Returns
    ----------
    kwargs : dict
      Keyword arguments for the Path constructor
    """
    # ordered depth-first traversals of the edge graph
    traversals = graph.traversals(edges, mode='dfs')
    # guarantee every consecutive pair of indices in each
    # traversal corresponds to an actual edge in the input
    connected = graph.fill_traversals(traversals, edges=edges)
    # one polyline entity per connected traversal
    kwargs.update({'entities': [Line(t) for t in connected],
                   'vertices': vertices})
    return kwargs
|
python
|
{
"resource": ""
}
|
q23172
|
point_plane_distance
|
train
|
def point_plane_distance(points,
                         plane_normal,
                         plane_origin=(0.0, 0.0, 0.0)):
    """
    The minimum perpendicular distance of a point to a plane.

    Parameters
    -----------
    points : (n, 3) float
      Points in space
    plane_normal : (3,) float
      Normal vector of the plane
    plane_origin : (3,) float
      Plane origin in space

    Returns
    ------------
    distances : (n,) float
      Signed distance from each point to the plane
    """
    # note: the default was a mutable list; an immutable tuple
    # avoids the shared-mutable-default pitfall
    points = np.asanyarray(points, dtype=np.float64)
    # vector from the plane origin to each point
    w = points - plane_origin
    # project onto the normal, normalizing so the result is a
    # distance even when plane_normal is not a unit vector
    distances = np.dot(plane_normal, w.T) / np.linalg.norm(plane_normal)
    return distances
|
python
|
{
"resource": ""
}
|
q23173
|
major_axis
|
train
|
def major_axis(points):
    """
    Returns an approximate vector representing the major axis of points

    Parameters
    -------------
    points : (n, dimension) float
      Points in space

    Returns
    -------------
    axis : (dimension,) float
      Unit vector along the approximate major axis
    """
    # singular value decomposition of the point array
    _, sigma, vh = np.linalg.svd(points)
    # weight the right singular vectors by their singular values
    # and normalize the combination into a unit vector
    return util.unitize(np.dot(sigma, vh))
|
python
|
{
"resource": ""
}
|
q23174
|
plane_fit
|
train
|
def plane_fit(points):
    """
    Given a set of points, find an origin and normal using SVD.

    Parameters
    ---------
    points : (n, 3) float
      Points in 3D space

    Returns
    ---------
    C : (3,) float
      Point on the plane
    N : (3,) float
      Normal vector of plane
    """
    points = np.asanyarray(points, dtype=np.float64)
    # the centroid of the points is used as the plane origin
    C = points.mean(axis=0)
    # scatter matrix of the centered points
    centered = points - C
    M = np.dot(centered.T, centered)
    # the singular vector with the smallest singular value is
    # the direction of least variance, i.e. the plane normal
    N = np.linalg.svd(M)[0][:, -1]
    return C, N
|
python
|
{
"resource": ""
}
|
q23175
|
tsp
|
train
|
def tsp(points, start=0):
    """
    Find an ordering of points where each is visited and
    the next point is the closest in euclidean distance,
    and if there are multiple points with equal distance
    go to an arbitrary one.

    Assumes every point is visitable from every other point,
    i.e. the travelling salesman problem on a fully connected
    graph. It is not a MINIMUM traversal; rather it is a
    "not totally goofy traversal, quickly." On random points
    this traversal is often ~20x shorter than random ordering.

    Parameters
    ---------------
    points : (n, dimension) float
      ND points in space
    start : int
      The index of points we should start at

    Returns
    ---------------
    traversal : (n,) int
      Ordered traversal visiting every point
    distances : (n - 1,) float
      The euclidean distance between points in traversal
    """
    # points should be float
    points = np.asanyarray(points, dtype=np.float64)
    if len(points.shape) != 2:
        raise ValueError('points must be (n, dimension)!')
    # start should be an index
    start = int(start)
    # a mask of unvisited points by index
    # note: `np.bool` was removed in numpy >= 1.24, use builtin bool
    unvisited = np.ones(len(points), dtype=bool)
    unvisited[start] = False
    # traversal of points by index
    traversal = np.zeros(len(points), dtype=np.int64) - 1
    traversal[0] = start
    # list of distances
    distances = np.zeros(len(points) - 1, dtype=np.float64)
    # a mask of indexes in order
    index_mask = np.arange(len(points), dtype=np.int64)

    # in the loop we want to call distances.sum(axis=1)
    # a lot and it's actually kind of slow for "reasons"
    # dot products with ones is equivalent and ~2x faster
    sum_ones = np.ones(points.shape[1])

    # loop through all points
    for i in range(len(points) - 1):
        # which point are we currently on
        current = points[traversal[i]]

        # do NlogN distance query
        # use dot instead of .sum(axis=1) or np.linalg.norm
        # as it is faster, also don't square root here
        dist = np.dot((points[unvisited] - current) ** 2,
                      sum_ones)

        # minimum distance index
        min_index = dist.argmin()
        # successor is closest unvisited point
        successor = index_mask[unvisited][min_index]
        # update the mask
        unvisited[successor] = False
        # store the index to the traversal
        traversal[i + 1] = successor
        # store the distance
        distances[i] = dist[min_index]

    # we were comparing distance^2 so take square root
    distances **= 0.5

    return traversal, distances
|
python
|
{
"resource": ""
}
|
q23176
|
PointCloud.copy
|
train
|
def copy(self):
    """
    Safely get a copy of the current point cloud.

    Copied objects will have emptied caches to avoid memory
    issues and so may be slow on initial operations until
    caches are regenerated.

    Current object will *not* have its cache cleared.

    Returns
    ---------
    copied : trimesh.PointCloud
      Copy of current point cloud
    """
    # start from an empty PointCloud
    duplicate = PointCloud(vertices=None)
    # deep-copy vertex data and metadata so the new object
    # shares no mutable state with the original
    duplicate._data.data = copy.deepcopy(self._data.data)
    duplicate.metadata = copy.deepcopy(self.metadata)
    # the copy starts with an empty cache
    duplicate._cache.clear()
    return duplicate
|
python
|
{
"resource": ""
}
|
q23177
|
PointCloud.apply_transform
|
train
|
def apply_transform(self, transform):
    """
    Apply a homogenous transformation to the PointCloud
    object in- place.

    Parameters
    --------------
    transform : (4, 4) float
      Homogenous transformation to apply to PointCloud
    """
    # transform the vertices and store the result back in-place
    moved = transformations.transform_points(self.vertices,
                                             matrix=transform)
    self.vertices = moved
|
python
|
{
"resource": ""
}
|
q23178
|
PointCloud.bounds
|
train
|
def bounds(self):
    """
    The axis aligned bounds of the PointCloud

    Returns
    ------------
    bounds : (2, 3) float
      Minimum and maximum vertex coordinates
    """
    # stack the per-axis minima and maxima into a (2, 3) array
    vertices = self.vertices
    return np.array([vertices.min(axis=0),
                     vertices.max(axis=0)])
|
python
|
{
"resource": ""
}
|
q23179
|
reflection_matrix
|
train
|
def reflection_matrix(point, normal):
    """Return matrix to mirror at plane defined by point and normal vector.

    Parameters
    ----------
    point : (3,) or (4,) float
      A point on the mirror plane.
    normal : (3,) float
      Normal vector of the mirror plane.

    Returns
    -------
    M : (4, 4) float
      Homogeneous reflection matrix.
    """
    n = unit_vector(normal[:3])
    M = np.identity(4)
    # Householder-style reflection for the rotational part
    M[:3, :3] -= 2.0 * np.outer(n, n)
    # translation term keeps points on the plane fixed
    M[:3, 3] = (2.0 * np.dot(point[:3], n)) * n
    return M
|
python
|
{
"resource": ""
}
|
q23180
|
reflection_from_matrix
|
train
|
def reflection_from_matrix(matrix):
    """Return mirror plane point and normal vector from reflection matrix.

    Parameters
    ----------
    matrix : (4, 4) float
      Homogeneous reflection matrix.

    Returns
    -------
    point : (4,) float
      A point on the mirror plane (homogeneous, w == 1).
    normal : (3,) float
      Normal vector of the mirror plane.

    Raises
    ------
    ValueError
      If the matrix does not have the eigenstructure of a reflection.
    """
    M = np.array(matrix, dtype=np.float64, copy=False)
    # the plane normal is the eigenvector of the rotational
    # part corresponding to eigenvalue -1
    w, V = np.linalg.eig(M[:3, :3])
    found = np.where(abs(np.real(w) + 1.0) < 1e-8)[0]
    if len(found) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
    normal = np.real(V[:, found[0]]).squeeze()
    # any fixed point of the full matrix (eigenvalue 1) lies on the plane
    w, V = np.linalg.eig(M)
    found = np.where(abs(np.real(w) - 1.0) < 1e-8)[0]
    if len(found) == 0:
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = np.real(V[:, found[-1]]).squeeze()
    point /= point[3]
    return point, normal
|
python
|
{
"resource": ""
}
|
q23181
|
clip_matrix
|
train
|
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
    """Return matrix to obtain normalized device coordinates from frustum.

    The frustum bounds are axis-aligned along x (left, right),
    y (bottom, top) and z (near, far).

    Normalized device coordinates are in range [-1, 1] if coordinates are
    inside the frustum.

    If perspective is True the frustum is a truncated pyramid with the
    perspective point at origin and direction along z axis, otherwise an
    orthographic canonical view volume (a box).

    Homogeneous coordinates transformed by the perspective clip matrix
    need to be dehomogenized (divided by w coordinate).

    >>> frustum = np.random.rand(6)
    >>> frustum[1] += frustum[0]
    >>> frustum[3] += frustum[2]
    >>> frustum[5] += frustum[4]
    >>> M = clip_matrix(perspective=False, *frustum)
    >>> a = np.dot(M, [frustum[0], frustum[2], frustum[4], 1])
    >>> np.allclose(a, [-1., -1., -1., 1.])
    True
    >>> b = np.dot(M, [frustum[1], frustum[3], frustum[5], 1])
    >>> np.allclose(b, [ 1.,  1.,  1.,  1.])
    True
    >>> M = clip_matrix(perspective=True, *frustum)
    >>> v = np.dot(M, [frustum[0], frustum[2], frustum[4], 1])
    >>> c = v / v[3]
    >>> np.allclose(c, [-1., -1., -1.,  1.])
    True
    >>> v = np.dot(M, [frustum[1], frustum[3], frustum[4], 1])
    >>> d = v / v[3]
    >>> np.allclose(d, [ 1.,  1., -1.,  1.])
    True
    """
    # degenerate frusta (zero or negative extent on any axis) are invalid
    if left >= right or bottom >= top or near >= far:
        raise ValueError("invalid frustum")
    if perspective:
        # perspective projection requires the near plane to be
        # strictly in front of the eye at the origin
        if near <= _EPS:
            raise ValueError("invalid frustum: near <= 0")
        t = 2.0 * near
        # perspective projection: the transformed w carries -z, so
        # results must be dehomogenized (divided by w) by the caller
        M = [[t / (left - right), 0.0, (right + left) / (right - left), 0.0],
             [0.0, t / (bottom - top), (top + bottom) / (top - bottom), 0.0],
             [0.0, 0.0, (far + near) / (near - far), t * far / (far - near)],
             [0.0, 0.0, -1.0, 0.0]]
    else:
        # orthographic projection: scale each axis into [-1, 1]
        # with a pure affine transform, no dehomogenization needed
        M = [[2.0 / (right - left), 0.0, 0.0, (right + left) / (left - right)],
             [0.0, 2.0 / (top - bottom), 0.0, (top + bottom) / (bottom - top)],
             [0.0, 0.0, 2.0 / (far - near), (far + near) / (near - far)],
             [0.0, 0.0, 0.0, 1.0]]
    return np.array(M)
|
python
|
{
"resource": ""
}
|
q23182
|
shear_matrix
|
train
|
def shear_matrix(angle, direction, point, normal):
    """Return matrix to shear by angle along direction vector on shear plane.

    The shear plane is defined by a point and normal vector. The direction
    vector must be orthogonal to the plane's normal vector.

    A point P is transformed by the shear matrix into P" such that
    the vector P-P" is parallel to the direction vector and its extent is
    given by the angle of P-P'-P", where P' is the orthogonal projection
    of P onto the shear plane.

    Raises
    ------
    ValueError
      If direction is not orthogonal to normal.
    """
    normal = unit_vector(normal[:3])
    direction = unit_vector(direction[:3])
    # the shear direction must lie in the shear plane
    if abs(np.dot(normal, direction)) > 1e-6:
        raise ValueError("direction and normal vectors are not orthogonal")
    # displacement magnitude per unit distance from the plane
    magnitude = math.tan(angle)
    M = np.identity(4)
    M[:3, :3] += magnitude * np.outer(direction, normal)
    # translation term keeps points on the shear plane fixed
    M[:3, 3] = -magnitude * np.dot(point[:3], normal) * direction
    return M
|
python
|
{
"resource": ""
}
|
q23183
|
shear_from_matrix
|
train
|
def shear_from_matrix(matrix):
    """Return shear angle, direction and plane from shear matrix.

    >>> angle = np.pi / 2.0
    >>> direct = [0.0, 1.0, 0.0]
    >>> point = [0.0, 0.0, 0.0]
    >>> normal = np.cross(direct, np.roll(direct,1))
    >>> S0 = shear_matrix(angle, direct, point, normal)
    >>> angle, direct, point, normal = shear_from_matrix(S0)
    >>> S1 = shear_matrix(angle, direct, point, normal)
    >>> is_same_transform(S0, S1)
    True
    """
    M = np.array(matrix, dtype=np.float64, copy=False)
    M33 = M[:3, :3]
    # normal: cross independent eigenvectors corresponding to the eigenvalue 1
    w, V = np.linalg.eig(M33)
    i = np.where(abs(np.real(w) - 1.0) < 1e-4)[0]
    if len(i) < 2:
        raise ValueError("no two linear independent eigenvectors found %s" % w)
    V = np.real(V[:, i]).squeeze().T
    # pick the eigenvector pair whose cross product has the largest
    # magnitude, i.e. the most linearly independent pair; their cross
    # product is the shear plane normal
    lenorm = -1.0
    for i0, i1 in ((0, 1), (0, 2), (1, 2)):
        n = np.cross(V[i0], V[i1])
        w = vector_norm(n)
        if w > lenorm:
            lenorm = w
            normal = n
    normal /= lenorm
    # direction and angle
    # the off-plane part of the matrix moves points along the shear
    # direction; its magnitude is tan(angle)
    direction = np.dot(M33 - np.identity(3), normal)
    angle = vector_norm(direction)
    direction /= angle
    angle = math.atan(angle)
    # point: eigenvector corresponding to eigenvalue 1
    w, V = np.linalg.eig(M)
    i = np.where(abs(np.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    point = np.real(V[:, i[-1]]).squeeze()
    point /= point[3]
    return angle, direction, point, normal
|
python
|
{
"resource": ""
}
|
q23184
|
compose_matrix
|
train
|
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
                   perspective=None):
    """Return transformation matrix from sequence of transformations.

    This is the inverse of the decompose_matrix function.

    Sequence of transformations:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix

    Returns
    -------
    M : (4, 4) float
      Homogeneous matrix, normalized so that M[3, 3] == 1.
    """
    # compose in fixed order: perspective, translate, rotate, shear, scale
    M = np.identity(4)
    if perspective is not None:
        P = np.identity(4)
        P[3, :] = perspective[:4]
        M = np.dot(M, P)
    if translate is not None:
        T = np.identity(4)
        T[:3, 3] = translate[:3]
        M = np.dot(M, T)
    if angles is not None:
        M = np.dot(M, euler_matrix(angles[0], angles[1], angles[2], 'sxyz'))
    if shear is not None:
        Z = np.identity(4)
        # upper triangular shear terms: x-y, x-z, y-z
        Z[0, 1], Z[0, 2], Z[1, 2] = shear[0], shear[1], shear[2]
        M = np.dot(M, Z)
    if scale is not None:
        S = np.identity(4)
        S[0, 0], S[1, 1], S[2, 2] = scale[0], scale[1], scale[2]
        M = np.dot(M, S)
    # normalize the homogeneous scale
    M /= M[3, 3]
    return M
|
python
|
{
"resource": ""
}
|
q23185
|
euler_matrix
|
train
|
def euler_matrix(ai, aj, ak, axes='sxyz'):
    """Return homogeneous rotation matrix from Euler angles and axis sequence.

    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple

    >>> R = euler_matrix(1, 2, 3, 'syxz')
    >>> np.allclose(np.sum(R[0]), -1.34786452)
    True
    >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
    >>> np.allclose(np.sum(R[0]), -0.383436184)
    True
    >>> ai, aj, ak = (4*math.pi) * (np.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    >>> for axes in _TUPLE2AXES.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    """
    try:
        # decode a string specification like 'sxyz'
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
    except (AttributeError, KeyError):
        # already an encoded tuple: check it is a known one
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes
    # i, j, k are the axis indices in application order
    i = firstaxis
    j = _NEXT_AXIS[i + parity]
    k = _NEXT_AXIS[i - parity + 1]
    if frame:
        # rotating frame: swap the first and last angles
        ai, ak = ak, ai
    if parity:
        # odd parity: negate all angles
        ai, aj, ak = -ai, -aj, -ak
    si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
    ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
    # products reused in several matrix entries below
    cc, cs = ci * ck, ci * sk
    sc, ss = si * ck, si * sk
    M = np.identity(4)
    if repetition:
        # repeated-axis sequences (e.g. 'xyx')
        M[i, i] = cj
        M[i, j] = sj * si
        M[i, k] = sj * ci
        M[j, i] = sj * sk
        M[j, j] = -cj * ss + cc
        M[j, k] = -cj * cs - sc
        M[k, i] = -sj * ck
        M[k, j] = cj * sc + cs
        M[k, k] = cj * cc - ss
    else:
        # distinct-axis sequences (e.g. 'xyz')
        M[i, i] = cj * ck
        M[i, j] = sj * sc - cs
        M[i, k] = sj * cc + ss
        M[j, i] = cj * sk
        M[j, j] = sj * ss + cc
        M[j, k] = sj * cs - sc
        M[k, i] = -sj
        M[k, j] = cj * si
        M[k, k] = cj * ci
    return M
|
python
|
{
"resource": ""
}
|
q23186
|
euler_from_matrix
|
train
|
def euler_from_matrix(matrix, axes='sxyz'):
    """Return Euler angles from rotation matrix for specified axis sequence.

    axes : One of 24 axis sequences as string or encoded tuple

    Note that many Euler angle triplets can describe one matrix.

    >>> R0 = euler_matrix(1, 2, 3, 'syxz')
    >>> al, be, ga = euler_from_matrix(R0, 'syxz')
    >>> R1 = euler_matrix(al, be, ga, 'syxz')
    >>> np.allclose(R0, R1)
    True
    >>> angles = (4*math.pi) * (np.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R0 = euler_matrix(axes=axes, *angles)
    ...    R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
    ...    if not np.allclose(R0, R1): print(axes, "failed")
    """
    try:
        # decode a string specification like 'sxyz'
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        # already an encoded tuple: check it is a known one
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes
    # i, j, k are the axis indices in application order
    i = firstaxis
    j = _NEXT_AXIS[i + parity]
    k = _NEXT_AXIS[i - parity + 1]
    M = np.array(matrix, dtype=np.float64, copy=False)[:3, :3]
    if repetition:
        # repeated-axis sequences (e.g. 'xyx')
        sy = math.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k])
        if sy > _EPS:
            ax = math.atan2(M[i, j], M[i, k])
            ay = math.atan2(sy, M[i, i])
            az = math.atan2(M[j, i], -M[k, i])
        else:
            # near-degenerate case: the middle angle's sine vanishes,
            # so the third angle is set to zero and folded into the first
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2(sy, M[i, i])
            az = 0.0
    else:
        # distinct-axis sequences (e.g. 'xyz')
        cy = math.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i])
        if cy > _EPS:
            ax = math.atan2(M[k, j], M[k, k])
            ay = math.atan2(-M[k, i], cy)
            az = math.atan2(M[j, i], M[i, i])
        else:
            # near-degenerate case: the middle angle's cosine vanishes,
            # so the third angle is set to zero and folded into the first
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2(-M[k, i], cy)
            az = 0.0
    if parity:
        # undo the sign flip applied by euler_matrix for odd parity
        ax, ay, az = -ax, -ay, -az
    if frame:
        # undo the first/last angle swap applied for rotating frames
        ax, az = az, ax
    return ax, ay, az
|
python
|
{
"resource": ""
}
|
q23187
|
quaternion_from_euler
|
train
|
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
    """Return quaternion from Euler angles and axis sequence.

    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple

    >>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
    >>> np.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
    True
    """
    try:
        # decode a string specification like 'sxyz'
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        # already an encoded tuple: check it is a known one
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes
    # quaternion component indices (offset by one for the scalar at q[0])
    i = firstaxis + 1
    j = _NEXT_AXIS[i + parity - 1] + 1
    k = _NEXT_AXIS[i - parity] + 1
    if frame:
        # rotating frame: swap the first and last angles
        ai, ak = ak, ai
    if parity:
        # odd parity: negate the middle angle
        aj = -aj
    # quaternions use half angles
    ai /= 2.0
    aj /= 2.0
    ak /= 2.0
    ci = math.cos(ai)
    si = math.sin(ai)
    cj = math.cos(aj)
    sj = math.sin(aj)
    ck = math.cos(ak)
    sk = math.sin(ak)
    # products reused in the component formulas below
    cc = ci * ck
    cs = ci * sk
    sc = si * ck
    ss = si * sk
    # scalar-first quaternion: [w, x, y, z]
    q = np.empty((4, ))
    if repetition:
        # repeated-axis sequences (e.g. 'xyx')
        q[0] = cj * (cc - ss)
        q[i] = cj * (cs + sc)
        q[j] = sj * (cc + ss)
        q[k] = sj * (cs - sc)
    else:
        # distinct-axis sequences (e.g. 'xyz')
        q[0] = cj * cc + sj * ss
        q[i] = cj * sc - sj * cs
        q[j] = cj * ss + sj * cc
        q[k] = cj * cs - sj * sc
    if parity:
        # undo the middle-angle sign flip in the affected component
        q[j] *= -1.0
    return q
|
python
|
{
"resource": ""
}
|
q23188
|
arcball_constrain_to_axis
|
train
|
def arcball_constrain_to_axis(point, axis):
    """Return sphere point perpendicular to axis."""
    vec = np.array(point, dtype=np.float64, copy=True)
    ax = np.array(axis, dtype=np.float64, copy=True)
    # strip the component of the point along the axis
    vec -= ax * np.dot(ax, vec)
    length = vector_norm(vec)
    if length > _EPS:
        # normalize, flipping so the z component stays non-negative
        if vec[2] < 0.0:
            np.negative(vec, vec)
        vec /= length
        return vec
    # point was parallel to the axis: pick any perpendicular unit vector
    if ax[2] == 1.0:
        return np.array([1.0, 0.0, 0.0])
    return unit_vector([-ax[1], ax[0], 0.0])
|
python
|
{
"resource": ""
}
|
q23189
|
arcball_nearest_axis
|
train
|
def arcball_nearest_axis(point, axes):
    """Return axis, which arc is nearest to point."""
    point = np.array(point, dtype=np.float64, copy=False)
    best_axis = None
    best_score = -1.0
    for candidate in axes:
        # score by how well the axis-constrained point aligns with the query
        score = np.dot(arcball_constrain_to_axis(point, candidate), point)
        if score > best_score:
            best_axis = candidate
            best_score = score
    return best_axis
|
python
|
{
"resource": ""
}
|
q23190
|
vector_norm
|
train
|
def vector_norm(data, axis=None, out=None):
    """Return length, i.e. Euclidean norm, of ndarray along axis.

    Parameters
    ----------
    data : array-like
      Input values.
    axis : int, optional
      Axis along which to take the norm; None means the
      whole array (a 1-D input then returns a scalar).
    out : ndarray, optional
      If provided, the norms are written into this array
      and nothing is returned.

    >>> np.allclose(vector_norm([3.0, 4.0]), 5.0)
    True
    >>> vector_norm([])
    0.0
    >>> vector_norm([1])
    1.0
    """
    data = np.array(data, dtype=np.float64, copy=True)
    if out is not None:
        # in-place variant: square, reduce, then sqrt into `out`
        data *= data
        np.sum(data, axis=axis, out=out)
        np.sqrt(out, out)
        return
    if data.ndim == 1:
        # fast path for a single vector
        return math.sqrt(np.dot(data, data))
    data *= data
    result = np.atleast_1d(np.sum(data, axis=axis))
    np.sqrt(result, result)
    return result
|
python
|
{
"resource": ""
}
|
q23191
|
is_same_transform
|
train
|
def is_same_transform(matrix0, matrix1):
    """Return True if two matrices perform same transformation.

    Both matrices are normalized by their homogeneous scale
    before comparison, so scalar multiples compare equal.
    """
    m0 = np.array(matrix0, dtype=np.float64, copy=True)
    m1 = np.array(matrix1, dtype=np.float64, copy=True)
    m0 /= m0[3, 3]
    m1 /= m1[3, 3]
    return np.allclose(m0, m1)
|
python
|
{
"resource": ""
}
|
q23192
|
is_same_quaternion
|
train
|
def is_same_quaternion(q0, q1):
    """Return True if two quaternions are equal.

    A quaternion and its negation represent the same rotation,
    so q and -q compare as equal.
    """
    first = np.array(q0)
    second = np.array(q1)
    if np.allclose(first, second):
        return True
    return np.allclose(first, -second)
|
python
|
{
"resource": ""
}
|
q23193
|
transform_around
|
train
|
def transform_around(matrix, point):
    """
    Given a transformation matrix, apply its rotation
    around a point in space.

    Parameters
    ----------
    matrix : (4, 4) or (3, 3) float
      Homogeneous transformation matrix
    point : (3,) or (2,) float
      Point in space

    Returns
    ---------
    result : (4, 4) or (3, 3) float
      Transformation applied around the point

    Raises
    ------
    ValueError
      If matrix shape does not match the point dimension.
    """
    point = np.asanyarray(point)
    matrix = np.asanyarray(matrix)
    dim = len(point)
    if matrix.shape != (dim + 1, dim + 1):
        raise ValueError('matrix must be (d+1, d+1)')
    # translate the point to the origin, transform, translate back
    to_origin = np.eye(dim + 1)
    to_origin[:dim, dim] = -point
    from_origin = np.eye(dim + 1)
    from_origin[:dim, dim] = point
    return np.dot(from_origin, np.dot(matrix, to_origin))
|
python
|
{
"resource": ""
}
|
q23194
|
planar_matrix
|
train
|
def planar_matrix(offset=None,
                  theta=None,
                  point=None):
    """
    2D homogeonous transformation matrix

    Parameters
    ----------
    offset : (2,) float
      XY offset
    theta : float
      Rotation around Z in radians
    point : (2, ) float
      point to rotate around

    Returns
    ----------
    matrix : (3, 3) flat
      Homogenous 2D transformation matrix

    Raises
    ------
    ValueError
      If theta is not finite or offset is not length 2.
    """
    # fill in defaults for missing arguments
    if offset is None:
        offset = [0.0, 0.0]
    if theta is None:
        theta = 0.0
    offset = np.asanyarray(offset, dtype=np.float64)
    theta = float(theta)
    if not np.isfinite(theta):
        raise ValueError('theta must be finite angle!')
    if offset.shape != (2,):
        raise ValueError('offset must be length 2!')
    sin_t = np.sin(theta)
    cos_t = np.cos(theta)
    T = np.eye(3, dtype=np.float64)
    # rotation block; row layout matches the existing convention
    T[0, 0:2] = [cos_t, sin_t]
    T[1, 0:2] = [-sin_t, cos_t]
    T[0:2, 2] = offset
    if point is not None:
        # rotate about the requested point rather than the origin
        T = transform_around(matrix=T, point=point)
    return T
|
python
|
{
"resource": ""
}
|
q23195
|
planar_matrix_to_3D
|
train
|
def planar_matrix_to_3D(matrix_2D):
    """
    Given a 2D homogenous rotation matrix convert it to a 3D rotation
    matrix that is rotating around the Z axis

    Parameters
    ----------
    matrix_2D : (3, 3) float
      Homogenous 2D rotation matrix

    Returns
    ----------
    matrix_3D : (4, 4) float
      Homogenous 3D rotation matrix

    Raises
    ------
    ValueError
      If the input is not a (3, 3) matrix.
    """
    matrix_2D = np.asanyarray(matrix_2D, dtype=np.float64)
    if matrix_2D.shape != (3, 3):
        raise ValueError('Homogenous 2D transformation matrix required!')
    matrix_3D = np.eye(4)
    # copy the XY rotation block and translation; Z stays identity
    matrix_3D[0:2, 0:2] = matrix_2D[0:2, 0:2]
    matrix_3D[0:2, 3] = matrix_2D[0:2, 2]
    return matrix_3D
|
python
|
{
"resource": ""
}
|
q23196
|
transform_points
|
train
|
def transform_points(points,
                     matrix,
                     translate=True):
    """
    Returns points rotated by a transformation matrix.

    If points is (n,2), matrix must be (3,3)
    if points is (n,3), matrix must be (4,4)

    Parameters
    ----------
    points : (n, d) float
      Points where d is 2 or 3
    matrix : (3,3) or (4,4) float
      Homogenous rotation matrix
    translate : bool
      Apply translation from matrix or not

    Returns
    ----------
    transformed : (n,d) float
      Transformed points

    Raises
    ------
    ValueError
      If matrix and points have incompatible shapes.
    """
    points = np.asanyarray(points, dtype=np.float64)
    # nothing to transform
    if len(points) == 0:
        return points.copy()
    matrix = np.asanyarray(matrix, dtype=np.float64)
    if (len(points.shape) != 2 or
            (points.shape[1] + 1 != matrix.shape[1])):
        raise ValueError('matrix shape ({}) doesn\'t match points ({})'.format(
            matrix.shape,
            points.shape))
    # shortcut: an identity matrix means an unmodified copy
    if np.abs(matrix - np.eye(matrix.shape[0])).max() < 1e-8:
        return np.ascontiguousarray(points.copy())
    dimension = points.shape[1]
    # homogeneous coordinate: 1.0 applies translation, 0.0 skips it
    w = np.full(len(points), float(bool(translate)))
    homogeneous = np.column_stack((points, w))
    transformed = np.dot(matrix, homogeneous.T).T[:, :dimension]
    return np.ascontiguousarray(transformed)
|
python
|
{
"resource": ""
}
|
q23197
|
is_rigid
|
train
|
def is_rigid(matrix):
    """
    Check to make sure a homogeonous transformation matrix is
    a rigid body transform.

    Parameters
    -----------
    matrix : array-like
      Candidate transformation matrix

    Returns
    -----------
    check : bool
      True if matrix is a valid (4,4) rigid body transform
    """
    matrix = np.asanyarray(matrix, dtype=np.float64)
    # must be a homogeneous 3D transform
    if matrix.shape != (4, 4):
        return False
    # the bottom row must be [0, 0, 0, 1]
    if not np.allclose(matrix[-1], [0, 0, 0, 1]):
        return False
    # the rotation part must be orthogonal: R @ R.T == I
    rotation = matrix[:3, :3]
    return np.allclose(np.dot(rotation, rotation.T), np.eye(3))
|
python
|
{
"resource": ""
}
|
q23198
|
Arcball.setaxes
|
train
|
def setaxes(self, *axes):
    """Set axes to constrain rotations.

    Call with no arguments to remove the constraint axes;
    otherwise each argument is normalized and stored.
    """
    # `axes` is a varargs tuple so it can never be None: the old
    # `axes is None` test was always False, and calling setaxes()
    # with no arguments left a useless empty list behind instead
    # of clearing the constraint. Treat "no axes" as unconstrained.
    if not axes:
        self._axes = None
    else:
        self._axes = [unit_vector(axis) for axis in axes]
|
python
|
{
"resource": ""
}
|
q23199
|
Arcball.down
|
train
|
def down(self, point):
    """Set initial cursor window coordinates and pick constrain-axis.

    Parameters
    ----------
    point : sequence
        Cursor window coordinates at the start of a drag.
    """
    # map the window point onto the virtual trackball sphere
    self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
    # remember the orientation at the start of the drag
    self._qdown = self._qpre = self._qnow
    if self._constrain and self._axes is not None:
        # constrained mode: snap the drag start to the nearest axis arc
        self._axis = arcball_nearest_axis(self._vdown, self._axes)
        self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
    else:
        self._axis = None
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.