Dataset columns:
  _id              string (2 to 7 chars)
  title            string (1 to 88 chars)
  partition        string (3 classes)
  text             string (75 to 19.8k chars)
  language         string (1 class)
  meta_information dict
q23200
Arcball.next
train
def next(self, acceleration=0.0):
    """Continue rotation in direction of last drag."""
    q = quaternion_slerp(self._qpre, self._qnow,
                         2.0 + acceleration, False)
    self._qpre, self._qnow = self._qnow, q
python
{ "resource": "" }
q23201
validate_polygon
train
def validate_polygon(obj):
    """
    Make sure an input can be returned as a valid polygon.

    Parameters
    -------------
    obj : shapely.geometry.Polygon, str (wkb), or (n, 2) float
      Object which might be a polygon

    Returns
    ------------
    polygon : shapely.geometry.Polygon
      Valid polygon object

    Raises
    -------------
    ValueError
      If a valid finite-area polygon isn't available
    """
    if isinstance(obj, Polygon):
        polygon = obj
    elif util.is_shape(obj, (-1, 2)):
        polygon = Polygon(obj)
    elif util.is_string(obj):
        polygon = load_wkb(obj)
    else:
        raise ValueError('Input not a polygon!')

    if (not polygon.is_valid or
            polygon.area < tol.zero):
        raise ValueError('Polygon is zero-area or invalid!')

    return polygon
python
{ "resource": "" }
q23202
extrude_polygon
train
def extrude_polygon(polygon, height, **kwargs):
    """
    Extrude a 2D shapely polygon into a 3D mesh.

    Parameters
    ----------
    polygon : shapely.geometry.Polygon
      2D geometry to extrude
    height : float
      Distance to extrude polygon along Z
    **kwargs : passed to Trimesh

    Returns
    ----------
    mesh : trimesh.Trimesh
      Resulting extrusion as watertight body
    """
    vertices, faces = triangulate_polygon(polygon, **kwargs)
    mesh = extrude_triangulation(vertices=vertices,
                                 faces=faces,
                                 height=height,
                                 **kwargs)
    return mesh
python
{ "resource": "" }
q23203
extrude_triangulation
train
def extrude_triangulation(vertices, faces, height, **kwargs):
    """
    Turn a 2D triangulation into a watertight Trimesh.

    Parameters
    ----------
    vertices : (n, 2) float
      2D vertices
    faces : (m, 3) int
      Triangle indexes of vertices
    height : float
      Distance to extrude triangulation
    **kwargs : passed to Trimesh

    Returns
    ---------
    mesh : trimesh.Trimesh
      Mesh created from extrusion
    """
    vertices = np.asanyarray(vertices, dtype=np.float64)
    height = float(height)
    faces = np.asanyarray(faces, dtype=np.int64)

    if not util.is_shape(vertices, (-1, 2)):
        raise ValueError('Vertices must be (n,2)')
    if not util.is_shape(faces, (-1, 3)):
        raise ValueError('Faces must be (n,3)')
    if np.abs(height) < tol.merge:
        raise ValueError('Height must be nonzero!')

    # make sure triangulation winding is pointing up
    normal_test = normals(
        [util.stack_3D(vertices[faces[0]])])[0]
    normal_dot = np.dot(normal_test,
                        [0.0, 0.0, np.sign(height)])[0]

    # make sure the triangulation is aligned with the sign of
    # the height we've been passed
    if normal_dot < 0.0:
        faces = np.fliplr(faces)

    # stack the (n,3) faces into (3*n, 2) edges
    edges = faces_to_edges(faces)
    edges_sorted = np.sort(edges, axis=1)
    # edges which only occur once are on the boundary of the polygon
    # since the triangulation may have subdivided the boundary of the
    # shapely polygon, we need to find it again
    edges_unique = grouping.group_rows(edges_sorted, require_count=1)

    # (n, 2, 2) set of line segments (positions, not references)
    boundary = vertices[edges[edges_unique]]

    # we are creating two vertical triangles for every 2D line segment
    # on the boundary of the 2D triangulation
    vertical = np.tile(boundary.reshape((-1, 2)), 2).reshape((-1, 2))
    vertical = np.column_stack((vertical,
                                np.tile([0, height, 0, height],
                                        len(boundary))))
    vertical_faces = np.tile([3, 1, 2, 2, 1, 0],
                             (len(boundary), 1))
    vertical_faces += np.arange(len(boundary)).reshape((-1, 1)) * 4
    vertical_faces = vertical_faces.reshape((-1, 3))

    # stack the (n,2) vertices with zeros to make them (n, 3)
    vertices_3D = util.stack_3D(vertices)

    # a sequence of zero-indexed faces, which will then be appended
    # with offsets to create the final mesh
    faces_seq = [faces[:, ::-1],
                 faces.copy(),
                 vertical_faces]
    vertices_seq = [vertices_3D,
                    vertices_3D.copy() + [0.0, 0, height],
                    vertical]

    mesh = Trimesh(*util.append_faces(vertices_seq, faces_seq),
                   process=True,
                   **kwargs)
    assert mesh.volume > 0.0
    return mesh
python
{ "resource": "" }
q23204
_polygon_to_kwargs
train
def _polygon_to_kwargs(polygon):
    """
    Given a shapely polygon generate the data to pass to
    the triangle mesh generator.

    Parameters
    ---------
    polygon : shapely.geometry.Polygon
      Input geometry

    Returns
    --------
    result : dict
      Has keys: vertices, segments, holes
    """
    if not polygon.is_valid:
        raise ValueError('invalid shapely polygon passed!')

    def round_trip(start, length):
        """
        Given a start index and length, create a series of (n, 2)
        edges which create a closed traversal.

        Examples
        ---------
        start, length = 0, 3
        returns: [(0,1), (1,2), (2,0)]
        """
        tiled = np.tile(np.arange(start,
                                  start + length).reshape((-1, 1)), 2)
        tiled = tiled.reshape(-1)[1:-1].reshape((-1, 2))
        tiled = np.vstack((tiled,
                           [tiled[-1][-1], tiled[0][0]]))
        return tiled

    def add_boundary(boundary, start):
        # coords is an (n, 2) ordered list of points on the polygon
        # boundary; the first and last points are the same, and there
        # are no guarantees on points not being duplicated (which will
        # later cause meshpy/triangle to choke)
        coords = np.array(boundary.coords)
        # find indices of points which occur only once,
        # and sort them to maintain order
        unique = np.sort(grouping.unique_rows(coords)[0])
        cleaned = coords[unique]

        vertices.append(cleaned)
        facets.append(round_trip(start, len(cleaned)))

        # holes require points inside the region of the hole, which we
        # find by creating a polygon from the cleaned boundary region,
        # and then using a representative point. You could do things
        # like take the mean of the points, but this is more robust
        # (to things like concavity), if slower.
        test = Polygon(cleaned)
        holes.append(np.array(test.representative_point().coords)[0])

        return len(cleaned)

    # sequence of (n,2) points in space
    vertices = collections.deque()
    # sequence of (n,2) indices of vertices
    facets = collections.deque()
    # list of (2) vertices in interior of hole regions
    holes = collections.deque()

    start = add_boundary(polygon.exterior, 0)
    for interior in polygon.interiors:
        try:
            start += add_boundary(interior, start)
        except BaseException:
            log.warn('invalid interior, continuing')
            continue

    # create clean (n,2) float array of vertices
    # and (m, 2) int array of facets
    # by stacking the sequence of (p,2) arrays
    vertices = np.vstack(vertices)
    facets = np.vstack(facets).tolist()
    # shapely polygons can include a Z component
    # strip it out for the triangulation
    if vertices.shape[1] == 3:
        vertices = vertices[:, :2]

    result = {'vertices': vertices,
              'segments': facets}
    # holes in meshpy lingo are a (h, 2) list of (x,y) points
    # which are inside the region of the hole
    # we added a hole for the exterior, which we slice away here
    holes = np.array(holes)[1:]
    if len(holes) > 0:
        result['holes'] = holes
    return result
python
{ "resource": "" }
q23205
icosahedron
train
def icosahedron():
    """
    Create an icosahedron, a 20 faced polyhedron.

    Returns
    -------------
    ico : trimesh.Trimesh
      Icosahedron centered at the origin.
    """
    t = (1.0 + 5.0**.5) / 2.0
    vertices = [-1, t, 0, 1, t, 0, -1, -t, 0, 1, -t, 0,
                0, -1, t, 0, 1, t, 0, -1, -t, 0, 1, -t,
                t, 0, -1, t, 0, 1, -t, 0, -1, -t, 0, 1]
    faces = [0, 11, 5, 0, 5, 1, 0, 1, 7, 0, 7, 10, 0, 10, 11,
             1, 5, 9, 5, 11, 4, 11, 10, 2, 10, 7, 6, 7, 1, 8,
             3, 9, 4, 3, 4, 2, 3, 2, 6, 3, 6, 8, 3, 8, 9,
             4, 9, 5, 2, 4, 11, 6, 2, 10, 8, 6, 7, 9, 8, 1]
    # scale vertices so each vertex radius is 1.0
    vertices = np.reshape(vertices, (-1, 3)) / np.sqrt(2.0 + t)
    faces = np.reshape(faces, (-1, 3))
    mesh = Trimesh(vertices=vertices,
                   faces=faces,
                   process=False)
    return mesh
python
{ "resource": "" }
q23206
icosphere
train
def icosphere(subdivisions=3, radius=1.0, color=None):
    """
    Create an icosphere centered at the origin.

    Parameters
    ----------
    subdivisions : int
      How many times to subdivide the mesh.
      Note that the number of faces will grow as function of
      4 ** subdivisions, so you probably want to keep this under ~5
    radius : float
      Desired radius of sphere
    color : (3,) float or uint8
      Desired color of sphere

    Returns
    ---------
    ico : trimesh.Trimesh
      Meshed sphere
    """
    def refine_spherical():
        vectors = ico.vertices
        scalar = (vectors ** 2).sum(axis=1)**.5
        unit = vectors / scalar.reshape((-1, 1))
        offset = radius - scalar
        ico.vertices += unit * offset.reshape((-1, 1))

    ico = icosahedron()
    ico._validate = False
    for j in range(subdivisions):
        ico = ico.subdivide()
        refine_spherical()
    ico._validate = True
    if color is not None:
        ico.visual.face_colors = color
    return ico
python
{ "resource": "" }
q23207
capsule
train
def capsule(height=1.0, radius=1.0, count=[32, 32]):
    """
    Create a mesh of a capsule, or a cylinder with hemispheric ends.

    Parameters
    ----------
    height : float
      Center to center distance of two spheres
    radius : float
      Radius of the cylinder and hemispheres
    count : (2,) int
      Number of sections on latitude and longitude

    Returns
    ----------
    capsule : trimesh.Trimesh
      Capsule geometry with:
        - cylinder axis along Z
        - one hemisphere centered at the origin
        - other hemisphere centered along the Z axis at height
    """
    height = float(height)
    radius = float(radius)
    count = np.array(count, dtype=np.int64)
    count += np.mod(count, 2)

    # create a theta where there is a double band around the equator
    # so that we can offset the top and bottom of a sphere to
    # get a nicely meshed capsule
    theta = np.linspace(0, np.pi, count[0])
    center = np.clip(np.arctan(tol.merge / radius),
                     tol.merge, np.inf)
    offset = np.array([-center, center]) + (np.pi / 2)
    theta = np.insert(theta,
                      int(len(theta) / 2),
                      offset)

    capsule = uv_sphere(radius=radius,
                        count=count,
                        theta=theta)
    top = capsule.vertices[:, 2] > tol.zero
    capsule.vertices[top] += [0, 0, height]

    return capsule
python
{ "resource": "" }
q23208
cylinder
train
def cylinder(radius=1.0,
             height=1.0,
             sections=32,
             segment=None,
             transform=None,
             **kwargs):
    """
    Create a mesh of a cylinder along Z centered at the origin.

    Parameters
    ----------
    radius : float
      The radius of the cylinder
    height : float
      The height of the cylinder
    sections : int
      How many pie wedges should the cylinder have
    segment : (2, 3) float
      Endpoints of axis, overrides transform and height
    transform : (4, 4) float
      Transform to apply
    **kwargs : passed to Trimesh to create cylinder

    Returns
    ----------
    cylinder : trimesh.Trimesh
      Resulting mesh of a cylinder
    """
    if segment is not None:
        segment = np.asanyarray(segment, dtype=np.float64)
        if segment.shape != (2, 3):
            raise ValueError('segment must be 2 3D points!')
        vector = segment[1] - segment[0]
        # override height with segment length
        height = np.linalg.norm(vector)
        # point in middle of line
        midpoint = segment[0] + (vector * 0.5)
        # align Z with our desired direction
        rotation = align_vectors([0, 0, 1], vector)
        # translate to midpoint of segment
        translation = transformations.translation_matrix(midpoint)
        # compound the rotation and translation
        transform = np.dot(translation, rotation)

    # create a 2D pie out of wedges
    theta = np.linspace(0, np.pi * 2, sections)
    vertices = np.column_stack((np.sin(theta),
                                np.cos(theta))) * radius
    # the single vertex at the center of the circle
    # we're overwriting the duplicated start/end vertex
    vertices[0] = [0, 0]

    # whangle indexes into a triangulation of the pie wedges
    index = np.arange(1, len(vertices) + 1).reshape((-1, 1))
    index[-1] = 1
    faces = np.tile(index, (1, 2)).reshape(-1)[1:-1].reshape((-1, 2))
    faces = np.column_stack((np.zeros(len(faces), dtype=np.int64),
                             faces))

    # extrude the 2D triangulation into a Trimesh object
    cylinder = extrude_triangulation(vertices=vertices,
                                     faces=faces,
                                     height=height,
                                     **kwargs)
    # the extrusion was along +Z, so move the cylinder
    # center of mass back to the origin
    cylinder.vertices[:, 2] -= height * .5

    if transform is not None:
        # apply a transform here before any cache stuff is generated
        # and would have to be dumped after the transform is applied
        cylinder.apply_transform(transform)

    return cylinder
python
{ "resource": "" }
q23209
annulus
train
def annulus(r_min=1.0,
            r_max=2.0,
            height=1.0,
            sections=32,
            transform=None,
            **kwargs):
    """
    Create a mesh of an annular cylinder along Z, centered at the origin.

    Parameters
    ----------
    r_min : float
      The inner radius of the annular cylinder
    r_max : float
      The outer radius of the annular cylinder
    height : float
      The height of the annular cylinder
    sections : int
      How many pie wedges should the annular cylinder have
    **kwargs : passed to Trimesh to create annulus

    Returns
    ----------
    annulus : trimesh.Trimesh
      Mesh of annular cylinder
    """
    r_min = abs(float(r_min))
    r_max = abs(float(r_max))
    height = float(height)
    sections = int(sections)

    # if center radius is zero this is a cylinder
    if r_min < tol.merge:
        return cylinder(radius=r_max,
                        height=height,
                        sections=sections,
                        transform=transform)

    # create a 2D pie out of wedges
    theta = np.linspace(0, np.pi * 2, sections)[:-1]
    unit = np.column_stack((np.sin(theta),
                            np.cos(theta)))
    assert len(unit) == sections - 1
    vertices = np.vstack((unit * r_min,
                          unit * r_max))

    # one flattened triangulated quad covering one slice
    face = np.array([0, sections - 1, 1,
                     1, sections - 1, sections])
    # tile one quad into lots of quads
    faces = (np.tile(face, (sections - 1, 1)) +
             np.arange(sections - 1).reshape((-1, 1))).reshape((-1, 3))
    # stitch the last and first triangles with correct winding
    faces[-1] = [sections - 1, 0, sections - 2]

    # extrude the 2D profile into a mesh
    annulus = extrude_triangulation(vertices=vertices,
                                    faces=faces,
                                    height=height,
                                    **kwargs)
    # move the annulus so the centroid is at the origin
    annulus.vertices[:, 2] -= height * .5

    if transform is not None:
        # apply a transform here before any cache stuff is generated
        # and would have to be dumped after the transform is applied
        annulus.apply_transform(transform)

    return annulus
python
{ "resource": "" }
q23210
random_soup
train
def random_soup(face_count=100):
    """
    Return random triangles as a Trimesh.

    Parameters
    -----------
    face_count : int
      Number of faces desired in mesh

    Returns
    -----------
    soup : trimesh.Trimesh
      Geometry with face_count random faces
    """
    vertices = np.random.random((face_count * 3, 3)) - 0.5
    faces = np.arange(face_count * 3).reshape((-1, 3))
    soup = Trimesh(vertices=vertices, faces=faces)
    return soup
python
{ "resource": "" }
q23211
axis
train
def axis(origin_size=0.04,
         transform=None,
         origin_color=None,
         axis_radius=None,
         axis_length=None):
    """
    Return an XYZ axis marker as a Trimesh, which represents position
    and orientation. If you set the origin size the other parameters
    will be set relative to it.

    Parameters
    ----------
    transform : (4, 4) float
      Transformation matrix
    origin_size : float
      Radius of sphere that represents the origin
    origin_color : (3,) float or uint8
      Color of the origin
    axis_radius : float
      Radius of cylinder that represents x, y, z axis
    axis_length : float
      Length of cylinder that represents x, y, z axis

    Returns
    -------
    marker : trimesh.Trimesh
      Mesh geometry of axis indicators
    """
    # the size of the ball representing the origin
    origin_size = float(origin_size)

    # set the transform and use origin-relative
    # sizes for other parameters if not specified
    if transform is None:
        transform = np.eye(4)
    if origin_color is None:
        origin_color = [255, 255, 255, 255]
    if axis_radius is None:
        axis_radius = origin_size / 5.0
    if axis_length is None:
        axis_length = origin_size * 10.0

    # generate a ball for the origin
    axis_origin = uv_sphere(radius=origin_size,
                            count=[10, 10])
    axis_origin.apply_transform(transform)

    # apply color to the origin ball
    axis_origin.visual.face_colors = origin_color

    # create the cylinder for the z-axis
    translation = transformations.translation_matrix(
        [0, 0, axis_length / 2])
    z_axis = cylinder(
        radius=axis_radius,
        height=axis_length,
        transform=transform.dot(translation))
    # XYZ->RGB, Z is blue
    z_axis.visual.face_colors = [0, 0, 255]

    # create the cylinder for the y-axis
    translation = transformations.translation_matrix(
        [0, 0, axis_length / 2])
    rotation = transformations.rotation_matrix(np.radians(-90),
                                               [1, 0, 0])
    y_axis = cylinder(
        radius=axis_radius,
        height=axis_length,
        transform=transform.dot(rotation).dot(translation))
    # XYZ->RGB, Y is green
    y_axis.visual.face_colors = [0, 255, 0]

    # create the cylinder for the x-axis
    translation = transformations.translation_matrix(
        [0, 0, axis_length / 2])
    rotation = transformations.rotation_matrix(np.radians(90),
                                               [0, 1, 0])
    x_axis = cylinder(
        radius=axis_radius,
        height=axis_length,
        transform=transform.dot(rotation).dot(translation))
    # XYZ->RGB, X is red
    x_axis.visual.face_colors = [255, 0, 0]

    # append the sphere and three cylinders
    marker = util.concatenate([axis_origin,
                               x_axis,
                               y_axis,
                               z_axis])
    return marker
python
{ "resource": "" }
q23212
camera_marker
train
def camera_marker(camera,
                  marker_height=0.4,
                  origin_size=None):
    """
    Create a visual marker for a camera object, including an axis and FOV.

    Parameters
    ---------------
    camera : trimesh.scene.Camera
      Camera object with FOV and transform defined
    marker_height : float
      How far along the camera Z should FOV indicators be
    origin_size : float
      Sphere radius of the origin (default: marker_height / 10.0)

    Returns
    ------------
    meshes : list
      Contains Trimesh and Path3D objects which can be visualized
    """
    camera_transform = camera.transform
    if camera_transform is None:
        camera_transform = np.eye(4)

    # append the visualizations to an array
    meshes = [axis(origin_size=marker_height / 10.0)]
    meshes[0].apply_transform(camera_transform)

    try:
        # path is a soft dependency
        from .path.exchange.load import load_path
    except ImportError:
        # they probably don't have shapely installed
        log.warning('unable to create FOV visualization!',
                    exc_info=True)
        return meshes

    # create sane origin size from marker height
    if origin_size is None:
        origin_size = marker_height / 10.0

    # calculate vertices from camera FOV angles
    x = marker_height * np.tan(np.deg2rad(camera.fov[0]) / 2.0)
    y = marker_height * np.tan(np.deg2rad(camera.fov[1]) / 2.0)
    z = marker_height

    # combine the points into the vertices of an FOV visualization
    points = np.array(
        [(0, 0, 0),
         (-x, -y, z),
         (x, -y, z),
         (x, y, z),
         (-x, y, z)],
        dtype=float)

    # create line segments for the FOV visualization
    # a segment from the origin to each bound of the FOV
    segments = np.column_stack(
        (np.zeros_like(points), points)).reshape(
        (-1, 3))

    # add a loop for the outside of the FOV then reshape
    # the whole thing into multiple line segments
    segments = np.vstack((segments,
                          points[[1, 2,
                                  2, 3,
                                  3, 4,
                                  4, 1]])).reshape((-1, 2, 3))

    # add a single Path3D object for all line segments
    meshes.append(load_path(segments))
    meshes[-1].apply_transform(camera_transform)

    return meshes
python
{ "resource": "" }
q23213
convex_hull
train
def convex_hull(obj, qhull_options='QbB Pp QJn'):
    """
    Get a new Trimesh object representing the convex hull of the
    current mesh, with proper normals and watertight.
    Requires scipy >= 0.12.

    Arguments
    --------
    obj : Trimesh, or (n,3) float
      Mesh or cartesian points

    Returns
    --------
    convex : Trimesh
      Mesh of convex hull
    """
    from .base import Trimesh

    if isinstance(obj, Trimesh):
        points = obj.vertices.view(np.ndarray)
    else:
        # will remove subclassing
        points = np.asarray(obj, dtype=np.float64)
        if not util.is_shape(points, (-1, 3)):
            raise ValueError('Object must be Trimesh or (n,3) points!')

    hull = spatial.ConvexHull(points,
                              qhull_options=qhull_options)

    # hull object doesn't remove unreferenced vertices
    # create a mask to re-index faces for only referenced vertices
    vid = np.sort(hull.vertices)
    mask = np.zeros(len(hull.points), dtype=np.int64)
    mask[vid] = np.arange(len(vid))
    # remove unreferenced vertices here
    faces = mask[hull.simplices].copy()
    # rescale vertices back to original size
    vertices = hull.points[vid].copy()

    # qhull returns faces with random winding
    # calculate the returned normal of each face
    crosses = triangles.cross(vertices[faces])
    # qhull can return zero magnitude faces
    normals, valid = util.unitize(crosses, check_valid=True)
    # remove zero magnitude faces
    faces = faces[valid]
    crosses = crosses[valid]

    # each triangle area and mean center
    triangles_area = triangles.area(crosses=crosses, sum=False)
    triangles_center = vertices[faces].mean(axis=1)

    # since the convex hull is (hopefully) convex, the vector from
    # the centroid to the center of each face
    # should have a positive dot product with the normal of that face
    # if it doesn't it is probably backwards
    # note that this sometimes gets screwed up by precision issues
    centroid = np.average(triangles_center,
                          weights=triangles_area,
                          axis=0)
    # a vector from the centroid to a point on each face
    test_vector = triangles_center - centroid
    # check the projection against face normals
    backwards = util.diagonal_dot(normals, test_vector) < 0.0

    # flip the winding outward facing
    faces[backwards] = np.fliplr(faces[backwards])
    # flip the normal
    normals[backwards] *= -1.0

    # save the work we did to the cache so it doesn't have to be recomputed
    initial_cache = {'triangles_cross': crosses,
                     'triangles_center': triangles_center,
                     'area_faces': triangles_area,
                     'centroid': centroid}

    # create the Trimesh object for the convex hull
    convex = Trimesh(vertices=vertices,
                     faces=faces,
                     face_normals=normals,
                     initial_cache=initial_cache,
                     process=True,
                     validate=False)

    # we did the gross case above, but sometimes precision issues
    # leave some faces backwards anyway
    # this call will exit early if the winding is consistent
    # and if not will fix it by traversing the adjacency graph
    convex.fix_normals(multibody=False)

    # sometimes the QbB option will cause precision issues
    # so try the hull again without it and
    # check for qhull_options is None to avoid infinite recursion
    if (qhull_options is not None and
            not convex.is_winding_consistent):
        return convex_hull(convex, qhull_options=None)

    return convex
python
{ "resource": "" }
q23214
adjacency_projections
train
def adjacency_projections(mesh):
    """
    Test if a mesh is convex by projecting the vertices of
    a triangle onto the normal of its adjacent face.

    Parameters
    ----------
    mesh : Trimesh
      Input geometry

    Returns
    ----------
    projection : (len(mesh.face_adjacency),) float
      Distance of projection of adjacent vertex onto plane
    """
    # normals and origins from the first column of face adjacency
    normals = mesh.face_normals[mesh.face_adjacency[:, 0]]
    # one of the vertices on the shared edge
    origins = mesh.vertices[mesh.face_adjacency_edges[:, 0]]

    # faces from the second column of face adjacency
    vid_other = mesh.face_adjacency_unshared[:, 1]
    vector_other = mesh.vertices[vid_other] - origins

    # get the projection with a dot product
    dots = util.diagonal_dot(vector_other, normals)

    return dots
python
{ "resource": "" }
q23215
is_convex
train
def is_convex(mesh):
    """
    Check if a mesh is convex.

    Parameters
    -----------
    mesh : Trimesh
      Input geometry

    Returns
    -----------
    convex : bool
      Was passed mesh convex or not
    """
    # don't consider zero-area faces
    nonzero = mesh.area_faces > tol.merge
    # adjacencies with two nonzero faces
    adj_ok = nonzero[mesh.face_adjacency].all(axis=1)
    # make threshold of convexity scale-relative
    threshold = tol.planar * mesh.scale

    # if projections of vertex onto plane of adjacent
    # face is negative, it means the face pair is locally
    # convex, and if that is true for all faces the mesh is convex
    convex = bool(mesh.face_adjacency_projections[adj_ok].max() < threshold)

    return convex
python
{ "resource": "" }
q23216
hull_points
train
def hull_points(obj, qhull_options='QbB Pp'):
    """
    Try to extract a convex set of points from multiple input formats.

    Parameters
    ---------
    obj : Trimesh object
          (n, d) points
          (m,) Trimesh objects

    Returns
    --------
    points : (o, d) convex set of points
    """
    if hasattr(obj, 'convex_hull'):
        return obj.convex_hull.vertices

    initial = np.asanyarray(obj, dtype=np.float64)
    if len(initial.shape) != 2:
        raise ValueError('points must be (n, dimension)!')

    hull = spatial.ConvexHull(initial, qhull_options=qhull_options)
    points = hull.points[hull.vertices]

    return points
python
{ "resource": "" }
q23217
Entity.closed
train
def closed(self):
    """
    If the first point is the same as the end point
    the entity is closed.
    """
    closed = (len(self.points) > 2 and
              self.points[0] == self.points[-1])
    return closed
python
{ "resource": "" }
q23218
Entity.length
train
def length(self, vertices):
    """
    Return the total length of the entity.

    Returns
    ---------
    length : float
      Total length of entity
    """
    length = ((np.diff(self.discrete(vertices),
                       axis=0)**2).sum(axis=1)**.5).sum()
    return length
python
{ "resource": "" }
q23219
Text.plot
train
def plot(self, vertices, show=False):
    """
    Plot the text using matplotlib.

    Parameters
    --------------
    vertices : (n, 2) float
      Vertices in space
    show : bool
      If True, call plt.show()
    """
    if vertices.shape[1] != 2:
        raise ValueError('only for 2D points!')

    import matplotlib.pyplot as plt

    # get rotation angle in degrees
    angle = np.degrees(self.angle(vertices))

    plt.text(*vertices[self.origin],
             s=self.text,
             rotation=angle,
             ha=self.align[0],
             va=self.align[1],
             size=18)

    if show:
        plt.show()
python
{ "resource": "" }
q23220
Text.angle
train
def angle(self, vertices):
    """
    If Text is 2D, get the rotation angle in radians.

    Parameters
    -----------
    vertices : (n, 2) float
      Vertices in space referenced by self.points

    Returns
    ---------
    angle : float
      Rotation angle in radians
    """
    if vertices.shape[1] != 2:
        raise ValueError('angle only valid for 2D points!')

    # get the vector from origin
    direction = vertices[self.vector] - vertices[self.origin]
    # get the rotation angle in radians
    angle = np.arctan2(*direction[::-1])

    return angle
python
{ "resource": "" }
q23221
Line.discrete
train
def discrete(self, vertices, scale=1.0):
    """
    Discretize into a world-space path.

    Parameters
    ------------
    vertices : (n, dimension) float
      Points in space
    scale : float
      Size of overall scene for numerical comparisons

    Returns
    -------------
    discrete : (m, dimension) float
      Path in space composed of line segments
    """
    discrete = self._orient(vertices[self.points])
    return discrete
python
{ "resource": "" }
q23222
Line.is_valid
train
def is_valid(self):
    """
    Is the current entity valid.

    Returns
    -----------
    valid : bool
      Is the current entity well formed
    """
    valid = np.any((self.points - self.points[0]) != 0)
    return valid
python
{ "resource": "" }
q23223
Line.explode
train
def explode(self):
    """
    If the current Line entity consists of multiple lines,
    break it up into n Line entities.

    Returns
    ----------
    exploded : (n,) Line entities
    """
    points = np.column_stack((
        self.points,
        self.points)).ravel()[1:-1].reshape((-1, 2))
    exploded = [Line(i) for i in points]
    return exploded
python
{ "resource": "" }
q23224
Arc.discrete
train
def discrete(self, vertices, scale=1.0):
    """
    Discretize the arc entity into line sections.

    Parameters
    ------------
    vertices : (n, dimension) float
      Points in space
    scale : float
      Size of overall scene for numerical comparisons

    Returns
    -------------
    discrete : (m, dimension) float
      Linear path in space
    """
    discrete = discretize_arc(vertices[self.points],
                              close=self.closed,
                              scale=scale)
    return self._orient(discrete)
python
{ "resource": "" }
q23225
Arc.bounds
train
def bounds(self, vertices):
    """
    Return the AABB of the arc entity.

    Parameters
    -----------
    vertices : (n, dimension) float
      Vertices in space

    Returns
    -----------
    bounds : (2, dimension) float
      (min, max) coordinate of AABB
    """
    if util.is_shape(vertices, (-1, 2)) and self.closed:
        # if we have a closed arc (a circle), we can return the
        # actual bounds; this only works in two dimensions,
        # otherwise this would return the AABB of a sphere
        info = self.center(vertices)
        bounds = np.array([info['center'] - info['radius'],
                           info['center'] + info['radius']],
                          dtype=np.float64)
    else:
        # since the AABB of a partial arc is hard, approximate
        # the bounds by just looking at the discrete values
        discrete = self.discrete(vertices)
        bounds = np.array([discrete.min(axis=0),
                           discrete.max(axis=0)],
                          dtype=np.float64)
    return bounds
python
{ "resource": "" }
q23226
Bezier.discrete
train
def discrete(self, vertices, scale=1.0, count=None):
    """
    Discretize the Bezier curve.

    Parameters
    -------------
    vertices : (n, 2) or (n, 3) float
      Points in space
    scale : float
      Scale of overall drawings (for precision)
    count : int
      Number of segments to return

    Returns
    -------------
    discrete : (m, 2) or (m, 3) float
      Curve as line segments
    """
    discrete = discretize_bezier(vertices[self.points],
                                 count=count,
                                 scale=scale)
    return self._orient(discrete)
python
{ "resource": "" }
q23227
BSpline.discrete
train
def discrete(self, vertices, count=None, scale=1.0):
    """
    Discretize the B-Spline curve.

    Parameters
    -------------
    vertices : (n, 2) or (n, 3) float
      Points in space
    scale : float
      Scale of overall drawings (for precision)
    count : int
      Number of segments to return

    Returns
    -------------
    discrete : (m, 2) or (m, 3) float
      Curve as line segments
    """
    discrete = discretize_bspline(
        control=vertices[self.points],
        knots=self.knots,
        count=count,
        scale=scale)
    return self._orient(discrete)
python
{ "resource": "" }
q23228
BSpline.to_dict
train
def to_dict(self):
    """
    Returns a dictionary with all of the information
    about the entity.
    """
    return {'type': self.__class__.__name__,
            'points': self.points.tolist(),
            'knots': self.knots.tolist(),
            'closed': self.closed}
python
{ "resource": "" }
q23229
print_element
train
def print_element(element):
    """
    Pretty-print an lxml.etree element.

    Parameters
    ------------
    element : etree element
    """
    pretty = etree.tostring(
        element, pretty_print=True).decode('utf-8')
    print(pretty)
    return pretty
python
{ "resource": "" }
q23230
merge_vertices
train
def merge_vertices(mesh, digits=None, textured=True, uv_digits=4):
    """
    Removes duplicate vertices based on integer hashes of each row.

    Parameters
    -------------
    mesh : Trimesh object
      Mesh to merge vertices on
    digits : int
      How many digits to consider for vertices.
      If not specified uses tol.merge
    textured : bool
      If True, for textured meshes only merge vertices
      with identical positions AND UV coordinates.
      No effect on untextured meshes
    uv_digits : int
      Number of digits to consider for UV coordinates
    """
    if not isinstance(digits, int):
        digits = util.decimal_to_digits(tol.merge)

    # UV texture visuals require us to update the
    # vertices and normals differently
    if (textured and
            mesh.visual.defined and
            mesh.visual.kind == 'texture' and
            mesh.visual.uv is not None):
        # get an array with vertices and UV coordinates
        # converted to integers at requested precision
        stacked = np.column_stack((
            mesh.vertices * (10 ** digits),
            mesh.visual.uv * (10 ** uv_digits))).round().astype(np.int64)
        # merge vertices with identical positions AND UVs:
        # we don't merge vertices just based on position
        # because that can corrupt textures at seams
        unique, inverse = unique_rows(stacked)
        mesh.update_vertices(unique, inverse)
        # now smooth out the vertex normals at the duplicate vertices;
        # for now we just use the first vertex's normal in a duplicate
        # group, although averaging would be better (but slower)
        unique, inverse = unique_rows(mesh.vertices, digits=digits)
        try:
            mesh.vertex_normals = mesh.vertex_normals[unique[inverse]]
        except BaseException:
            pass
    # in normal usage, just merge vertices that are close
    else:
        # if we have a ton of unreferenced vertices it will
        # make the unique_rows call super slow so cull first
        if hasattr(mesh, 'faces') and len(mesh.faces) > 0:
            referenced = np.zeros(len(mesh.vertices), dtype=bool)
            referenced[mesh.faces] = True
        else:
            # this is used for PointCloud objects
            referenced = np.ones(len(mesh.vertices), dtype=bool)

        # check unique rows of referenced vertices
        u, i = unique_rows(mesh.vertices[referenced], digits=digits)
        # construct an inverse using the subset
        inverse = np.zeros(len(mesh.vertices), dtype=np.int64)
        inverse[referenced] = i
        # get the vertex mask
        mask = np.nonzero(referenced)[0][u]
        # run the update
        mesh.update_vertices(mask=mask, inverse=inverse)
python
{ "resource": "" }
q23231
group
train
def group(values, min_len=0, max_len=np.inf):
    """
    Return the indices of values that are identical.

    Parameters
    ----------
    values : 1D array
    min_len : int
      The shortest group allowed.
      All groups will have len >= min_len
    max_len : int
      The longest group allowed.
      All groups will have len <= max_len

    Returns
    ----------
    groups : sequence of indices to form groups
      IE [0,1,0,1] returns [[0,2], [1,3]]
    """
    original = np.asanyarray(values)

    # save the sorted order and then apply it
    order = original.argsort()
    values = original[order]

    # find the indexes which are duplicates
    if values.dtype.kind == 'f':
        # for floats in a sorted array, neighbors are not duplicates
        # if the difference between them is greater than approximate zero
        nondupe = np.greater(np.abs(np.diff(values)), tol.zero)
    else:
        # for ints and strings we can check exact non-equality
        # for all other types this will only work if they defined
        # an __eq__
        nondupe = values[1:] != values[:-1]

    dupe_idx = np.append(0, np.nonzero(nondupe)[0] + 1)
    dupe_len = np.diff(np.concatenate((dupe_idx, [len(values)])))
    dupe_ok = np.logical_and(np.greater_equal(dupe_len, min_len),
                             np.less_equal(dupe_len, max_len))
    groups = [order[i:(i + j)] for i, j in
              zip(dupe_idx[dupe_ok], dupe_len[dupe_ok])]
    groups = np.array(groups)

    return groups
python
{ "resource": "" }
q23232
hashable_rows
train
def hashable_rows(data, digits=None):
    """
    We turn our array into integers based on the precision
    given by digits and then put them in a hashable format.

    Parameters
    ---------
    data : (n, m) array
      Input data
    digits : int or None
      How many digits to add to hash if data is floating point.
      If None, tol.merge will be used

    Returns
    ---------
    hashable : (n,) array
      Custom data type which can be sorted
      or used as hash keys
    """
    # if there is no data return immediately
    if len(data) == 0:
        return np.array([])

    # get array as integer to precision we care about
    as_int = float_to_int(data, digits=digits)

    # if it is flat integers already, return
    if len(as_int.shape) == 1:
        return as_int

    # if array is 2D and smallish, we can try bitbanging
    # this is significantly faster than the custom dtype
    if len(as_int.shape) == 2 and as_int.shape[1] <= 4:
        # time for some righteous bitbanging
        # can we pack the whole row into a single 64 bit integer
        precision = int(np.floor(64 / as_int.shape[1]))
        # if the max value is less than precision we can do this
        if np.abs(as_int).max() < 2**(precision - 1):
            # the resulting package
            hashable = np.zeros(len(as_int), dtype=np.int64)
            # loop through each column and bitwise xor to combine
            # make sure as_int is int64 otherwise bit offset won't work
            for offset, column in enumerate(as_int.astype(np.int64).T):
                # will modify hashable in place
                np.bitwise_xor(hashable,
                               column << (offset * precision),
                               out=hashable)
            return hashable

    # reshape array into magical data type that is weird but hashable
    dtype = np.dtype((np.void, as_int.dtype.itemsize * as_int.shape[1]))
    # make sure result is contiguous and flat
    hashable = np.ascontiguousarray(as_int).view(dtype).reshape(-1)

    return hashable
python
{ "resource": "" }
q23233
unique_ordered
train
def unique_ordered(data):
    """
    Returns the same as np.unique, but ordered as per the
    first occurrence of the unique value in data.

    Examples
    ---------
    In [1]: a = [0, 3, 3, 4, 1, 3, 0, 3, 2, 1]

    In [2]: np.unique(a)
    Out[2]: array([0, 1, 2, 3, 4])

    In [3]: trimesh.grouping.unique_ordered(a)
    Out[3]: array([0, 3, 4, 1, 2])
    """
    data = np.asanyarray(data)
    order = np.sort(np.unique(data, return_index=True)[1])
    result = data[order]
    return result
python
{ "resource": "" }
q23234
unique_bincount
train
def unique_bincount(values,
                    minlength,
                    return_inverse=True):
    """
    For arrays of integers find unique values using bin counting.
    Roughly 10x faster for correct input than np.unique.

    Parameters
    --------------
    values : (n,) int
      Values to find unique members of
    minlength : int
      Maximum value that will occur in values (values.max())
    return_inverse : bool
      If True, return an inverse such that unique[inverse] == values

    Returns
    ------------
    unique : (m,) int
      Unique values in original array
    inverse : (n,) int
      An array such that unique[inverse] == values.
      Only returned if return_inverse is True
    """
    values = np.asanyarray(values)
    if len(values.shape) != 1 or values.dtype.kind != 'i':
        raise ValueError('input must be 1D integers!')

    try:
        # count the number of occurrences of each value
        counts = np.bincount(values, minlength=minlength)
    except TypeError:
        # casting failed on 32 bit windows
        log.error('casting failed!', exc_info=True)
        # fall back to numpy unique
        return np.unique(values, return_inverse=return_inverse)

    # which bins are occupied at all
    # counts are integers so this works
    unique_bin = counts.astype(bool)

    # which values are unique
    # indexes correspond to original values
    unique = np.where(unique_bin)[0]
    if return_inverse:
        # find the inverse to reconstruct original
        inverse = (np.cumsum(unique_bin) - 1)[values]
        return unique, inverse

    return unique
python
{ "resource": "" }
q23235
merge_runs
train
def merge_runs(data, digits=None):
    """
    Merge duplicate sequential values. This differs from unique_ordered
    in that values can occur in multiple places in the sequence, but
    only consecutive repeats are removed.

    Parameters
    -----------
    data : (n,) float or int

    Returns
    --------
    merged : (m,) float or int

    Examples
    ---------
    In [1]: a
    Out[1]:
    array([-1, -1, -1,  0,  0,  1,  1,  2,  0,
            3,  3,  4,  4,  5,  5,  6,  6,  7,
            7,  8,  8,  9,  9,  9])

    In [2]: trimesh.grouping.merge_runs(a)
    Out[2]: array([-1,  0,  1,  2,  0,  3,  4,  5,  6,  7,  8,  9])
    """
    data = np.asanyarray(data)
    mask = np.abs(np.diff(data)) > tol.merge
    mask = np.concatenate((np.array([True]), mask))

    return data[mask]
python
{ "resource": "" }
q23236
unique_float
train
def unique_float(data,
                 return_index=False,
                 return_inverse=False,
                 digits=None):
    """
    Identical to the numpy.unique command, except evaluates floating
    point numbers, using a specified number of digits.

    If digits isn't specified, the library default TOL_MERGE
    will be used.
    """
    data = np.asanyarray(data)
    as_int = float_to_int(data, digits)
    _junk, unique, inverse = np.unique(as_int,
                                       return_index=True,
                                       return_inverse=True)

    if (not return_index) and (not return_inverse):
        return data[unique]

    result = [data[unique]]
    if return_index:
        result.append(unique)
    if return_inverse:
        result.append(inverse)
    return tuple(result)
python
{ "resource": "" }
q23237
unique_value_in_row
train
def unique_value_in_row(data, unique=None):
    """
    For a 2D array of integers find the position of a value in each
    row which only occurs once. If there are more than one value per
    row which occur once, the last one is returned.

    Parameters
    ----------
    data : (n, d) int
    unique : (m,) int
      List of unique values contained in data.
      Generated from np.unique if not passed;
      passing it is purely a speedup

    Returns
    ---------
    result : (n, d) bool
      With one or zero True values per row.

    Examples
    -------------------------------------
    In [0]: r = np.array([[-1,  1,  1],
                          [-1,  1, -1],
                          [-1,  1,  1],
                          [-1,  1, -1],
                          [-1,  1, -1]], dtype=np.int8)

    In [1]: unique_value_in_row(r)
    Out[1]:
    array([[ True, False, False],
           [False,  True, False],
           [ True, False, False],
           [False,  True, False],
           [False,  True, False]], dtype=bool)

    In [2]: unique_value_in_row(r).sum(axis=1)
    Out[2]: array([1, 1, 1, 1, 1])

    In [3]: r[unique_value_in_row(r)]
    Out[3]: array([-1,  1, -1,  1,  1], dtype=int8)
    """
    if unique is None:
        unique = np.unique(data)
    data = np.asanyarray(data)
    result = np.zeros_like(data, dtype=bool, subok=False)
    for value in unique:
        test = np.equal(data, value)
        test_ok = test.sum(axis=1) == 1
        result[test_ok] = test[test_ok]
    return result
python
{ "resource": "" }
q23238
boolean_rows
train
def boolean_rows(a, b, operation=np.intersect1d):
    """
    Find the rows which occur in both of two arrays.

    Parameters
    ---------
    a : (n, d) int
      Array with row vectors
    b : (m, d) int
      Array with row vectors
    operation : function
      Numpy boolean set operation function:
        - np.intersect1d
        - np.setdiff1d

    Returns
    --------
    shared : (p, d) array
      Rows in both a and b
    """
    a = np.asanyarray(a, dtype=np.int64)
    b = np.asanyarray(b, dtype=np.int64)

    av = a.view([('', a.dtype)] * a.shape[1]).ravel()
    bv = b.view([('', b.dtype)] * b.shape[1]).ravel()
    shared = operation(av, bv).view(a.dtype).reshape(-1, a.shape[1])

    return shared
python
{ "resource": "" }
q23239
group_vectors
train
def group_vectors(vectors,
                  angle=1e-4,
                  include_negative=False):
    """
    Group vectors based on an angle tolerance, with the option
    to include negative vectors.

    Parameters
    -----------
    vectors : (n, 3) float
      Direction vectors
    angle : float
      Group vectors closer than this angle in radians
    include_negative : bool
      If True consider the same:
      [0, 0, 1] and [0, 0, -1]

    Returns
    ------------
    new_vectors : (m, 3) float
      Direction vectors
    groups : (m,) sequence of int
      Indices of source vectors
    """
    vectors = np.asanyarray(vectors, dtype=np.float64)
    angle = float(angle)

    if include_negative:
        vectors = util.vector_hemisphere(vectors)

    spherical = util.vector_to_spherical(vectors)
    angles, groups = group_distance(spherical, angle)
    new_vectors = util.spherical_to_vector(angles)

    return new_vectors, groups
python
{ "resource": "" }
q23240
group_distance
train
def group_distance(values, distance):
    """
    Find groups of points which have neighbours closer than radius,
    where no two points in a group are farther than distance apart.

    Parameters
    ---------
    values : (n, d) float
      Points of dimension d
    distance : float
      Max distance between points in a cluster

    Returns
    ----------
    unique : (m, d) float
      Median value of each group
    groups : (m,) sequence of int
      Indexes of points that make up a group
    """
    values = np.asanyarray(values, dtype=np.float64)

    consumed = np.zeros(len(values), dtype=bool)
    tree = cKDTree(values)

    # (n, d) set of values that are unique
    unique = []
    # (n,) sequence of indices in values
    groups = []

    for index, value in enumerate(values):
        if consumed[index]:
            continue
        group = np.array(tree.query_ball_point(value, distance),
                         dtype=np.int64)
        consumed[group] = True
        unique.append(np.median(values[group], axis=0))
        groups.append(group)
    return np.array(unique), np.array(groups)
python
{ "resource": "" }
q23241
clusters
train
def clusters(points, radius):
    """
    Find clusters of points which have neighbours closer than radius.

    Parameters
    ---------
    points : (n, d) float
      Points of dimension d
    radius : float
      Max distance between points in a cluster

    Returns
    ----------
    groups : (m,) sequence of int
      Indices of points in a cluster
    """
    from . import graph

    tree = cKDTree(points)

    # some versions return pairs as a set of tuples
    pairs = tree.query_pairs(r=radius, output_type='ndarray')
    # group connected components
    groups = graph.connected_components(pairs)

    return groups
python
{ "resource": "" }
q23242
blocks
train
def blocks(data,
           min_len=2,
           max_len=np.inf,
           digits=None,
           only_nonzero=False):
    """
    Given an array, find the indices of contiguous blocks
    of equal values.

    Parameters
    ---------
    data : (n,) array
    min_len : int
      The minimum length group to be returned
    max_len : int
      The maximum length group to be returned
    digits : int
      If dealing with floats, how many digits to use
    only_nonzero : bool
      Only return blocks of non-zero values

    Returns
    ---------
    blocks : (m,) sequence of indices referencing data
    """
    data = float_to_int(data, digits=digits)

    # find the inflection points, or locations where the array turns
    # from True to False
    infl = np.concatenate(([0],
                           np.nonzero(np.diff(data))[0] + 1,
                           [len(data)]))
    infl_len = np.diff(infl)
    infl_ok = np.logical_and(infl_len >= min_len,
                             infl_len <= max_len)

    if only_nonzero:
        # check to make sure the values of each contiguous block
        # are True, by checking the first value of each block
        infl_ok = np.logical_and(infl_ok,
                                 data[infl[:-1]])

    # inflate start/end indexes into full ranges of values
    blocks = [np.arange(infl[i], infl[i + 1])
              for i, ok in enumerate(infl_ok) if ok]

    return blocks
python
{ "resource": "" }
q23243
group_min
train
def group_min(groups, data):
    """
    Given a list of groups, find the minimum element of data
    within each group.

    Parameters
    -----------
    groups : (n,) sequence of (q,) int
      Indexes of each group corresponding to each element in data
    data : (m,)
      The data that groups indexes reference

    Returns
    -----------
    minimums : (n,)
      Minimum value of data per group
    """
    # sort with major key groups, minor key data
    order = np.lexsort((data, groups))
    groups = groups[order]  # this is only needed if groups is unsorted
    data = data[order]
    # construct an index which marks borders between groups
    index = np.empty(len(groups), 'bool')
    index[0] = True
    index[1:] = groups[1:] != groups[:-1]
    return data[index]
python
{ "resource": "" }
q23244
minimum_nsphere
train
def minimum_nsphere(obj):
    """
    Compute the minimum n-sphere for a mesh or a set of points.

    Uses the fact that the minimum n-sphere will be centered at one of
    the vertices of the furthest site voronoi diagram, which is n*log(n)
    but should be pretty fast due to using the scipy/qhull implementations
    of convex hulls and voronoi diagrams.

    Parameters
    ----------
    obj : (n, d) float or trimesh.Trimesh
      Points or mesh to find minimum bounding nsphere

    Returns
    ----------
    center : (d,) float
      Center of fitted n-sphere
    radius : float
      Radius of fitted n-sphere
    """
    # reduce the input points or mesh to the vertices of the convex hull
    # since we are computing the furthest site voronoi diagram this reduces
    # the input complexity substantially and returns the same value
    points = convex.hull_points(obj)

    # we are scaling the mesh to a unit cube; this used to pass
    # qhull_options 'QbB' to Voronoi however this had a bug somewhere,
    # so to avoid it we scale to a unit cube ourselves inside this function
    points_origin = points.min(axis=0)
    points_scale = points.ptp(axis=0).min()
    points = (points - points_origin) / points_scale

    # if all of the points are on an n-sphere already the voronoi
    # method will fail so we check a least squares fit before
    # bothering to compute the voronoi diagram
    fit_C, fit_R, fit_E = fit_nsphere(points)
    # return fit radius and center to global scale
    fit_R = (((points - fit_C)**2).sum(axis=1).max() ** .5) * points_scale
    fit_C = (fit_C * points_scale) + points_origin

    if fit_E < 1e-6:
        log.debug('Points were on an n-sphere, returning fit')
        return fit_C, fit_R

    # calculate a furthest site voronoi diagram
    # this will fail if the points are ALL on the surface of
    # the n-sphere but hopefully the least squares check caught those cases
    # , qhull_options='QbB Pp')
    voronoi = spatial.Voronoi(points, furthest_site=True)

    # find the maximum radius^2 point for each of the voronoi vertices
    # this is worst case quite expensive but we have taken
    # convex hull to reduce n for this operation
    # we are doing comparisons on the radius squared then rooting once
    try:
        # cdist is massively faster than looping or tiling methods
        # although it does create a very large intermediate array
        # first, get an order of magnitude memory size estimate
        # a float64 would be 8 bytes per entry plus overhead
        memory_estimate = len(voronoi.vertices) * len(points) * 9
        if memory_estimate > _MAX_MEMORY():
            raise MemoryError
        radii_2 = spatial.distance.cdist(
            voronoi.vertices, points,
            metric='sqeuclidean').max(axis=1)
    except MemoryError:
        # log the MemoryError
        log.warning('MemoryError: falling back to slower check!')
        # fall back to a potentially very slow list comprehension
        radii_2 = np.array([((points - v) ** 2).sum(axis=1).max()
                            for v in voronoi.vertices])

    # we want the smallest sphere so take the min of the radii
    radii_idx = radii_2.argmin()

    # return voronoi radius and center to global scale
    radius_v = np.sqrt(radii_2[radii_idx]) * points_scale
    center_v = (voronoi.vertices[radii_idx] *
                points_scale) + points_origin

    if radius_v > fit_R:
        return fit_C, fit_R

    return center_v, radius_v
python
{ "resource": "" }
q23245
fit_nsphere
train
def fit_nsphere(points, prior=None):
    """
    Fit an n-sphere to a set of points using least squares.

    Parameters
    ---------
    points : (n, d) float
      Points in space
    prior : (d,) float
      Best guess for center of nsphere

    Returns
    ---------
    center : (d,) float
      Location of center
    radius : float
      Mean radius across circle
    error : float
      Peak to peak value of deviation from mean radius
    """
    # make sure points are numpy array
    points = np.asanyarray(points, dtype=np.float64)
    # create ones so we can dot instead of using slower sum
    ones = np.ones(points.shape[1])

    def residuals(center):
        # do the axis sum with a dot
        # this gets called a LOT so worth optimizing
        radii_sq = np.dot((points - center) ** 2, ones)
        # residuals are difference between mean
        # use our sum mean vs .mean() as it is slightly faster
        return radii_sq - (radii_sq.sum() / len(radii_sq))

    if prior is None:
        guess = points.mean(axis=0)
    else:
        guess = np.asanyarray(prior)

    center_result, return_code = leastsq(residuals,
                                         guess,
                                         xtol=1e-8)
    if not (return_code in [1, 2, 3, 4]):
        raise ValueError('Least square fit failed!')

    radii = np.linalg.norm(points - center_result, axis=1)
    radius = radii.mean()
    error = radii.ptp()
    return center_result, radius, error
python
{ "resource": "" }
q23246
is_nsphere
train
def is_nsphere(points):
    """
    Check if a list of points is an nsphere.

    Parameters
    -----------
    points : (n, dimension) float
      Points in space

    Returns
    -----------
    check : bool
      True if input points are on an nsphere
    """
    center, radius, error = fit_nsphere(points)
    check = error < tol.merge
    return check
python
{ "resource": "" }
q23247
contains_points
train
def contains_points(intersector, points, check_direction=None):
    """
    Check if a mesh contains a set of points, using ray tests.

    If the point is on the surface of the mesh, behavior is undefined.

    Parameters
    ---------
    intersector : RayMeshIntersector
      Ray intersector wrapping the mesh to query
    points : (n, 3) float
      Points in space

    Returns
    ---------
    contains : (n,) bool
      Whether point is inside mesh or not
    """
    # convert points to float and make sure they are 3D
    points = np.asanyarray(points, dtype=np.float64)
    if not util.is_shape(points, (-1, 3)):
        raise ValueError('points must be (n,3)')

    # placeholder result with no hits we'll fill in later
    contains = np.zeros(len(points), dtype=bool)

    # cull points outside of the axis aligned bounding box
    # this avoids running ray tests unless points are close
    inside_aabb = bounds.contains(intersector.mesh.bounds, points)

    # if everything is outside the AABB, exit early
    if not inside_aabb.any():
        return contains

    # default ray direction is random, but we are not generating
    # uniquely each time so the behavior of this function
    # is easier to debug
    default_direction = np.array([0.4395064455,
                                  0.617598629942,
                                  0.652231566745])

    if check_direction is None:
        # if no check direction is specified use the default
        # stack it only for points inside the AABB
        ray_directions = np.tile(default_direction,
                                 (inside_aabb.sum(), 1))
    else:
        # if a direction is passed use it
        ray_directions = np.tile(np.array(check_direction).reshape(3),
                                 (inside_aabb.sum(), 1))

    # cast a ray both forwards and backwards
    location, index_ray, c = intersector.intersects_location(
        np.vstack((points[inside_aabb],
                   points[inside_aabb])),
        np.vstack((ray_directions,
                   -ray_directions)))

    # if we hit nothing in either direction just return with no hits
    if len(index_ray) == 0:
        return contains

    # reshape so bi_hits[0] is the result in the forward direction and
    # bi_hits[1] is the result in the backwards directions
    bi_hits = np.bincount(
        index_ray,
        minlength=len(ray_directions) * 2).reshape((2, -1))
    # a point is probably inside if it hits a surface
    # an odd number of times
    bi_contains = np.mod(bi_hits, 2) == 1

    # if the mod of the hit count is the same in both
    # directions, we can save that result and move on
    agree = np.equal(*bi_contains)

    # in order to do an assignment we can only have one
    # level of boolean indexes, for example this doesn't work:
    # contains[inside_aabb][agree] = bi_contains[0][agree]
    # no error is thrown, but nothing gets assigned
    # to get around that, we create a single mask for assignment
    mask = inside_aabb.copy()
    mask[mask] = agree

    # set contains flags for things inside the AABB and who have
    # ray tests that agree in both directions
    contains[mask] = bi_contains[0][agree]

    # if one of the rays in either direction hit nothing
    # it is a very solid indicator we are in free space
    # as the edge cases we are working around tend to
    # add hits rather than miss hits
    one_freespace = (bi_hits == 0).any(axis=0)

    # rays where they don't agree and one isn't in free space
    # are deemed to be broken
    broken = np.logical_and(np.logical_not(agree),
                            np.logical_not(one_freespace))

    # if all rays agree return
    if not broken.any():
        return contains

    # try to run again with a new random vector
    # only do it if check_direction isn't specified
    # to avoid infinite recursion
    if check_direction is None:
        # we're going to run the check again in a random direction
        new_direction = util.unitize(np.random.random(3) - .5)

        # do the mask trick again to be able to assign results
        mask = inside_aabb.copy()
        mask[mask] = broken
        contains[mask] = contains_points(
            intersector,
            points[inside_aabb][broken],
            check_direction=new_direction)
        constants.log.debug(
            'detected %d broken contains test, attempted to fix',
            broken.sum())

    return contains
python
{ "resource": "" }
q23248
mesh_to_BVH
train
def mesh_to_BVH(mesh):
    """
    Create a BVHModel object from a Trimesh object.

    Parameters
    -----------
    mesh : Trimesh
      Input geometry

    Returns
    ------------
    bvh : fcl.BVHModel
      BVH of input geometry
    """
    bvh = fcl.BVHModel()
    bvh.beginModel(num_tris_=len(mesh.faces),
                   num_vertices_=len(mesh.vertices))
    bvh.addSubModel(verts=mesh.vertices,
                    triangles=mesh.faces)
    bvh.endModel()
    return bvh
python
{ "resource": "" }
q23249
scene_to_collision
train
def scene_to_collision(scene):
    """
    Create collision objects from a trimesh.Scene object.

    Parameters
    ------------
    scene : trimesh.Scene
      Scene to create collision objects for

    Returns
    ------------
    manager : CollisionManager
      CollisionManager for objects in scene
    objects : {node name: CollisionObject}
      Collision objects for nodes in scene
    """
    manager = CollisionManager()
    objects = {}
    for node in scene.graph.nodes_geometry:
        T, geometry = scene.graph[node]
        objects[node] = manager.add_object(
            name=node,
            mesh=scene.geometry[geometry],
            transform=T)
    return manager, objects
python
{ "resource": "" }
q23250
CollisionManager.add_object
train
def add_object(self, name, mesh, transform=None):
    """
    Add an object to the collision manager.

    If an object with the given name is already in the manager,
    replace it.

    Parameters
    ----------
    name : str
      An identifier for the object
    mesh : Trimesh object
      The geometry of the collision object
    transform : (4, 4) float
      Homogeneous transform matrix for the object
    """
    # if no transform passed, assume identity transform
    if transform is None:
        transform = np.eye(4)
    transform = np.asanyarray(transform, dtype=np.float32)
    if transform.shape != (4, 4):
        raise ValueError('transform must be (4,4)!')

    # create or recall from cache BVH
    bvh = self._get_BVH(mesh)
    # create the FCL transform from (4,4) matrix
    t = fcl.Transform(transform[:3, :3], transform[:3, 3])
    o = fcl.CollisionObject(bvh, t)

    # add collision object to set
    if name in self._objs:
        self._manager.unregisterObject(self._objs[name])
    self._objs[name] = {'obj': o,
                        'geom': bvh}
    # store the name of the geometry
    self._names[id(bvh)] = name

    self._manager.registerObject(o)
    self._manager.update()
    return o
python
{ "resource": "" }
q23251
CollisionManager.remove_object
train
def remove_object(self, name):
    """
    Delete an object from the collision manager.

    Parameters
    ----------
    name : str
      The identifier for the object
    """
    if name in self._objs:
        self._manager.unregisterObject(self._objs[name]['obj'])
        self._manager.update(self._objs[name]['obj'])
        # remove objects from _objs
        geom_id = id(self._objs.pop(name)['geom'])
        # remove names
        self._names.pop(geom_id)
    else:
        raise ValueError('{} not in collision manager!'.format(name))
python
{ "resource": "" }
q23252
CollisionManager.set_transform
train
def set_transform(self, name, transform):
    """
    Set the transform for one of the manager's objects.
    This replaces the prior transform.

    Parameters
    ----------
    name : str
      An identifier for the object already in the manager
    transform : (4, 4) float
      A new homogeneous transform matrix for the object
    """
    if name in self._objs:
        o = self._objs[name]['obj']
        o.setRotation(transform[:3, :3])
        o.setTranslation(transform[:3, 3])
        self._manager.update(o)
    else:
        raise ValueError('{} not in collision manager!'.format(name))
python
{ "resource": "" }
q23253
CollisionManager.in_collision_single
train
def in_collision_single(self,
                        mesh,
                        transform=None,
                        return_names=False,
                        return_data=False):
    """
    Check a single object for collisions against all objects
    in the manager.

    Parameters
    ----------
    mesh : Trimesh object
      The geometry of the collision object
    transform : (4, 4) float
      Homogeneous transform matrix
    return_names : bool
      If true, a set is returned containing the names
      of all objects in collision with the object
    return_data : bool
      If true, a list of ContactData is returned as well

    Returns
    ------------
    is_collision : bool
      True if a collision occurs and False otherwise
    names : set of str
      The set of names of objects that collided with the provided one
    contacts : list of ContactData
      All contacts detected
    """
    if transform is None:
        transform = np.eye(4)

    # create FCL data
    b = self._get_BVH(mesh)
    t = fcl.Transform(transform[:3, :3], transform[:3, 3])
    o = fcl.CollisionObject(b, t)

    # collide with manager's objects
    cdata = fcl.CollisionData()
    if return_names or return_data:
        cdata = fcl.CollisionData(request=fcl.CollisionRequest(
            num_max_contacts=100000,
            enable_contact=True))

    self._manager.collide(o, cdata, fcl.defaultCollisionCallback)
    result = cdata.result.is_collision

    # if we want to return the objects in collision, collect them
    objs_in_collision = set()
    contact_data = []
    if return_names or return_data:
        for contact in cdata.result.contacts:
            cg = contact.o1
            if cg == b:
                cg = contact.o2

            name = self._extract_name(cg)

            names = (name, '__external')
            if cg == contact.o2:
                names = reversed(names)

            if return_names:
                objs_in_collision.add(name)
            if return_data:
                contact_data.append(ContactData(names, contact))

    if return_names and return_data:
        return result, objs_in_collision, contact_data
    elif return_names:
        return result, objs_in_collision
    elif return_data:
        return result, contact_data
    else:
        return result
python
{ "resource": "" }
q23254
CollisionManager.in_collision_other
train
def in_collision_other(self, other_manager, return_names=False, return_data=False): """ Check if any object from this manager collides with any object from another manager. Parameters ------------------- other_manager : CollisionManager Another collision manager object return_names : bool If true, a set is returned containing the names of all pairs of objects in collision. return_data : bool If true, a list of ContactData is returned as well Returns ------------- is_collision : bool True if a collision occurred between any pair of objects and False otherwise names : set of 2-tup The set of pairwise collisions. Each tuple contains two names (first from this manager, second from the other_manager) indicating that the two corresponding objects are in collision. contacts : list of ContactData All contacts detected """ cdata = fcl.CollisionData() if return_names or return_data: cdata = fcl.CollisionData( request=fcl.CollisionRequest( num_max_contacts=100000, enable_contact=True)) self._manager.collide(other_manager._manager, cdata, fcl.defaultCollisionCallback) result = cdata.result.is_collision objs_in_collision = set() contact_data = [] if return_names or return_data: for contact in cdata.result.contacts: reverse = False names = (self._extract_name(contact.o1), other_manager._extract_name(contact.o2)) if names[0] is None: names = (self._extract_name(contact.o2), other_manager._extract_name(contact.o1)) reverse = True if return_names: objs_in_collision.add(names) if return_data: if reverse: names = reversed(names) contact_data.append(ContactData(names, contact)) if return_names and return_data: return result, objs_in_collision, contact_data elif return_names: return result, objs_in_collision elif return_data: return result, contact_data else: return result
python
{ "resource": "" }
q23255
CollisionManager.min_distance_single
train
def min_distance_single(self, mesh, transform=None,
                        return_name=False, return_data=False):
    """
    Get the minimum distance between a single object
    and any object in the manager.

    Parameters
    ---------------
    mesh : Trimesh object
      The geometry of the collision object
    transform : (4,4) float
      Homogenous transform matrix for the object
    return_name : bool
      If true, return name of the closest object
    return_data : bool
      If true, a DistanceData object is returned as well

    Returns
    -------------
    distance : float
      Min distance between mesh and any object in the manager
    name : str
      The name of the object in the manager that was closest
    data : DistanceData
      Extra data about the distance query
    """
    if transform is None:
        transform = np.eye(4)

    # Create FCL data
    b = self._get_BVH(mesh)
    t = fcl.Transform(transform[:3, :3], transform[:3, 3])
    o = fcl.CollisionObject(b, t)

    # Collide with manager's objects
    ddata = fcl.DistanceData()
    if return_data:
        ddata = fcl.DistanceData(
            fcl.DistanceRequest(enable_nearest_points=True),
            fcl.DistanceResult()
        )

    self._manager.distance(o, ddata, fcl.defaultDistanceCallback)

    distance = ddata.result.min_distance

    # If we want to return the closest object, collect it.
    name, data = None, None
    if return_name or return_data:
        cg = ddata.result.o1
        if cg == b:
            cg = ddata.result.o2

        name = self._extract_name(cg)
        names = (name, '__external')
        if cg == ddata.result.o2:
            names = reversed(names)
        data = DistanceData(names, ddata.result)

    if return_name and return_data:
        return distance, name, data
    elif return_name:
        return distance, name
    elif return_data:
        return distance, data
    else:
        return distance
python
{ "resource": "" }
q23256
CollisionManager.min_distance_internal
train
def min_distance_internal(self, return_names=False, return_data=False): """ Get the minimum distance between any pair of objects in the manager. Parameters ------------- return_names : bool If true, a 2-tuple is returned containing the names of the closest objects. return_data : bool If true, a DistanceData object is returned as well Returns ----------- distance : float Min distance between any two managed objects names : (2,) str The names of the closest objects data : DistanceData Extra data about the distance query """ ddata = fcl.DistanceData() if return_data: ddata = fcl.DistanceData( fcl.DistanceRequest(enable_nearest_points=True), fcl.DistanceResult() ) self._manager.distance(ddata, fcl.defaultDistanceCallback) distance = ddata.result.min_distance names, data = None, None if return_names or return_data: names = (self._extract_name(ddata.result.o1), self._extract_name(ddata.result.o2)) data = DistanceData(names, ddata.result) names = tuple(sorted(names)) if return_names and return_data: return distance, names, data elif return_names: return distance, names elif return_data: return distance, data else: return distance
python
{ "resource": "" }
q23257
CollisionManager.min_distance_other
train
def min_distance_other(self, other_manager, return_names=False, return_data=False): """ Get the minimum distance between any pair of objects, one in each manager. Parameters ---------- other_manager : CollisionManager Another collision manager object return_names : bool If true, a 2-tuple is returned containing the names of the closest objects. return_data : bool If true, a DistanceData object is returned as well Returns ----------- distance : float The min distance between a pair of objects, one from each manager. names : 2-tup of str A 2-tuple containing two names (first from this manager, second from the other_manager) indicating the two closest objects. data : DistanceData Extra data about the distance query """ ddata = fcl.DistanceData() if return_data: ddata = fcl.DistanceData( fcl.DistanceRequest(enable_nearest_points=True), fcl.DistanceResult() ) self._manager.distance(other_manager._manager, ddata, fcl.defaultDistanceCallback) distance = ddata.result.min_distance names, data = None, None if return_names or return_data: reverse = False names = (self._extract_name(ddata.result.o1), other_manager._extract_name(ddata.result.o2)) if names[0] is None: reverse = True names = (self._extract_name(ddata.result.o2), other_manager._extract_name(ddata.result.o1)) dnames = tuple(names) if reverse: dnames = reversed(dnames) data = DistanceData(dnames, ddata.result) if return_names and return_data: return distance, names, data elif return_names: return distance, names elif return_data: return distance, data else: return distance
python
{ "resource": "" }
q23258
cylinder_inertia
train
def cylinder_inertia(mass, radius, height, transform=None): """ Return the inertia tensor of a cylinder. Parameters ------------ mass : float Mass of cylinder radius : float Radius of cylinder height : float Height of cylinder transform : (4,4) float Transformation of cylinder Returns ------------ inertia : (3,3) float Inertia tensor """ h2, r2 = height ** 2, radius ** 2 diagonal = np.array([((mass * h2) / 12) + ((mass * r2) / 4), ((mass * h2) / 12) + ((mass * r2) / 4), (mass * r2) / 2]) inertia = diagonal * np.eye(3) if transform is not None: inertia = transform_inertia(transform, inertia) return inertia
python
{ "resource": "" }
q23259
principal_axis
train
def principal_axis(inertia):
    """
    Find the principal components and principal axis
    of inertia from the inertia tensor.

    Parameters
    ------------
    inertia : (3,3) float
      Inertia tensor

    Returns
    ------------
    components : (3,) float
      Principal components of inertia
    vectors : (3,3) float
      Row vectors pointing along the
      principal axes of inertia
    """
    inertia = np.asanyarray(inertia, dtype=np.float64)
    if inertia.shape != (3, 3):
        raise ValueError('inertia tensor must be (3,3)!')

    # you could use any of the following to calculate this:
    # np.linalg.svd, np.linalg.eig, np.linalg.eigh
    # moment of inertia is a square symmetric matrix
    # eigh has the best numeric precision in tests
    components, vectors = np.linalg.eigh(inertia * negate_nondiagonal)

    # eigh returns them as column vectors, change them to row vectors
    vectors = vectors.T

    return components, vectors
python
{ "resource": "" }
q23260
transform_inertia
train
def transform_inertia(transform, inertia_tensor): """ Transform an inertia tensor to a new frame. More details in OCW PDF: MIT16_07F09_Lec26.pdf Parameters ------------ transform : (3, 3) or (4, 4) float Transformation matrix inertia_tensor : (3, 3) float Inertia tensor Returns ------------ transformed : (3, 3) float Inertia tensor in new frame """ # check inputs and extract rotation transform = np.asanyarray(transform, dtype=np.float64) if transform.shape == (4, 4): rotation = transform[:3, :3] elif transform.shape == (3, 3): rotation = transform else: raise ValueError('transform must be (3,3) or (4,4)!') inertia_tensor = np.asanyarray(inertia_tensor, dtype=np.float64) if inertia_tensor.shape != (3, 3): raise ValueError('inertia_tensor must be (3,3)!') transformed = util.multi_dot([rotation, inertia_tensor * negate_nondiagonal, rotation.T]) transformed *= negate_nondiagonal return transformed
python
{ "resource": "" }
q23261
autolight
train
def autolight(scene): """ Generate a list of lights for a scene that looks decent. Parameters -------------- scene : trimesh.Scene Scene with geometry Returns -------------- lights : [Light] List of light objects transforms : (len(lights), 4, 4) float Transformation matrices for light positions. """ # create two default point lights lights = [PointLight(), PointLight()] # create two translation matrices for bounds corners transforms = [transformations.translation_matrix(b) for b in scene.bounds] return lights, transforms
python
{ "resource": "" }
q23262
tracked_array
train
def tracked_array(array, dtype=None): """ Properly subclass a numpy ndarray to track changes. Avoids some pitfalls of subclassing by forcing contiguous arrays, and does a view into a TrackedArray. Parameters ------------ array : array- like object To be turned into a TrackedArray dtype : np.dtype Which dtype to use for the array Returns ------------ tracked : TrackedArray Contains input array data """ # if someone passed us None, just create an empty array if array is None: array = [] # make sure it is contiguous then view it as our subclass tracked = np.ascontiguousarray( array, dtype=dtype).view(TrackedArray) # should always be contiguous here assert tracked.flags['C_CONTIGUOUS'] return tracked
python
{ "resource": "" }
q23263
TrackedArray.md5
train
def md5(self): """ Return an MD5 hash of the current array. Returns ----------- md5: str, hexadecimal MD5 of the array """ if self._modified_m or not hasattr(self, '_hashed_md5'): if self.flags['C_CONTIGUOUS']: hasher = hashlib.md5(self) self._hashed_md5 = hasher.hexdigest() else: # the case where we have sliced our nice # contiguous array into a non- contiguous block # for example (note slice *after* track operation): # t = util.tracked_array(np.random.random(10))[::-1] contiguous = np.ascontiguousarray(self) hasher = hashlib.md5(contiguous) self._hashed_md5 = hasher.hexdigest() self._modified_m = False return self._hashed_md5
python
{ "resource": "" }
q23264
TrackedArray.crc
train
def crc(self): """ A zlib.crc32 or zlib.adler32 checksum of the current data. Returns ----------- crc: int, checksum from zlib.crc32 or zlib.adler32 """ if self._modified_c or not hasattr(self, '_hashed_crc'): if self.flags['C_CONTIGUOUS']: self._hashed_crc = crc32(self) else: # the case where we have sliced our nice # contiguous array into a non- contiguous block # for example (note slice *after* track operation): # t = util.tracked_array(np.random.random(10))[::-1] contiguous = np.ascontiguousarray(self) self._hashed_crc = crc32(contiguous) self._modified_c = False return self._hashed_crc
python
{ "resource": "" }
q23265
TrackedArray._xxhash
train
def _xxhash(self):
    """
    An xxhash.xxh64 hash of the array.

    Returns
    -------------
    xx : int, xxhash.xxh64 hash of array.
    """
    # repeat the bookkeeping to get a contiguous array inside
    # the function to avoid additional function calls
    # these functions are called millions of times so everything helps
    if self._modified_x or not hasattr(self, '_hashed_xx'):
        if self.flags['C_CONTIGUOUS']:
            hasher = xxhash.xxh64(self)
            self._hashed_xx = hasher.intdigest()
        else:
            # the case where we have sliced our nice
            # contiguous array into a non- contiguous block
            # for example (note slice *after* track operation):
            # t = util.tracked_array(np.random.random(10))[::-1]
            contiguous = np.ascontiguousarray(self)
            hasher = xxhash.xxh64(contiguous)
            self._hashed_xx = hasher.intdigest()
    self._modified_x = False
    return self._hashed_xx
python
{ "resource": "" }
q23266
Cache.verify
train
def verify(self): """ Verify that the cached values are still for the same value of id_function and delete all stored items if the value of id_function has changed. """ # if we are in a lock don't check anything if self._lock != 0: return # check the hash of our data id_new = self._id_function() # things changed if id_new != self.id_current: if len(self.cache) > 0: log.debug('%d items cleared from cache: %s', len(self.cache), str(list(self.cache.keys()))) # hash changed, so dump the cache # do it manually rather than calling clear() # as we are internal logic and can avoid function calls self.cache = {} # set the id to the new data hash self.id_current = id_new
python
{ "resource": "" }
q23267
Cache.clear
train
def clear(self, exclude=None):
    """
    Remove elements in the cache.

    Parameters
    ------------
    exclude : iterable or None
      Keys to retain; everything else is removed.
      If None, the entire cache is cleared.
    """
    if exclude is None:
        self.cache = {}
    else:
        self.cache = {k: v for k, v in self.cache.items()
                      if k in exclude}
python
{ "resource": "" }
q23268
DataStore.is_empty
train
def is_empty(self):
    """
    Is the current DataStore empty or not.

    Returns
    ----------
    empty : bool
      False if there are items in the DataStore
    """
    if len(self.data) == 0:
        return True
    for v in self.data.values():
        if is_sequence(v):
            # a non- empty sequence means we have data
            if len(v) > 0:
                return False
        elif bool(np.isreal(v)):
            # a scalar value also counts as data
            return False
    return True
python
{ "resource": "" }
q23269
DataStore.md5
train
def md5(self): """ Get an MD5 reflecting everything in the DataStore. Returns ---------- md5: str, MD5 in hexadecimal """ hasher = hashlib.md5() for key in sorted(self.data.keys()): hasher.update(self.data[key].md5().encode('utf-8')) md5 = hasher.hexdigest() return md5
python
{ "resource": "" }
q23270
DataStore.crc
train
def crc(self): """ Get a CRC reflecting everything in the DataStore. Returns ---------- crc: int, CRC of data """ crc = sum(i.crc() for i in self.data.values()) return crc
python
{ "resource": "" }
q23271
DataStore.fast_hash
train
def fast_hash(self): """ Get a CRC32 or xxhash.xxh64 reflecting the DataStore. Returns ------------ hashed: int, checksum of data """ fast = sum(i.fast_hash() for i in self.data.values()) return fast
python
{ "resource": "" }
q23272
identifier_simple
train
def identifier_simple(mesh):
    """
    Return a basic identifier for a mesh, consisting of properties
    that have been hand tuned to be somewhat robust to rigid
    transformations and different tessellations.

    Parameters
    ----------
    mesh : Trimesh object
      Source geometry

    Returns
    ----------
    identifier : (6,) float
      Identifying values of the mesh
    """
    # verify the cache once
    mesh._cache.verify()

    # don't check hashes during identifier as we aren't
    # changing any data values of the mesh inside block
    # if we did change values in cache block things would break
    with mesh._cache:
        # pre-allocate identifier so indexes of values can't move around
        # like they might if we used hstack or something else
        identifier = np.zeros(6, dtype=np.float64)
        # avoid thrashing the cache unnecessarily
        mesh_area = mesh.area
        # start with properties that are valid regardless of watertightness
        # note that we're going to try to make all parameters relative
        # to area so other values don't get blown up at weird scales
        identifier[0] = mesh_area
        # topological constant and the only thing we can really
        # trust in this fallen world
        identifier[1] = mesh.euler_number

        # if we have a watertight mesh include volume and inertia
        if mesh.is_volume:
            # side length of a cube ratio
            # 1.0 for cubes, different values for other things
            identifier[2] = (((mesh_area / 6.0) ** (1.0 / 2.0)) /
                             (mesh.volume ** (1.0 / 3.0)))
            # save vertices for radius calculation
            vertices = mesh.vertices - mesh.center_mass
            # we are going to special case radially symmetric meshes
            # to replace their surface area with ratio of their
            # surface area to a primitive sphere or cylinder surface area
            # this is because tessellated curved surfaces are really rough
            # to reliably hash as they are very sensitive to floating point
            # and tessellation error. By making area proportionate to a fit
            # primitive area we are able to reliably hash at more sigfigs
            if mesh.symmetry == 'radial':
                # cylinder height
                h = np.dot(vertices, mesh.symmetry_axis).ptp()
                # section radius
                R2 = (np.dot(vertices, mesh.symmetry_section.T)
                      ** 2).sum(axis=1).max()
                # area of a cylinder primitive
                area = (2 * np.pi * (R2**.5) * h) + (2 * np.pi * R2)
                # replace area in this case with area ratio
                identifier[0] = mesh_area / area
            elif mesh.symmetry == 'spherical':
                # handle a spherically symmetric mesh
                R2 = (vertices ** 2).sum(axis=1).max()
                area = 4 * np.pi * R2
                identifier[0] = mesh_area / area
        else:
            # if we don't have a watertight mesh add information about the
            # convex hull, which is slow to compute and unreliable
            # just what we're looking for in a hash but hey
            identifier[3] = mesh_area / mesh.convex_hull.area
            # cube side length ratio for the hull
            identifier[4] = (((mesh.convex_hull.area / 6.0) ** (1.0 / 2.0)) /
                             (mesh.convex_hull.volume ** (1.0 / 3.0)))
            vertices = mesh.vertices - mesh.centroid

        # add in max radius^2 to area ratio
        R2 = (vertices ** 2).sum(axis=1).max()
        identifier[5] = R2 / mesh_area

    return identifier
python
{ "resource": "" }
q23273
identifier_hash
train
def identifier_hash(identifier, sigfig=None): """ Hash an identifier array to a specified number of significant figures. Parameters ---------- identifier : (n,) float Vector of properties sigfig : (n,) int Number of sigfigs per property Returns ---------- md5 : str MD5 hash of identifier """ if sigfig is None: sigfig = id_sigfig # convert identifier to integers and order of magnitude as_int, multiplier = util.sigfig_int(identifier, sigfig) # make all scales positive if (multiplier < 0).any(): multiplier += np.abs(multiplier.min()) hashable = (as_int * (10 ** multiplier)).astype(np.int64) md5 = util.md5_object(hashable) return md5
python
{ "resource": "" }
q23274
convert_to_vertexlist
train
def convert_to_vertexlist(geometry, **kwargs):
    """
    Try to convert various geometry objects to the constructor
    args for a pyglet indexed vertex list.

    Parameters
    ------------
    geometry : Trimesh, Path2D, Path3D, (n,2) float, (n,3) float
      Object to render

    Returns
    ------------
    args : tuple
      Args to be passed to pyglet indexed vertex list
      constructor.
    """
    if util.is_instance_named(geometry, 'Trimesh'):
        return mesh_to_vertexlist(geometry, **kwargs)
    elif util.is_instance_named(geometry, 'Path'):
        # works for Path3D and Path2D
        # both of which inherit from Path
        return path_to_vertexlist(geometry, **kwargs)
    elif util.is_instance_named(geometry, 'PointCloud'):
        # pointcloud objects contain colors
        return points_to_vertexlist(geometry.vertices,
                                    colors=geometry.colors,
                                    **kwargs)
    elif util.is_instance_named(geometry, 'ndarray'):
        # (n,2) or (n,3) points
        return points_to_vertexlist(geometry, **kwargs)
    else:
        raise ValueError('Geometry passed is not a viewable type!')
python
{ "resource": "" }
q23275
mesh_to_vertexlist
train
def mesh_to_vertexlist(mesh,
                       group=None,
                       smooth=True,
                       smooth_threshold=60000):
    """
    Convert a Trimesh object to arguments for an
    indexed vertex list constructor.

    Parameters
    -------------
    mesh : trimesh.Trimesh
      Mesh to be rendered
    group : str
      Rendering group for the vertex list
    smooth : bool
      Should we try to smooth shade the mesh
    smooth_threshold : int
      Maximum number of faces to smooth shade

    Returns
    --------------
    args : (7,) tuple
      Args for vertex list constructor
    """
    if hasattr(mesh.visual, 'uv') and mesh.visual.uv is not None:
        # if the mesh has texture defined pass it to pyglet
        vertex_count = len(mesh.vertices)
        normals = mesh.vertex_normals.reshape(-1).tolist()
        faces = mesh.faces.reshape(-1).tolist()
        vertices = mesh.vertices.reshape(-1).tolist()

        # get the per- vertex UV coordinates
        uv = mesh.visual.uv
        # if someone passed (n, 3) UVW coordinates cut it off here
        if uv.shape[1] > 2:
            uv = uv[:, :2]
        # texcoord as (2,) float
        color_gl = ('t2f/static',
                    uv.astype(np.float64).reshape(-1).tolist())
    elif smooth and len(mesh.faces) < smooth_threshold:
        # if we have a small number of faces and colors defined
        # smooth the mesh by merging vertices of faces below
        # the threshold angle
        mesh = mesh.smoothed()
        vertex_count = len(mesh.vertices)
        normals = mesh.vertex_normals.reshape(-1).tolist()
        faces = mesh.faces.reshape(-1).tolist()
        vertices = mesh.vertices.reshape(-1).tolist()
        color_gl = colors_to_gl(mesh.visual.vertex_colors,
                                vertex_count)
    else:
        # we don't have textures or want to smooth so
        # send a polygon soup of disconnected triangles to opengl
        vertex_count = len(mesh.triangles) * 3
        normals = np.tile(mesh.face_normals,
                          (1, 3)).reshape(-1).tolist()
        vertices = mesh.triangles.reshape(-1).tolist()
        faces = np.arange(vertex_count).tolist()
        colors = np.tile(mesh.visual.face_colors,
                         (1, 3)).reshape((-1, 4))
        color_gl = colors_to_gl(colors, vertex_count)

    # create the ordered tuple for pyglet, use like:
    # `batch.add_indexed(*args)`
    args = (vertex_count,  # number of vertices
            GL_TRIANGLES,  # mode
            group,         # group
            faces,         # indices
            ('v3f/static', vertices),
            ('n3f/static', normals),
            color_gl)
    return args
python
{ "resource": "" }
q23276
path_to_vertexlist
train
def path_to_vertexlist(path, group=None, colors=None, **kwargs):
    """
    Convert a Path3D object to arguments for an
    indexed vertex list constructor.

    Parameters
    -------------
    path : trimesh.path.Path3D object
      Mesh to be rendered
    group : str
      Rendering group for the vertex list

    Returns
    --------------
    args : (7,) tuple
      Args for vertex list constructor
    """
    # avoid cache check inside tight loop
    vertices = path.vertices

    # get (n, 2, (2|3)) lines
    lines = np.vstack([util.stack_lines(e.discrete(vertices))
                       for e in path.entities])
    count = len(lines)

    # stack zeros for 2D lines
    if util.is_shape(vertices, (-1, 2)):
        lines = lines.reshape((-1, 2))
        lines = np.column_stack((lines, np.zeros(len(lines))))

    # index for GL is one per point
    index = np.arange(count).tolist()

    args = (count,     # number of vertices
            GL_LINES,  # mode
            group,     # group
            index,     # indices
            ('v3f/static', lines.reshape(-1)),
            colors_to_gl(colors, count=count))  # default colors
    return args
python
{ "resource": "" }
q23277
points_to_vertexlist
train
def points_to_vertexlist(points, colors=None, group=None, **kwargs): """ Convert a numpy array of 3D points to args for a vertex list constructor. Parameters ------------- points : (n, 3) float Points to be rendered colors : (n, 3) or (n, 4) float Colors for each point group : str Rendering group for the vertex list Returns -------------- args : (7,) tuple Args for vertex list constructor """ points = np.asanyarray(points, dtype=np.float64) if util.is_shape(points, (-1, 2)): points = np.column_stack((points, np.zeros(len(points)))) elif not util.is_shape(points, (-1, 3)): raise ValueError('Pointcloud must be (n,3)!') index = np.arange(len(points)).tolist() args = (len(points), # number of vertices GL_POINTS, # mode group, # group index, # indices ('v3f/static', points.reshape(-1)), colors_to_gl(colors, len(points))) return args
python
{ "resource": "" }
q23278
material_to_texture
train
def material_to_texture(material): """ Convert a trimesh.visual.texture.Material object into a pyglet- compatible texture object. Parameters -------------- material : trimesh.visual.texture.Material Material to be converted Returns --------------- texture : pyglet.image.Texture Texture loaded into pyglet form """ # try to extract a PIL image from material if hasattr(material, 'image'): img = material.image else: img = material.baseColorTexture if img is None: return None # use a PNG export to exchange into pyglet # probably a way to do this with a PIL converter with util.BytesIO() as f: # export PIL image as PNG img.save(f, format='png') f.seek(0) # filename used for format guess gl_image = pyglet.image.load(filename='.png', file=f) # turn image into pyglet texture texture = gl_image.get_texture() return texture
python
{ "resource": "" }
q23279
matrix_to_gl
train
def matrix_to_gl(matrix): """ Convert a numpy row- major homogenous transformation matrix to a flat column- major GLfloat transformation. Parameters ------------- matrix : (4,4) float Row- major homogenous transform Returns ------------- glmatrix : (16,) gl.GLfloat Transform in pyglet format """ matrix = np.asanyarray(matrix, dtype=np.float64) if matrix.shape != (4, 4): raise ValueError('matrix must be (4,4)!') # switch to column major and flatten to (16,) column = matrix.T.flatten() # convert to GLfloat glmatrix = (gl.GLfloat * 16)(*column) return glmatrix
python
{ "resource": "" }
q23280
vector_to_gl
train
def vector_to_gl(array, *args): """ Convert an array and an optional set of args into a flat vector of gl.GLfloat """ array = np.array(array) if len(args) > 0: array = np.append(array, args) vector = (gl.GLfloat * len(array))(*array) return vector
python
{ "resource": "" }
q23281
light_to_gl
train
def light_to_gl(light, transform, lightN):
    """
    Convert trimesh.scene.lighting.Light objects into
    args for gl.glLightFv calls

    Parameters
    --------------
    light : trimesh.scene.lighting.Light
      Light object to be converted to GL
    transform : (4, 4) float
      Transformation matrix of light
    lightN : int
      Result of gl.GL_LIGHT0, gl.GL_LIGHT1, etc

    Returns
    --------------
    multiarg : [tuple]
      List of args to pass to gl.glLightFv
      eg: [gl.glLightfv(*a) for a in multiarg]
    """
    # convert color to opengl
    gl_color = vector_to_gl(light.color.astype(np.float64) / 255.0)
    assert len(gl_color) == 4

    # cartesian translation from matrix
    gl_position = vector_to_gl(transform[:3, 3])

    # create the different position and color arguments
    args = [(lightN, gl.GL_POSITION, gl_position),
            (lightN, gl.GL_SPECULAR, gl_color),
            (lightN, gl.GL_DIFFUSE, gl_color),
            (lightN, gl.GL_AMBIENT, gl_color)]

    return args
python
{ "resource": "" }
q23282
Trackball.down
train
def down(self, point): """Record an initial mouse press at a given point. Parameters ---------- point : (2,) int The x and y pixel coordinates of the mouse press. """ self._pdown = np.array(point, dtype=np.float32) self._pose = self._n_pose self._target = self._n_target
python
{ "resource": "" }
q23283
Trackball.drag
train
def drag(self, point):
    """Update the trackball during a drag.

    Parameters
    ----------
    point : (2,) int
        The current x and y pixel coordinates of the mouse during
        a drag. This will compute a movement for the trackball
        with the relative motion between this point and the one
        marked by down().
    """
    point = np.array(point, dtype=np.float32)
    dx, dy = point - self._pdown
    mindim = 0.3 * np.min(self._size)

    target = self._target
    x_axis = self._pose[:3, 0].flatten()
    y_axis = self._pose[:3, 1].flatten()
    z_axis = self._pose[:3, 2].flatten()
    eye = self._pose[:3, 3].flatten()

    # Interpret drag as a rotation
    if self._state == Trackball.STATE_ROTATE:
        x_angle = -dx / mindim
        x_rot_mat = transformations.rotation_matrix(
            x_angle, y_axis, target
        )

        y_angle = dy / mindim
        y_rot_mat = transformations.rotation_matrix(
            y_angle, x_axis, target
        )

        self._n_pose = y_rot_mat.dot(x_rot_mat.dot(self._pose))

    # Interpret drag as a roll about the camera axis
    elif self._state == Trackball.STATE_ROLL:
        center = self._size / 2.0
        v_init = self._pdown - center
        v_curr = point - center
        v_init = v_init / np.linalg.norm(v_init)
        v_curr = v_curr / np.linalg.norm(v_curr)

        theta = (-np.arctan2(v_curr[1], v_curr[0]) +
                 np.arctan2(v_init[1], v_init[0]))

        rot_mat = transformations.rotation_matrix(theta, z_axis, target)

        self._n_pose = rot_mat.dot(self._pose)

    # Interpret drag as a camera pan in view plane
    elif self._state == Trackball.STATE_PAN:
        dx = -dx / (5.0 * mindim) * self._scale
        dy = -dy / (5.0 * mindim) * self._scale

        translation = dx * x_axis + dy * y_axis
        self._n_target = self._target + translation
        t_tf = np.eye(4)
        t_tf[:3, 3] = translation
        self._n_pose = t_tf.dot(self._pose)

    # Interpret drag as a zoom motion
    elif self._state == Trackball.STATE_ZOOM:
        radius = np.linalg.norm(eye - target)
        ratio = 0.0
        if dy > 0:
            ratio = np.exp(abs(dy) / (0.5 * self._size[1])) - 1.0
        elif dy < 0:
            ratio = 1.0 - np.exp(dy / (0.5 * (self._size[1])))
        translation = -np.sign(dy) * ratio * radius * z_axis
        t_tf = np.eye(4)
        t_tf[:3, 3] = translation
        self._n_pose = t_tf.dot(self._pose)
python
{ "resource": "" }
q23284
Trackball.scroll
train
def scroll(self, clicks): """Zoom using a mouse scroll wheel motion. Parameters ---------- clicks : int The number of clicks. Positive numbers indicate forward wheel movement. """ target = self._target ratio = 0.90 mult = 1.0 if clicks > 0: mult = ratio**clicks elif clicks < 0: mult = (1.0 / ratio)**abs(clicks) z_axis = self._n_pose[:3, 2].flatten() eye = self._n_pose[:3, 3].flatten() radius = np.linalg.norm(eye - target) translation = (mult * radius - radius) * z_axis t_tf = np.eye(4) t_tf[:3, 3] = translation self._n_pose = t_tf.dot(self._n_pose) z_axis = self._pose[:3, 2].flatten() eye = self._pose[:3, 3].flatten() radius = np.linalg.norm(eye - target) translation = (mult * radius - radius) * z_axis t_tf = np.eye(4) t_tf[:3, 3] = translation self._pose = t_tf.dot(self._pose)
python
{ "resource": "" }
q23285
Trackball.rotate
train
def rotate(self, azimuth, axis=None):
    """Rotate the trackball about the "Up" axis by azimuth radians.

    Parameters
    ----------
    azimuth : float
        The number of radians to rotate.
    axis : (3,) float or None
        The axis to rotate about. If None, the camera
        frame's Y (up) axis is used.
    """
    target = self._target

    y_axis = self._n_pose[:3, 1].flatten()
    if axis is not None:
        y_axis = axis
    x_rot_mat = transformations.rotation_matrix(azimuth, y_axis, target)
    self._n_pose = x_rot_mat.dot(self._n_pose)

    y_axis = self._pose[:3, 1].flatten()
    if axis is not None:
        y_axis = axis
    x_rot_mat = transformations.rotation_matrix(azimuth, y_axis, target)
    self._pose = x_rot_mat.dot(self._pose)
python
{ "resource": "" }
q23286
look_at
train
def look_at(points, fov, rotation=None, distance=None, center=None):
    """
    Generate transform for a camera to keep a list
    of points in the camera's field of view.

    Parameters
    -------------
    points : (n, 3) float
      Points in space
    fov : (2,) float
      Field of view, in DEGREES
    rotation : None, or (4, 4) float
      Rotation matrix for initial rotation
    distance : None, or float
      Distance from the camera to the center of the
      field of view; if None a distance is computed
      so that all points fit in the view frustum
    center : None, or (3,) float
      Center of field of view.

    Returns
    --------------
    transform : (4, 4) float
      Transformation matrix with points in view
    """
    if rotation is None:
        rotation = np.eye(4)
    else:
        rotation = np.asanyarray(rotation, dtype=np.float64)
    points = np.asanyarray(points, dtype=np.float64)

    # Transform points to camera frame (just use the rotation part)
    rinv = rotation[:3, :3].T
    points_c = rinv.dot(points.T).T

    if center is None:
        # Find the center of the points' AABB in camera frame
        center_c = points_c.min(axis=0) + 0.5 * points_c.ptp(axis=0)
    else:
        # Transform center to camera frame
        center_c = rinv.dot(center)

    # Re-center the points around the camera-frame origin
    points_c -= center_c

    # Find the minimum distance for the camera from the origin
    # so that all points fit in the view frustum
    tfov = np.tan(np.radians(fov) / 2.0)

    if distance is None:
        distance = np.max(np.abs(points_c[:, :2]) /
                          tfov + points_c[:, 2][:, np.newaxis])

    # set the pose translation
    center_w = rotation[:3, :3].dot(center_c)
    cam_pose = rotation.copy()
    cam_pose[:3, 3] = center_w + distance * cam_pose[:3, 2]

    return cam_pose
python
{ "resource": "" }
q23287
camera_to_rays
train
def camera_to_rays(camera):
    """
    Convert a trimesh.scene.Camera object to ray origins
    and direction vectors. Will return one ray per pixel,
    as set in camera.resolution.

    Parameters
    --------------
    camera : trimesh.scene.Camera
      Camera with transform defined

    Returns
    --------------
    origins : (n, 3) float
      Ray origins in space
    vectors : (n, 3) float
      Ray direction unit vectors
    angles : (n, 2) float
      Ray spherical coordinate angles in radians
    """
    # radians of half the field of view
    half = np.radians(camera.fov / 2.0)
    # scale it down by two pixels to keep image under resolution
    half *= (camera.resolution - 2) / camera.resolution
    # create an evenly spaced list of angles
    angles = util.grid_linspace(bounds=[-half, half],
                                count=camera.resolution)
    # turn the angles into unit vectors
    vectors = util.unitize(np.column_stack((
        np.sin(angles),
        np.ones(len(angles)))))
    # flip the camera transform to change sign of Z
    transform = np.dot(
        camera.transform,
        align_vectors([1, 0, 0], [-1, 0, 0]))
    # apply the rotation to the direction vectors
    vectors = transformations.transform_points(
        vectors,
        transform,
        translate=False)
    # camera origin is single point, extract from transform
    origin = transformations.translation_from_matrix(transform)
    # tile it into corresponding list of ray vectors
    origins = np.ones_like(vectors) * origin

    return origins, vectors, angles
python
{ "resource": "" }
q23288
Camera.resolution
train
def resolution(self, values):
    """
    Set the camera resolution in pixels.

    Parameters
    ------------
    values : (2,) int
      Camera resolution in pixels
    """
    values = np.asanyarray(values, dtype=np.int64)
    if values.shape != (2,):
        raise ValueError('resolution must be (2,) int')
    # assign passed values to resolution
    self._resolution = values
python
{ "resource": "" }
q23289
Camera.scene
train
def scene(self, value):
    """
    Set the reference to the scene that this camera is in.

    Parameters
    -------------
    scene : None, or trimesh.Scene
      Scene where this camera is attached
    """
    # save the scene reference
    self._scene = value

    # check if we have a local not- None transform
    # and if we can apply it to the scene graph
    # also check here that scene is a real scene
    if (hasattr(self, '_transform') and
            self._transform is not None and
            hasattr(value, 'graph')):
        # set scene transform to locally saved transform
        self._scene.graph[self.name] = self._transform
        # set local transform to None
        self._transform = None
python
{ "resource": "" }
q23290
Camera.focal
train
def focal(self): """ Get the focal length in pixels for the camera. Returns ------------ focal : (2,) float Focal length in pixels """ if self._focal is None: # calculate focal length from FOV focal = [(px / 2.0) / np.tan(np.radians(fov / 2.0)) for px, fov in zip(self._resolution, self.fov)] # store as correct dtype self._focal = np.asanyarray(focal, dtype=np.float64) return self._focal
python
{ "resource": "" }
q23291
Camera.K
train
def K(self): """ Get the intrinsic matrix for the Camera object. Returns ----------- K : (3, 3) float Intrinsic matrix for camera """ K = np.eye(3, dtype=np.float64) K[0, 0] = self.focal[0] K[1, 1] = self.focal[1] K[0, 2] = self.resolution[0] / 2.0 K[1, 2] = self.resolution[1] / 2.0 return K
python
{ "resource": "" }
q23292
Camera.fov
train
def fov(self): """ Get the field of view in degrees. Returns ------------- fov : (2,) float XY field of view in degrees """ if self._fov is None: fov = [2.0 * np.degrees(np.arctan((px / 2.0) / f)) for px, f in zip(self._resolution, self._focal)] fov = np.asanyarray(fov, dtype=np.float64) self._fov = fov return self._fov
python
{ "resource": "" }
q23293
Camera.fov
train
def fov(self, values):
    """
    Set the field of view in degrees.

    Parameters
    -------------
    values : (2,) float
      Size of FOV to set in degrees
    """
    if values is None:
        self._fov = None
    else:
        values = np.asanyarray(values, dtype=np.float64)
        if values.shape != (2,):
            raise ValueError('fov must be (2,) float')
        # assign passed values to FOV
        self._fov = values
        # fov overrides focal
        self._focal = None
python
{ "resource": "" }
q23294
sinwave
train
def sinwave(scene):
    """
    A callback passed to a scene viewer which will update
    transforms in the viewer periodically.

    Parameters
    -------------
    scene : trimesh.Scene
      Scene containing geometry
    """
    # create an identity homogenous transformation
    matrix = np.eye(4)
    # set Y as cos of time
    matrix[1][3] = np.cos(time.time()) * 2
    # set Z as sin of time
    matrix[2][3] = np.sin(time.time()) * 3

    # take one of the two spheres arbitrarily
    node = scene.graph.nodes_geometry[0]
    # apply the transform to the node
    scene.graph.update(node, matrix=matrix)
python
{ "resource": "" }
q23295
to_volume
train
def to_volume(mesh,
              file_name=None,
              max_element=None,
              mesher_id=1):
    """
    Convert a surface mesh to a 3D volume mesh generated by gmsh.

    An easy way to install the gmsh sdk is through the gmsh-sdk
    package on pypi, which downloads and sets up gmsh:
        pip install gmsh-sdk

    Algorithm details, although check gmsh docs for more information:
    The "Delaunay" algorithm is split into three separate steps.
    First, an initial mesh of the union of all the volumes in the
    model is performed, without inserting points in the volume. The
    surface mesh is then recovered using H. Si's boundary recovery
    algorithm Tetgen/BR. Then a three-dimensional version of the 2D
    Delaunay algorithm described above is applied to insert points
    in the volume to respect the mesh size constraints.

    The "Frontal" algorithm uses J. Schoeberl's Netgen algorithm.
    The "HXT" algorithm is a new efficient and parallel
    reimplementation of the Delaunay algorithm.
    The "MMG3D" algorithm (experimental) can generate
    anisotropic tetrahedralizations.

    Parameters
    --------------
    mesh : trimesh.Trimesh
      Surface mesh of input geometry
    file_name : str or None
      Location to save output, in .msh (gmsh) or .bdf (Nastran) format
    max_element : float or None
      Maximum length of an element in the volume mesh
    mesher_id : int
      3D unstructured algorithms:
      1: Delaunay, 4: Frontal, 7: MMG3D, 10: HXT

    Returns
    ------------
    data : None or bytes
      MSH data, only returned if file_name is None
    """
    # check mesher selection
    if mesher_id not in [1, 4, 7, 10]:
        raise ValueError('unavailable mesher selected!')
    else:
        mesher_id = int(mesher_id)

    # set max element length to a best guess if not specified
    if max_element is None:
        max_element = np.sqrt(np.mean(mesh.area_faces))

    if file_name is not None:
        # check extensions to make sure it is supported format
        if not any(file_name.lower().endswith(e)
                   for e in ['.bdf', '.msh', '.inp', '.diff', '.mesh']):
            raise ValueError(
                'Only Nastran (.bdf), Gmsh (.msh), Abaqus (*.inp), ' +
                'Diffpack (*.diff) and Inria Medit (*.mesh) formats ' +
                'are available!')

    # exports to disk for gmsh to read using a temp file
    mesh_file = tempfile.NamedTemporaryFile(suffix='.stl', delete=False)
    mesh_file.close()
    mesh.export(mesh_file.name)

    # starts Gmsh Python API script
    gmsh.initialize()
    gmsh.option.setNumber("General.Terminal", 1)
    gmsh.model.add('Nastran_stl')

    gmsh.merge(mesh_file.name)
    dimtag = gmsh.model.getEntities()[0]
    dim = dimtag[0]
    tag = dimtag[1]

    surf_loop = gmsh.model.geo.addSurfaceLoop([tag])
    gmsh.model.geo.addVolume([surf_loop])
    gmsh.model.geo.synchronize()

    # We can then generate a 3D mesh...
    gmsh.option.setNumber("Mesh.Algorithm3D", mesher_id)
    gmsh.option.setNumber("Mesh.CharacteristicLengthMax", max_element)
    gmsh.model.mesh.generate(3)

    dimtag2 = gmsh.model.getEntities()[1]
    dim2 = dimtag2[0]
    tag2 = dimtag2[1]
    p2 = gmsh.model.addPhysicalGroup(dim2, [tag2])
    gmsh.model.setPhysicalName(dim, p2, 'Nastran_bdf')

    data = None

    # if file name is None, return msh data using a tempfile
    if file_name is None:
        out_data = tempfile.NamedTemporaryFile(suffix='.msh',
                                               delete=False)
        # windows gets mad if two processes try to open the same file
        out_data.close()
        gmsh.write(out_data.name)
        with open(out_data.name, 'rb') as f:
            data = f.read()
    else:
        gmsh.write(file_name)

    # close up shop
    gmsh.finalize()

    return data
python
{ "resource": "" }
q23296
local_voxelize
train
def local_voxelize(mesh, point, pitch, radius, fill=True, **kwargs): """ Voxelize a mesh in the region of a cube around a point. When fill=True, uses proximity.contains to fill the resulting voxels so may be meaningless for non-watertight meshes. Useful to reduce memory cost for small values of pitch as opposed to global voxelization. Parameters ----------- mesh : trimesh.Trimesh Source geometry point : (3, ) float Point in space to voxelize around pitch : float Side length of a single voxel cube radius : int Number of voxel cubes to return in each direction. kwargs : parameters to pass to voxelize_subdivide Returns ----------- voxels : (m, m, m) bool Array of local voxels where m=2*radius+1 origin_position : (3,) float Position of the voxel grid origin in space """ from scipy import ndimage # make sure point is correct type/shape point = np.asanyarray(point, dtype=np.float64).reshape(3) # this is a gotcha- radius sounds a lot like it should be in # float model space, not int voxel space so check if not isinstance(radius, int): raise ValueError('radius needs to be an integer number of cubes!') # Bounds of region bounds = np.concatenate((point - (radius + 0.5) * pitch, point + (radius + 0.5) * pitch)) # faces that intersect axis aligned bounding box faces = list(mesh.triangles_tree.intersection(bounds)) # didn't hit anything so exit if len(faces) == 0: return np.array([], dtype=np.bool), np.zeros(3) local = mesh.submesh([[f] for f in faces], append=True) # Translate mesh so point is at 0,0,0 local.apply_translation(-point) sparse, origin = voxelize_subdivide(local, pitch, **kwargs) matrix = sparse_to_matrix(sparse) # Find voxel index for point center = np.round(-origin / pitch).astype(np.int64) # pad matrix if necessary prepad = np.maximum(radius - center, 0) postpad = np.maximum(center + radius + 1 - matrix.shape, 0) matrix = np.pad(matrix, np.stack((prepad, postpad), axis=-1), mode='constant') center += prepad # Extract voxels within the bounding box voxels = matrix[center[0] - radius:center[0] + radius + 1, center[1] - radius:center[1] + radius + 1, center[2] - radius:center[2] + radius + 1] local_origin = point - radius * pitch # origin of local voxels # Fill internal regions if fill: regions, n = ndimage.measurements.label(~voxels) distance = ndimage.morphology.distance_transform_cdt(~voxels) representatives = [np.unravel_index((distance * (regions == i)).argmax(), distance.shape) for i in range(1, n + 1)] contains = mesh.contains( np.asarray(representatives) * pitch + local_origin) where = np.where(contains)[0] + 1 # use in1d vs isin for older numpy versions internal = np.in1d(regions.flatten(), where).reshape(regions.shape) voxels = np.logical_or(voxels, internal) return voxels, local_origin
python
{ "resource": "" }
q23297
voxelize_ray
train
def voxelize_ray(mesh, pitch, per_cell=[2, 2], **kwargs): """ Voxelize a mesh using ray queries. Parameters ------------- mesh : Trimesh object Mesh to be voxelized pitch : float Length of voxel cube per_cell : (2,) int How many ray queries to make per cell Returns ------------- voxels : (n, 3) int Voxel positions origin : (3, ) int Origin of voxels """ # how many rays per cell per_cell = np.array(per_cell).astype(np.int).reshape(2) # edge length of cube voxels pitch = float(pitch) # create the ray origins in a grid bounds = mesh.bounds[:, :2].copy() # offset start so we get the requested number per cell bounds[0] += pitch / (1.0 + per_cell) # offset end so arange doesn't short us bounds[1] += pitch # on X we are doing multiple rays per voxel step step = pitch / per_cell # 2D grid ray_ori = util.grid_arange(bounds, step=step) # a Z position below the mesh z = np.ones(len(ray_ori)) * (mesh.bounds[0][2] - pitch) ray_ori = np.column_stack((ray_ori, z)) # all rays are along positive Z ray_dir = np.ones_like(ray_ori) * [0, 0, 1] # if you have pyembree this should be decently fast hits = mesh.ray.intersects_location(ray_ori, ray_dir)[0] # just convert hit locations to integer positions voxels = np.round(hits / pitch).astype(np.int64) # offset voxels by min, so matrix isn't huge origin = voxels.min(axis=0) voxels -= origin return voxels, origin
python
{ "resource": "" }
q23298
fill_voxelization
train
def fill_voxelization(occupied): """ Given a sparse surface voxelization, fill in between columns. Parameters -------------- occupied: (n, 3) int, location of filled cells Returns -------------- filled: (m, 3) int, location of filled cells """ # validate inputs occupied = np.asanyarray(occupied, dtype=np.int64) if not util.is_shape(occupied, (-1, 3)): raise ValueError('incorrect shape') # create grid and mark inner voxels max_value = occupied.max() + 3 grid = np.zeros((max_value, max_value, max_value), dtype=np.int64) voxels_sparse = np.add(occupied, 1) grid.__setitem__(tuple(voxels_sparse.T), 1) for i in range(max_value): check_dir2 = False for j in range(0, max_value - 1): idx = [] # find transitions first # transition positions are from 0 to 1 and from 1 to 0 eq = np.equal(grid[i, j, :-1], grid[i, j, 1:]) idx = np.where(np.logical_not(eq))[0] + 1 c = len(idx) check_dir2 = (c % 4) > 0 and c > 4 if c < 4: continue for s in range(0, c - c % 4, 4): grid[i, j, idx[s]:idx[s + 3]] = 1 if not check_dir2: continue # check another direction for robustness for k in range(0, max_value - 1): idx = [] # find transitions first eq = np.equal(grid[i, :-1, k], grid[i, 1:, k]) idx = np.where(np.logical_not(eq))[0] + 1 c = len(idx) if c < 4: continue for s in range(0, c - c % 4, 4): grid[i, idx[s]:idx[s + 3], k] = 1 # generate new voxels idx = np.where(grid == 1) filled = np.array([[idx[0][i] - 1, idx[1][i] - 1, idx[2][i] - 1] for i in range(len(idx[0]))]) return filled
python
{ "resource": "" }
q23299
multibox
train
def multibox(centers, pitch, colors=None): """ Return a Trimesh object with a box at every center. Doesn't do anything nice or fancy. Parameters ----------- centers: (n,3) float, center of boxes that are occupied pitch: float, the edge length of a voxel colors: (3,) or (4,) or (n,3) or (n, 4) float, color of boxes Returns --------- rough: Trimesh object representing inputs """ from . import primitives from .base import Trimesh b = primitives.Box(extents=[pitch, pitch, pitch]) v = np.tile(centers, (1, len(b.vertices))).reshape((-1, 3)) v += np.tile(b.vertices, (len(centers), 1)) f = np.tile(b.faces, (len(centers), 1)) f += np.tile(np.arange(len(centers)) * len(b.vertices), (len(b.faces), 1)).T.reshape((-1, 1)) face_colors = None if colors is not None: colors = np.asarray(colors) if colors.ndim == 1: colors = colors[None].repeat(len(centers), axis=0) if colors.ndim == 2 and len(colors) == len(centers): face_colors = colors.repeat(12, axis=0) mesh = Trimesh(vertices=v, faces=f, face_colors=face_colors) return mesh
python
{ "resource": "" }