| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q23300
|
boolean_sparse
|
train
|
def boolean_sparse(a, b, operation=np.logical_and):
"""
Find common rows between two arrays very quickly
using 3D boolean sparse matrices.
Parameters
-----------
a: (n, d) int, coordinates in space
b: (m, d) int, coordinates in space
operation: numpy operation function, e.g.:
np.logical_and
np.logical_or
Returns
-----------
coords: (q, d) int, coordinates in space
"""
# 3D sparse arrays, using wrapped scipy.sparse
# pip install sparse
import sparse
# find the bounding box of both arrays
extrema = np.array([a.min(axis=0),
a.max(axis=0),
b.min(axis=0),
b.max(axis=0)])
origin = extrema.min(axis=0) - 1
size = tuple(extrema.ptp(axis=0) + 2)
# put nearby voxel arrays into same shape sparse array
sp_a = sparse.COO((a - origin).T,
data=np.ones(len(a), dtype=bool),
shape=size)
sp_b = sparse.COO((b - origin).T,
data=np.ones(len(b), dtype=bool),
shape=size)
# apply the logical operation
# get a sparse matrix out
applied = operation(sp_a, sp_b)
# reconstruct the original coordinates
coords = np.column_stack(applied.coords) + origin
return coords
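# Usage sketch (illustrative, not part of the original function;
# assumes `pip install sparse` and `import numpy as np`):
# In [1]: a = np.array([[0, 0, 0], [1, 2, 3]])
# In [2]: b = np.array([[1, 2, 3], [7, 8, 9]])
# In [3]: boolean_sparse(a, b)
# Out[3]: array([[1, 2, 3]])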
|
python
|
{
"resource": ""
}
|
q23301
|
VoxelBase.marching_cubes
|
train
|
def marching_cubes(self):
"""
A marching cubes Trimesh representation of the voxels.
No effort was made to clean or smooth the result in any way;
it is merely the result of applying the scikit-image
measure.marching_cubes function to self.matrix.
Returns
---------
meshed: Trimesh object representing the current voxel
object, as returned by marching cubes algorithm.
"""
meshed = matrix_to_marching_cubes(matrix=self.matrix,
pitch=self.pitch,
origin=self.origin)
return meshed
|
python
|
{
"resource": ""
}
|
q23302
|
VoxelBase.points
|
train
|
def points(self):
"""
The center of each filled cell as a list of points.
Returns
----------
points: (self.filled, 3) float, list of points
"""
points = matrix_to_points(matrix=self.matrix,
pitch=self.pitch,
origin=self.origin)
return points
|
python
|
{
"resource": ""
}
|
q23303
|
VoxelBase.point_to_index
|
train
|
def point_to_index(self, point):
"""
Convert a point to an index in the matrix array.
Parameters
----------
point: (3,) float, point in space
Returns
---------
index: (3,) int tuple, index in self.matrix
"""
indices = points_to_indices(points=[point],
pitch=self.pitch,
origin=self.origin)
index = tuple(indices[0])
return index
|
python
|
{
"resource": ""
}
|
q23304
|
VoxelBase.is_filled
|
train
|
def is_filled(self, point):
"""
Query a point to see if the voxel cell it lies in is filled or not.
Parameters
----------
point: (3,) float, point in space
Returns
---------
is_filled: bool, is cell occupied or not
"""
index = self.point_to_index(point)
in_range = (np.array(index) < np.array(self.shape)).all()
if in_range:
is_filled = self.matrix[index]
else:
is_filled = False
return is_filled
|
python
|
{
"resource": ""
}
|
q23305
|
VoxelMesh.sparse_surface
|
train
|
def sparse_surface(self):
"""
Filled cells on the surface of the mesh.
Returns
----------------
voxels: (n, 3) int, filled cells on mesh surface
"""
if self._method == 'ray':
func = voxelize_ray
elif self._method == 'subdivide':
func = voxelize_subdivide
else:
raise ValueError('voxelization method incorrect')
voxels, origin = func(
mesh=self._data['mesh'],
pitch=self._data['pitch'],
max_iter=self._data['max_iter'][0])
self._cache['origin'] = origin
return voxels
|
python
|
{
"resource": ""
}
|
q23306
|
simulated_brick
|
train
|
def simulated_brick(face_count, extents, noise, max_iter=10):
"""
Produce a mesh that is a rectangular solid with noise
with a random transform.
Parameters
-------------
face_count : int
Approximate number of faces desired
extents : (3,) float
Dimensions of brick
noise : float
Magnitude of vertex noise to apply
max_iter : int
Maximum number of subdivision passes
"""
# create the mesh as a simple box
mesh = trimesh.creation.box(extents=extents)
# add some systematic error pre-tessellation
mesh.vertices[0] += mesh.vertex_normals[0] + (noise * 2)
# subdivide until we have more faces than we want
for i in range(max_iter):
if len(mesh.faces) > face_count:
break
mesh = mesh.subdivide()
# apply tessellation and random noise
mesh = mesh.permutate.noise(noise)
# random rotation and translation
transform = trimesh.transformations.random_rotation_matrix()
transform[:3, 3] = (np.random.random(3) - .5) * 1000
mesh.apply_transform(transform)
return mesh
|
python
|
{
"resource": ""
}
|
q23307
|
unitize
|
train
|
def unitize(vectors,
check_valid=False,
threshold=None):
"""
Unitize a vector or an array of row vectors.
Parameters
---------
vectors : (n,m) or (j) float
Vector or vectors to be unitized
check_valid : bool
If set, will return mask of nonzero vectors
threshold : float
Cutoff for a value to be considered zero.
Returns
---------
unit : (n,m) or (j) float
Input vectors but unitized
valid : (n,) bool or bool
Mask of nonzero vectors returned if `check_valid`
"""
# make sure we have a numpy array
vectors = np.asanyarray(vectors)
# allow user to set zero threshold
if threshold is None:
threshold = TOL_ZERO
if len(vectors.shape) == 2:
# for (m, d) arrays take the per-row unit vector
# using sqrt and avoiding exponents is slightly faster
# also dot with ones is faster than .sum(axis=1)
norm = np.sqrt(np.dot(vectors * vectors,
[1.0] * vectors.shape[1]))
# non-zero norms
valid = norm > threshold
# in-place reciprocal of nonzero norms
norm[valid] **= -1
# tile reciprocal of norm
tiled = np.tile(norm, (vectors.shape[1], 1)).T
# multiply by reciprocal of norm
unit = vectors * tiled
elif len(vectors.shape) == 1:
# treat 1D arrays as a single vector
norm = np.sqrt((vectors * vectors).sum())
valid = norm > threshold
if valid:
unit = vectors / norm
else:
unit = vectors.copy()
else:
raise ValueError('vectors must be (n, ) or (n, d)!')
if check_valid:
return unit[valid], valid
return unit
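# Usage sketch (illustrative, not from the trimesh source):
# In [1]: trimesh.util.unitize([[3, 4], [0, 0]], check_valid=True)
# Out[1]: (array([[0.6, 0.8]]), array([ True, False]))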
|
python
|
{
"resource": ""
}
|
q23308
|
euclidean
|
train
|
def euclidean(a, b):
"""
Euclidean distance between vectors a and b.
Parameters
------------
a : (n,) float
First vector
b : (n,) float
Second vector
Returns
------------
distance : float
Euclidean distance between A and B
"""
a = np.asanyarray(a, dtype=np.float64)
b = np.asanyarray(b, dtype=np.float64)
return np.sqrt(((a - b) ** 2).sum())
|
python
|
{
"resource": ""
}
|
q23309
|
is_none
|
train
|
def is_none(obj):
"""
Check to see if an object is None or not.
Handles the case of np.array([None]) as well.
Parameters
-------------
obj : object
Any object type to be checked
Returns
-------------
is_none : bool
True if obj is None or numpy None-like
"""
if obj is None:
return True
if (is_sequence(obj) and
len(obj) == 1 and
obj[0] is None):
return True
return False
|
python
|
{
"resource": ""
}
|
q23310
|
is_sequence
|
train
|
def is_sequence(obj):
"""
Check if an object is a sequence or not.
Parameters
-------------
obj : object
Any object type to be checked
Returns
-------------
is_sequence : bool
True if object is sequence
"""
seq = (not hasattr(obj, "strip") and
hasattr(obj, "__getitem__") or
hasattr(obj, "__iter__"))
# check to make sure it is not a set, string, or dictionary
seq = seq and all(not isinstance(obj, i) for i in (dict,
set,
basestring))
# PointCloud objects can look like an array but are not
seq = seq and type(obj).__name__ not in ['PointCloud']
# numpy sometimes returns objects that are single float64 values
# but sure look like sequences, so we check the shape
if hasattr(obj, 'shape'):
seq = seq and obj.shape != ()
return seq
|
python
|
{
"resource": ""
}
|
q23311
|
is_shape
|
train
|
def is_shape(obj, shape):
"""
Compare the shape of a numpy.ndarray to a target shape,
with any value less than zero being considered a wildcard.
Note that if a list-like object is passed that is not a numpy
array, this function will not convert it and will return False.
Parameters
---------
obj : np.ndarray
Array to check the shape on
shape : list or tuple
Any negative term will be considered a wildcard
Any tuple term will be evaluated as an OR
Returns
---------
shape_ok: bool, True if shape of obj matches query shape
Examples
------------------------
In [1]: a = np.random.random((100, 3))
In [2]: a.shape
Out[2]: (100, 3)
In [3]: trimesh.util.is_shape(a, (-1, 3))
Out[3]: True
In [4]: trimesh.util.is_shape(a, (-1, 3, 5))
Out[4]: False
In [5]: trimesh.util.is_shape(a, (100, -1))
Out[5]: True
In [6]: trimesh.util.is_shape(a, (-1, (3, 4)))
Out[6]: True
In [7]: trimesh.util.is_shape(a, (-1, (4, 5)))
Out[7]: False
"""
# if the obj.shape is different length than
# the goal shape it means they have different number
# of dimensions and thus the obj is not the query shape
if (not hasattr(obj, 'shape') or
len(obj.shape) != len(shape)):
return False
# loop through each integer of the two shapes
# multiple values are sequences
# wildcards are less than zero (i.e. -1)
for i, target in zip(obj.shape, shape):
# check if current field has multiple acceptable values
if is_sequence(target):
if i in target:
# obj shape is in the accepted values
continue
else:
return False
# check if current field is a wildcard
if target < 0:
if i == 0:
# if a dimension is 0, we don't allow
# that to match to a wildcard
# it would have to be explicitly called out as 0
return False
else:
continue
# since we have a single target and a single value,
# if they are not equal we have an answer
if target != i:
return False
# since none of the checks failed the obj.shape
# matches the pattern
return True
|
python
|
{
"resource": ""
}
|
q23312
|
make_sequence
|
train
|
def make_sequence(obj):
"""
Given an object, if it is a sequence return it; otherwise
wrap it in a length 1 sequence and return that.
Useful for wrapping functions which sometimes return single
objects and other times return lists of objects.
Parameters
--------------
obj : object
An object to be made a sequence
Returns
--------------
as_sequence : (n,) sequence
Contains input value
"""
if is_sequence(obj):
return np.array(list(obj))
else:
return np.array([obj])
|
python
|
{
"resource": ""
}
|
q23313
|
vector_hemisphere
|
train
|
def vector_hemisphere(vectors, return_sign=False):
"""
For a set of 2D or 3D vectors alter the sign so they
are all in the upper hemisphere.
If the vector lies on the plane all vectors with negative Y
will be reversed.
If the vector has a zero Z and Y value vectors with a
negative X value will be reversed.
Parameters
----------
vectors : (n,3) float
Input vectors
return_sign : bool
Return the sign mask or not
Returns
----------
oriented: (n, 3) float
Vectors with same magnitude as source
but possibly reversed to ensure all vectors
are in the same hemisphere.
sign : (n,) float
Sign applied to each vector, returned if return_sign is set
"""
# vectors as numpy array
vectors = np.asanyarray(vectors, dtype=np.float64)
if is_shape(vectors, (-1, 2)):
# 2D vector case
# check the Y value and reverse vector
# direction if negative.
negative = vectors < -TOL_ZERO
zero = np.logical_not(
np.logical_or(negative, vectors > TOL_ZERO))
signs = np.ones(len(vectors), dtype=np.float64)
# negative Y values are reversed
signs[negative[:, 1]] = -1.0
# zero Y and negative X are reversed
signs[np.logical_and(zero[:, 1], negative[:, 0])] = -1.0
elif is_shape(vectors, (-1, 3)):
# 3D vector case
negative = vectors < -TOL_ZERO
zero = np.logical_not(
np.logical_or(negative, vectors > TOL_ZERO))
# move all negative Z to positive
# then for zero Z vectors, move all negative Y to positive
# then for zero Y vectors, move all negative X to positive
signs = np.ones(len(vectors), dtype=np.float64)
# all vectors with negative Z values
signs[negative[:, 2]] = -1.0
# all on-plane vectors with negative Y values
signs[np.logical_and(zero[:, 2], negative[:, 1])] = -1.0
# all on-plane vectors with zero Y values
# and negative X values
signs[np.logical_and(np.logical_and(zero[:, 2],
zero[:, 1]),
negative[:, 0])] = -1.0
else:
raise ValueError('vectors must be (n, 2) or (n, 3)!')
# apply the signs to the vectors
oriented = vectors * signs.reshape((-1, 1))
if return_sign:
return oriented, signs
return oriented
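# Usage sketch (illustrative, not from the trimesh source):
# In [1]: trimesh.util.vector_hemisphere([[0, 0, -1], [0, 1, 0]], return_sign=True)
# Out[1]: (array([[0., 0., 1.], [0., 1., 0.]]), array([-1., 1.]))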
|
python
|
{
"resource": ""
}
|
q23314
|
pairwise
|
train
|
def pairwise(iterable):
"""
For an iterable, group values into pairs.
Parameters
-----------
iterable : (m, ) list
A sequence of values
Returns
-----------
pairs: (n, 2)
Pairs of sequential values
Example
-----------
In [1]: data
Out[1]: [0, 1, 2, 3, 4, 5, 6]
In [2]: list(trimesh.util.pairwise(data))
Out[2]: [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)]
"""
# looping through a giant numpy array would be dumb
# so special case ndarrays and use numpy operations
if isinstance(iterable, np.ndarray):
iterable = iterable.reshape(-1)
stacked = np.column_stack((iterable, iterable))
pairs = stacked.reshape(-1)[1:-1].reshape((-1, 2))
return pairs
# if we have a normal iterable use itertools
import itertools
a, b = itertools.tee(iterable)
# pop the first element of the second item
next(b)
return zip(a, b)
|
python
|
{
"resource": ""
}
|
q23315
|
diagonal_dot
|
train
|
def diagonal_dot(a, b):
"""
Dot product by row of a and b.
There are a lot of ways to do this though
performance varies very widely. This method
uses the dot product to sum the row and avoids
function calls if at all possible.
Comparing performance of some equivalent versions:
```
In [1]: import numpy as np; import trimesh
In [2]: a = np.random.random((10000, 3))
In [3]: b = np.random.random((10000, 3))
In [4]: %timeit (a * b).sum(axis=1)
1000 loops, best of 3: 181 us per loop
In [5]: %timeit np.einsum('ij,ij->i', a, b)
10000 loops, best of 3: 62.7 us per loop
In [6]: %timeit np.diag(np.dot(a, b.T))
1 loop, best of 3: 429 ms per loop
In [7]: %timeit np.dot(a * b, np.ones(a.shape[1]))
10000 loops, best of 3: 61.3 us per loop
In [8]: %timeit trimesh.util.diagonal_dot(a, b)
10000 loops, best of 3: 55.2 us per loop
```
Parameters
------------
a : (m, d) float
First array
b : (m, d) float
Second array
Returns
-------------
result : (m,) float
Dot product of each row
"""
# make sure `a` is numpy array
# doing it for `a` will force the multiplication to
# convert `b` if necessary and avoid function call otherwise
a = np.asanyarray(a)
# 3x faster than (a * b).sum(axis=1)
# avoiding np.ones saves 5-10% sometimes
result = np.dot(a * b, [1.0] * a.shape[1])
return result
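# Usage sketch (illustrative, not from the trimesh source):
# In [1]: trimesh.util.diagonal_dot([[1, 2, 3], [4, 5, 6]], [[1, 0, 0], [0, 1, 0]])
# Out[1]: array([1., 5.])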
|
python
|
{
"resource": ""
}
|
q23316
|
grid_linspace
|
train
|
def grid_linspace(bounds, count):
"""
Return a grid spaced inside a bounding box with edges spaced using np.linspace.
Parameters
---------
bounds: (2,dimension) list of [[min x, min y, etc], [max x, max y, etc]]
count: int, or (dimension,) int, number of samples per side
Returns
-------
grid: (n, dimension) float, points in the specified bounds
"""
bounds = np.asanyarray(bounds, dtype=np.float64)
if len(bounds) != 2:
raise ValueError('bounds must be (2, dimension)!')
count = np.asanyarray(count, dtype=np.int64)
if count.shape == ():
count = np.tile(count, bounds.shape[1])
grid_elements = [np.linspace(*b, num=c) for b, c in zip(bounds.T, count)]
grid = np.vstack(np.meshgrid(*grid_elements)
).reshape(bounds.shape[1], -1).T
return grid
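# Usage sketch (illustrative, not from the trimesh source):
# In [1]: trimesh.util.grid_linspace([[0, 0], [1, 1]], 2)
# Out[1]: array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])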
|
python
|
{
"resource": ""
}
|
q23317
|
multi_dict
|
train
|
def multi_dict(pairs):
"""
Given a set of key value pairs, create a dictionary.
If a key occurs multiple times, collect the values into a list.
Can be called like the regular dict(pairs) constructor
Parameters
----------
pairs: (n,2) array of key, value pairs
Returns
----------
result: dict, with all values stored (rather than last with regular dict)
"""
result = collections.defaultdict(list)
for k, v in pairs:
result[k].append(v)
return result
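# Usage sketch (illustrative, not from the trimesh source):
# In [1]: dict(trimesh.util.multi_dict([('a', 1), ('a', 2), ('b', 3)]))
# Out[1]: {'a': [1, 2], 'b': [3]}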
|
python
|
{
"resource": ""
}
|
q23318
|
distance_to_end
|
train
|
def distance_to_end(file_obj):
"""
For an open file object, return the number of bytes to the end.
Parameters
----------
file_obj: open file-like object
Returns
----------
distance: int, bytes to end of file
"""
position_current = file_obj.tell()
file_obj.seek(0, 2)
position_end = file_obj.tell()
file_obj.seek(position_current)
distance = position_end - position_current
return distance
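# Usage sketch (illustrative, not from the trimesh source):
# In [1]: f = trimesh.util.wrap_as_stream(b'abcdef')
# In [2]: f.read(2)
# Out[2]: b'ab'
# In [3]: trimesh.util.distance_to_end(f)
# Out[3]: 4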
|
python
|
{
"resource": ""
}
|
q23319
|
decimal_to_digits
|
train
|
def decimal_to_digits(decimal, min_digits=None):
"""
Return the number of digits to the first nonzero decimal.
Parameters
-----------
decimal: float
min_digits: int, minimum number of digits to return
Returns
-----------
digits: int, number of digits to the first nonzero decimal
"""
digits = abs(int(np.log10(decimal)))
if min_digits is not None:
digits = np.clip(digits, min_digits, 20)
return digits
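# Usage sketch (illustrative, not from the trimesh source):
# In [1]: trimesh.util.decimal_to_digits(0.001)
# Out[1]: 3
# In [2]: trimesh.util.decimal_to_digits(0.1, min_digits=3)
# Out[2]: 3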
|
python
|
{
"resource": ""
}
|
q23320
|
hash_file
|
train
|
def hash_file(file_obj,
hash_function=hashlib.md5):
"""
Get the hash of an open file-like object.
Parameters
---------
file_obj: file like object
hash_function: function to use to hash data
Returns
---------
hashed: str, hex version of result
"""
# before we read the file data save the current position
# in the file (which is probably 0)
file_position = file_obj.tell()
# create an instance of the hash object
hasher = hash_function()
# read all data from the file into the hasher
hasher.update(file_obj.read())
# get a hex version of the result
hashed = hasher.hexdigest()
# return the file object to its original position
file_obj.seek(file_position)
return hashed
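# Usage sketch (illustrative, not from the trimesh source):
# In [1]: from io import BytesIO
# In [2]: trimesh.util.hash_file(BytesIO(b'hello'))
# Out[2]: '5d41402abc4b2a76b9719d911017c592'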
|
python
|
{
"resource": ""
}
|
q23321
|
md5_object
|
train
|
def md5_object(obj):
"""
If an object is hashable, return the string of the MD5.
Parameters
-----------
obj: object
Returns
----------
md5: str, MD5 hash
"""
hasher = hashlib.md5()
if isinstance(obj, basestring) and PY3:
# in python3 convert strings to bytes before hashing
hasher.update(obj.encode('utf-8'))
else:
hasher.update(obj)
md5 = hasher.hexdigest()
return md5
|
python
|
{
"resource": ""
}
|
q23322
|
attach_to_log
|
train
|
def attach_to_log(level=logging.DEBUG,
handler=None,
loggers=None,
colors=True,
capture_warnings=True,
blacklist=None):
"""
Attach a stream handler to all loggers.
Parameters
------------
level: logging level
handler: log handler object
loggers: list of loggers to attach to
if None, will try to attach to all available
colors: bool, if True try to use colorlog formatter
capture_warnings: bool, if True capture messages from the warnings module
blacklist: list of str, names of loggers NOT to attach to
"""
if blacklist is None:
blacklist = ['TerminalIPythonApp',
'PYREADLINE',
'pyembree',
'shapely.geos',
'shapely.speedups._speedups',
'parso.cache',
'parso.python.diff']
# make sure we log warnings from the warnings module
logging.captureWarnings(capture_warnings)
formatter = logging.Formatter(
"[%(asctime)s] %(levelname)-7s (%(filename)s:%(lineno)3s) %(message)s",
"%Y-%m-%d %H:%M:%S")
if colors:
try:
from colorlog import ColoredFormatter
formatter = ColoredFormatter(
("%(log_color)s%(levelname)-8s%(reset)s " +
"%(filename)17s:%(lineno)-4s %(blue)4s%(message)s"),
datefmt=None,
reset=True,
log_colors={'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red'})
except ImportError:
pass
# if no handler was passed, use a StreamHandler
if handler is None:
handler = logging.StreamHandler()
# add the formatters and set the level
handler.setFormatter(formatter)
handler.setLevel(level)
# if nothing passed use all available loggers
if loggers is None:
# de-duplicate loggers using a set
loggers = set(logging.Logger.manager.loggerDict.values())
# add the warnings logging
loggers.add(logging.getLogger('py.warnings'))
# disable pyembree warnings
logging.getLogger('pyembree').disabled = True
# loop through all available loggers
for logger in loggers:
# skip loggers on the blacklist
if (logger.__class__.__name__ != 'Logger' or
logger.name in blacklist):
continue
logger.addHandler(handler)
logger.setLevel(level)
# set nicer numpy print options
np.set_printoptions(precision=5, suppress=True)
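# Usage sketch (illustrative, not from the trimesh source):
# In [1]: import logging, trimesh
# In [2]: trimesh.util.attach_to_log(level=logging.INFO)
# subsequent trimesh log messages now stream to the console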
|
python
|
{
"resource": ""
}
|
q23323
|
stack_lines
|
train
|
def stack_lines(indices):
"""
Stack a list of values that represent a polyline into
individual line segments with duplicated consecutive values.
Parameters
----------
indices: sequence of items
Returns
---------
stacked: (n,2) set of items
In [1]: trimesh.util.stack_lines([0,1,2])
Out[1]:
array([[0, 1],
[1, 2]])
In [2]: trimesh.util.stack_lines([0,1,2,4,5])
Out[2]:
array([[0, 1],
[1, 2],
[2, 4],
[4, 5]])
In [3]: trimesh.util.stack_lines([[0,0],[1,1],[2,2], [3,3]])
Out[3]:
array([[0, 0],
[1, 1],
[1, 1],
[2, 2],
[2, 2],
[3, 3]])
"""
indices = np.asanyarray(indices)
if is_sequence(indices[0]):
shape = (-1, len(indices[0]))
else:
shape = (-1, 2)
return np.column_stack((indices[:-1],
indices[1:])).reshape(shape)
|
python
|
{
"resource": ""
}
|
q23324
|
append_faces
|
train
|
def append_faces(vertices_seq, faces_seq):
"""
Given a sequence of zero-indexed faces and vertices
combine them into a single array of faces and
a single array of vertices.
Parameters
-----------
vertices_seq : (n, ) sequence of (m, d) float
Multiple (m, d) float arrays of vertices
faces_seq : (n, ) sequence of (p, j) int
Zero indexed faces for matching vertices
Returns
----------
vertices : (i, d) float
Points in space
faces : (j, 3) int
Reference vertex indices
"""
# the length of each vertex array
vertices_len = np.array([len(i) for i in vertices_seq])
# how much each group of faces needs to be offset
face_offset = np.append(0, np.cumsum(vertices_len)[:-1])
new_faces = []
for offset, faces in zip(face_offset, faces_seq):
if len(faces) == 0:
continue
# apply the index offset
new_faces.append(faces + offset)
# stack to clean (n, 3) float
vertices = vstack_empty(vertices_seq)
# stack to clean (n, 3) int
faces = vstack_empty(new_faces)
return vertices, faces
|
python
|
{
"resource": ""
}
|
q23325
|
array_to_string
|
train
|
def array_to_string(array,
col_delim=' ',
row_delim='\n',
digits=8,
value_format='{}'):
"""
Convert a 1 or 2D array into a string with a specified number
of digits and delimiter. The reason this exists is that the
basic numpy array to string conversions are surprisingly bad.
Parameters
----------
array : (n,) or (n, d) float or int
Data to be converted
If shape is (n,) only column delimiter will be used
col_delim : str
What string should separate values in a column
row_delim : str
What string should separate values in a row
digits : int
How many digits should floating point numbers include
value_format : str
Format string for each value or sequence of values
If multiple values per value_format it must divide
into array evenly.
Returns
----------
formatted : str
String representation of original array
"""
# convert inputs to correct types
array = np.asanyarray(array)
digits = int(digits)
row_delim = str(row_delim)
col_delim = str(col_delim)
value_format = str(value_format)
# abort for arrays with more than 2 dimensions
if len(array.shape) > 2:
raise ValueError('conversion only works on 1D/2D arrays not %s!'
% str(array.shape))
# allow a value to be repeated in a value format
repeats = value_format.count('{}')
if array.dtype.kind == 'i':
# integer types don't need a specified precision
format_str = value_format + col_delim
elif array.dtype.kind == 'f':
# add the digits formatting to floats
format_str = value_format.replace(
'{}', '{:.' + str(digits) + 'f}') + col_delim
else:
raise ValueError('dtype %s not convertible!'
% array.dtype.name)
# length of extra delimiters at the end
end_junk = len(col_delim)
# if we have a 2D array add a row delimiter
if len(array.shape) == 2:
format_str *= array.shape[1]
# cut off the last column delimiter and add a row delimiter
format_str = format_str[:-len(col_delim)] + row_delim
end_junk = len(row_delim)
# expand format string to whole array
format_str *= len(array)
# if an array is repeated in the value format
# do the shaping here so we don't need to specify indexes
shaped = np.tile(array.reshape((-1, 1)),
(1, repeats)).reshape(-1)
# run the format operation and remove the extra delimiters
formatted = format_str.format(*shaped)[:-end_junk]
return formatted
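# Usage sketch (illustrative, not from the trimesh source):
# In [1]: trimesh.util.array_to_string(np.arange(6).reshape((2, 3)))
# Out[1]: '0 1 2\n3 4 5'
# In [2]: trimesh.util.array_to_string([1.0, 2.0], digits=2)
# Out[2]: '1.00 2.00'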
|
python
|
{
"resource": ""
}
|
q23326
|
array_to_encoded
|
train
|
def array_to_encoded(array, dtype=None, encoding='base64'):
"""
Export a numpy array to a compact serializable dictionary.
Parameters
------------
array : array
Any numpy array
dtype : str or None
Optional dtype to encode array
encoding : str
'base64' or 'binary'
Returns
---------
encoded : dict
Has keys:
'dtype': str, of dtype
'shape': tuple of shape
'base64': str, base64 encoded string
"""
array = np.asanyarray(array)
shape = array.shape
# ravel also forces contiguous
flat = np.ravel(array)
if dtype is None:
dtype = array.dtype
encoded = {'dtype': np.dtype(dtype).str,
'shape': shape}
if encoding in ['base64', 'dict64']:
packed = base64.b64encode(flat.astype(dtype).tobytes())
if hasattr(packed, 'decode'):
packed = packed.decode('utf-8')
encoded['base64'] = packed
elif encoding == 'binary':
encoded['binary'] = array.tobytes(order='C')
else:
raise ValueError('encoding {} is not available!'.format(encoding))
return encoded
|
python
|
{
"resource": ""
}
|
q23327
|
decode_keys
|
train
|
def decode_keys(store, encoding='utf-8'):
"""
If a dictionary has keys that are bytes decode them to a str.
Parameters
---------
store : dict
Dictionary with data
Returns
---------
result : dict
Values are untouched but keys that were bytes
are decoded to str using the given encoding.
Example
-----------
In [1]: d
Out[1]: {1020: 'nah', b'hi': 'stuff'}
In [2]: trimesh.util.decode_keys(d)
Out[2]: {1020: 'nah', 'hi': 'stuff'}
"""
# take a static list of keys so the dict can be mutated safely
keys = list(store.keys())
for key in keys:
if hasattr(key, 'decode'):
decoded = key.decode(encoding)
if key != decoded:
store[key.decode(encoding)] = store[key]
store.pop(key)
return store
|
python
|
{
"resource": ""
}
|
q23328
|
encoded_to_array
|
train
|
def encoded_to_array(encoded):
"""
Turn a dictionary with base64 encoded strings back into a numpy array.
Parameters
------------
encoded : dict
Has keys:
dtype: string of dtype
shape: int tuple of shape
base64: base64 encoded string of flat array
binary: decode result coming from numpy.tostring
Returns
----------
array: numpy array
"""
if not isinstance(encoded, dict):
if is_sequence(encoded):
as_array = np.asanyarray(encoded)
return as_array
else:
raise ValueError('Unable to extract numpy array from input')
encoded = decode_keys(encoded)
dtype = np.dtype(encoded['dtype'])
if 'base64' in encoded:
array = np.frombuffer(base64.b64decode(encoded['base64']),
dtype)
elif 'binary' in encoded:
array = np.frombuffer(encoded['binary'],
dtype=dtype)
else:
raise ValueError('encoded must contain base64 or binary data!')
if 'shape' in encoded:
array = array.reshape(encoded['shape'])
return array
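# Round-trip sketch (illustrative, not from the trimesh source):
# In [1]: e = trimesh.util.array_to_encoded(np.arange(3, dtype=np.int64))
# In [2]: sorted(e.keys())
# Out[2]: ['base64', 'dtype', 'shape']
# In [3]: trimesh.util.encoded_to_array(e)
# Out[3]: array([0, 1, 2])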
|
python
|
{
"resource": ""
}
|
q23329
|
type_bases
|
train
|
def type_bases(obj, depth=4):
"""
Return the bases of the object passed.
"""
bases = collections.deque([list(obj.__class__.__bases__)])
for i in range(depth):
bases.append([i.__base__ for i in bases[-1] if i is not None])
try:
bases = np.hstack(bases)
except IndexError:
bases = []
# we do the hasattr as None/NoneType can be in the list of bases
bases = [i for i in bases if hasattr(i, '__name__')]
return np.array(bases)
|
python
|
{
"resource": ""
}
|
q23330
|
concatenate
|
train
|
def concatenate(a, b=None):
"""
Concatenate two or more meshes.
Parameters
----------
a: Trimesh object, or list of such
b: Trimesh object, or list of such
Returns
----------
result: Trimesh object containing concatenated mesh
"""
if b is None:
b = []
# stack meshes into flat list
meshes = np.append(a, b)
# extract the trimesh type to avoid a circular import
# and assert that both inputs are Trimesh objects
trimesh_type = type_named(meshes[0], 'Trimesh')
# append faces and vertices of meshes
vertices, faces = append_faces(
[m.vertices.copy() for m in meshes],
[m.faces.copy() for m in meshes])
# only save face normals if already calculated
face_normals = None
if all('face_normals' in m._cache for m in meshes):
face_normals = np.vstack([m.face_normals
for m in meshes])
# concatenate visuals
visual = meshes[0].visual.concatenate(
[m.visual for m in meshes[1:]])
# create the mesh object
mesh = trimesh_type(vertices=vertices,
faces=faces,
face_normals=face_normals,
visual=visual,
process=False)
return mesh
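# Usage sketch (illustrative, not from the trimesh source):
# In [1]: a = trimesh.creation.box()
# In [2]: b = trimesh.creation.icosphere()
# In [3]: c = trimesh.util.concatenate(a, b)
# In [4]: len(c.faces) == len(a.faces) + len(b.faces)
# Out[4]: True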
|
python
|
{
"resource": ""
}
|
q23331
|
submesh
|
train
|
def submesh(mesh,
faces_sequence,
only_watertight=False,
append=False):
"""
Return a subset of a mesh.
Parameters
----------
mesh : Trimesh
Source mesh to take geometry from
faces_sequence : sequence (p,) int
Indexes of mesh.faces
only_watertight : bool
Only return submeshes which are watertight.
append : bool
Return a single mesh which has the faces appended,
if this flag is set, only_watertight is ignored
Returns
---------
if append : Trimesh object
else list of Trimesh objects
"""
# evaluate generators so we can escape early
faces_sequence = list(faces_sequence)
if len(faces_sequence) == 0:
return []
# check to make sure we're not doing a whole bunch of work
# to deliver a subset which ends up as the whole mesh
if len(faces_sequence[0]) == len(mesh.faces):
all_faces = np.array_equal(np.sort(faces_sequence[0]),
np.arange(len(mesh.faces)))
if all_faces:
log.debug('entire mesh requested, returning copy')
return mesh.copy()
# avoid nuking the cache on the original mesh
original_faces = mesh.faces.view(np.ndarray)
original_vertices = mesh.vertices.view(np.ndarray)
faces = []
vertices = []
normals = []
visuals = []
# for reindexing faces
mask = np.arange(len(original_vertices))
for faces_index in faces_sequence:
# sanitize indices in case they are coming in as a set or tuple
faces_index = np.asanyarray(faces_index, dtype=np.int64)
if len(faces_index) == 0:
continue
faces_current = original_faces[faces_index]
unique = np.unique(faces_current.reshape(-1))
# redefine face indices from zero
mask[unique] = np.arange(len(unique))
normals.append(mesh.face_normals[faces_index])
faces.append(mask[faces_current])
vertices.append(original_vertices[unique])
visuals.append(mesh.visual.face_subset(faces_index))
# we use type(mesh) rather than importing Trimesh from base
# to avoid a circular import
trimesh_type = type_named(mesh, 'Trimesh')
if append:
if all(hasattr(i, 'concatenate')
for i in visuals):
visuals = np.array(visuals)
visual = visuals[0].concatenate(visuals[1:])
else:
visual = None
vertices, faces = append_faces(vertices, faces)
appended = trimesh_type(
vertices=vertices,
faces=faces,
face_normals=np.vstack(normals),
visual=visual,
process=False)
return appended
# generate a list of Trimesh objects
result = [trimesh_type(
vertices=v,
faces=f,
face_normals=n,
visual=c,
metadata=copy.deepcopy(mesh.metadata),
process=False) for v, f, n, c in zip(vertices,
faces,
normals,
visuals)]
result = np.array(result)
if len(result) > 0 and only_watertight:
# fill_holes will attempt a repair and returns the
# watertight status at the end of the repair attempt
watertight = np.array([i.fill_holes() and len(i.faces) >= 4
for i in result])
# remove unrepairable meshes
result = result[watertight]
return result
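# Usage sketch (illustrative, not from the trimesh source):
# In [1]: m = trimesh.creation.box()
# In [2]: s = trimesh.util.submesh(m, [[0, 1], [2, 3]])
# In [3]: [len(i.faces) for i in s]
# Out[3]: [2, 2]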
|
python
|
{
"resource": ""
}
|
q23332
|
jsonify
|
train
|
def jsonify(obj, **kwargs):
"""
A version of json.dumps that can handle numpy arrays
by creating a custom encoder for numpy dtypes.
Parameters
--------------
obj : JSON- serializable blob
**kwargs :
Passed to json.dumps
Returns
--------------
dumped : str
JSON dump of obj
"""
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
# will work for numpy.ndarrays
# as well as their int64/etc objects
if hasattr(obj, 'tolist'):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
# run the dumps using our encoder
dumped = json.dumps(obj, cls=NumpyEncoder, **kwargs)
return dumped
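# Usage sketch (illustrative, not from the trimesh source):
# In [1]: trimesh.util.jsonify({'a': np.arange(3)})
# Out[1]: '{"a": [0, 1, 2]}'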
|
python
|
{
"resource": ""
}
|
q23333
|
convert_like
|
train
|
def convert_like(item, like):
"""
Convert an item to have the dtype of another item
Parameters
----------
item: item to be converted
like: object with target dtype. If None, item is returned unmodified
Returns
--------
result: item, but in dtype of like
"""
if isinstance(like, np.ndarray):
return np.asanyarray(item, dtype=like.dtype)
if isinstance(item, like.__class__) or is_none(like):
return item
if (is_sequence(item) and
len(item) == 1 and
isinstance(item[0], like.__class__)):
return item[0]
item = like.__class__(item)
return item
|
python
|
{
"resource": ""
}
|
q23334
|
bounds_tree
|
train
|
def bounds_tree(bounds):
"""
Given a set of axis aligned bounds, create an r-tree for
broad-phase collision detection.
Parameters
---------
bounds: (n, dimension*2) list of non-interleaved bounds
for a 2D bounds tree:
[(minx, miny, maxx, maxy), ...]
Returns
---------
tree: Rtree object
"""
bounds = np.asanyarray(copy.deepcopy(bounds), dtype=np.float64)
if len(bounds.shape) != 2:
raise ValueError('Bounds must be (n,dimension*2)!')
dimension = bounds.shape[1]
if (dimension % 2) != 0:
raise ValueError('Bounds must be (n,dimension*2)!')
dimension = int(dimension / 2)
import rtree
# some versions of rtree screw up indexes on stream loading
# do a test here so we know if we are free to use stream loading
# or if we have to do a loop to insert things which is 5x slower
rtree_test = rtree.index.Index([(1564, [0, 0, 0, 10, 10, 10], None)],
properties=rtree.index.Property(dimension=3))
rtree_stream_ok = next(rtree_test.intersection([1, 1, 1, 2, 2, 2])) == 1564
properties = rtree.index.Property(dimension=dimension)
if rtree_stream_ok:
# stream load was verified working on import above
tree = rtree.index.Index(zip(np.arange(len(bounds)),
bounds,
[None] * len(bounds)),
properties=properties)
else:
# in some rtree versions stream loading goofs the index
log.warning('rtree stream loading broken! Try upgrading rtree!')
tree = rtree.index.Index(properties=properties)
for i, b in enumerate(bounds):
tree.insert(i, b)
return tree
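# Usage sketch (illustrative, not from the trimesh source):
# In [1]: tree = trimesh.util.bounds_tree([[0, 0, 1, 1], [10, 10, 11, 11]])
# In [2]: list(tree.intersection([0.5, 0.5, 2.0, 2.0]))
# Out[2]: [0]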
|
python
|
{
"resource": ""
}
|
q23335
|
wrap_as_stream
|
train
|
def wrap_as_stream(item):
"""
Wrap a string or bytes object as a file object.
Parameters
----------
item: str or bytes
Item to be wrapped
Returns
---------
wrapped: file-like object
"""
if not PY3:
return StringIO(item)
if isinstance(item, str):
return StringIO(item)
elif isinstance(item, bytes):
return BytesIO(item)
raise ValueError('{} is not wrappable!'.format(type(item).__name__))
|
python
|
{
"resource": ""
}
|
q23336
|
sigfig_round
|
train
|
def sigfig_round(values, sigfig=1):
"""
Round a single value to a specified number of significant figures.
Parameters
----------
values: float, value to be rounded
sigfig: int, number of significant figures to reduce to
Returns
----------
rounded: values, but rounded to the specified number of significant figures
Examples
----------
In [1]: trimesh.util.sigfig_round(-232453.00014045456, 1)
Out[1]: -200000.0
In [2]: trimesh.util.sigfig_round(.00014045456, 1)
Out[2]: 0.0001
In [3]: trimesh.util.sigfig_round(.00014045456, 4)
Out[3]: 0.0001405
"""
as_int, multiplier = sigfig_int(values, sigfig)
rounded = as_int * (10 ** multiplier)
return rounded
|
python
|
{
"resource": ""
}
|
q23337
|
sigfig_int
|
train
|
def sigfig_int(values, sigfig):
"""
Convert a set of floating point values into integers with a specified number
of significant figures and an exponent.
Parameters
------------
values: (n,) float or int, array of values
sigfig: (n,) int, number of significant figures to keep
Returns
------------
as_int: (n,) int, every value[i] has sigfig[i] digits
multiplier: (n, int), exponent, so as_int * 10 ** multiplier is
the same order of magnitude as the input
"""
values = np.asanyarray(values).reshape(-1)
sigfig = np.asanyarray(sigfig, dtype=np.int64).reshape(-1)
if sigfig.shape != values.shape:
raise ValueError('sigfig must match shape of values!')
exponent = np.zeros(len(values))
nonzero = np.abs(values) > TOL_ZERO
exponent[nonzero] = np.floor(np.log10(np.abs(values[nonzero])))
multiplier = exponent - sigfig + 1
as_int = np.round(values / (10**multiplier)).astype(np.int32)
return as_int, multiplier
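# Usage sketch (illustrative, not from the trimesh source):
# In [1]: trimesh.util.sigfig_int([123.456, 0.00159], [2, 2])
# Out[1]: (array([12, 16], dtype=int32), array([ 1., -4.]))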
|
python
|
{
"resource": ""
}
|
q23338
|
decompress
|
train
|
def decompress(file_obj, file_type):
"""
Given an open file object and a file type, return all components
of the archive as open file objects in a dict.
Parameters
-----------
file_obj : file-like
Containing compressed data
file_type : str
File extension, 'zip', 'tar.gz', etc
Returns
---------
decompressed : dict
Data from archive in format {file name : file-like}
"""
def is_zip():
archive = zipfile.ZipFile(file_obj)
result = {name: wrap_as_stream(archive.read(name))
for name in archive.namelist()}
return result
def is_tar():
import tarfile
archive = tarfile.open(fileobj=file_obj, mode='r')
result = {name: archive.extractfile(name)
for name in archive.getnames()}
return result
file_type = str(file_type).lower()
if isinstance(file_obj, bytes):
file_obj = wrap_as_stream(file_obj)
if file_type[-3:] == 'zip':
return is_zip()
if 'tar' in file_type[-6:]:
return is_tar()
raise ValueError('Unsupported type passed!')
|
python
|
{
"resource": ""
}
|
q23339
|
compress
|
train
|
def compress(info):
"""
Compress data stored in a dict.
Parameters
-----------
info : dict
Data to compress in form:
{file name in archive: bytes or file-like object}
Returns
-----------
compressed : bytes
Compressed file data
"""
if PY3:
file_obj = BytesIO()
else:
file_obj = StringIO()
with zipfile.ZipFile(
file_obj,
mode='w',
compression=zipfile.ZIP_DEFLATED) as zipper:
for name, data in info.items():
if hasattr(data, 'read'):
# if we were passed a file object, read it
data = data.read()
zipper.writestr(name, data)
file_obj.seek(0)
compressed = file_obj.read()
return compressed
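# Round-trip sketch (illustrative, not from the trimesh source):
# In [1]: raw = trimesh.util.compress({'hi.txt': b'hello'})
# In [2]: trimesh.util.decompress(trimesh.util.wrap_as_stream(raw),
# ...: file_type='zip')['hi.txt'].read()
# Out[2]: b'hello'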
|
python
|
{
"resource": ""
}
|
q23340
|
vstack_empty
|
train
|
def vstack_empty(tup):
"""
A thin wrapper for numpy.vstack that ignores empty lists.
Parameters
------------
tup: tuple or list of arrays with the same number of columns
Returns
------------
stacked: (n,d) array, with same number of columns as
constituent arrays.
"""
# filter out empty arrays
stackable = [i for i in tup if len(i) > 0]
# if we only have one array just return it
if len(stackable) == 1:
return np.asanyarray(stackable[0])
# if we have nothing return an empty numpy array
elif len(stackable) == 0:
return np.array([])
# otherwise just use vstack as normal
return np.vstack(stackable)
|
python
|
{
"resource": ""
}
|
q23341
|
write_encoded
|
train
|
def write_encoded(file_obj,
stuff,
encoding='utf-8'):
"""
If a file is open in binary mode and a string is passed, encode and write
If a file is open in text mode and bytes are passed, decode and write
Parameters
-----------
file_obj: file object, with 'write' and 'mode'
stuff: str or bytes, stuff to be written
encoding: str, encoding of text
"""
binary_file = 'b' in file_obj.mode
string_stuff = isinstance(stuff, basestring)
binary_stuff = isinstance(stuff, bytes)
if not PY3:
file_obj.write(stuff)
elif binary_file and string_stuff:
file_obj.write(stuff.encode(encoding))
elif not binary_file and binary_stuff:
file_obj.write(stuff.decode(encoding))
else:
file_obj.write(stuff)
file_obj.flush()
return stuff
|
python
|
{
"resource": ""
}
|
q23342
|
unique_id
|
train
|
def unique_id(length=12, increment=0):
"""
Generate a decent looking alphanumeric unique identifier.
First 16 bits are time-incrementing, followed by randomness.
This function is used as a nicer looking alternative to:
>>> uuid.uuid4().hex
Follows the advice in:
https://eager.io/blog/how-long-does-an-id-need-to-be/
Parameters
------------
length: int, length of resulting identifier
increment: int, number to add to header uint16
useful if calling this function repeatedly
in a tight loop executing faster than time
can increment the header
Returns
------------
unique: str, unique alphanumeric identifier
"""
# head the identifier with 16 bits of time information
# this provides locality and reduces collision chances
head = np.array((increment + time.time() * 10) % 2**16,
dtype=np.uint16).tobytes()
# get a bunch of random bytes
random = np.random.random(int(np.ceil(length / 5))).tobytes()
# encode the time header and random information as base64
# replace + and / with spaces
unique = base64.b64encode(head + random,
b' ').decode('utf-8')
# remove spaces and cut to length
unique = unique.replace(' ', '')[:length]
return unique
|
python
|
{
"resource": ""
}
|
q23343
|
isclose
|
train
|
def isclose(a, b, atol):
"""
A replacement for np.isclose that does fewer checks
and validation and as a result is roughly 4x faster.
Note that this is used in tight loops, and as such
a and b MUST be np.ndarray, not list or "array-like"
Parameters
----------
a : np.ndarray
To be compared
b : np.ndarray
To be compared
atol : float
Acceptable distance between `a` and `b` to be "close"
Returns
-----------
close : np.ndarray, bool
Per- element closeness
"""
diff = a - b
close = np.logical_and(diff > -atol, diff < atol)
return close
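# Usage sketch (illustrative, not from the trimesh source):
# In [1]: trimesh.util.isclose(np.array([1.0, 2.0]),
# ...: np.array([1.0001, 3.0]), atol=1e-3)
# Out[1]: array([ True, False])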
|
python
|
{
"resource": ""
}
|
q23344
|
svg_to_path
|
train
|
def svg_to_path(file_obj, file_type=None):
"""
Load an SVG file into a Path2D object.
Parameters
-----------
file_obj : open file object
Contains SVG data
file_type: None
Not used
Returns
-----------
loaded : dict
With kwargs for Path2D constructor
"""
def element_transform(e, max_depth=100):
"""
Find a transformation matrix for an XML element.
"""
matrices = []
current = e
for i in range(max_depth):
if 'transform' in current.attrib:
mat = transform_to_matrices(current.attrib['transform'])
matrices.extend(mat)
# cached[current] = mat
current = current.getparent()
if current is None:
break
if len(matrices) == 0:
return np.eye(3)
elif len(matrices) == 1:
return matrices[0]
else:
return util.multi_dot(matrices[::-1])
# first parse the XML
xml = etree.fromstring(file_obj.read())
# store paths and transforms as
# (path string, 3x3 matrix)
paths = []
# store every path element
for element in xml.iter('{*}path'):
paths.append((element.attrib['d'],
element_transform(element)))
return _svg_path_convert(paths)
|
python
|
{
"resource": ""
}
|
q23345
|
transform_to_matrices
|
train
|
def transform_to_matrices(transform):
"""
Convert an SVG transform string to an array of matrices.
> transform = "rotate(-10 50 100)
translate(-36 45.5)
skewX(40)
scale(1 0.5)"
Parameters
-----------
transform : str
Contains transformation information in SVG form
Returns
-----------
matrices : (n, 3, 3) float
Multiple transformation matrices from input transform string
"""
# split the transform string in to components of:
# (operation, args) i.e. (translate, '-1.0, 2.0')
components = [
[j.strip() for j in i.strip().split('(') if len(j) > 0]
for i in transform.lower().split(')') if len(i) > 0]
# store each matrix without dotting
matrices = []
for line in components:
if len(line) == 0:
continue
elif len(line) != 2:
raise ValueError('should always have two components!')
key, args = line
# convert string args to array of floats
# support either comma or space delimiter
values = np.array([float(i) for i in
args.replace(',', ' ').split()])
if key == 'translate':
# convert translation to a (3, 3) homogeneous matrix
matrices.append(np.eye(3))
matrices[-1][:2, 2] = values
elif key == 'matrix':
# [a b c d e f] ->
# [[a c e],
# [b d f],
# [0 0 1]]
matrices.append(np.vstack((
values.reshape((3, 2)).T, [0, 0, 1])))
elif key == 'rotate':
# SVG rotations are in degrees,
# planar_matrix takes radians
angle = np.radians(values[0])
# if there are three values rotate around point
if len(values) == 3:
point = values[1:]
else:
point = None
matrices.append(planar_matrix(theta=angle,
point=point))
elif key == 'scale':
# supports (x_scale, y_scale) or (scale)
mat = np.eye(3)
mat[:2, :2] *= values
matrices.append(mat)
else:
log.warning('unknown SVG transform: {}'.format(key))
return matrices
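# Usage sketch (illustrative, not from the trimesh source):
# In [1]: transform_to_matrices('translate(2 3)')
# Out[1]: [array([[1., 0., 2.],
#                 [0., 1., 3.],
#                 [0., 0., 1.]])]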
|
python
|
{
"resource": ""
}
|
q23346
|
_svg_path_convert
|
train
|
def _svg_path_convert(paths):
"""
Convert an SVG path string into a Path2D object
Parameters
-------------
paths: list of tuples
Containing (path string, (3,3) matrix)
Returns
-------------
drawing : dict
Kwargs for Path2D constructor
"""
def complex_to_float(values):
return np.array([[i.real, i.imag] for i in values])
def load_line(svg_line):
points = complex_to_float([svg_line.point(0.0),
svg_line.point(1.0)])
if starting:
# return every vertex and use it
return (entities_mod.Line(np.arange(2) + len(vertices)), points)
else:
# we are not starting so use the last referenced vertex as the
# start point
return (entities_mod.Line(
np.arange(2) + len(vertices) - 1), points[1:])
def load_arc(svg_arc):
points = complex_to_float([svg_arc.start,
svg_arc.point(.5),
svg_arc.end])
if starting:
# return every vertex and use it
return (entities_mod.Arc(np.arange(3) + len(vertices)), points)
else:
# we are not starting so use the last referenced vertex as the
# start point
return (entities_mod.Arc(np.arange(3) +
len(vertices) - 1), points[1:])
def load_quadratic(svg_quadratic):
points = complex_to_float([svg_quadratic.start,
svg_quadratic.control,
svg_quadratic.end])
if starting:
# return every vertex and use it
return (entities_mod.Bezier(np.arange(3) + len(vertices)), points)
else:
# we are not starting so use the last referenced vertex as the
# start point
return (entities_mod.Bezier(
np.arange(3) + len(vertices) - 1), points[1:])
def load_cubic(svg_cubic):
points = complex_to_float([svg_cubic.start,
svg_cubic.control1,
svg_cubic.control2,
svg_cubic.end])
if starting:
# return every vertex and use it
return (entities_mod.Bezier(np.arange(4) + len(vertices)), points)
else:
# we are not starting so use the last referenced vertex as the
# start point
return (entities_mod.Bezier(
np.arange(4) + len(vertices) - 1), points[1:])
# store loaded values here
entities = []
vertices = []
loaders = {'Arc': load_arc,
'Line': load_line,
'CubicBezier': load_cubic,
'QuadraticBezier': load_quadratic}
for path_string, matrix in paths:
starting = True
for svg_entity in parse_path(path_string):
type_name = svg_entity.__class__.__name__
if type_name in loaders:
e, v = loaders[type_name](svg_entity)
entities.append(e)
vertices.extend(transform_points(v, matrix))
# after the first entity only new points are appended
starting = False
# store results as kwargs
loaded = {'entities': np.array(entities),
'vertices': np.array(vertices)}
return loaded
|
python
|
{
"resource": ""
}
|
q23347
|
_orient3dfast
|
train
|
def _orient3dfast(plane, pd):
"""
Performs a fast 3D orientation test.
Parameters
----------
plane: (3,3) float, three points in space that define a plane
pd: (3,) float, a single point
Returns
-------
result: float, if greater than zero then pd is above the plane through
the given three points, if less than zero then pd is below
the given plane, and if equal to zero then pd is on the
given plane.
"""
pa, pb, pc = plane
adx = pa[0] - pd[0]
bdx = pb[0] - pd[0]
cdx = pc[0] - pd[0]
ady = pa[1] - pd[1]
bdy = pb[1] - pd[1]
cdy = pc[1] - pd[1]
adz = pa[2] - pd[2]
bdz = pb[2] - pd[2]
cdz = pc[2] - pd[2]
return (adx * (bdy * cdz - bdz * cdy)
+ bdx * (cdy * adz - cdz * ady)
+ cdx * (ady * bdz - adz * bdy))
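# Usage sketch (illustrative, not from the trimesh source;
# the sign convention depends on the winding of the plane points):
# In [1]: plane = [[0, 0, 0], [0, 1, 0], [1, 0, 0]]
# In [2]: _orient3dfast(plane, [0, 0, 1]), _orient3dfast(plane, [0, 0, -1])
# Out[2]: (1, -1)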
|
python
|
{
"resource": ""
}
|
q23348
|
_compute_static_prob
|
train
|
def _compute_static_prob(tri, com):
"""
For an object with the given center of mass, compute
the probability that the given tri would be the first to hit the
ground if the object were dropped with a pose chosen uniformly at random.
Parameters
----------
tri: (3,3) float, the vertices of a triangle
com: (3,) float, the center of mass of the object
Returns
-------
prob: float, the probability in [0,1] for the given triangle
"""
sv = [(v - com) / np.linalg.norm(v - com) for v in tri]
# Use L'Huilier's Formula to compute spherical area
a = np.arccos(min(1, max(-1, np.dot(sv[0], sv[1]))))
b = np.arccos(min(1, max(-1, np.dot(sv[1], sv[2]))))
c = np.arccos(min(1, max(-1, np.dot(sv[2], sv[0]))))
s = (a + b + c) / 2.0
# Prevents weirdness with arctan
try:
return 1.0 / np.pi * np.arctan(np.sqrt(np.tan(s / 2) * np.tan(
(s - a) / 2) * np.tan((s - b) / 2) * np.tan((s - c) / 2)))
except BaseException:
s = s + 1e-8
return 1.0 / np.pi * np.arctan(np.sqrt(np.tan(s / 2) * np.tan(
(s - a) / 2) * np.tan((s - b) / 2) * np.tan((s - c) / 2)))
|
python
|
{
"resource": ""
}
|
q23349
|
_create_topple_graph
|
train
|
def _create_topple_graph(cvh_mesh, com):
"""
Constructs a toppling digraph for the given convex hull mesh and
center of mass.
Each node n_i in the digraph corresponds to a face f_i of the mesh and is
labelled with the probability that the mesh will land on f_i if dropped
randomly. Not all faces are stable, and node n_i has a directed edge to
node n_j if the object will quasi-statically topple from f_i to f_j if it
lands on f_i initially.
This computation is described in detail in
http://goldberg.berkeley.edu/pubs/eps.pdf.
Parameters
----------
cvh_mesh : trimesh.Trimesh
The convex hull of the target shape
com : (3,) float
The 3D location of the target shape's center of mass
Returns
-------
graph : networkx.DiGraph
Graph representing static probabilities and toppling
order for the convex hull
"""
adj_graph = nx.Graph()
topple_graph = nx.DiGraph()
# Create face adjacency graph
face_pairs = cvh_mesh.face_adjacency
edges = cvh_mesh.face_adjacency_edges
graph_edges = []
for fp, e in zip(face_pairs, edges):
verts = cvh_mesh.vertices[e]
graph_edges.append([fp[0], fp[1], {'verts': verts}])
adj_graph.add_edges_from(graph_edges)
# Compute static probabilities of landing on each face
for i, tri in enumerate(cvh_mesh.triangles):
prob = _compute_static_prob(tri, com)
topple_graph.add_node(i, prob=prob)
# Compute COM projections onto planes of each triangle in cvh_mesh
proj_dists = np.einsum('ij,ij->i', cvh_mesh.face_normals,
com - cvh_mesh.triangles[:, 0])
proj_coms = com - np.einsum('i,ij->ij', proj_dists, cvh_mesh.face_normals)
barys = points_to_barycentric(cvh_mesh.triangles, proj_coms)
unstable_face_indices = np.where(np.any(barys < 0, axis=1))[0]
# For each unstable face, compute the face it topples to
for fi in unstable_face_indices:
proj_com = proj_coms[fi]
centroid = cvh_mesh.triangles_center[fi]
norm = cvh_mesh.face_normals[fi]
for tfi in adj_graph[fi]:
v1, v2 = adj_graph[fi][tfi]['verts']
if np.dot(np.cross(v1 - centroid, v2 - centroid), norm) < 0:
# swap so the shared edge is wound consistently
v1, v2 = v2, v1
plane1 = [centroid, v1, v1 + norm]
plane2 = [centroid, v2 + norm, v2]
if _orient3dfast(plane1, proj_com) >= 0 and _orient3dfast(
plane2, proj_com) >= 0:
break
topple_graph.add_edge(fi, tfi)
return topple_graph
|
python
|
{
"resource": ""
}
|
q23350
|
transform
|
train
|
def transform(mesh, translation_scale=1000.0):
"""
Return a permutated variant of a mesh by randomly reordering
faces and rotating + translating the mesh with a random matrix.
Parameters
----------
mesh: Trimesh object (input will not be altered by this function)
translation_scale: float, scale of the random translation
Returns
----------
permutated: Trimesh object, same faces as input mesh but
rotated and reordered.
"""
matrix = transformations.random_rotation_matrix()
matrix[0:3, 3] = np.random.random(3) * translation_scale
triangles = np.random.permutation(mesh.triangles).reshape((-1, 3))
triangles = transformations.transform_points(triangles, matrix)
mesh_type = util.type_named(mesh, 'Trimesh')
permutated = mesh_type(
**triangles_module.to_kwargs(triangles.reshape((-1, 3, 3))))
return permutated
|
python
|
{
"resource": ""
}
|
q23351
|
noise
|
train
|
def noise(mesh, magnitude=None):
"""
Add gaussian noise to every vertex of a mesh.
Makes no effort to maintain topology or sanity.
Parameters
----------
mesh: Trimesh object (will not be mutated)
magnitude: float, what is the maximum distance per axis we can displace a vertex.
Default value is mesh.scale/100.0
Returns
----------
permutated: Trimesh object, input mesh with noise applied
"""
if magnitude is None:
magnitude = mesh.scale / 100.0
random = (np.random.random(mesh.vertices.shape) - .5) * magnitude
vertices_noise = mesh.vertices.copy() + random
# make sure we've re-ordered faces randomly
triangles = np.random.permutation(vertices_noise[mesh.faces])
mesh_type = util.type_named(mesh, 'Trimesh')
permutated = mesh_type(**triangles_module.to_kwargs(triangles))
return permutated
|
python
|
{
"resource": ""
}
|
q23352
|
tessellation
|
train
|
def tessellation(mesh):
"""
Subdivide each face of a mesh into three faces with the new vertex
randomly placed inside the old face.
This produces a mesh with exactly the same surface area and volume
but with different tessellation.
Parameters
----------
mesh: Trimesh object
Returns
----------
permutated: Trimesh object with remeshed facets
"""
# create random barycentric coordinates for each face
# pad all coordinates by a small amount to bias new vertex towards center
barycentric = np.random.random(mesh.faces.shape) + .05
barycentric /= barycentric.sum(axis=1).reshape((-1, 1))
# create one new vertex somewhere in a face
vertex_face = (barycentric.reshape((-1, 3, 1))
* mesh.triangles).sum(axis=1)
vertex_face_id = np.arange(len(vertex_face)) + len(mesh.vertices)
# new vertices are the old vertices stacked on the vertices in the faces
vertices = np.vstack((mesh.vertices, vertex_face))
# there are three new faces per old face, and we maintain correct winding
faces = np.vstack((np.column_stack((mesh.faces[:, [0, 1]], vertex_face_id)),
np.column_stack(
(mesh.faces[:, [1, 2]], vertex_face_id)),
np.column_stack((mesh.faces[:, [2, 0]], vertex_face_id))))
# make sure the order of the faces is permutated
faces = np.random.permutation(faces)
mesh_type = util.type_named(mesh, 'Trimesh')
permutated = mesh_type(vertices=vertices,
faces=faces)
return permutated
|
python
|
{
"resource": ""
}
|
q23353
|
load_ply
|
train
|
def load_ply(file_obj,
resolver=None,
fix_texture=True,
*args,
**kwargs):
"""
Load a PLY file from an open file object.
Parameters
---------
file_obj : an open file-like object
Source data, ASCII or binary PLY
resolver : trimesh.visual.resolvers.Resolver
Object which can resolve assets
fix_texture : bool
If True, will re-index vertices and faces
so vertices with different UV coordinates
are disconnected.
Returns
---------
mesh_kwargs : dict
Data which can be passed to
Trimesh constructor, eg: a = Trimesh(**mesh_kwargs)
"""
# OrderedDict which is populated from the header
elements, is_ascii, image_name = parse_header(file_obj)
# functions will fill in elements from file_obj
if is_ascii:
ply_ascii(elements, file_obj)
else:
ply_binary(elements, file_obj)
# try to load the referenced image
image = None
if image_name is not None:
try:
data = resolver.get(image_name)
image = PIL.Image.open(util.wrap_as_stream(data))
except BaseException:
log.warning('unable to load image!',
exc_info=True)
kwargs = elements_to_kwargs(elements,
fix_texture=fix_texture,
image=image)
return kwargs
|
python
|
{
"resource": ""
}
|
q23354
|
export_ply
|
train
|
def export_ply(mesh,
encoding='binary',
vertex_normal=None):
"""
Export a mesh in the PLY format.
Parameters
----------
mesh : Trimesh object
encoding : str, 'ascii' or 'binary_little_endian'
vertex_normal : bool or None, whether to include vertex normals
Returns
----------
export : bytes of result
"""
# evaluate input args
# allow a shortcut for binary
if encoding == 'binary':
encoding = 'binary_little_endian'
elif encoding not in ['binary_little_endian', 'ascii']:
raise ValueError('encoding must be binary or ascii')
# if vertex normals aren't specifically asked for
# only export them if they are stored in cache
if vertex_normal is None:
vertex_normal = 'vertex_normal' in mesh._cache
# custom numpy dtypes for exporting
dtype_face = [('count', '<u1'),
('index', '<i4', (3))]
dtype_vertex = [('vertex', '<f4', (3))]
# will be appended to main dtype if needed
dtype_vertex_normal = ('normals', '<f4', (3))
dtype_color = ('rgba', '<u1', (4))
# get template strings in dict
templates = json.loads(get_resource('ply.template'))
# start collecting elements into a string for the header
header = templates['intro']
header += templates['vertex']
# if we're exporting vertex normals add them
# to the header and dtype
if vertex_normal:
header += templates['vertex_normal']
dtype_vertex.append(dtype_vertex_normal)
# if mesh has vertex colors add them to the header
if mesh.visual.kind == 'vertex' and encoding != 'ascii':
header += templates['color']
dtype_vertex.append(dtype_color)
# create and populate the custom dtype for vertices
vertex = np.zeros(len(mesh.vertices),
dtype=dtype_vertex)
vertex['vertex'] = mesh.vertices
if vertex_normal:
vertex['normals'] = mesh.vertex_normals
if mesh.visual.kind == 'vertex':
vertex['rgba'] = mesh.visual.vertex_colors
header += templates['face']
if mesh.visual.kind == 'face' and encoding != 'ascii':
header += templates['color']
dtype_face.append(dtype_color)
# put mesh face data into custom dtype to export
faces = np.zeros(len(mesh.faces), dtype=dtype_face)
faces['count'] = 3
faces['index'] = mesh.faces
if mesh.visual.kind == 'face' and encoding != 'ascii':
faces['rgba'] = mesh.visual.face_colors
header += templates['outro']
header_params = {'vertex_count': len(mesh.vertices),
'face_count': len(mesh.faces),
'encoding': encoding}
export = Template(header).substitute(header_params).encode('utf-8')
if encoding == 'binary_little_endian':
export += vertex.tobytes()
export += faces.tobytes()
elif encoding == 'ascii':
# ply format is: (face count, v0, v1, v2)
fstack = np.column_stack((np.ones(len(mesh.faces),
dtype=np.int64) * 3,
mesh.faces))
# if we're exporting vertex normals they get stacked
if vertex_normal:
vstack = np.column_stack((mesh.vertices,
mesh.vertex_normals))
else:
vstack = mesh.vertices
# add the string formatted vertices and faces
export += (util.array_to_string(vstack,
col_delim=' ',
row_delim='\n') +
'\n' +
util.array_to_string(fstack,
col_delim=' ',
row_delim='\n')).encode('utf-8')
else:
raise ValueError('encoding must be ascii or binary!')
return export
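# Usage sketch (an illustrative addition, not original source):
# assumes this module's context and an importable `trimesh` package.
import trimesh
demo_mesh = trimesh.creation.box(extents=[1.0, 1.0, 1.0])
demo_data = export_ply(demo_mesh, encoding='ascii')
assert demo_data[:3] == b'ply'  # PLY exports begin with the magic string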
|
python
|
{
"resource": ""
}
|
q23355
|
parse_header
|
train
|
def parse_header(file_obj):
"""
Read the ASCII header of a PLY file, and leave the file object
at the position of the start of data but past the header.
Parameters
-----------
file_obj : open file object
Positioned at the start of the file
Returns
-----------
elements : collections.OrderedDict
Fields and data types populated
is_ascii : bool
Whether the data is ASCII or binary
image_name : None or str
File name of TextureFile
"""
if 'ply' not in str(file_obj.readline()):
raise ValueError('not a ply file!')
# collect the encoding: binary or ASCII
encoding = file_obj.readline().decode('utf-8').strip().lower()
is_ascii = 'ascii' in encoding
# big or little endian
endian = ['<', '>'][int('big' in encoding)]
elements = collections.OrderedDict()
# store file name of TextureFiles in the header
image_name = None
while True:
line = file_obj.readline()
if line is None:
raise ValueError("Header not terminated properly!")
line = line.decode('utf-8').strip().split()
# we're done
if 'end_header' in line:
break
# elements are groups of properties
if 'element' in line[0]:
# we got a new element so add it
name, length = line[1:]
elements[name] = {
'length': int(length),
'properties': collections.OrderedDict()}
# a property is a member of an element
elif 'property' in line[0]:
# is the property a simple single value, like:
            # `property float x`
if len(line) == 3:
dtype, field = line[1:]
elements[name]['properties'][
str(field)] = endian + dtypes[dtype]
# is the property a painful list, like:
# `property list uchar int vertex_indices`
elif 'list' in line[1]:
dtype_count, dtype, field = line[2:]
elements[name]['properties'][
str(field)] = (
endian +
dtypes[dtype_count] +
', ($LIST,)' +
endian +
dtypes[dtype])
# referenced as a file name
elif 'TextureFile' in line:
# textures come listed like:
# `comment TextureFile fuze_uv.jpg`
index = line.index('TextureFile') + 1
if index < len(line):
image_name = line[index]
return elements, is_ascii, image_name
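# Usage sketch (an illustrative addition): parse a tiny hand-written
# ASCII header; assumes the module-level `dtypes` map knows 'float'.
import io
demo_header = (b'ply\nformat ascii 1.0\n'
               b'element vertex 1\nproperty float x\n'
               b'end_header\n')
demo_elements, demo_is_ascii, _ = parse_header(io.BytesIO(demo_header))
assert demo_is_ascii and demo_elements['vertex']['length'] == 1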
|
python
|
{
"resource": ""
}
|
q23356
|
ply_ascii
|
train
|
def ply_ascii(elements, file_obj):
"""
Load data from an ASCII PLY file into an existing elements data structure.
Parameters
------------
elements: OrderedDict object, populated from the file header.
object will be modified to add data by this function.
file_obj: open file object, with current position at the start
of the data section (past the header)
"""
# get the file contents as a string
text = str(file_obj.read().decode('utf-8'))
# split by newlines
lines = str.splitlines(text)
# get each line as an array split by whitespace
array = np.array([np.fromstring(i, sep=' ')
for i in lines])
# store the line position in the file
position = 0
# loop through data we need
for key, values in elements.items():
# will store (start, end) column index of data
columns = collections.deque()
# will store the total number of rows
rows = 0
for name, dtype in values['properties'].items():
if '$LIST' in dtype:
# if an element contains a list property handle it here
row = array[position]
list_count = int(row[rows])
# ignore the count and take the data
columns.append([rows + 1,
rows + 1 + list_count])
rows += list_count + 1
# change the datatype to just the dtype for data
values['properties'][name] = dtype.split('($LIST,)')[-1]
else:
# a single column data field
columns.append([rows, rows + 1])
rows += 1
# get the lines as a 2D numpy array
data = np.vstack(array[position:position + values['length']])
# offset position in file
position += values['length']
# store columns we care about by name and convert to data type
elements[key]['data'] = {n: data[:, c[0]:c[1]].astype(dt)
for n, dt, c in zip(
values['properties'].keys(), # field name
values['properties'].values(), # data type of field
columns)}
|
python
|
{
"resource": ""
}
|
q23357
|
ply_binary
|
train
|
def ply_binary(elements, file_obj):
"""
Load the data from a binary PLY file into the elements data structure.
Parameters
------------
elements: OrderedDict object, populated from the file header.
object will be modified to add data by this function.
file_obj: open file object, with current position at the start
of the data section (past the header)
"""
def populate_listsize(file_obj, elements):
"""
        Given a set of elements populated from the header, if there are
        any list properties seek into the file to find the list lengths.
        Note that if each list instance has a different length (for
        example if you mixed triangles and quads) this won't work at all
"""
p_start = file_obj.tell()
p_current = file_obj.tell()
for element_key, element in elements.items():
props = element['properties']
prior_data = ''
for k, dtype in props.items():
if '$LIST' in dtype:
# every list field has two data types:
# the list length (single value), and the list data (multiple)
# here we are only reading the single value for list length
field_dtype = np.dtype(dtype.split(',')[0])
if len(prior_data) == 0:
offset = 0
else:
offset = np.dtype(prior_data).itemsize
file_obj.seek(p_current + offset)
size = np.frombuffer(file_obj.read(field_dtype.itemsize),
dtype=field_dtype)[0]
props[k] = props[k].replace('$LIST', str(size))
prior_data += props[k] + ','
itemsize = np.dtype(', '.join(props.values())).itemsize
p_current += element['length'] * itemsize
file_obj.seek(p_start)
def populate_data(file_obj, elements):
"""
Given the data type and field information from the header,
read the data and add it to a 'data' field in the element.
"""
for key in elements.keys():
items = list(elements[key]['properties'].items())
dtype = np.dtype(items)
data = file_obj.read(elements[key]['length'] * dtype.itemsize)
elements[key]['data'] = np.frombuffer(data,
dtype=dtype)
return elements
def elements_size(elements):
"""
Given an elements data structure populated from the header,
calculate how long the file should be if it is intact.
"""
size = 0
for element in elements.values():
dtype = np.dtype(','.join(element['properties'].values()))
size += element['length'] * dtype.itemsize
return size
# some elements are passed where the list dimensions
# are not included in the header, so this function goes
# into the meat of the file and grabs the list dimensions
    # before we do the main data read as a single operation
populate_listsize(file_obj, elements)
# how many bytes are left in the file
size_file = util.distance_to_end(file_obj)
# how many bytes should the data structure described by
# the header take up
size_elements = elements_size(elements)
# if the number of bytes is not the same the file is probably corrupt
if size_file != size_elements:
raise ValueError('File is unexpected length!')
# with everything populated and a reasonable confidence the file
# is intact, read the data fields described by the header
populate_data(file_obj, elements)
|
python
|
{
"resource": ""
}
|
q23358
|
export_draco
|
train
|
def export_draco(mesh):
"""
Export a mesh using Google's Draco compressed format.
Only works if draco_encoder is in your PATH:
https://github.com/google/draco
Parameters
----------
mesh : Trimesh object
Returns
----------
data : str or bytes
DRC file bytes
"""
with tempfile.NamedTemporaryFile(suffix='.ply') as temp_ply:
temp_ply.write(export_ply(mesh))
temp_ply.flush()
with tempfile.NamedTemporaryFile(suffix='.drc') as encoded:
subprocess.check_output([draco_encoder,
'-qp', # bits of quantization for position
                             '28',  # since our tol.merge is 1e-8,
                             # 25 bits or more has a machine
                             # epsilon smaller than that
'-i',
temp_ply.name,
'-o',
encoded.name])
encoded.seek(0)
data = encoded.read()
return data
|
python
|
{
"resource": ""
}
|
q23359
|
load_draco
|
train
|
def load_draco(file_obj, **kwargs):
"""
Load a mesh from Google's Draco format.
Parameters
----------
    file_obj : file-like object
Contains data
Returns
----------
kwargs : dict
Keyword arguments to construct a Trimesh object
"""
with tempfile.NamedTemporaryFile(suffix='.drc') as temp_drc:
temp_drc.write(file_obj.read())
temp_drc.flush()
with tempfile.NamedTemporaryFile(suffix='.ply') as temp_ply:
subprocess.check_output([draco_decoder,
'-i',
temp_drc.name,
'-o',
temp_ply.name])
temp_ply.seek(0)
kwargs = load_ply(temp_ply)
return kwargs
|
python
|
{
"resource": ""
}
|
q23360
|
boolean_automatic
|
train
|
def boolean_automatic(meshes, operation):
"""
Automatically pick an engine for booleans based on availability.
Parameters
--------------
meshes : list of Trimesh
Meshes to be booleaned
operation : str
Type of boolean, i.e. 'union', 'intersection', 'difference'
Returns
---------------
result : trimesh.Trimesh
Result of boolean operation
"""
if interfaces.blender.exists:
result = interfaces.blender.boolean(meshes, operation)
elif interfaces.scad.exists:
result = interfaces.scad.boolean(meshes, operation)
else:
raise ValueError('No backends available for boolean operations!')
return result
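# Usage sketch (an illustrative addition): only attempts the boolean
# when a backend is installed; assumes `trimesh` is importable.
import trimesh
demo_a = trimesh.creation.icosphere()
demo_b = trimesh.creation.icosphere()
demo_b.apply_translation([0.5, 0.0, 0.0])
if interfaces.blender.exists or interfaces.scad.exists:
    demo_union = boolean_automatic([demo_a, demo_b], 'union')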
|
python
|
{
"resource": ""
}
|
q23361
|
subdivide
|
train
|
def subdivide(vertices,
faces,
face_index=None):
"""
Subdivide a mesh into smaller triangles.
Note that if `face_index` is passed, only those faces will
be subdivided and their neighbors won't be modified making
the mesh no longer "watertight."
Parameters
----------
vertices : (n, 3) float
Vertices in space
faces : (n, 3) int
Indexes of vertices which make up triangular faces
    face_index : (m,) int or None
      Indices of the faces to subdivide;
      if None all faces of the mesh will be subdivided
Returns
----------
new_vertices : (n, 3) float
Vertices in space
new_faces : (n, 3) int
Remeshed faces
"""
if face_index is None:
face_index = np.arange(len(faces))
else:
face_index = np.asanyarray(face_index)
# the (c,3) int set of vertex indices
faces = faces[face_index]
# the (c, 3, 3) float set of points in the triangles
triangles = vertices[faces]
# the 3 midpoints of each triangle edge
# stacked to a (3 * c, 3) float
mid = np.vstack([triangles[:, g, :].mean(axis=1)
for g in [[0, 1],
[1, 2],
[2, 0]]])
# for adjacent faces we are going to be generating
# the same midpoint twice so merge them here
mid_idx = (np.arange(len(face_index) * 3)).reshape((3, -1)).T
unique, inverse = grouping.unique_rows(mid)
mid = mid[unique]
mid_idx = inverse[mid_idx] + len(vertices)
# the new faces with correct winding
f = np.column_stack([faces[:, 0],
mid_idx[:, 0],
mid_idx[:, 2],
mid_idx[:, 0],
faces[:, 1],
mid_idx[:, 1],
mid_idx[:, 2],
mid_idx[:, 1],
faces[:, 2],
mid_idx[:, 0],
mid_idx[:, 1],
mid_idx[:, 2]]).reshape((-1, 3))
# add the 3 new faces per old face
new_faces = np.vstack((faces, f[len(face_index):]))
# replace the old face with a smaller face
new_faces[face_index] = f[:len(face_index)]
new_vertices = np.vstack((vertices, mid))
return new_vertices, new_faces
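# Usage sketch (an illustrative addition): one triangle subdivides
# into four faces on six vertices.
import numpy as np
demo_vertices = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
demo_faces = np.array([[0, 1, 2]])
sub_vertices, sub_faces = subdivide(demo_vertices, demo_faces)
assert len(sub_faces) == 4 and len(sub_vertices) == 6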
|
python
|
{
"resource": ""
}
|
q23362
|
subdivide_to_size
|
train
|
def subdivide_to_size(vertices,
faces,
max_edge,
max_iter=10):
"""
Subdivide a mesh until every edge is shorter than a
specified length.
Will return a triangle soup, not a nicely structured mesh.
Parameters
------------
vertices : (n, 3) float
Vertices in space
faces : (m, 3) int
Indices of vertices which make up triangles
max_edge : float
Maximum length of any edge in the result
max_iter : int
The maximum number of times to run subdivision
Returns
------------
vertices : (j, 3) float
Vertices in space
faces : (q, 3) int
Indices of vertices
"""
# store completed
done_face = []
done_vert = []
# copy inputs and make sure dtype is correct
current_faces = np.array(faces,
dtype=np.int64,
copy=True)
current_vertices = np.array(vertices,
dtype=np.float64,
copy=True)
# loop through iteration cap
for i in range(max_iter + 1):
# (n, 3, 3) float triangle soup
triangles = current_vertices[current_faces]
# compute the length of every triangle edge
edge_lengths = (np.diff(triangles[:, [0, 1, 2, 0]],
axis=1) ** 2).sum(axis=2) ** .5
too_long = (edge_lengths > max_edge).any(axis=1)
# clean up the faces a little bit so we don't
# store a ton of unused vertices
unique, inverse = np.unique(
current_faces[np.logical_not(too_long)],
return_inverse=True)
# store vertices and faces meeting criteria
done_vert.append(current_vertices[unique])
done_face.append(inverse.reshape((-1, 3)))
# met our goals so abort
if not too_long.any():
break
# run subdivision again
(current_vertices,
current_faces) = subdivide(current_vertices,
current_faces[too_long])
# stack sequence into nice (n, 3) arrays
vertices, faces = util.append_faces(done_vert,
done_face)
return vertices, faces
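# Usage sketch (an illustrative addition): after subdivision every
# edge of the result should be at or under the requested maximum.
import numpy as np
sz_vertices = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
sz_faces = np.array([[0, 1, 2]])
out_v, out_f = subdivide_to_size(sz_vertices, sz_faces, max_edge=0.3)
lengths = np.linalg.norm(
    np.diff(out_v[out_f][:, [0, 1, 2, 0]], axis=1), axis=2)
assert (lengths <= 0.3).all()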
|
python
|
{
"resource": ""
}
|
q23363
|
load_dwg
|
train
|
def load_dwg(file_obj, **kwargs):
"""
Load DWG files by converting them to DXF files using
TeighaFileConverter.
Parameters
-------------
    file_obj : file-like object
Returns
-------------
loaded : dict
kwargs for a Path2D constructor
"""
# read the DWG data into a bytes object
data = file_obj.read()
# convert data into R14 ASCII DXF
converted = _teigha_convert(data)
# load into kwargs for Path2D constructor
result = load_dxf(util.wrap_as_stream(converted))
return result
|
python
|
{
"resource": ""
}
|
q23364
|
cross
|
train
|
def cross(triangles):
"""
Returns the cross product of two edges from input triangles
Parameters
--------------
triangles: (n, 3, 3) float
Vertices of triangles
Returns
--------------
crosses : (n, 3) float
Cross product of two edge vectors
"""
vectors = np.diff(triangles, axis=1)
crosses = np.cross(vectors[:, 0], vectors[:, 1])
return crosses
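# Usage sketch (an illustrative addition): a unit right triangle in
# the XY plane has a cross product of (0, 0, 1).
import numpy as np
demo_tri = np.array([[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]])
assert np.allclose(cross(demo_tri), [[0., 0., 1.]])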
|
python
|
{
"resource": ""
}
|
q23365
|
area
|
train
|
def area(triangles=None, crosses=None, sum=False):
"""
Calculates the sum area of input triangles
Parameters
----------
triangles : (n, 3, 3) float
Vertices of triangles
crosses : (n, 3) float or None
      As a speedup don't re-compute cross products
sum : bool
Return summed area or individual triangle area
Returns
----------
area : (n,) float or float
Individual or summed area depending on `sum` argument
"""
if crosses is None:
crosses = cross(triangles)
area = (np.sum(crosses**2, axis=1)**.5) * .5
if sum:
return np.sum(area)
return area
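# Usage sketch (an illustrative addition): a unit right triangle
# has an area of one half.
import numpy as np
demo_tri = np.array([[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]])
assert np.isclose(area(demo_tri, sum=True), 0.5)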
|
python
|
{
"resource": ""
}
|
q23366
|
normals
|
train
|
def normals(triangles=None, crosses=None):
"""
Calculates the normals of input triangles
Parameters
------------
triangles : (n, 3, 3) float
Vertex positions
crosses : (n, 3) float
Cross products of edge vectors
Returns
------------
normals : (m, 3) float
Normal vectors
valid : (n,) bool
Was the face nonzero area or not
"""
if crosses is None:
crosses = cross(triangles)
# unitize the cross product vectors
unit, valid = util.unitize(crosses, check_valid=True)
return unit, valid
|
python
|
{
"resource": ""
}
|
q23367
|
angles
|
train
|
def angles(triangles):
"""
Calculates the angles of input triangles.
Parameters
------------
triangles : (n, 3, 3) float
Vertex positions
Returns
------------
angles : (n, 3) float
Angles at vertex positions, in radians
"""
# get a vector for each edge of the triangle
u = triangles[:, 1] - triangles[:, 0]
v = triangles[:, 2] - triangles[:, 0]
w = triangles[:, 2] - triangles[:, 1]
# normalize each vector in place
u /= np.linalg.norm(u, axis=1, keepdims=True)
v /= np.linalg.norm(v, axis=1, keepdims=True)
w /= np.linalg.norm(w, axis=1, keepdims=True)
    # each einsum computes a row-wise dot product; arccos of
    # the clipped cosine gives the interior angle
a = np.arccos(np.clip(np.einsum('ij, ij->i', u, v), -1, 1))
b = np.arccos(np.clip(np.einsum('ij, ij->i', -u, w), -1, 1))
c = np.pi - a - b
return np.column_stack([a, b, c])
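# Usage sketch (an illustrative addition): the interior angles of
# any triangle sum to pi radians.
import numpy as np
demo_tri = np.array([[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]])
assert np.isclose(angles(demo_tri).sum(), np.pi)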
|
python
|
{
"resource": ""
}
|
q23368
|
all_coplanar
|
train
|
def all_coplanar(triangles):
"""
Check to see if a list of triangles are all coplanar
Parameters
----------------
triangles: (n, 3, 3) float
Vertices of triangles
Returns
---------------
all_coplanar : bool
True if all triangles are coplanar
"""
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('Triangles must be (n,3,3)!')
test_normal = normals(triangles)[0]
test_vertex = triangles[0][0]
distances = point_plane_distance(points=triangles[1:].reshape((-1, 3)),
plane_normal=test_normal,
plane_origin=test_vertex)
all_coplanar = np.all(np.abs(distances) < tol.zero)
return all_coplanar
|
python
|
{
"resource": ""
}
|
q23369
|
windings_aligned
|
train
|
def windings_aligned(triangles, normals_compare):
"""
Given a list of triangles and a list of normals determine if the
two are aligned
Parameters
----------
triangles : (n, 3, 3) float
Vertex locations in space
normals_compare : (n, 3) float
List of normals to compare
Returns
----------
aligned : (n,) bool
Are normals aligned with triangles
"""
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('Triangles must be (n,3,3)!')
calculated, valid = normals(triangles)
difference = util.diagonal_dot(calculated,
normals_compare[valid])
aligned = np.zeros(len(triangles), dtype=np.bool)
aligned[valid] = difference > 0.0
return aligned
|
python
|
{
"resource": ""
}
|
q23370
|
bounds_tree
|
train
|
def bounds_tree(triangles):
"""
    Given a list of triangles, create an r-tree for broad-phase
collision detection
Parameters
---------
triangles : (n, 3, 3) float
Triangles in space
Returns
---------
tree : rtree.Rtree
One node per triangle
"""
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('Triangles must be (n,3,3)!')
# the (n,6) interleaved bounding box for every triangle
triangle_bounds = np.column_stack((triangles.min(axis=1),
triangles.max(axis=1)))
tree = util.bounds_tree(triangle_bounds)
return tree
|
python
|
{
"resource": ""
}
|
q23371
|
nondegenerate
|
train
|
def nondegenerate(triangles, areas=None, height=None):
"""
Find all triangles which have an oriented bounding box
    where both of the two sides are larger than a specified height.
    Degenerate triangles can occur when:
1) Two of the three vertices are colocated
2) All three vertices are unique but colinear
Parameters
----------
triangles : (n, 3, 3) float
Triangles in space
    areas : (n,) float or None
      Optional precomputed area of the input triangles
    height : float
      Minimum OBB side length for a triangle to be kept
Returns
----------
nondegenerate : (n,) bool
True if a triangle meets required minimum height
"""
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('Triangles must be (n,3,3)!')
if height is None:
height = tol.merge
    # if both sides of the triangle's OBB are longer than
    # the specified height declare it nondegenerate
ok = (extents(triangles=triangles,
areas=areas) > height).all(axis=1)
return ok
|
python
|
{
"resource": ""
}
|
q23372
|
extents
|
train
|
def extents(triangles, areas=None):
"""
Return the 2D bounding box size of each triangle.
Parameters
----------
triangles : (n, 3, 3) float
Triangles in space
areas : (n,) float
Optional area of input triangles
Returns
----------
box : (n, 2) float
The size of each triangle's 2D oriented bounding box
"""
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('Triangles must be (n,3,3)!')
if areas is None:
areas = area(triangles=triangles,
sum=False)
# the edge vectors which define the triangle
a = triangles[:, 1] - triangles[:, 0]
b = triangles[:, 2] - triangles[:, 0]
# length of the edge vectors
length_a = (a**2).sum(axis=1)**.5
length_b = (b**2).sum(axis=1)**.5
# which edges are acceptable length
nonzero_a = length_a > tol.merge
nonzero_b = length_b > tol.merge
# find the two heights of the triangle
# essentially this is the side length of an
# oriented bounding box, per triangle
box = np.zeros((len(triangles), 2), dtype=np.float64)
box[:, 0][nonzero_a] = (areas[nonzero_a] * 2) / length_a[nonzero_a]
box[:, 1][nonzero_b] = (areas[nonzero_b] * 2) / length_b[nonzero_b]
return box
|
python
|
{
"resource": ""
}
|
q23373
|
barycentric_to_points
|
train
|
def barycentric_to_points(triangles, barycentric):
"""
Convert a list of barycentric coordinates on a list of triangles
to cartesian points.
Parameters
------------
triangles : (n, 3, 3) float
Triangles in space
barycentric : (n, 2) float
Barycentric coordinates
Returns
-----------
points : (m, 3) float
Points in space
"""
barycentric = np.asanyarray(barycentric, dtype=np.float64)
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('Triangles must be (n,3,3)!')
if barycentric.shape == (2,):
barycentric = np.ones((len(triangles), 2),
dtype=np.float64) * barycentric
if util.is_shape(barycentric, (len(triangles), 2)):
barycentric = np.column_stack((barycentric,
1.0 - barycentric.sum(axis=1)))
elif not util.is_shape(barycentric, (len(triangles), 3)):
raise ValueError('Barycentric shape incorrect!')
barycentric /= barycentric.sum(axis=1).reshape((-1, 1))
points = (triangles * barycentric.reshape((-1, 3, 1))).sum(axis=1)
return points
|
python
|
{
"resource": ""
}
|
q23374
|
points_to_barycentric
|
train
|
def points_to_barycentric(triangles,
points,
method='cramer'):
"""
Find the barycentric coordinates of points relative to triangles.
The Cramer's rule solution implements:
http://blackpawn.com/texts/pointinpoly
The cross product solution implements:
https://www.cs.ubc.ca/~heidrich/Papers/JGT.05.pdf
Parameters
-----------
triangles : (n, 3, 3) float
Triangles vertices in space
points : (n, 3) float
Point in space associated with a triangle
method : str
Which method to compute the barycentric coordinates with:
      - 'cross': uses cross products, roughly 2x slower but
        with different numerical robustness properties
      - anything else: uses the Cramer's rule solution
Returns
-----------
barycentric : (n, 3) float
Barycentric coordinates of each point
"""
def method_cross():
n = np.cross(edge_vectors[:, 0], edge_vectors[:, 1])
denominator = util.diagonal_dot(n, n)
barycentric = np.zeros((len(triangles), 3), dtype=np.float64)
barycentric[:, 2] = util.diagonal_dot(
np.cross(edge_vectors[:, 0], w), n) / denominator
barycentric[:, 1] = util.diagonal_dot(
np.cross(w, edge_vectors[:, 1]), n) / denominator
barycentric[:, 0] = 1 - barycentric[:, 1] - barycentric[:, 2]
return barycentric
def method_cramer():
dot00 = util.diagonal_dot(edge_vectors[:, 0], edge_vectors[:, 0])
dot01 = util.diagonal_dot(edge_vectors[:, 0], edge_vectors[:, 1])
dot02 = util.diagonal_dot(edge_vectors[:, 0], w)
dot11 = util.diagonal_dot(edge_vectors[:, 1], edge_vectors[:, 1])
dot12 = util.diagonal_dot(edge_vectors[:, 1], w)
inverse_denominator = 1.0 / (dot00 * dot11 - dot01 * dot01)
barycentric = np.zeros((len(triangles), 3), dtype=np.float64)
barycentric[:, 2] = (dot00 * dot12 - dot01 *
dot02) * inverse_denominator
barycentric[:, 1] = (dot11 * dot02 - dot01 *
dot12) * inverse_denominator
barycentric[:, 0] = 1 - barycentric[:, 1] - barycentric[:, 2]
return barycentric
# establish that input triangles and points are sane
triangles = np.asanyarray(triangles, dtype=np.float64)
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('triangles shape incorrect')
if not util.is_shape(points, (len(triangles), 3)):
raise ValueError('triangles and points must correspond')
edge_vectors = triangles[:, 1:] - triangles[:, :1]
w = points - triangles[:, 0].reshape((-1, 3))
if method == 'cross':
return method_cross()
return method_cramer()
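# Usage sketch (an illustrative addition): converting a point to
# barycentric coordinates and back should round-trip.
import numpy as np
demo_tri = np.array([[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]])
demo_point = np.array([[0.25, 0.25, 0.0]])
demo_bary = points_to_barycentric(demo_tri, demo_point)
assert np.allclose(barycentric_to_points(demo_tri, demo_bary), demo_point)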
|
python
|
{
"resource": ""
}
|
q23375
|
to_kwargs
|
train
|
def to_kwargs(triangles):
"""
Convert a list of triangles to the kwargs for the Trimesh
constructor.
Parameters
---------
triangles : (n, 3, 3) float
Triangles in space
Returns
---------
kwargs : dict
Keyword arguments for the trimesh.Trimesh constructor
Includes keys 'vertices' and 'faces'
Examples
---------
>>> mesh = trimesh.Trimesh(**trimesh.triangles.to_kwargs(triangles))
"""
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('Triangles must be (n,3,3)!')
vertices = triangles.reshape((-1, 3))
faces = np.arange(len(vertices)).reshape((-1, 3))
kwargs = {'vertices': vertices,
'faces': faces}
return kwargs
|
python
|
{
"resource": ""
}
|
q23376
|
_Primitive.copy
|
train
|
def copy(self):
"""
Return a copy of the Primitive object.
"""
result = copy.deepcopy(self)
result._cache.clear()
return result
|
python
|
{
"resource": ""
}
|
q23377
|
_Primitive.to_mesh
|
train
|
def to_mesh(self):
"""
Return a copy of the Primitive object as a Trimesh object.
"""
result = Trimesh(vertices=self.vertices.copy(),
faces=self.faces.copy(),
face_normals=self.face_normals.copy(),
process=False)
return result
|
python
|
{
"resource": ""
}
|
q23378
|
Cylinder.volume
|
train
|
def volume(self):
"""
The analytic volume of the cylinder primitive.
Returns
---------
volume : float
Volume of the cylinder
"""
volume = ((np.pi * self.primitive.radius ** 2) *
self.primitive.height)
return volume
|
python
|
{
"resource": ""
}
|
q23379
|
Cylinder.moment_inertia
|
train
|
def moment_inertia(self):
"""
The analytic inertia tensor of the cylinder primitive.
Returns
----------
tensor: (3,3) float, 3D inertia tensor
"""
tensor = inertia.cylinder_inertia(
mass=self.volume,
radius=self.primitive.radius,
height=self.primitive.height,
transform=self.primitive.transform)
return tensor
|
python
|
{
"resource": ""
}
|
q23380
|
Cylinder.segment
|
train
|
def segment(self):
"""
A line segment which if inflated by cylinder radius
would represent the cylinder primitive.
Returns
-------------
segment : (2, 3) float
Points representing a single line segment
"""
# half the height
half = self.primitive.height / 2.0
        # apply the transform to the Z-aligned segment
points = np.dot(
self.primitive.transform,
np.transpose([[0, 0, -half, 1], [0, 0, half, 1]])).T[:, :3]
return points
|
python
|
{
"resource": ""
}
|
q23381
|
Sphere.apply_transform
|
train
|
def apply_transform(self, matrix):
"""
Apply a transform to the sphere primitive
Parameters
------------
matrix: (4,4) float, homogenous transformation
"""
matrix = np.asanyarray(matrix, dtype=np.float64)
if matrix.shape != (4, 4):
raise ValueError('shape must be 4,4')
center = np.dot(matrix,
np.append(self.primitive.center, 1.0))[:3]
self.primitive.center = center
|
python
|
{
"resource": ""
}
|
q23382
|
Sphere.moment_inertia
|
train
|
def moment_inertia(self):
"""
The analytic inertia tensor of the sphere primitive.
Returns
----------
tensor: (3,3) float, 3D inertia tensor
"""
tensor = inertia.sphere_inertia(mass=self.volume,
radius=self.primitive.radius)
return tensor
|
python
|
{
"resource": ""
}
|
q23383
|
Box.sample_volume
|
train
|
def sample_volume(self, count):
"""
Return random samples from inside the volume of the box.
Parameters
-------------
count : int
Number of samples to return
Returns
----------
samples : (count, 3) float
Points inside the volume
"""
samples = sample.volume_rectangular(
extents=self.primitive.extents,
count=count,
transform=self.primitive.transform)
return samples
|
python
|
{
"resource": ""
}
|
q23384
|
Box.sample_grid
|
train
|
def sample_grid(self, count=None, step=None):
"""
Return a 3D grid which is contained by the box.
Samples are either 'step' distance apart, or there are
'count' samples per box side.
Parameters
-----------
count : int or (3,) int
If specified samples are spaced with np.linspace
step : float or (3,) float
If specified samples are spaced with np.arange
Returns
-----------
grid : (n, 3) float
Points inside the box
"""
if (count is not None and
step is not None):
raise ValueError('only step OR count can be specified!')
        # create pre-transform bounds from extents
bounds = np.array([-self.primitive.extents,
self.primitive.extents]) * .5
if step is not None:
grid = util.grid_arange(bounds, step=step)
elif count is not None:
grid = util.grid_linspace(bounds, count=count)
else:
raise ValueError('either count or step must be specified!')
transformed = transformations.transform_points(
grid, matrix=self.primitive.transform)
return transformed
|
python
|
{
"resource": ""
}
|
q23385
|
Box.is_oriented
|
train
|
def is_oriented(self):
"""
Returns whether or not the current box is rotated at all.
"""
if util.is_shape(self.primitive.transform, (4, 4)):
return not np.allclose(self.primitive.transform[
0:3, 0:3], np.eye(3))
else:
return False
|
python
|
{
"resource": ""
}
|
q23386
|
Box.volume
|
train
|
def volume(self):
"""
Volume of the box Primitive.
Returns
--------
volume: float, volume of box
"""
volume = float(np.product(self.primitive.extents))
return volume
|
python
|
{
"resource": ""
}
|
q23387
|
Extrusion.area
|
train
|
def area(self):
"""
The surface area of the primitive extrusion.
Calculated from polygon and height to avoid mesh creation.
Returns
----------
area: float, surface area of 3D extrusion
"""
# area of the sides of the extrusion
area = abs(self.primitive.height *
self.primitive.polygon.length)
# area of the two caps of the extrusion
area += self.primitive.polygon.area * 2
return area
|
python
|
{
"resource": ""
}
|
q23388
|
Extrusion.volume
|
train
|
def volume(self):
"""
The volume of the primitive extrusion.
Calculated from polygon and height to avoid mesh creation.
Returns
----------
volume: float, volume of 3D extrusion
"""
volume = abs(self.primitive.polygon.area *
self.primitive.height)
return volume
|
python
|
{
"resource": ""
}
|
q23389
|
Extrusion.direction
|
train
|
def direction(self):
"""
        Based on the extrusion's transform, the vector along
        which the polygon will be extruded.
Returns
---------
direction: (3,) float vector. If self.primitive.transform is an
identity matrix this will be [0.0, 0.0, 1.0]
"""
direction = np.dot(self.primitive.transform[:3, :3],
[0.0, 0.0, np.sign(self.primitive.height)])
return direction
|
python
|
{
"resource": ""
}
|
q23390
|
Extrusion.slide
|
train
|
def slide(self, distance):
"""
Alter the transform of the current extrusion to slide it
along its extrude_direction vector
Parameters
-----------
distance: float, distance along self.extrude_direction to move
"""
distance = float(distance)
translation = np.eye(4)
translation[2, 3] = distance
new_transform = np.dot(self.primitive.transform.copy(),
translation.copy())
self.primitive.transform = new_transform
|
python
|
{
"resource": ""
}
|
q23391
|
Extrusion.buffer
|
train
|
def buffer(self, distance):
"""
Return a new Extrusion object which is expanded in profile and
in height by a specified distance.
Returns
----------
buffered: Extrusion object
"""
distance = float(distance)
# start with current height
height = self.primitive.height
# if current height is negative offset by negative amount
height += np.sign(height) * 2.0 * distance
buffered = Extrusion(
transform=self.primitive.transform.copy(),
polygon=self.primitive.polygon.buffer(distance),
height=height)
# slide the stock along the axis
buffered.slide(-np.sign(height) * distance)
return buffered
|
python
|
{
"resource": ""
}
|
q23392
|
enclosure_tree
|
train
|
def enclosure_tree(polygons):
"""
Given a list of shapely polygons with only exteriors,
find which curves represent the exterior shell or root curve
and which represent holes which penetrate the exterior.
This is done with an R-tree for rough overlap detection,
and then exact polygon queries for a final result.
Parameters
-----------
polygons : (n,) shapely.geometry.Polygon
Polygons which only have exteriors and may overlap
Returns
-----------
roots : (m,) int
Index of polygons which are root
contains : networkx.DiGraph
Edges indicate a polygon is
contained by another polygon
"""
tree = Rtree()
# nodes are indexes in polygons
contains = nx.DiGraph()
for i, polygon in enumerate(polygons):
# if a polygon is None it means creation
# failed due to weird geometry so ignore it
if polygon is None or len(polygon.bounds) != 4:
continue
# insert polygon bounds into rtree
tree.insert(i, polygon.bounds)
# make sure every valid polygon has a node
contains.add_node(i)
# loop through every polygon
for i in contains.nodes():
polygon = polygons[i]
# we first query for bounding box intersections from the R-tree
for j in tree.intersection(polygon.bounds):
# if we are checking a polygon against itself continue
if (i == j):
continue
# do a more accurate polygon in polygon test
# for the enclosure tree information
if polygons[i].contains(polygons[j]):
contains.add_edge(i, j)
elif polygons[j].contains(polygons[i]):
contains.add_edge(j, i)
# a root or exterior curve has an even number of parents
# wrap in dict call to avoid networkx view
degree = dict(contains.in_degree())
# convert keys and values to numpy arrays
indexes = np.array(list(degree.keys()))
degrees = np.array(list(degree.values()))
# roots are curves with an even inward degree (parent count)
roots = indexes[(degrees % 2) == 0]
# if there are multiple nested polygons split the graph
# so the contains logic returns the individual polygons
if len(degrees) > 0 and degrees.max() > 1:
# collect new edges for graph
edges = []
# find edges of subgraph for each root and children
for root in roots:
children = indexes[degrees == degree[root] + 1]
edges.extend(contains.subgraph(np.append(children, root)).edges())
# stack edges into new directed graph
contains = nx.from_edgelist(edges, nx.DiGraph())
# if roots have no children add them anyway
contains.add_nodes_from(roots)
return roots, contains
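# Usage sketch (an illustrative addition): a small circle nested in
# a large one; the large circle is the root containing the small.
from shapely.geometry import Point
et_polygons = [Point(0, 0).buffer(2.0), Point(0, 0).buffer(1.0)]
et_roots, et_graph = enclosure_tree(et_polygons)
assert list(et_roots) == [0]
assert et_graph.has_edge(0, 1)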
|
python
|
{
"resource": ""
}
|
q23393
|
edges_to_polygons
|
train
|
def edges_to_polygons(edges, vertices):
"""
Given an edge list of indices and associated vertices
representing lines, generate a list of polygons.
Parameters
-----------
edges : (n, 2) int
Indexes of vertices which represent lines
vertices : (m, 2) float
Vertices in 2D space
Returns
----------
polygons : (p,) shapely.geometry.Polygon
Polygon objects with interiors
"""
# create closed polygon objects
polygons = []
# loop through a sequence of ordered traversals
for dfs in graph.traversals(edges, mode='dfs'):
try:
# try to recover polygons before they are more complicated
polygons.append(repair_invalid(Polygon(vertices[dfs])))
except ValueError:
continue
# if there is only one polygon, just return it
if len(polygons) == 1:
return polygons
# find which polygons contain which other polygons
roots, tree = enclosure_tree(polygons)
# generate list of polygons with proper interiors
complete = []
for root in roots:
interior = list(tree[root].keys())
shell = polygons[root].exterior.coords
holes = [polygons[i].exterior.coords for i in interior]
complete.append(Polygon(shell=shell,
holes=holes))
return complete
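# Usage sketch (an illustrative addition): four segments forming a
# closed unit square come back as one polygon; assumes the module's
# `graph.traversals` recovers the loop as a single traversal.
import numpy as np
ep_vertices = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
ep_edges = np.array([[0, 1], [1, 2], [2, 3], [3, 0]])
ep_polygons = edges_to_polygons(ep_edges, ep_vertices)
assert len(ep_polygons) == 1
assert np.isclose(ep_polygons[0].area, 1.0)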
|
python
|
{
"resource": ""
}
|
q23394
|
polygons_obb
|
train
|
def polygons_obb(polygons):
"""
Find the OBBs for a list of shapely.geometry.Polygons
"""
rectangles = [None] * len(polygons)
transforms = [None] * len(polygons)
for i, p in enumerate(polygons):
transforms[i], rectangles[i] = polygon_obb(p)
return np.array(transforms), np.array(rectangles)
|
python
|
{
"resource": ""
}
|
q23395
|
polygon_obb
|
train
|
def polygon_obb(polygon):
"""
Find the oriented bounding box of a Shapely polygon.
The OBB is always aligned with an edge of the convex hull of the polygon.
Parameters
-------------
    polygon : shapely.geometry.Polygon or (n, 2) float
      Polygon or points to compute the OBB of
Returns
-------------
transform: (3,3) float, transformation matrix
which will move input polygon from its original position
to the first quadrant where the AABB is the OBB
extents: (2,) float, extents of transformed polygon
"""
if hasattr(polygon, 'exterior'):
points = np.asanyarray(polygon.exterior.coords)
elif isinstance(polygon, np.ndarray):
points = polygon
else:
raise ValueError('polygon or points must be provided')
return bounds.oriented_bounds_2D(points)
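# Usage sketch (an illustrative addition): a rotated unit square
# should still report OBB extents of (1, 1); assumes the module's
# `Polygon` import is available.
import numpy as np
from shapely import affinity
obb_square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
obb_T, obb_extents = polygon_obb(affinity.rotate(obb_square, 30))
assert np.allclose(obb_extents, [1.0, 1.0])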
|
python
|
{
"resource": ""
}
|
q23396
|
transform_polygon
|
train
|
def transform_polygon(polygon, matrix):
"""
    Transform a polygon by a 2D homogenous transform.
Parameters
-------------
polygon : shapely.geometry.Polygon
2D polygon to be transformed.
matrix : (3, 3) float
2D homogenous transformation.
Returns
--------------
result : shapely.geometry.Polygon
Polygon transformed by matrix.
"""
matrix = np.asanyarray(matrix, dtype=np.float64)
if util.is_sequence(polygon):
result = [transform_polygon(p, t)
for p, t in zip(polygon, matrix)]
return result
# transform the outer shell
shell = transform_points(np.array(polygon.exterior.coords),
matrix)[:, :2]
# transform the interiors
holes = [transform_points(np.array(i.coords),
matrix)[:, :2]
for i in polygon.interiors]
# create a new polygon with the result
result = Polygon(shell=shell, holes=holes)
return result
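# Usage sketch (an illustrative addition): translate a unit square
# by (2, 0) with a 2D homogenous matrix; area is preserved.
import numpy as np
tp_square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
tp_matrix = np.array([[1., 0., 2.],
                      [0., 1., 0.],
                      [0., 0., 1.]])
tp_moved = transform_polygon(tp_square, tp_matrix)
assert np.isclose(tp_moved.area, 1.0)
assert np.isclose(tp_moved.bounds[0], 2.0)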
|
python
|
{
"resource": ""
}
|
q23397
|
plot_polygon
|
train
|
def plot_polygon(polygon, show=True, **kwargs):
"""
Plot a shapely polygon using matplotlib.
Parameters
------------
polygon : shapely.geometry.Polygon
Polygon to be plotted
show : bool
If True will display immediately
**kwargs
Passed to plt.plot
"""
import matplotlib.pyplot as plt
def plot_single(single):
plt.plot(*single.exterior.xy, **kwargs)
for interior in single.interiors:
plt.plot(*interior.xy, **kwargs)
    # make aspect ratio non-stupid
plt.axes().set_aspect('equal', 'datalim')
if util.is_sequence(polygon):
[plot_single(i) for i in polygon]
else:
plot_single(polygon)
if show:
plt.show()
|
python
|
{
"resource": ""
}
|
q23398
|
resample_boundaries
|
train
|
def resample_boundaries(polygon, resolution, clip=None):
"""
Return a version of a polygon with boundaries resampled
to a specified resolution.
Parameters
-------------
polygon: shapely.geometry.Polygon object
resolution: float, desired distance between points on boundary
clip: (2,) int, upper and lower bounds to clip
number of samples to (to avoid exploding counts)
Returns
------------
kwargs: dict, keyword args for a Polygon(**kwargs)
"""
def resample_boundary(boundary):
# add a polygon.exterior or polygon.interior to
# the deque after resampling based on our resolution
count = boundary.length / resolution
count = int(np.clip(count, *clip))
return resample_path(boundary.coords, count=count)
if clip is None:
clip = [8, 200]
# create a sequence of [(n,2)] points
kwargs = {'shell': resample_boundary(polygon.exterior),
'holes': deque()}
for interior in polygon.interiors:
kwargs['holes'].append(resample_boundary(interior))
kwargs['holes'] = np.array(kwargs['holes'])
return kwargs
|
python
|
{
"resource": ""
}
|
q23399
|
medial_axis
|
train
|
def medial_axis(polygon,
resolution=None,
clip=None):
"""
Given a shapely polygon, find the approximate medial axis
using a voronoi diagram of evenly spaced points on the
boundary of the polygon.
Parameters
----------
polygon : shapely.geometry.Polygon
The source geometry
resolution : float
Distance between each sample on the polygon boundary
clip : None, or (2,) int
Clip sample count to min of clip[0] and max of clip[1]
Returns
----------
edges : (n, 2) int
Vertex indices representing line segments
on the polygon's medial axis
vertices : (m, 2) float
Vertex positions in space
"""
from scipy.spatial import Voronoi
if resolution is None:
resolution = .01
# get evenly spaced points on the polygons boundaries
samples = resample_boundaries(polygon=polygon,
resolution=resolution,
clip=clip)
# stack the boundary into a (m,2) float array
samples = stack_boundaries(samples)
# create the voronoi diagram on 2D points
voronoi = Voronoi(samples)
# which voronoi vertices are contained inside the polygon
contains = vectorized.contains(polygon, *voronoi.vertices.T)
# ridge vertices of -1 are outside, make sure they are False
contains = np.append(contains, False)
# make sure ridge vertices is numpy array
ridge = np.asanyarray(voronoi.ridge_vertices, dtype=np.int64)
# only take ridges where every vertex is contained
edges = ridge[contains[ridge].all(axis=1)]
return edges, voronoi.vertices
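# Usage sketch (an illustrative addition): the medial axis of a disc
# collapses toward its center; requires `shapely.vectorized` and the
# module's `stack_boundaries` helper.
from shapely.geometry import Point
ma_disc = Point(0, 0).buffer(1.0)
ma_edges, ma_vertices = medial_axis(ma_disc, resolution=0.1)
assert ma_edges.shape[1] == 2
assert ma_vertices.shape[1] == 2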
|
python
|
{
"resource": ""
}
|