| _id (string, 2–7 chars) | title (string, 1–88 chars) | partition (string, 3 classes) | text (string, 75–19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q23400
|
polygon_hash
|
train
|
def polygon_hash(polygon):
"""
Return a vector containing values representative of
a particular polygon.
Parameters
---------
polygon : shapely.geometry.Polygon
Input geometry
Returns
---------
hashed : (6,) float
Representative values for the input polygon
"""
result = np.array(
[len(polygon.interiors),
polygon.convex_hull.area,
polygon.convex_hull.length,
polygon.area,
polygon.length,
polygon.exterior.length],
dtype=np.float64)
return result
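# Hedged usage sketch, not part of the original function: assumes numpy as np
# and shapely are available, as the function above already requires.
from shapely.geometry import Polygon
square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
polygon_hash(square)  # -> array([0., 1., 4., 1., 4., 4.])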
|
python
|
{
"resource": ""
}
|
q23401
|
random_polygon
|
train
|
def random_polygon(segments=8, radius=1.0):
"""
Generate a random polygon with a maximum number of sides and approximate radius.
Parameters
---------
segments: int, the maximum number of sides the random polygon will have
radius: float, the approximate radius of the polygon desired
Returns
---------
polygon: shapely.geometry.Polygon object with random exterior, and no interiors.
"""
angles = np.sort(np.cumsum(np.random.random(
segments) * np.pi * 2) % (np.pi * 2))
radii = np.random.random(segments) * radius
points = np.column_stack(
(np.cos(angles), np.sin(angles))) * radii.reshape((-1, 1))
points = np.vstack((points, points[0]))
polygon = Polygon(points).buffer(0.0)
if util.is_sequence(polygon):
return polygon[0]
return polygon
|
python
|
{
"resource": ""
}
|
q23402
|
polygon_scale
|
train
|
def polygon_scale(polygon):
"""
For a Polygon object, return the diagonal length of the AABB.
Parameters
------------
polygon: shapely.geometry.Polygon object
Returns
------------
scale: float, length of AABB diagonal
"""
extents = np.reshape(polygon.bounds, (2, 2)).ptp(axis=0)
scale = (extents ** 2).sum() ** .5
return scale
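# Hedged usage sketch, not part of the original function: the AABB diagonal
# of the unit square is sqrt(2); assumes shapely and numpy are available.
from shapely.geometry import box
polygon_scale(box(0.0, 0.0, 1.0, 1.0))  # -> ~1.41421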
|
python
|
{
"resource": ""
}
|
q23403
|
paths_to_polygons
|
train
|
def paths_to_polygons(paths, scale=None):
"""
Given a sequence of connected points turn them into
valid shapely Polygon objects.
Parameters
-----------
paths : (n,) sequence
Of (m,2) float, closed paths
scale: float
Approximate scale of drawing for precision
Returns
-----------
polys : (p,) list
Entries are shapely.geometry.Polygon objects,
or None for paths that could not be recovered
"""
polygons = [None] * len(paths)
for i, path in enumerate(paths):
if len(path) < 4:
# since the first and last vertices are identical in
# a closed loop a 4 vertex path is the minimum for
# non-zero area
continue
try:
polygons[i] = repair_invalid(Polygon(path), scale)
except ValueError:
# raised if a polygon is unrecoverable
continue
except BaseException:
log.error('unrecoverable polygon', exc_info=True)
polygons = np.array(polygons)
return polygons
|
python
|
{
"resource": ""
}
|
q23404
|
repair_invalid
|
train
|
def repair_invalid(polygon, scale=None, rtol=.5):
"""
Given a shapely.geometry.Polygon, attempt to return a
valid version of the polygon through buffering tricks.
Parameters
-----------
polygon: shapely.geometry.Polygon object
rtol: float, relative tolerance for comparing the repaired
perimeter against the original perimeter
scale: float or None, approximate drawing scale used to
set the buffer distance
Returns
----------
repaired: shapely.geometry.Polygon object
Raises
----------
ValueError: if polygon can't be repaired
"""
if hasattr(polygon, 'is_valid') and polygon.is_valid:
return polygon
# basic repair involves buffering the polygon outwards
# this will fix a subset of problems.
basic = polygon.buffer(tol.zero)
# if it returned multiple polygons check the largest
if util.is_sequence(basic):
basic = basic[np.argmax([i.area for i in basic])]
# check perimeter of result against original perimeter
if basic.is_valid and np.isclose(basic.length,
polygon.length,
rtol=rtol):
return basic
if scale is None:
distance = tol.buffer * polygon_scale(polygon)
else:
distance = tol.buffer * scale
# if there are no interiors, we can work with just the exterior
# ring, which is often more reliable
if len(polygon.interiors) == 0:
# try buffering the exterior of the polygon
# the interior will be offset by -tol.buffer
rings = polygon.exterior.buffer(distance).interiors
if len(rings) == 1:
# reconstruct a single polygon from the interior ring
recon = Polygon(shell=rings[0]).buffer(distance)
# check perimeter of result against original perimeter
if recon.is_valid and np.isclose(recon.length,
polygon.length,
rtol=rtol):
return recon
# buffer and unbuffer the whole polygon
buffered = polygon.buffer(distance).buffer(-distance)
# if it returned multiple polygons check the largest
if util.is_sequence(buffered):
buffered = buffered[np.argmax([i.area for i in buffered])]
# check perimeter of result against original perimeter
if buffered.is_valid and np.isclose(buffered.length,
polygon.length,
rtol=rtol):
log.debug('Recovered invalid polygon through double buffering')
return buffered
raise ValueError('unable to recover polygon!')
|
python
|
{
"resource": ""
}
|
q23405
|
export_gltf
|
train
|
def export_gltf(scene,
extras=None,
include_normals=False):
"""
Export a scene object as a GLTF directory.
This puts each mesh into a separate file (i.e. a `buffer`)
as opposed to one larger file.
Parameters
-----------
scene : trimesh.Scene
Scene to be exported
extras : JSON serializable
Will be stored in the extras field
include_normals : bool
Include vertex normals in output file?
Returns
----------
export : dict
Format: {file name : file data}
"""
# if we were passed a bare Trimesh or Path3D object
if (not util.is_instance_named(scene, "Scene")
and hasattr(scene, "scene")):
scene = scene.scene()
# create the header and buffer data
tree, buffer_items = _create_gltf_structure(
scene=scene,
extras=extras,
include_normals=include_normals)
# store files as {name : data}
files = {}
# make one buffer per buffer_items
buffers = [None] * len(buffer_items)
# A bufferView is a slice of a file
views = [None] * len(buffer_items)
# create the buffer views
for i, item in enumerate(buffer_items):
views[i] = {
"buffer": i,
"byteOffset": 0,
"byteLength": len(item)}
buffer_data = _byte_pad(bytes().join(buffer_items[i: i + 2]))
buffer_name = "gltf_buffer_{}.bin".format(i)
buffers[i] = {
"uri": buffer_name,
"byteLength": len(buffer_data)}
files[buffer_name] = buffer_data
tree["buffers"] = buffers
tree["bufferViews"] = views
files["model.gltf"] = json.dumps(tree).encode("utf-8")
return files
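# Hedged usage sketch, not part of the original function: `scene` is assumed
# to be an existing trimesh.Scene; the export is a {file name: bytes} mapping
# with one .bin buffer per buffer item plus the 'model.gltf' header.
files = export_gltf(scene)
sorted(files.keys())  # -> e.g. ['gltf_buffer_0.bin', ..., 'model.gltf']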
|
python
|
{
"resource": ""
}
|
q23406
|
load_gltf
|
train
|
def load_gltf(file_obj=None,
resolver=None,
**mesh_kwargs):
"""
Load a GLTF file, which consists of a directory structure
with multiple files.
Parameters
-------------
file_obj : None or file-like
Object containing header JSON, or None
resolver : trimesh.visual.Resolver
Object which can be used to load other files by name
**mesh_kwargs : dict
Passed to mesh constructor
Returns
--------------
kwargs : dict
Arguments to create scene
"""
try:
# see if we've been passed the GLTF header file
tree = json.load(file_obj)
except BaseException:
# otherwise header should be in 'model.gltf'
data = resolver['model.gltf']
# old versions of python/json need strings
try:
tree = json.loads(data)
except BaseException:
tree = json.loads(data.decode('utf-8'))
# use the resolver to get data from file names
buffers = [resolver[b['uri']] for b in tree['buffers']]
# turn the layout header and data into kwargs
# that can be used to instantiate a trimesh.Scene object
kwargs = _read_buffers(header=tree,
buffers=buffers,
mesh_kwargs=mesh_kwargs)
return kwargs
|
python
|
{
"resource": ""
}
|
q23407
|
load_glb
|
train
|
def load_glb(file_obj, resolver=None, **mesh_kwargs):
"""
Load a GLTF file in the binary GLB format into a trimesh.Scene.
Implemented from specification:
https://github.com/KhronosGroup/glTF/tree/master/specification/2.0
Parameters
------------
file_obj : file-like object
Containing GLB data
Returns
------------
kwargs : dict
Kwargs to instantiate a trimesh.Scene
"""
# save the start position of the file for referencing
# against lengths
start = file_obj.tell()
# read the first 20 bytes which contain section lengths
head_data = file_obj.read(20)
head = np.frombuffer(head_data, dtype="<u4")
# check to make sure first index is gltf
# and second is 2, for GLTF 2.0
if head[0] != _magic["gltf"] or head[1] != 2:
raise ValueError("file is not GLTF 2.0")
# overall file length
# first chunk length
# first chunk type
length, chunk_length, chunk_type = head[2:]
# first chunk should be JSON header
if chunk_type != _magic["json"]:
raise ValueError("no initial JSON header!")
# uint32 causes an error in read, so we convert to native int
# for the length passed to read, for the JSON header
json_data = file_obj.read(int(chunk_length))
# convert to text
if hasattr(json_data, "decode"):
json_data = json_data.decode("utf-8")
# load the json header to native dict
header = json.loads(json_data)
# read the binary data referred to by GLTF as 'buffers'
buffers = []
while (file_obj.tell() - start) < length:
# the last read put us past the JSON chunk
# we now read the chunk header, which is 8 bytes
chunk_head = file_obj.read(8)
if len(chunk_head) != 8:
# double check to make sure we didn't
# read the whole file
break
chunk_length, chunk_type = np.frombuffer(chunk_head, dtype="<u4")
# make sure we have the right data type
if chunk_type != _magic["bin"]:
raise ValueError("not binary GLTF!")
# read the chunk
chunk_data = file_obj.read(int(chunk_length))
if len(chunk_data) != chunk_length:
raise ValueError("chunk was not expected length!")
buffers.append(chunk_data)
# turn the layout header and data into kwargs
# that can be used to instantiate a trimesh.Scene object
kwargs = _read_buffers(header=header,
buffers=buffers,
mesh_kwargs=mesh_kwargs)
return kwargs
|
python
|
{
"resource": ""
}
|
q23408
|
_mesh_to_material
|
train
|
def _mesh_to_material(mesh, metallic=0.0, rough=0.0):
"""
Create a simple GLTF material for a mesh using the most
commonly occurring color in that mesh.
Parameters
------------
mesh: trimesh.Trimesh
Mesh to create a material from
Returns
------------
material: dict
In GLTF material format
"""
try:
# just get the most commonly occurring color
color = mesh.visual.main_color
except BaseException:
color = np.array([100, 100, 100, 255], dtype=np.uint8)
# convert uint color to 0.0-1.0 float color
color = color.astype(np.float32) / np.iinfo(color.dtype).max
material = {
"pbrMetallicRoughness": {
"baseColorFactor": color.tolist(),
"metallicFactor": metallic,
"roughnessFactor": rough}
}
return material
|
python
|
{
"resource": ""
}
|
q23409
|
_create_gltf_structure
|
train
|
def _create_gltf_structure(scene,
extras=None,
include_normals=False):
"""
Generate a GLTF header.
Parameters
-------------
scene : trimesh.Scene
Input scene data
extras : JSON serializable
Will be stored in the extras field
include_normals : bool
Include vertex normals in output file?
Returns
---------------
tree : dict
Contains required keys for a GLTF scene
buffer_items : list
Contains bytes of data
"""
# we are defining a single scene, and will be setting the
# world node to the 0th index
tree = {
"scene": 0,
"scenes": [{"nodes": [0]}],
"asset": {"version": "2.0",
"generator": "github.com/mikedh/trimesh"},
"accessors": [],
"meshes": [],
"materials": [],
"cameras": [_convert_camera(scene.camera)]}
if extras is not None:
tree['extras'] = extras
# grab the flattened scene graph in GLTF's format
nodes = scene.graph.to_gltf(scene=scene)
tree.update(nodes)
buffer_items = []
for name, geometry in scene.geometry.items():
if util.is_instance_named(geometry, "Trimesh"):
# add the mesh
_append_mesh(
mesh=geometry,
name=name,
tree=tree,
buffer_items=buffer_items,
include_normals=include_normals)
elif util.is_instance_named(geometry, "Path"):
# add Path2D and Path3D objects
_append_path(
path=geometry,
name=name,
tree=tree,
buffer_items=buffer_items)
# if nothing defined a material remove it from the structure
if len(tree["materials"]) == 0:
tree.pop("materials")
return tree, buffer_items
|
python
|
{
"resource": ""
}
|
q23410
|
_byte_pad
|
train
|
def _byte_pad(data, bound=4):
"""
GLTF wants chunks aligned with 4-byte boundaries,
so this function adds padding to the end of a
chunk of bytes so that it aligns with a specified
boundary size.
Parameters
--------------
data : bytes
Data to be padded
bound : int
Length of desired boundary
Returns
--------------
padded : bytes
Result where: (len(padded) % bound) == 0
"""
bound = int(bound)
if len(data) % bound != 0:
pad = bytes(bound - (len(data) % bound))
result = bytes().join([data, pad])
assert (len(result) % bound) == 0
return result
return data
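# Hedged usage sketch, not part of the original function: pad a 5-byte
# chunk to the 4-byte boundary GLTF expects.
padded = _byte_pad(b'hello')
len(padded)       # -> 8
len(padded) % 4   # -> 0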
|
python
|
{
"resource": ""
}
|
q23411
|
_append_path
|
train
|
def _append_path(path, name, tree, buffer_items):
"""
Append a 2D or 3D path to the scene structure and put the
data into buffer_items.
Parameters
-------------
path : trimesh.Path2D or trimesh.Path3D
Source geometry
name : str
Name of geometry
tree : dict
Will be updated with data from path
buffer_items
Will have buffer appended with path data
"""
# convert the path to the unnamed args for
# a pyglet vertex list
vxlist = rendering.path_to_vertexlist(path)
tree["meshes"].append({
"name": name,
"primitives": [{
"attributes": {"POSITION": len(tree["accessors"])},
"mode": 1, # mode 1 is GL_LINES
"material": len(tree["materials"])}]})
# if units are defined, store them as an extra:
# https://github.com/KhronosGroup/glTF/tree/master/extensions
if path.units is not None and 'meter' not in path.units:
tree["meshes"][-1]["extras"] = {"units": str(path.units)}
tree["accessors"].append(
{
"bufferView": len(buffer_items),
"componentType": 5126,
"count": vxlist[0],
"type": "VEC3",
"byteOffset": 0,
"max": path.vertices.max(axis=0).tolist(),
"min": path.vertices.min(axis=0).tolist(),
}
)
# TODO add color support to Path object
# this is just exporting everything as black
tree["materials"].append(_default_material)
# data is the second value of the fourth field
# which is a (data type, data) tuple
buffer_items.append(_byte_pad(
vxlist[4][1].astype(np.float32).tobytes()))
|
python
|
{
"resource": ""
}
|
q23412
|
_parse_materials
|
train
|
def _parse_materials(header, views):
"""
Convert materials and images stored in a GLTF header
and buffer views to PBRMaterial objects.
Parameters
------------
header : dict
Contains layout of file
views : (n,) bytes
Raw data
Returns
------------
materials : list
List of trimesh.visual.texture.Material objects
"""
try:
import PIL.Image
except ImportError:
log.warning("unable to load textures without pillow!")
return None
# load any images
images = None
if "images" in header:
# images are referenced by index
images = [None] * len(header["images"])
# loop through images
for i, img in enumerate(header["images"]):
# get the bytes representing an image
blob = views[img["bufferView"]]
# i.e. 'image/jpeg'
# mime = img['mimeType']
try:
# load the buffer into a PIL image
images[i] = PIL.Image.open(util.wrap_as_stream(blob))
except BaseException:
log.error("failed to load image!", exc_info=True)
# store materials which reference images
materials = []
if "materials" in header:
for mat in header["materials"]:
# flatten key structure so we can loop it
loopable = mat.copy()
# this key stores a nested dict of PBR values
if "pbrMetallicRoughness" in loopable:
# add keys of keys to top level dict
loopable.update(loopable.pop("pbrMetallicRoughness"))
# save flattened keys we can use for kwargs
pbr = {}
for k, v in loopable.items():
if not isinstance(v, dict):
pbr[k] = v
elif "index" in v:
# get the index of image for texture
idx = header["textures"][v["index"]]["source"]
# store the actual image as the value
pbr[k] = images[idx]
# create a PBR material object for the GLTF material
materials.append(visual.texture.PBRMaterial(**pbr))
return materials
|
python
|
{
"resource": ""
}
|
q23413
|
_convert_camera
|
train
|
def _convert_camera(camera):
"""
Convert a trimesh camera to a GLTF camera.
Parameters
------------
camera : trimesh.scene.cameras.Camera
Trimesh camera object
Returns
-------------
gltf_camera : dict
Camera represented as a GLTF dict
"""
result = {
"name": camera.name,
"type": "perspective",
"perspective": {
"aspectRatio": camera.fov[0] / camera.fov[1],
"yfov": np.radians(camera.fov[1]),
},
}
return result
|
python
|
{
"resource": ""
}
|
q23414
|
FilePathResolver.get
|
train
|
def get(self, name):
"""
Get an asset.
Parameters
-------------
name : str
Name of the asset
Returns
------------
data : bytes
Loaded data from asset
"""
# load the file by path name
with open(os.path.join(self.parent,
name.strip()), 'rb') as f:
data = f.read()
return data
|
python
|
{
"resource": ""
}
|
q23415
|
ZipResolver.get
|
train
|
def get(self, name):
"""
Get an asset from the ZIP archive.
Parameters
-------------
name : str
Name of the asset
Returns
-------------
data : bytes
Loaded data from asset
"""
# not much we can do with that
if name is None:
return
# if name isn't in archive try some similar values
if name not in self.archive:
if hasattr(name, 'decode'):
name = name.decode('utf-8')
# try with cleared whitespace, split paths
for option in [name,
name.lstrip('./'),
name.strip(),
name.split('/')[-1]]:
if option in self.archive:
name = option
break
# read file object from beginning
self.archive[name].seek(0)
# data is stored as a file object
data = self.archive[name].read()
return data
|
python
|
{
"resource": ""
}
|
q23416
|
WebResolver.get
|
train
|
def get(self, name):
"""
Get a resource from the remote site.
Parameters
-------------
name : str
Asset name, i.e. 'quadknot.obj.mtl'
"""
# do import here to keep soft dependency
import requests
# append base url to requested name
url = urljoin(self.base_url, name)
# fetch the data from the remote url
response = requests.get(url)
# return the bytes of the response
return response.content
|
python
|
{
"resource": ""
}
|
q23417
|
Geometry.apply_translation
|
train
|
def apply_translation(self, translation):
"""
Translate the current mesh.
Parameters
----------
translation : (3,) float
Translation in XYZ
"""
translation = np.asanyarray(translation, dtype=np.float64)
if translation.shape != (3,):
raise ValueError('Translation must be (3,)!')
matrix = np.eye(4)
matrix[:3, 3] = translation
self.apply_transform(matrix)
|
python
|
{
"resource": ""
}
|
q23418
|
Geometry.apply_scale
|
train
|
def apply_scale(self, scaling):
"""
Scale the mesh equally on all axes.
Parameters
----------
scaling : float
Scale factor to apply to the mesh
"""
scaling = float(scaling)
if not np.isfinite(scaling):
raise ValueError('Scaling factor must be finite number!')
matrix = np.eye(4)
matrix[:3, :3] *= scaling
# apply_transform will work nicely even on negative scales
self.apply_transform(matrix)
|
python
|
{
"resource": ""
}
|
q23419
|
Geometry.bounding_box
|
train
|
def bounding_box(self):
"""
An axis aligned bounding box for the current mesh.
Returns
----------
aabb : trimesh.primitives.Box
Box object with transform and extents defined
representing the axis aligned bounding box of the mesh
"""
from . import primitives
transform = np.eye(4)
# translate to center of axis aligned bounds
transform[:3, 3] = self.bounds.mean(axis=0)
aabb = primitives.Box(transform=transform,
extents=self.extents,
mutable=False)
return aabb
|
python
|
{
"resource": ""
}
|
q23420
|
Geometry.bounding_box_oriented
|
train
|
def bounding_box_oriented(self):
"""
An oriented bounding box for the current mesh.
Returns
---------
obb : trimesh.primitives.Box
Box object with transform and extents defined
representing the minimum volume oriented bounding box of the mesh
"""
from . import primitives, bounds
to_origin, extents = bounds.oriented_bounds(self)
obb = primitives.Box(transform=np.linalg.inv(to_origin),
extents=extents,
mutable=False)
return obb
|
python
|
{
"resource": ""
}
|
q23421
|
Geometry.bounding_sphere
|
train
|
def bounding_sphere(self):
"""
A minimum volume bounding sphere for the current mesh.
Note that the Sphere primitive returned has an unpadded, exact
sphere_radius, so while every vertex of the current mesh is within
sphere_radius of sphere_center, the faceted sphere primitive may
not contain every vertex.
Returns
--------
minball: trimesh.primitives.Sphere
Sphere primitive containing current mesh
"""
from . import primitives, nsphere
center, radius = nsphere.minimum_nsphere(self)
minball = primitives.Sphere(center=center,
radius=radius,
mutable=False)
return minball
|
python
|
{
"resource": ""
}
|
q23422
|
Geometry.bounding_cylinder
|
train
|
def bounding_cylinder(self):
"""
A minimum volume bounding cylinder for the current mesh.
Returns
--------
mincyl : trimesh.primitives.Cylinder
Cylinder primitive containing current mesh
"""
from . import primitives, bounds
kwargs = bounds.minimum_cylinder(self)
mincyl = primitives.Cylinder(mutable=False, **kwargs)
return mincyl
|
python
|
{
"resource": ""
}
|
q23423
|
export_mesh
|
train
|
def export_mesh(mesh, file_obj, file_type=None, **kwargs):
"""
Export a Trimesh object to a file-like object, or to a filename.
Parameters
---------
file_obj : str, file-like
Where should mesh be exported to
file_type : str or None
Represents file type (eg: 'stl')
Returns
----------
exported : bytes or str
Result of exporter
"""
# if we opened a file object in this function
# we will want to close it when we're done
was_opened = False
if util.is_string(file_obj):
if file_type is None:
file_type = (str(file_obj).split('.')[-1]).lower()
if file_type in _mesh_exporters:
was_opened = True
file_obj = open(file_obj, 'wb')
file_type = str(file_type).lower()
if not (file_type in _mesh_exporters):
raise ValueError('%s exporter not available!' % file_type)
if isinstance(mesh, (list, tuple, set, np.ndarray)):
faces = 0
for m in mesh:
faces += len(m.faces)
log.debug('Exporting %d meshes with a total of %d faces as %s',
len(mesh), faces, file_type.upper())
else:
log.debug('Exporting %d faces as %s', len(mesh.faces),
file_type.upper())
export = _mesh_exporters[file_type](mesh, **kwargs)
if hasattr(file_obj, 'write'):
result = util.write_encoded(file_obj, export)
else:
result = export
if was_opened:
file_obj.close()
return result
|
python
|
{
"resource": ""
}
|
q23424
|
export_off
|
train
|
def export_off(mesh, digits=10):
"""
Export a mesh as an OFF file, a simple text format
Parameters
-----------
mesh : trimesh.Trimesh
Geometry to export
digits : int
Number of digits to include on floats
Returns
-----------
export : str
OFF format output
"""
# make sure specified digits is an int
digits = int(digits)
# prepend a 3 (face count) to each face
faces_stacked = np.column_stack((np.ones(len(mesh.faces)) * 3,
mesh.faces)).astype(np.int64)
export = 'OFF\n'
# the header is vertex count, face count, edge count (written as zero)
export += str(len(mesh.vertices)) + ' ' + str(len(mesh.faces)) + ' 0\n'
export += util.array_to_string(
mesh.vertices, col_delim=' ', row_delim='\n', digits=digits) + '\n'
export += util.array_to_string(
faces_stacked, col_delim=' ', row_delim='\n')
return export
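# Hedged usage sketch, not part of the original function: assumes trimesh is
# importable; a unit box has 8 vertices and 12 triangular faces.
import trimesh
off_text = export_off(trimesh.creation.box())
off_text.splitlines()[:2]  # -> ['OFF', '8 12 0']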
|
python
|
{
"resource": ""
}
|
q23425
|
export_dict
|
train
|
def export_dict(mesh, encoding=None):
"""
Export a mesh to a dict
Parameters
------------
mesh : Trimesh object
Mesh to be exported
encoding : str, or None
'base64'
Returns
-------------
export : dict
Mesh data with keys for metadata, faces, face_normals,
vertices, and optional face or vertex colors
"""
def encode(item, dtype=None):
if encoding is None:
return item.tolist()
else:
if dtype is None:
dtype = item.dtype
return util.array_to_encoded(item, dtype=dtype, encoding=encoding)
# metadata keys we explicitly want to preserve
# sometimes there are giant datastructures we don't
# care about in metadata which causes exports to be
# extremely slow, so skip all but known good keys
meta_keys = ['units', 'file_name', 'file_path']
metadata = {k: v for k, v in mesh.metadata.items() if k in meta_keys}
export = {
'metadata': metadata,
'faces': encode(mesh.faces),
'face_normals': encode(mesh.face_normals),
'vertices': encode(mesh.vertices)
}
if mesh.visual.kind == 'face':
export['face_colors'] = encode(mesh.visual.face_colors)
elif mesh.visual.kind == 'vertex':
export['vertex_colors'] = encode(mesh.visual.vertex_colors)
return export
|
python
|
{
"resource": ""
}
|
q23426
|
minify
|
train
|
def minify(path):
"""
Load a javascript file and minify.
Parameters
------------
path: str, path of resource
"""
if 'http' in path:
data = requests.get(path).content.decode(
'ascii', errors='ignore')
else:
with open(path, 'rb') as f:
# some files contain unicode spaces, so ignore decode errors
data = f.read().decode('ascii',
errors='ignore')
# don't re-minify already minified files
if '.min.' in path:
return data
try:
return jsmin.jsmin(data)
except BaseException:
return data
|
python
|
{
"resource": ""
}
|
q23427
|
circle_pattern
|
train
|
def circle_pattern(pattern_radius,
circle_radius,
count,
center=[0.0, 0.0],
angle=None,
**kwargs):
"""
Create a Path2D representing a circle pattern.
Parameters
------------
pattern_radius : float
Radius of circle centers
circle_radius : float
The radius of each circle
count : int
Number of circles in the pattern
center : (2,) float
Center of pattern
angle : float
If defined pattern will span this angle
If None, pattern will be evenly spaced
Returns
-------------
pattern : trimesh.path.Path2D
Path containing circular pattern
"""
from .path import Path2D
if angle is None:
angles = np.linspace(0.0, np.pi * 2.0, count + 1)[:-1]
elif isinstance(angle, float) or isinstance(angle, int):
angles = np.linspace(0.0, angle, count)
else:
raise ValueError('angle must be float or int!')
# centers of circles
centers = np.column_stack((
np.cos(angles), np.sin(angles))) * pattern_radius
vert = []
ents = []
for circle_center in centers:
# (3,3) center points of arc
three = arc.to_threepoint(angles=[0, np.pi],
center=circle_center,
radius=circle_radius)
# add a single circle entity
ents.append(
entities.Arc(
points=np.arange(3) + len(vert),
closed=True))
# keep flat array by extend instead of append
vert.extend(three)
# translate vertices to pattern center
vert = np.array(vert) + center
pattern = Path2D(entities=ents,
vertices=vert,
**kwargs)
return pattern
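# Hedged usage sketch, not part of the original function: assumes it runs in
# the module where Path2D, arc and entities are importable as above.
pattern = circle_pattern(pattern_radius=2.0, circle_radius=0.25, count=6)
len(pattern.entities)   # -> 6 closed Arc entities
pattern.vertices.shape  # -> (18, 2), three control points per circle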
|
python
|
{
"resource": ""
}
|
q23428
|
plane_transform
|
train
|
def plane_transform(origin, normal):
"""
Given the origin and normal of a plane find the transform
that will move that plane to be coplanar with the XY plane.
Parameters
----------
origin : (3,) float
Point that lies on the plane
normal : (3,) float
Vector that points along normal of plane
Returns
---------
transform: (4,4) float
Transformation matrix to move points onto XY plane
"""
transform = align_vectors(normal, [0, 0, 1])
transform[0:3, 3] = -np.dot(transform,
np.append(origin, 1))[0:3]
return transform
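# Hedged usage sketch, not part of the original function: a plane at z=1 with
# a +Z normal needs no rotation, only a -1 translation along Z; assumes
# numpy as np and align_vectors (defined below) are available.
T = plane_transform(origin=[0, 0, 1], normal=[0, 0, 1])
np.allclose(T[:3, 3], [0, 0, -1])  # -> True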
|
python
|
{
"resource": ""
}
|
q23429
|
align_vectors
|
train
|
def align_vectors(a, b, return_angle=False):
"""
Find a transform between two 3D vectors.
Implements the method described here:
http://ethaneade.com/rot_between_vectors.pdf
Parameters
--------------
a : (3,) float
Source vector
b : (3,) float
Target vector
return_angle : bool
If True return the angle between the two vectors
Returns
-------------
transform : (4, 4) float
Homogeneous transform from a to b
angle : float
Angle between vectors in radians
Only returned if return_angle
"""
# copy of input vectors
a = np.array(a, dtype=np.float64, copy=True)
b = np.array(b, dtype=np.float64, copy=True)
# make sure vectors are 3D
if a.shape != (3,) or b.shape != (3,):
raise ValueError('only works for (3,) vectors')
# unitize input vectors
a /= np.linalg.norm(a)
b /= np.linalg.norm(b)
# projection of a onto b
dot = np.dot(a, b)
# are vectors just reversed
if dot < (tol.zero - 1):
# a reversed vector is 180 degrees
angle = np.pi
# get an arbitrary perpendicular vector to a
perp = util.generate_basis(a)[0] * np.eye(3)
# (3, 3) rotation from a to b
rotation = (2 * np.dot(perp, perp.T)) - np.eye(3)
# are vectors already the same
elif dot > (1 - tol.zero):
angle = 0.0
# no rotation
rotation = np.eye(3)
# vectors are at some angle to each other
else:
# we already handled values out of the range [-1.0, 1.0]
angle = np.arccos(dot)
# (3,) vector perpendicular to both a and b
w = np.cross(a, b)
# scale factor: 1 / (1 + dot)
c = 1.0 / (1.0 + dot)
# (3, 3) skew-symmetric matrix from the (3,) vector w
# the matrix has the property: wx == -wx.T
wx = np.array([[0, -w[2], w[1]],
[w[2], 0, -w[0]],
[-w[1], w[0], 0]])
# (3, 3) rotation from a to b
rotation = np.eye(3) + wx + (np.dot(wx, wx) * c)
# put rotation into homogeneous transformation matrix
transform = np.eye(4)
transform[:3, :3] = rotation
if return_angle:
return transform, angle
return transform
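# Hedged usage sketch, not part of the original function: rotate +X onto +Y
# and check the rotation block; assumes numpy as np is available.
T = align_vectors([1, 0, 0], [0, 1, 0])
np.allclose(np.dot(T[:3, :3], [1, 0, 0]), [0, 1, 0])  # -> True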
|
python
|
{
"resource": ""
}
|
q23430
|
vector_angle
|
train
|
def vector_angle(pairs):
"""
Find the angles between pairs of unit vectors.
Parameters
----------
pairs : (n, 2, 3) float
Unit vector pairs
Returns
----------
angles : (n,) float
Angles between vectors in radians
"""
pairs = np.asanyarray(pairs, dtype=np.float64)
if len(pairs) == 0:
return np.array([])
elif util.is_shape(pairs, (2, 3)):
pairs = pairs.reshape((-1, 2, 3))
elif not util.is_shape(pairs, (-1, 2, (2, 3))):
raise ValueError('pairs must be (n,2,(2|3))!')
# do the dot product between vectors
dots = util.diagonal_dot(pairs[:, 0], pairs[:, 1])
# clip for floating point error
dots = np.clip(dots, -1.0, 1.0)
# take the arccos and remove arbitrary sign
angles = np.abs(np.arccos(dots))
return angles
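# Hedged usage sketch, not part of the original function: the angle between
# the +X and +Y unit vectors is pi/2; assumes numpy as np is available.
pairs = np.array([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])
vector_angle(pairs)  # -> array([1.57079633])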
|
python
|
{
"resource": ""
}
|
q23431
|
triangulate_quads
|
train
|
def triangulate_quads(quads):
"""
Given a set of quad faces, return them as triangle faces.
Parameters
-----------
quads: (n, 4) int
Vertex indices of quad faces
Returns
-----------
faces : (m, 3) int
Vertex indices of triangular faces
"""
if len(quads) == 0:
return quads
quads = np.asanyarray(quads)
faces = np.vstack((quads[:, [0, 1, 2]],
quads[:, [2, 3, 0]]))
return faces
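# Hedged usage sketch, not part of the original function: one quad splits
# into two triangles sharing the 0-2 diagonal; assumes numpy as np.
triangulate_quads(np.array([[0, 1, 2, 3]]))
# -> array([[0, 1, 2],
#           [2, 3, 0]])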
|
python
|
{
"resource": ""
}
|
q23432
|
mean_vertex_normals
|
train
|
def mean_vertex_normals(vertex_count,
faces,
face_normals,
**kwargs):
"""
Find vertex normals from the mean of the faces that contain
that vertex.
Parameters
-----------
vertex_count : int
The number of vertices faces refer to
faces : (n, 3) int
List of vertex indices
face_normals : (n, 3) float
Normal vector for each face
Returns
-----------
vertex_normals : (vertex_count, 3) float
Normals for every vertex
Vertices unreferenced by faces will be zero.
"""
def summed_sparse():
# use a sparse matrix of which face contains each vertex to
# figure out the summed normal at each vertex
# allow cached sparse matrix to be passed
if 'sparse' in kwargs:
sparse = kwargs['sparse']
else:
sparse = index_sparse(vertex_count, faces)
summed = sparse.dot(face_normals)
return summed
def summed_loop():
# loop through every face, in tests was ~50x slower than
# doing this with a sparse matrix
summed = np.zeros((vertex_count, 3))
for face, normal in zip(faces, face_normals):
summed[face] += normal
return summed
try:
summed = summed_sparse()
except BaseException:
log.warning(
'unable to generate sparse matrix! Falling back!',
exc_info=True)
summed = summed_loop()
# invalid normals will be returned as zero
vertex_normals = util.unitize(summed)
return vertex_normals
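# Hedged usage sketch, not part of the original function: a single triangle
# gives its vertices the face normal and leaves the unreferenced vertex at
# zero; assumes numpy as np and the module's index_sparse/util are available.
mean_vertex_normals(vertex_count=4,
faces=np.array([[0, 1, 2]]),
face_normals=np.array([[0.0, 0.0, 1.0]]))
# -> rows 0-2 are [0, 0, 1], row 3 is [0, 0, 0]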
|
python
|
{
"resource": ""
}
|
q23433
|
index_sparse
|
train
|
def index_sparse(column_count, indices):
"""
Return a sparse matrix recording which vertices are contained in which faces.
Returns
---------
sparse: scipy.sparse.coo_matrix of shape (column_count, len(faces))
dtype is boolean
Examples
----------
In [1]: sparse = index_sparse(len(mesh.vertices), mesh.faces)
In [2]: sparse.shape
Out[2]: (12, 20)
In [3]: mesh.faces.shape
Out[3]: (20, 3)
In [4]: mesh.vertices.shape
Out[4]: (12, 3)
In [5]: dense = sparse.toarray().astype(int)
In [6]: dense
Out[6]:
array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1],
[1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1]])
In [7]: dense.sum(axis=0)
Out[7]: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3])
"""
indices = np.asanyarray(indices)
column_count = int(column_count)
row = indices.reshape(-1)
col = np.tile(np.arange(len(indices)).reshape(
(-1, 1)), (1, indices.shape[1])).reshape(-1)
shape = (column_count, len(indices))
data = np.ones(len(col), dtype=bool)
sparse = coo_matrix((data, (row, col)),
shape=shape,
dtype=bool)
return sparse
|
python
|
{
"resource": ""
}
|
q23434
|
get_json
|
train
|
def get_json(file_name='../dxf.json.template'):
"""
Load the JSON blob into native objects
"""
with open(file_name, 'r') as f:
t = json.load(f)
return t
|
python
|
{
"resource": ""
}
|
q23435
|
write_json
|
train
|
def write_json(template, file_name='../dxf.json.template'):
"""
Write a native object to a JSON blob
"""
with open(file_name, 'w') as f:
json.dump(template, f, indent=4)
|
python
|
{
"resource": ""
}
|
q23436
|
replace_whitespace
|
train
|
def replace_whitespace(text, SAFE_SPACE='|<^>|', insert=True):
"""
Replace non-strippable whitespace in a string with a safe space
"""
if insert:
# replace whitespace with safe space chr
args = (' ', SAFE_SPACE)
else:
# replace safe space chr with whitespace
args = (SAFE_SPACE, ' ')
return '\n'.join(line.strip().replace(*args)
for line in str.splitlines(text))
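# Hedged round-trip sketch, not part of the original function: protect the
# interior space, then restore it.
protected = replace_whitespace('HEADER 9\nSECTION', insert=True)
# -> 'HEADER|<^>|9\nSECTION'
replace_whitespace(protected, insert=False)
# -> 'HEADER 9\nSECTION'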
|
python
|
{
"resource": ""
}
|
q23437
|
discretize_arc
|
train
|
def discretize_arc(points,
close=False,
scale=1.0):
"""
Returns a version of a three point arc consisting of
line segments.
Parameters
---------
points : (3, d) float
Points on the arc where d in [2,3]
close : boolean
If True close the arc into a circle
scale : float
What is the approximate overall drawing scale
Used to establish order of magnitude for precision
Returns
---------
discrete : (m, d) float
Connected points in space
"""
# make sure points are (n, 3)
points, is_2D = util.stack_3D(points, return_2D=True)
# find the center of the points
center_info = arc_center(points)
center, R, N, angle = (center_info['center'],
center_info['radius'],
center_info['normal'],
center_info['span'])
# if requested, close arc into a circle
if close:
angle = np.pi * 2
# the number of facets, based on the angle criteria
count_a = angle / res.seg_angle
count_l = ((R * angle)) / (res.seg_frac * scale)
# figure out the number of line segments
count = np.max([count_a, count_l])
# force at LEAST 4 points for the arc
# otherwise the endpoints will diverge
count = np.clip(count, 4, np.inf)
count = int(np.ceil(count))
V1 = util.unitize(points[0] - center)
V2 = util.unitize(np.cross(-N, V1))
t = np.linspace(0, angle, count)
discrete = np.tile(center, (count, 1))
discrete += R * np.cos(t).reshape((-1, 1)) * V1
discrete += R * np.sin(t).reshape((-1, 1)) * V2
# do an in-process check to make sure result endpoints
# match the endpoints of the source arc
if not close:
arc_dist = np.linalg.norm(points[[0, -1]] -
discrete[[0, -1]], axis=1)
arc_ok = (arc_dist < tol.merge).all()
if not arc_ok:
log.warn(
'failed to discretize arc (endpoint distance %s)',
str(arc_dist))
log.warn('Failed arc points: %s', str(points))
raise ValueError('Arc endpoints diverging!')
discrete = discrete[:, :(3 - is_2D)]
return discrete
|
python
|
{
"resource": ""
}
|
q23438
|
to_threepoint
|
train
|
def to_threepoint(center, radius, angles=None):
"""
For 2D arcs, given a center and radius convert them to three
points on the arc.
Parameters
-----------
center : (2,) float
Center point on the plane
radius : float
Radius of arc
angles : (2,) float
Angles in radians for start and end angle
if not specified, will default to (0.0, pi)
Returns
----------
three : (3, 2) float
Arc control points
"""
# if no angles provided assume we want a half circle
if angles is None:
angles = [0.0, np.pi]
# force angles to float64
angles = np.asanyarray(angles, dtype=np.float64)
if angles.shape != (2,):
raise ValueError('angles must be (2,)!')
# provide the wrap around
if angles[1] < angles[0]:
angles[1] += np.pi * 2
center = np.asanyarray(center, dtype=np.float64)
if center.shape != (2,):
raise ValueError('only valid on 2D arcs!')
# turn the angles of [start, end]
# into [start, middle, end]
angles = np.array([angles[0],
angles.mean(),
angles[1]],
dtype=np.float64)
# turn angles into (3,2) points
three = np.column_stack((np.cos(angles),
np.sin(angles))) * radius
three += center
return three
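# Hedged usage sketch, not part of the original function: the default angles
# (0, pi) give a half circle; assumes numpy as np is available.
to_threepoint(center=[0.0, 0.0], radius=2.0)
# -> array([[ 2., 0.], [ 0., 2.], [-2., 0.]])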
|
python
|
{
"resource": ""
}
|
q23439
|
abspath
|
train
|
def abspath(rel):
"""
Take paths relative to the current file and
convert them to absolute paths.
Parameters
------------
rel : str
Relative path, IE '../stuff'
Returns
-------------
abspath : str
Absolute path, IE '/home/user/stuff'
"""
return os.path.abspath(os.path.join(cwd, rel))
|
python
|
{
"resource": ""
}
|
q23440
|
load_pyassimp
|
train
|
def load_pyassimp(file_obj,
file_type=None,
resolver=None,
**kwargs):
"""
Use the pyassimp library to load a mesh from a file object
and type or file name if file_obj is a string
Parameters
---------
file_obj: str, or file object
File path or object containing mesh data
file_type : str
File extension, aka 'stl'
resolver : trimesh.visual.resolvers.Resolver
Used to load referenced data (like texture files)
kwargs : dict
Passed through to mesh constructor
Returns
---------
scene : trimesh.Scene
Native trimesh copy of assimp scene
"""
def LP_to_TM(lp):
# try to get the vertex colors attribute
colors = (np.reshape(lp.colors, (-1, 4))
[:, :3] * 255).round().astype(np.uint8)
# If no vertex colors, try to extract them from the material
if len(colors) == 0:
if 'diffuse' in lp.material.properties.keys():
colors = np.array(lp.material.properties['diffuse'])
# pass kwargs through to mesh constructor
mesh_kwargs = copy.deepcopy(kwargs)
# add data from the LP_Mesh
mesh_kwargs.update({'vertices': lp.vertices,
'vertex_normals': lp.normals,
'faces': lp.faces,
'vertex_colors': colors})
return mesh_kwargs
# did we open the file inside this function
opened = False
# not a file object
if not hasattr(file_obj, 'read'):
# if there is no read attribute
# we assume we've been passed a file name
file_type = (str(file_obj).split('.')[-1]).lower()
file_obj = open(file_obj, 'rb')
opened = True
# we need files to be bytes
elif not hasattr(file_obj, 'mode') or file_obj.mode != 'rb':
# assimp will crash on anything that isn't binary
# so if we have a text mode file or anything else
# grab the data, encode as bytes, and then use stream
data = file_obj.read()
if hasattr(data, 'encode'):
data = data.encode('utf-8')
file_obj = util.wrap_as_stream(data)
# load the scene using pyassimp
scene = pyassimp.load(file_obj,
file_type=file_type)
# save a record of mesh names used so we
# don't have to do queries on mesh_id.values()
mesh_names = set()
# save a mapping for {id(mesh) : name}
mesh_id = {}
# save results as {name : Trimesh}
meshes = {}
# loop through scene LPMesh objects
for m in scene.meshes:
# skip meshes without tri/quad faces
if m.faces.shape[1] not in [3, 4]:
continue
# if this mesh has the name of an existing mesh
if m.name in mesh_names:
# make it the name plus the unique ID of the object
name = m.name + str(id(m))
else:
# otherwise just use the name it calls itself by
name = m.name
# save the name to mark as consumed
mesh_names.add(name)
# save the id:name mapping
mesh_id[id(m)] = name
# convert the mesh to a trimesh object
meshes[name] = LP_to_TM(m)
# now go through and collect the transforms from the scene
# we are going to save them as a list of dict kwargs
transforms = []
# nodes as (parent, node) tuples
# use deque so we can pop from both ends
queue = collections.deque(
[('world', n) for
n in scene.rootnode.children])
# consume the queue
while len(queue) > 0:
# parent name, node object
parent, node = queue.pop()
# assimp node names are often duplicated,
# but object IDs are unique and consistent
node_name = id(node)
transforms.append({'frame_from': parent,
'frame_to': node_name,
'matrix': node.transformation})
# loop through meshes this node uses
# note that they are the SAME objects as converted
# above so we can find their reference using id()
for m in node.meshes:
if id(m) not in mesh_id:
continue
# create kwargs for graph.update
edge = {'frame_from': node_name,
'frame_to': str(id(m)) + str(node_name),
'matrix': np.eye(4),
'geometry': mesh_id[id(m)]}
transforms.append(edge)
# add any children to the queue to be visited
for child in node.children:
queue.appendleft((node_name, child))
# release the loaded scene
pyassimp.release(scene)
# if we opened the file in this function close it
if opened:
file_obj.close()
# create kwargs for trimesh.exchange.load.load_kwargs
result = {'class': 'Scene',
'geometry': meshes,
'graph': transforms,
'base_frame': 'world'}
return result
|
python
|
{
"resource": ""
}
|
q23441
|
load_cyassimp
|
train
|
def load_cyassimp(file_obj,
file_type=None,
resolver=None,
**kwargs):
"""
Load a file using the cyassimp bindings.
The easiest way to install these is with conda:
conda install -c menpo/label/master cyassimp
Parameters
---------
file_obj: str, or file object
File path or object containing mesh data
file_type : str
File extension, aka 'stl'
resolver : trimesh.visual.resolvers.Resolver
Used to load referenced data (like texture files)
kwargs : dict
Passed through to mesh constructor
Returns
---------
meshes : (n,) list of dict
Contain kwargs for Trimesh constructor
"""
if hasattr(file_obj, 'read'):
# if it has a read attribute it is probably a file object
with tempfile.NamedTemporaryFile(
suffix=str(file_type)) as file_temp:
file_temp.write(file_obj.read())
# file name should be bytes
scene = cyassimp.AIImporter(
file_temp.name.encode('utf-8'))
scene.build_scene()
else:
scene = cyassimp.AIImporter(file_obj.encode('utf-8'))
scene.build_scene()
meshes = []
for m in scene.meshes:
mesh_kwargs = kwargs.copy()
mesh_kwargs.update({'vertices': m.points,
'faces': m.trilist})
meshes.append(mesh_kwargs)
if len(meshes) == 1:
return meshes[0]
return meshes
|
python
|
{
"resource": ""
}
|
q23442
|
load_path
|
train
|
def load_path(obj, file_type=None, **kwargs):
"""
Load a file to a Path object.
Parameters
-----------
obj : One of the following:
- Path, Path2D, or Path3D objects
- open file object (dxf or svg)
- file name (dxf or svg)
- shapely.geometry.Polygon
- shapely.geometry.MultiLineString
- dict with kwargs for Path constructor
- (n,2,(2|3)) float, line segments
file_type : str
Type of file; required if a file
object is passed.
Returns
---------
path : Path, Path2D, Path3D object
Data as a native trimesh Path object
"""
if isinstance(obj, Path):
# we have been passed a Path object so
# do nothing and return the passed object
return obj
elif util.is_file(obj):
# for open file objects use loaders
loaded = path_loaders[file_type](obj,
file_type=file_type)
obj.close()
elif util.is_string(obj):
# strings passed are evaluated as file objects
with open(obj, 'rb') as file_obj:
# get the file type from the extension
file_type = os.path.splitext(obj)[-1][1:].lower()
# call the loader
loaded = path_loaders[file_type](file_obj,
file_type=file_type)
elif util.is_instance_named(obj, 'Polygon'):
# convert from shapely polygons to Path2D
loaded = misc.polygon_to_path(obj)
elif util.is_instance_named(obj, 'MultiLineString'):
# convert from shapely LineStrings to Path2D
loaded = misc.linestrings_to_path(obj)
elif util.is_instance_named(obj, 'dict'):
# load as kwargs
loaded = misc.dict_to_path(obj)
elif util.is_sequence(obj):
# load as lines in space
loaded = misc.lines_to_path(obj)
else:
raise ValueError('Not a supported object type!')
# pass kwargs through to path loader
kwargs.update(loaded)
# convert the kwargs to a Path2D or Path3D object
path = _create_path(**kwargs)
return path
|
python
|
{
"resource": ""
}
|
q23443
|
_create_path
|
train
|
def _create_path(entities,
vertices,
metadata=None,
**kwargs):
"""
Turn entities and vertices into a Path2D or a Path3D
object depending on dimension of vertices.
Parameters
-----------
entities : list
Entity objects that reference vertex indices
vertices : (n, 2) or (n, 3) float
Vertices in space
metadata : dict
Any metadata about the path object
Returns
-----------
as_path : Path2D or Path3D object
Args in native trimesh object form
"""
# make sure vertices are numpy array
vertices = np.asanyarray(vertices, dtype=np.float64)
# check dimension of vertices to decide on object type
if vertices.shape[1] == 2:
path_type = Path2D
elif vertices.shape[1] == 3:
path_type = Path3D
else:
# weird or empty vertices, just use default Path object
path_type = Path
# create the object
as_path = path_type(entities=entities,
vertices=vertices,
metadata=metadata,
**kwargs)
return as_path
|
python
|
{
"resource": ""
}
|
q23444
|
split_scene
|
train
|
def split_scene(geometry):
"""
Given a geometry, list of geometries, or a Scene
return them as a single Scene object.
Parameters
----------
geometry : Trimesh, Path2D, Scene, or list of geometries
Returns
---------
scene: trimesh.Scene
"""
# already a scene, so return it
if util.is_instance_named(geometry, 'Scene'):
return geometry
# a list of things
if util.is_sequence(geometry):
metadata = {}
for g in geometry:
try:
metadata.update(g.metadata)
except BaseException:
continue
return Scene(geometry,
metadata=metadata)
# a single geometry so we are going to split
split = collections.deque()
metadata = {}
for g in util.make_sequence(geometry):
split.extend(g.split())
metadata.update(g.metadata)
# if there is only one geometry in the mesh
# name it from the file name
if len(split) == 1 and 'file_name' in metadata:
split = {metadata['file_name']: split[0]}
scene = Scene(split, metadata=metadata)
return scene
|
python
|
{
"resource": ""
}
|
q23445
|
append_scenes
|
train
|
def append_scenes(iterable, common=['world']):
"""
Concatenate multiple scene objects into one scene.
Parameters
-------------
iterable : (n,) Trimesh or Scene
Geometries that should be appended
common : (n,) str
Nodes that shouldn't be remapped
Returns
------------
result : trimesh.Scene
Scene containing all geometry
"""
if isinstance(iterable, Scene):
return iterable
# save geometry in dict
geometry = {}
# save transforms as edge tuples
edges = []
# nodes which shouldn't be remapped
common = set(common)
# nodes which are consumed and need to be remapped
consumed = set()
def node_remap(node):
"""
Remap node to new name if necessary
Parameters
-------------
node : hashable
Node name in original scene
Returns
-------------
name : hashable
Node name in concatenated scene
"""
# if we've already remapped a node use it
if node in map_node:
return map_node[node]
# if a node is consumed and isn't one of the nodes
# we're going to hold common between scenes remap it
if node not in common and node in consumed:
name = str(node) + '-' + util.unique_id().upper()
map_node[node] = name
node = name
# keep track of which nodes have been used
# in the current scene
current.add(node)
return node
# loop through every geometry
for s in iterable:
# allow Trimesh/Path2D geometry to be passed
if hasattr(s, 'scene'):
s = s.scene()
# if we don't have a scene raise an exception
if not isinstance(s, Scene):
raise ValueError('{} is not a scene!'.format(
type(s).__name__))
# remap geometries if they have been consumed
map_geom = {}
for k, v in s.geometry.items():
# if a geometry already exists add a UUID to the name
if k in geometry:
name = str(k) + '-' + util.unique_id().upper()
else:
name = k
# store name mapping
map_geom[k] = name
# store geometry with new name
geometry[name] = v
# remap nodes and edges so duplicates won't
# stomp all over each other
map_node = {}
# the nodes used in this scene
current = set()
for a, b, attr in s.graph.to_edgelist():
# remap node names from local names
a, b = node_remap(a), node_remap(b)
# remap geometry keys
# if key is not in map_geom it means one of the scenes
# referred to geometry that doesn't exist
# rather than crash here we ignore it as the user
# possibly intended to add in geometries back later
if 'geometry' in attr and attr['geometry'] in map_geom:
attr['geometry'] = map_geom[attr['geometry']]
# save the new edge
edges.append((a, b, attr))
# mark nodes from current scene as consumed
consumed.update(current)
# add all data to a new scene
result = Scene()
result.graph.from_edgelist(edges)
result.geometry.update(geometry)
return result
|
python
|
{
"resource": ""
}
|
q23446
|
Scene.add_geometry
|
train
|
def add_geometry(self,
geometry,
node_name=None,
geom_name=None,
parent_node_name=None,
transform=None):
"""
Add a geometry to the scene.
If the mesh has multiple transforms defined in its
metadata, they will all be copied into the
TransformForest of the current scene automatically.
Parameters
----------
geometry : Trimesh, Path2D, Path3D, PointCloud, or list
Geometry to add to the scene
node_name : None or str
Name of the node holding the geometry
geom_name : None or str
Name used as the key in self.geometry
parent_node_name : None or str
Name of the parent node in the graph
transform : None or (4, 4) float
Transform from the parent node to the new node
Returns
----------
node_name : str
Name of node in self.graph
"""
if geometry is None:
return
# PointCloud objects will look like a sequence
elif util.is_sequence(geometry):
# if passed a sequence add all elements
for value in geometry:
self.add_geometry(
geometry=value,
node_name=node_name,
geom_name=geom_name,
parent_node_name=parent_node_name,
transform=transform,
)
return
elif isinstance(geometry, dict):
# if someone passed us a dict of geometry
for key, value in geometry.items():
self.add_geometry(value, geom_name=key)
return
# get or create a name to reference the geometry by
if geom_name is not None:
# if name is passed use it
name = geom_name
elif 'name' in geometry.metadata:
# if name is in metadata use it
name = geometry.metadata['name']
elif 'file_name' in geometry.metadata:
name = geometry.metadata['file_name']
else:
# try to create a simple name
name = 'geometry_' + str(len(self.geometry))
# if its already taken add a unique random string to it
if name in self.geometry:
name += ':' + util.unique_id().upper()
# save the geometry reference
self.geometry[name] = geometry
# create a unique node name if not passed
if node_name is None:
# a random unique identifier
unique = util.unique_id(increment=len(self.geometry))
# geometry name + UUID
node_name = name + '_' + unique.upper()
if transform is None:
# create an identity transform from parent_node
transform = np.eye(4)
self.graph.update(frame_to=node_name,
frame_from=parent_node_name,
matrix=transform,
geometry=name,
geometry_flags={'visible': True})
return node_name
|
python
|
{
"resource": ""
}
|
q23447
|
Scene.md5
|
train
|
def md5(self):
"""
MD5 of scene which will change when meshes or
transforms are changed
Returns
--------
hashed: str, MD5 hash of scene
"""
# start with transforms hash
hashes = [self.graph.md5()]
for g in self.geometry.values():
if hasattr(g, 'md5'):
hashes.append(g.md5())
elif hasattr(g, 'tostring'):
hashes.append(str(hash(g.tostring())))
else:
# try to just straight up hash
# this may raise errors
hashes.append(str(hash(g)))
md5 = util.md5_object(''.join(hashes))
return md5
|
python
|
{
"resource": ""
}
|
q23448
|
Scene.is_valid
|
train
|
def is_valid(self):
"""
Is every geometry connected to the root node.
Returns
-----------
is_valid : bool
Does every geometry have a transform
"""
if len(self.geometry) == 0:
return True
try:
referenced = {self.graph[i][1]
for i in self.graph.nodes_geometry}
except BaseException:
# if connectivity to world frame is broken return false
return False
# every geometry is referenced
ok = referenced == set(self.geometry.keys())
return ok
|
python
|
{
"resource": ""
}
|
q23449
|
Scene.bounds_corners
|
train
|
def bounds_corners(self):
"""
A list of points that represent the corners of the
AABB of every geometry in the scene.
This can be useful if you want to take the AABB in
a specific frame.
Returns
-----------
corners: (n, 3) float, points in space
"""
# the saved corners of each instance
corners_inst = []
# (n, 3) float corners of each geometry
corners_geom = {k: bounds_module.corners(v.bounds)
for k, v in self.geometry.items()}
for node_name in self.graph.nodes_geometry:
# access the transform and geometry name from node
transform, geometry_name = self.graph[node_name]
# not all nodes have associated geometry
if geometry_name is None:
continue
# transform geometry corners into where
# the instance of the geometry is located
corners_inst.extend(
transformations.transform_points(
corners_geom[geometry_name],
transform))
# make corners numpy array
corners_inst = np.array(corners_inst,
dtype=np.float64)
return corners_inst
|
python
|
{
"resource": ""
}
|
q23450
|
Scene.bounds
|
train
|
def bounds(self):
"""
Return the overall bounding box of the scene.
Returns
--------
bounds: (2,3) float points for min, max corner
"""
corners = self.bounds_corners
bounds = np.array([corners.min(axis=0),
corners.max(axis=0)])
return bounds
|
python
|
{
"resource": ""
}
|
q23451
|
Scene.triangles
|
train
|
def triangles(self):
"""
Return a correctly transformed polygon soup of the
current scene.
Returns
----------
triangles: (n,3,3) float, triangles in space
"""
triangles = collections.deque()
triangles_node = collections.deque()
for node_name in self.graph.nodes_geometry:
# which geometry does this node refer to
transform, geometry_name = self.graph[node_name]
# get the actual potential mesh instance
geometry = self.geometry[geometry_name]
if not hasattr(geometry, 'triangles'):
continue
# append the (n, 3, 3) triangles to a sequence
triangles.append(
transformations.transform_points(
geometry.triangles.copy().reshape((-1, 3)),
matrix=transform))
# save the node names for each triangle
triangles_node.append(
np.tile(node_name,
len(geometry.triangles)))
# save the resulting nodes to the cache
self._cache['triangles_node'] = np.hstack(triangles_node)
triangles = np.vstack(triangles).reshape((-1, 3, 3))
return triangles
|
python
|
{
"resource": ""
}
|
q23452
|
Scene.geometry_identifiers
|
train
|
def geometry_identifiers(self):
"""
Look up geometries by identifier MD5
Returns
---------
identifiers: dict, identifier md5: key in self.geometry
"""
identifiers = {mesh.identifier_md5: name
for name, mesh in self.geometry.items()}
return identifiers
|
python
|
{
"resource": ""
}
|
q23453
|
Scene.duplicate_nodes
|
train
|
def duplicate_nodes(self):
"""
Return a sequence of node keys of identical meshes.
Will combine meshes duplicated by copying in space with different keys in
self.geometry, as well as meshes repeated by self.nodes.
Returns
-----------
duplicates: (m) sequence of keys to self.nodes that represent
identical geometry
"""
# if there is no geometry we can have no duplicate nodes
if len(self.geometry) == 0:
return []
# geometry name : md5 of mesh
mesh_hash = {k: int(m.identifier_md5, 16)
for k, m in self.geometry.items()}
# the name of nodes in the scene graph with geometry
node_names = np.array(self.graph.nodes_geometry)
# the geometry names for each node in the same order
node_geom = np.array([self.graph[i][1] for i in node_names])
# the mesh md5 for each node in the same order
node_hash = np.array([mesh_hash[v] for v in node_geom])
# indexes of identical hashes
node_groups = grouping.group(node_hash)
# sequence of node names, where each sublist has identical geometry
duplicates = [np.sort(node_names[g]).tolist() for g in node_groups]
return duplicates
|
python
|
{
"resource": ""
}
|
q23454
|
Scene.set_camera
|
train
|
def set_camera(self,
angles=None,
distance=None,
center=None,
resolution=None,
fov=None):
"""
Create a camera object for self.camera, and add
a transform to self.graph for it.
If arguments are not passed sane defaults will be figured
out which show the mesh roughly centered.
Parameters
-----------
angles : (3,) float
Initial euler angles in radians
distance : float
Distance from centroid
center : (3,) float
Point the camera should be centered on
resolution : (2,) int
Camera resolution in pixels
fov : (2,) float
Field of view in degrees
"""
if fov is None:
fov = np.array([60, 45])
# if no geometry nothing to set camera to
if len(self.geometry) == 0:
return
# set with no rotation by default
if angles is None:
angles = np.zeros(3)
rotation = transformations.euler_matrix(*angles)
transform = cameras.look_at(self.bounds_corners,
fov=fov,
rotation=rotation,
distance=distance,
center=center)
if hasattr(self, '_camera') and self._camera is not None:
self.camera.fov = fov
self.camera._scene = self
self.camera.transform = transform
else:
# create a new camera object
self.camera = cameras.Camera(fov=fov,
scene=self,
transform=transform)
return self.camera
|
python
|
{
"resource": ""
}
|
q23455
|
Scene.camera
|
train
|
def camera(self):
"""
Get the single camera for the scene. If not manually
set, one will be automatically generated.
Returns
----------
camera : trimesh.scene.Camera
Camera object defined for the scene
"""
# no camera set for the scene yet
if not hasattr(self, '_camera') or self._camera is None:
# will create a camera with everything in view
return self.set_camera()
return self._camera
|
python
|
{
"resource": ""
}
|
q23456
|
Scene.lights
|
train
|
def lights(self):
"""
Get a list of the lights in the scene. If nothing is
set it will generate some automatically.
Returns
-------------
lights : [trimesh.scene.lighting.Light]
Lights in the scene.
"""
if not hasattr(self, '_lights') or self._lights is None:
# do some automatic lighting
lights, transforms = lighting.autolight(self)
# assign the transforms to the scene graph
for L, T in zip(lights, transforms):
self.graph[L.name] = T
# set the lights
self._lights = lights
return self._lights
|
python
|
{
"resource": ""
}
|
q23457
|
Scene.rezero
|
train
|
def rezero(self):
"""
Move the current scene so that the AABB of the whole
scene is centered at the origin.
Does this by changing the base frame to a new, offset
base frame.
"""
if self.is_empty or np.allclose(self.centroid, 0.0):
# early exit since what we want already exists
return
# the transformation to move the overall scene to AABB centroid
matrix = np.eye(4)
matrix[:3, 3] = -self.centroid
# we are going to change the base frame
new_base = str(self.graph.base_frame) + '_I'
self.graph.update(frame_from=new_base,
frame_to=self.graph.base_frame,
matrix=matrix)
self.graph.base_frame = new_base
|
python
|
{
"resource": ""
}
|
q23458
|
Scene.dump
|
train
|
def dump(self):
"""
Append all meshes in scene to a list of meshes.
Returns
----------
dumped: (n,) list of Trimesh objects transformed to their
location in the scene.graph
"""
result = collections.deque()
for node_name in self.graph.nodes_geometry:
transform, geometry_name = self.graph[node_name]
current = self.geometry[geometry_name].copy()
current.apply_transform(transform)
result.append(current)
return np.array(result)
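# A minimal usage sketch, assuming a standard trimesh install; the
# concatenate helper is an assumption about trimesh.util.
import trimesh

scene = trimesh.Scene([trimesh.creation.box(),
                       trimesh.creation.icosphere(radius=0.5)])
meshes = scene.dump()  # one world-space copy per geometry node
single = trimesh.util.concatenate(list(meshes))
print(len(meshes), single.vertices.shape)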
|
python
|
{
"resource": ""
}
|
q23459
|
Scene.convex_hull
|
train
|
def convex_hull(self):
"""
The convex hull of the whole scene
Returns
---------
hull: Trimesh object, convex hull of all meshes in scene
"""
points = util.vstack_empty([m.vertices for m in self.dump()])
hull = convex.convex_hull(points)
return hull
|
python
|
{
"resource": ""
}
|
q23460
|
Scene.export
|
train
|
def export(self, file_type=None):
"""
Export a snapshot of the current scene.
Parameters
----------
file_type: what encoding to use for meshes
ie: dict, dict64, stl
Returns
----------
export: dict with keys:
graph: edge list of transforms, eg:
((u, v, {'matrix' : np.eye(4)}))
geometry: dict of geometry name to encoded geometry
scene_cache: dict of cached bounds, extents, centroid and scale
"""
if util.is_string(file_type):
    file_type = file_type.strip().lower()
if file_type == 'gltf':
return gltf.export_gltf(self)
elif file_type == 'glb':
return gltf.export_glb(self)
export = {'graph': self.graph.to_edgelist(),
'geometry': {},
'scene_cache': {'bounds': self.bounds.tolist(),
'extents': self.extents.tolist(),
'centroid': self.centroid.tolist(),
'scale': self.scale}}
if file_type is None:
file_type = {'Trimesh': 'ply',
'Path2D': 'dxf'}
# if the mesh has an export method use it
# otherwise put the mesh
# itself into the export object
for geometry_name, geometry in self.geometry.items():
if hasattr(geometry, 'export'):
if isinstance(file_type, dict):
# case where we have export types that are different
# for different classes of objects.
for query_class, query_format in file_type.items():
if util.is_instance_named(geometry, query_class):
export_type = query_format
break
else:
# if file_type is not a dict, try to export everything in the
# the scene as that value like 'ply'
export_type = file_type
exported = {'data': geometry.export(file_type=export_type),
'file_type': export_type}
export['geometry'][geometry_name] = exported
else:
# case where mesh object doesn't have exporter
# might be that someone replaced the mesh with a URL
export['geometry'][geometry_name] = geometry
return export
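# A minimal usage sketch; the 'dict' and 'glb' format strings are assumptions
# about which exporters are available for Trimesh geometry.
import trimesh

scene = trimesh.Scene(trimesh.creation.box())
blob = scene.export(file_type='dict')  # graph edgelist + per-geometry kwargs
glb = scene.export(file_type='glb')    # single binary glTF blob
print(sorted(blob.keys()), len(glb))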
|
python
|
{
"resource": ""
}
|
q23461
|
Scene.save_image
|
train
|
def save_image(self, resolution=(1024, 768), **kwargs):
"""
Get a PNG image of a scene.
Parameters
-----------
resolution: (2,) int, resolution to render image
**kwargs: passed to SceneViewer constructor
Returns
-----------
png: bytes, render of scene in PNG form
"""
from ..viewer import render_scene
png = render_scene(scene=self,
resolution=resolution,
**kwargs)
return png
|
python
|
{
"resource": ""
}
|
q23462
|
Scene.units
|
train
|
def units(self):
"""
Get the units for every model in the scene, and
raise a ValueError if there are mixed units.
Returns
-----------
units : str
Units for every model in the scene
"""
existing = [i.units for i in self.geometry.values()]
if any(existing[0] != e for e in existing):
# if all of our geometry doesn't have the same units already
# the result would be meaningless, so raise an error
raise ValueError('models in scene have inconsistent units!')
return existing[0]
|
python
|
{
"resource": ""
}
|
q23463
|
Scene.units
|
train
|
def units(self, value):
"""
Set the units for every model in the scene without
converting any units just setting the tag.
Parameters
------------
value : str
Value to set every geometry unit value to
"""
for m in self.geometry.values():
m.units = value
|
python
|
{
"resource": ""
}
|
q23464
|
Scene.convert_units
|
train
|
def convert_units(self, desired, guess=False):
"""
If geometry has units defined convert them to new units.
Returns a new scene with geometries and transforms scaled.
Parameters
----------
desired : str
Desired final unit system: 'inches', 'mm', etc.
guess : bool
Is the converter allowed to guess scale when models
don't have it specified in their metadata.
Returns
----------
scaled : trimesh.Scene
Copy of scene with scaling applied and units set
for every model
"""
# if there is no geometry do nothing
if len(self.geometry) == 0:
return self.copy()
current = self.units
if current is None:
# will raise ValueError if not in metadata
# and not allowed to guess
current = units.units_from_metadata(self, guess=guess)
# find the float conversion
scale = units.unit_conversion(current=current,
desired=desired)
# exit early if our current units are the same as desired units
if np.isclose(scale, 1.0):
result = self.copy()
else:
result = self.scaled(scale=scale)
# apply the units to every geometry of the scaled result
result.units = desired
return result
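# A minimal usage sketch; the unit strings follow the docstring above ('mm',
# 'inches') and are assumptions about what trimesh.units accepts.
import trimesh

scene = trimesh.Scene(trimesh.creation.box(extents=(25.4, 25.4, 25.4)))
scene.units = 'mm'                      # tag every geometry, no scaling
inches = scene.convert_units('inches')  # scaled copy, roughly a unit cube
print(scene.units, '->', inches.units, inches.extents)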
|
python
|
{
"resource": ""
}
|
q23465
|
Scene.explode
|
train
|
def explode(self, vector=None, origin=None):
"""
Explode a scene around a point and vector.
Parameters
-----------
vector : (3,) float or float
Explode radially around a direction vector or spherically
origin : (3,) float
Point to explode around
"""
if origin is None:
origin = self.centroid
if vector is None:
vector = self.scale / 25.0
vector = np.asanyarray(vector, dtype=np.float64)
origin = np.asanyarray(origin, dtype=np.float64)
for node_name in self.graph.nodes_geometry:
transform, geometry_name = self.graph[node_name]
centroid = self.geometry[geometry_name].centroid
# transform centroid into nodes location
centroid = np.dot(transform,
np.append(centroid, 1))[:3]
if vector.shape == ():
# case where our vector is a single number
offset = (centroid - origin) * vector
elif np.shape(vector) == (3,):
projected = np.dot(vector, (centroid - origin))
offset = vector * projected
else:
raise ValueError('explode vector wrong shape!')
transform[0:3, 3] += offset
self.graph[node_name] = transform
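# A minimal usage sketch: spread two boxes apart for an exploded view;
# names and values are illustrative only.
import trimesh

a = trimesh.creation.box()
b = trimesh.creation.box()
b.apply_translation([2.0, 0.0, 0.0])
scene = trimesh.Scene([a, b])
scene.explode(vector=0.5)  # scalar -> spherical explode about the centroid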
|
python
|
{
"resource": ""
}
|
q23466
|
Scene.scaled
|
train
|
def scaled(self, scale):
"""
Return a copy of the current scene, with meshes and scene
transforms scaled to the requested factor.
Parameters
-----------
scale : float
Factor to scale meshes and transforms
Returns
-----------
scaled : trimesh.Scene
A copy of the current scene but scaled
"""
scale = float(scale)
# matrix for 2D scaling
scale_2D = np.eye(3) * scale
# matrix for 3D scaling
scale_3D = np.eye(4) * scale
# preallocate transforms and geometries
nodes = self.graph.nodes_geometry
transforms = np.zeros((len(nodes), 4, 4))
geometries = [None] * len(nodes)
# collect list of transforms
for i, node in enumerate(nodes):
transforms[i], geometries[i] = self.graph[node]
# result is a copy
result = self.copy()
# remove all existing transforms
result.graph.clear()
for group in grouping.group(geometries):
# hashable reference to self.geometry
geometry = geometries[group[0]]
# original transform from world to geometry
original = transforms[group[0]]
# transform for geometry
new_geom = np.dot(scale_3D, original)
if result.geometry[geometry].vertices.shape[1] == 2:
# if our scene is 2D only scale in 2D
result.geometry[geometry].apply_transform(scale_2D)
else:
# otherwise apply the full transform
result.geometry[geometry].apply_transform(new_geom)
for node, T in zip(self.graph.nodes_geometry[group],
transforms[group]):
# generate the new transforms
transform = util.multi_dot(
[scale_3D, T, np.linalg.inv(new_geom)])
# apply scale to translation
transform[:3, 3] *= scale
# update scene with new transforms
result.graph.update(frame_to=node,
matrix=transform,
geometry=geometry)
return result
|
python
|
{
"resource": ""
}
|
q23467
|
Scene.copy
|
train
|
def copy(self):
"""
Return a deep copy of the current scene
Returns
----------
copied : trimesh.Scene
Copy of the current scene
"""
# use the geometries copy method to
# allow them to handle references to unpickle-able objects
geometry = {n: g.copy() for n, g in self.geometry.items()}
# create a new scene with copied geometry and graph
copied = Scene(geometry=geometry,
graph=self.graph.copy())
return copied
|
python
|
{
"resource": ""
}
|
q23468
|
Scene.show
|
train
|
def show(self, viewer=None, **kwargs):
"""
Display the current scene.
Parameters
-----------
viewer : str or None
'gl': open a pyglet window
'notebook': return ipython.display.HTML
None: automatically pick based on whether or not
we are in an IPython notebook
smooth : bool
Turn on or off automatic smooth shading
"""
if viewer is None:
# check to see if we are in a notebook or not
from ..viewer import in_notebook
viewer = ['gl', 'notebook'][int(in_notebook())]
if viewer == 'gl':
# this imports pyglet, and will raise an ImportError
# if pyglet is not available
from ..viewer import SceneViewer
return SceneViewer(self, **kwargs)
elif viewer == 'notebook':
from ..viewer import scene_to_notebook
return scene_to_notebook(self, **kwargs)
else:
raise ValueError('viewer must be "gl", "notebook", or None')
|
python
|
{
"resource": ""
}
|
q23469
|
available_formats
|
train
|
def available_formats():
"""
Get a list of all available loaders
Returns
-----------
loaders : list
Extensions of available loaders
i.e. 'stl', 'ply', 'dxf', etc.
"""
loaders = mesh_formats()
loaders.extend(path_formats())
loaders.extend(compressed_loaders.keys())
return loaders
|
python
|
{
"resource": ""
}
|
q23470
|
load_mesh
|
train
|
def load_mesh(file_obj,
file_type=None,
resolver=None,
**kwargs):
"""
Load a mesh file into a Trimesh object
Parameters
-----------
file_obj : str or file object
File name or file with mesh data
file_type : str or None
Which file type, e.g. 'stl'
kwargs : dict
Passed to Trimesh constructor
Returns
----------
mesh : trimesh.Trimesh or trimesh.Scene
Loaded geometry data
"""
# parse the file arguments into clean loadable form
(file_obj, # file-like object
file_type, # str, what kind of file
metadata, # dict, any metadata from file name
opened, # bool, did we open the file ourselves
resolver # object to load referenced resources
) = parse_file_args(file_obj=file_obj,
file_type=file_type,
resolver=resolver)
try:
# make sure we keep passed kwargs to loader
# but also make sure loader keys override passed keys
results = mesh_loaders[file_type](file_obj,
file_type=file_type,
resolver=resolver,
**kwargs)
if util.is_file(file_obj):
file_obj.close()
log.debug('loaded mesh using %s',
mesh_loaders[file_type].__name__)
if not isinstance(results, list):
results = [results]
loaded = []
for result in results:
kwargs.update(result)
loaded.append(load_kwargs(kwargs))
loaded[-1].metadata.update(metadata)
if len(loaded) == 1:
loaded = loaded[0]
finally:
# if we failed to load close file
if opened:
file_obj.close()
return loaded
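# A minimal usage sketch; 'model.stl' is a placeholder path.
import trimesh

mesh = trimesh.load_mesh('model.stl')  # file_type inferred from the extension
# an already-open file object needs file_type set explicitly
with open('model.stl', 'rb') as f:
    also = trimesh.load_mesh(f, file_type='stl')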
|
python
|
{
"resource": ""
}
|
q23471
|
load_compressed
|
train
|
def load_compressed(file_obj,
file_type=None,
resolver=None,
mixed=False,
**kwargs):
"""
Given a compressed archive load all the geometry that
we can from it.
Parameters
----------
file_obj : open file-like object
Containing compressed data
file_type : str
Type of the archive file
mixed : bool
If False, for archives containing both 2D and 3D
data will only load the 3D data into the Scene.
Returns
----------
scene : trimesh.Scene
Geometry loaded in to a Scene object
"""
# parse the file arguments into clean loadable form
(file_obj, # file-like object
file_type, # str, what kind of file
metadata, # dict, any metadata from file name
opened, # bool, did we open the file ourselves
resolver # object to load referenced resources
) = parse_file_args(file_obj=file_obj,
file_type=file_type,
resolver=resolver)
try:
# a dict of 'name' : file-like object
files = util.decompress(file_obj=file_obj,
file_type=file_type)
# store loaded geometries as a list
geometries = []
# so loaders can access textures/etc
resolver = visual.resolvers.ZipResolver(files)
# try to save the files with meaningful metadata
if 'file_path' in metadata:
archive_name = metadata['file_path']
else:
archive_name = 'archive'
# populate our available formats
if mixed:
available = available_formats()
else:
# all types contained in ZIP archive
contains = set(util.split_extension(n).lower()
for n in files.keys())
# if there are no mesh formats available
if contains.isdisjoint(mesh_formats()):
available = path_formats()
else:
available = mesh_formats()
for name, data in files.items():
# only load formats that we support
compressed_type = util.split_extension(name).lower()
if compressed_type not in available:
# don't raise an exception, just try the next one
continue
# store the file name relative to the archive
metadata['file_name'] = (archive_name + '/' +
os.path.basename(name))
# load the individual geometry
loaded = load(file_obj=data,
file_type=compressed_type,
resolver=resolver,
metadata=metadata,
**kwargs)
# some loaders return multiple geometries
if util.is_sequence(loaded):
# if the loader has returned a list of meshes
geometries.extend(loaded)
else:
# if the loader has returned a single geometry
geometries.append(loaded)
finally:
# if we opened the file in this function
# clean up after ourselves
if opened:
file_obj.close()
# append meshes or scenes into a single Scene object
result = append_scenes(geometries)
return result
|
python
|
{
"resource": ""
}
|
q23472
|
load_remote
|
train
|
def load_remote(url, **kwargs):
"""
Load a mesh at a remote URL into a local trimesh object.
This must be called explicitly rather than automatically
from trimesh.load to ensure users don't accidentally make
network requests.
Parameters
------------
url : string
URL containing mesh file
**kwargs : passed to `load`
"""
# import here to keep requirement soft
import requests
# download the mesh
response = requests.get(url)
# wrap as file object
file_obj = util.wrap_as_stream(response.content)
# so loaders can access textures/etc
resolver = visual.resolvers.WebResolver(url)
# actually load
loaded = load(file_obj=file_obj,
file_type=url,
resolver=resolver,
**kwargs)
return loaded
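# A minimal usage sketch; the URL is a placeholder, the optional `requests`
# dependency must be installed, and exposing load_remote at the package top
# level is an assumption.
import trimesh

loaded = trimesh.load_remote('https://example.com/model.glb')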
|
python
|
{
"resource": ""
}
|
q23473
|
load_kwargs
|
train
|
def load_kwargs(*args, **kwargs):
"""
Load geometry from a properly formatted dict or kwargs
"""
def handle_scene():
"""
Load a scene from our kwargs:
class: Scene
geometry: dict, name: Trimesh kwargs
graph: list of dict, kwargs for scene.graph.update
base_frame: str, base frame of graph
"""
scene = Scene()
scene.geometry.update({k: load_kwargs(v) for
k, v in kwargs['geometry'].items()})
for k in kwargs['graph']:
if isinstance(k, dict):
scene.graph.update(**k)
elif util.is_sequence(k) and len(k) == 3:
scene.graph.update(k[1], k[0], **k[2])
if 'base_frame' in kwargs:
scene.graph.base_frame = kwargs['base_frame']
if 'metadata' in kwargs:
scene.metadata.update(kwargs['metadata'])
return scene
def handle_trimesh_kwargs():
"""
Load information with vertices and faces into a mesh
or PointCloud object.
"""
if (isinstance(kwargs['vertices'], dict) or
isinstance(kwargs['faces'], dict)):
return Trimesh(**misc.load_dict(kwargs))
elif kwargs['faces'] is None:
# vertices without faces returns a PointCloud
return PointCloud(**kwargs)
else:
return Trimesh(**kwargs)
def handle_trimesh_export():
data, file_type = kwargs['data'], kwargs['file_type']
if not isinstance(data, dict):
data = util.wrap_as_stream(data)
k = mesh_loaders[file_type](data,
file_type=file_type)
return Trimesh(**k)
# if we've been passed a single dict instead of kwargs
# substitute the dict for kwargs
if (len(kwargs) == 0 and
len(args) == 1 and
isinstance(args[0], dict)):
kwargs = args[0]
# function : list of expected keys
handlers = {handle_scene: ('graph', 'geometry'),
handle_trimesh_kwargs: ('vertices', 'faces'),
handle_trimesh_export: ('file_type', 'data')}
# loop through handler functions and expected key
handler = None
for func, expected in handlers.items():
if all(i in kwargs for i in expected):
# all expected kwargs exist
handler = func
# exit the loop as we found one
break
if handler is None:
raise ValueError('unable to determine type!')
return handler()
|
python
|
{
"resource": ""
}
|
q23474
|
parse_file_args
|
train
|
def parse_file_args(file_obj,
file_type,
resolver=None,
**kwargs):
"""
Given a file_obj and a file_type try to turn them into a file-like
object and a lowercase string of file type.
Parameters
-----------
file_obj: str: if string represents a file path, returns
-------------------------------------------
file_obj: an 'rb' opened file object of the path
file_type: the extension from the file path
str: if string is NOT a path, but has JSON-like special characters
-------------------------------------------
file_obj: the same string passed as file_obj
file_type: set to 'json'
str: string is a valid URL
-------------------------------------------
file_obj: an open 'rb' file object with retrieved data
file_type: from the extension
str: string is not an existing path or a JSON-like object
-------------------------------------------
ValueError will be raised as we can't do anything with input
file like object: we cannot grab information on file_type automatically
-------------------------------------------
ValueError will be raised if file_type is None
file_obj: same as input
file_type: same as input
other object: like a shapely.geometry.Polygon, etc:
-------------------------------------------
file_obj: same as input
file_type: if None initially, set to the class name
(in lower case), otherwise passed through
file_type: str, type of file and handled according to above
Returns
-----------
file_obj: loadable object
file_type: str, lower case of the type of file (eg 'stl', 'dae', etc)
metadata: dict, any metadata
opened: bool, did we open the file or not
"""
metadata = {}
opened = False
if ('metadata' in kwargs and
isinstance(kwargs['metadata'], dict)):
metadata.update(kwargs['metadata'])
if util.is_file(file_obj) and file_type is None:
raise ValueError('file_type must be set when passing file objects!')
if util.is_string(file_obj):
try:
# os.path.isfile will return False incorrectly
# if we don't give it an absolute path
file_path = os.path.expanduser(file_obj)
file_path = os.path.abspath(file_path)
exists = os.path.isfile(file_path)
except BaseException:
exists = False
# file_obj is a string which exists on the filesystem
if exists:
# if not passed create a resolver to find other files
if resolver is None:
resolver = visual.resolvers.FilePathResolver(file_path)
# save the file name and path to metadata
metadata['file_path'] = file_path
metadata['file_name'] = os.path.basename(file_obj)
# if file_obj is a path that exists use extension as file_type
if file_type is None:
file_type = util.split_extension(
file_path,
special=['tar.gz', 'tar.bz2'])
# actually open the file
file_obj = open(file_path, 'rb')
opened = True
else:
if '{' in file_obj:
# if a dict bracket is in the string, it's
# probably straight JSON
file_type = 'json'
elif 'https://' in file_obj or 'http://' in file_obj:
# we've been passed a URL, warn to use explicit function
# and don't do network calls via magical pipeline
raise ValueError(
'use load_remote to load URL: {}'.format(file_obj))
elif file_type is None:
raise ValueError('string is not a file: {}'.format(file_obj))
if file_type is None:
file_type = file_obj.__class__.__name__
if util.is_string(file_type) and '.' in file_type:
# if someone has passed the whole filename as the file_type
# use the file extension as the file_type
if 'file_path' not in metadata:
metadata['file_path'] = file_type
metadata['file_name'] = os.path.basename(file_type)
file_type = util.split_extension(file_type)
if resolver is None and os.path.exists(file_type):
resolver = visual.resolvers.FilePathResolver(file_type)
# all our stored extensions reference in lower case
file_type = file_type.lower()
# if we still have no resolver try using file_obj name
if (resolver is None and
hasattr(file_obj, 'name') and
len(file_obj.name) > 0):
resolver = visual.resolvers.FilePathResolver(file_obj.name)
return file_obj, file_type, metadata, opened, resolver
|
python
|
{
"resource": ""
}
|
q23475
|
pack_rectangles
|
train
|
def pack_rectangles(rectangles, sheet_size, shuffle=False):
"""
Pack smaller rectangles onto a larger rectangle, using a binary
space partition tree.
Parameters
----------
rectangles : (n, 2) float
An array of (width, height) pairs
representing the rectangles to be packed.
sheet_size : (2,) float
Width, height of rectangular sheet
shuffle : bool
Whether or not to shuffle the insert order of the
smaller rectangles, as the final packing density depends
on insertion order.
Returns
---------
density : float
Area filled over total sheet area
offset : (m,2) float
Offsets to move rectangles to their packed location
inserted : (n,) bool
Which of the original rectangles were packed
consumed_box : (2,) float
Bounding box size of packed result
"""
offset = np.zeros((len(rectangles), 2))
inserted = np.zeros(len(rectangles), dtype=bool)
box_order = np.argsort(np.sum(rectangles**2, axis=1))[::-1]
area = 0.0
density = 0.0
if shuffle:
shuffle_len = int(np.random.random() * len(rectangles)) - 1
box_order[0:shuffle_len] = np.random.permutation(
box_order[0:shuffle_len])
sheet = RectangleBin(size=sheet_size)
for index in box_order:
insert_location = sheet.insert(rectangles[index])
if insert_location is not None:
area += np.prod(rectangles[index])
offset[index] += insert_location
inserted[index] = True
consumed_box = np.max((offset + rectangles)[inserted], axis=0)
density = area / np.prod(consumed_box)
return density, offset[inserted], inserted, consumed_box
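# A minimal usage sketch, assuming pack_rectangles and RectangleBin above are
# importable from the same module.
import numpy as np

rect = np.array([[1.0, 2.0], [2.0, 2.0], [1.0, 1.0]])
density, offset, inserted, consumed = pack_rectangles(rect, sheet_size=[4.0, 4.0])
# density: filled fraction of the consumed bounding box
# offset:  (m, 2) translations for the rectangles that were placed
# inserted: boolean mask of which input rectangles fit on the sheet
print(density, inserted, consumed)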
|
python
|
{
"resource": ""
}
|
q23476
|
pack_paths
|
train
|
def pack_paths(paths, sheet_size=None):
"""
Pack a list of Path2D objects into a rectangle.
Parameters
------------
paths: (n,) Path2D
Geometry to be packed
sheet_size : (2,) float or None
Size of rectangular sheet to pack onto
Returns
------------
packed : trimesh.path.Path2D
Object containing input geometry
inserted : (m,) int
Indexes of paths inserted into result
"""
from .util import concatenate
if sheet_size is not None:
sheet_size = np.sort(sheet_size)[::-1]
quantity = []
for path in paths:
if 'quantity' in path.metadata:
quantity.append(path.metadata['quantity'])
else:
quantity.append(1)
# pack using each path's exterior polygon (via its OBB)
polygons = [i.polygons_closed[i.root[0]] for i in paths]
# pack the polygons using rectangular bin packing
inserted, transforms = multipack(polygons=polygons,
quantity=quantity,
sheet_size=sheet_size)
multi = []
for i, T in zip(inserted, transforms):
multi.append(paths[i].copy())
multi[-1].apply_transform(T)
# append all packed paths into a single Path object
packed = concatenate(multi)
return packed, inserted
|
python
|
{
"resource": ""
}
|
q23477
|
multipack
|
train
|
def multipack(polygons,
sheet_size=None,
iterations=50,
density_escape=.95,
spacing=0.094,
quantity=None):
"""
Pack polygons into a rectangle by taking each Polygon's OBB
and then packing that as a rectangle.
Parameters
------------
polygons : (n,) shapely.geometry.Polygon
Source geometry
sheet_size : (2,) float
Size of rectangular sheet
iterations : int
Number of times to run the loop
density_escape : float
When to exit early (0.0 - 1.0)
spacing : float
How big a gap to leave between polygons
quantity : (n,) int, or None
Quantity of each Polygon
Returns
-------------
overall_inserted : (m,) int
Indexes of inserted polygons
packed : (m, 3, 3) float
Homogeneous transforms from original frame to packed frame
"""
from .polygons import polygons_obb
if quantity is None:
quantity = np.ones(len(polygons), dtype=np.int64)
else:
quantity = np.asanyarray(quantity, dtype=np.int64)
if len(quantity) != len(polygons):
raise ValueError('quantity must match polygons')
# find the oriented bounding box of the polygons
obb, rectangles = polygons_obb(polygons)
# pad all sides of the rectangle
rectangles += 2.0 * spacing
# move the OBB transform so the polygon is centered
# in the padded rectangle
for i, r in enumerate(rectangles):
obb[i][0:2, 2] += r * .5
# for polygons occurring multiple times
indexes = np.hstack([np.ones(q, dtype=np.int64) * i
for i, q in enumerate(quantity)])
# stack using advanced indexing
obb = obb[indexes]
rectangles = rectangles[indexes]
# store timing
tic = time.time()
overall_density = 0.0
# if no sheet size specified, make a large one
if sheet_size is None:
max_dim = np.max(rectangles, axis=0)
sum_dim = np.sum(rectangles, axis=0)
sheet_size = [sum_dim[0], max_dim[1] * 2]
log.debug('packing %d polygons', len(polygons))
# run packing for a number of iterations, shuffling insertion order
for i in range(iterations):
(density,
offset,
inserted,
sheet) = pack_rectangles(rectangles,
sheet_size=sheet_size,
shuffle=(i != 0))
if density > overall_density:
overall_density = density
overall_offset = offset
overall_inserted = inserted
if density > density_escape:
break
toc = time.time()
log.debug('packing finished %i iterations in %f seconds',
i + 1,
toc - tic)
log.debug('%i/%i parts were packed successfully',
np.sum(overall_inserted),
quantity.sum())
log.debug('final rectangular density is %f.', overall_density)
# transformations to packed positions
packed = obb[overall_inserted]
# apply the offset and inter- polygon spacing
packed.reshape(-1, 9)[:, [2, 5]] += overall_offset + spacing
return indexes[overall_inserted], packed
|
python
|
{
"resource": ""
}
|
q23478
|
RectangleBin.insert
|
train
|
def insert(self, rectangle):
"""
Insert a rectangle into the bin.
Parameters
-------------
rectangle: (2,) float, size of rectangle to insert
"""
rectangle = np.asanyarray(rectangle, dtype=np.float64)
for child in self.child:
if child is not None:
attempt = child.insert(rectangle)
if attempt is not None:
return attempt
if self.occupied:
return None
# compare the bin size to the insertion candidate size
size_test = self.extents - rectangle
# this means the inserted rectangle is too big for the cell
if np.any(size_test < -tol.zero):
return None
# since the cell is big enough for the current rectangle, either it
# is going to be inserted here, or the cell is going to be split
# either way, the cell is now occupied.
self.occupied = True
# this means the inserted rectangle fits perfectly
# since we already checked to see if it was negative, no abs is needed
if np.all(size_test < tol.zero):
return self.bounds[0:2]
# since the rectangle fits but the empty space is too big,
# we need to create some children to insert into
# first, we decide which way to split
vertical = size_test[0] > size_test[1]
length = rectangle[int(not vertical)]
child_bounds = self.split(length, vertical)
self.child[0] = RectangleBin(bounds=child_bounds[0])
self.child[1] = RectangleBin(bounds=child_bounds[1])
return self.child[0].insert(rectangle)
|
python
|
{
"resource": ""
}
|
q23479
|
RectangleBin.split
|
train
|
def split(self, length, vertical=True):
"""
Returns two bounding boxes representing the current
bounds split into two smaller boxes.
Parameters
-------------
length: float, length to split
vertical: bool, if True will split box vertically
Returns
-------------
box: (2,4) float, two bounding boxes consisting of:
[minx, miny, maxx, maxy]
"""
# also known as [minx, miny, maxx, maxy]
[left, bottom, right, top] = self.bounds
if vertical:
box = [[left, bottom, left + length, top],
[left + length, bottom, right, top]]
else:
box = [[left, bottom, right, bottom + length],
[left, bottom + length, right, top]]
return box
|
python
|
{
"resource": ""
}
|
q23480
|
oriented_bounds_2D
|
train
|
def oriented_bounds_2D(points, qhull_options='QbB'):
"""
Find an oriented bounding box for an array of 2D points.
Parameters
----------
points : (n,2) float
Points in 2D.
Returns
----------
transform : (3,3) float
Homogeneous 2D transformation matrix to move the
input points so that the axis aligned bounding box
is CENTERED AT THE ORIGIN.
rectangle : (2,) float
Size of extents once input points are transformed
by transform
"""
# make sure input is a numpy array
points = np.asanyarray(points, dtype=np.float64)
# create a convex hull object of our points
# 'QbB' is a qhull option which has it scale the input to unit
# box to avoid precision issues with very large/small meshes
convex = spatial.ConvexHull(
points, qhull_options=qhull_options)
# (n,2,3) line segments
hull_edges = convex.points[convex.simplices]
# (n,2) points on the convex hull
hull_points = convex.points[convex.vertices]
# direction of the edges of the hull polygon
edge_vectors = np.diff(hull_edges, axis=1).reshape((-1, 2))
# unitize vectors
edge_vectors /= np.linalg.norm(edge_vectors, axis=1).reshape((-1, 1))
# create a set of perpendicular vectors
perp_vectors = np.fliplr(edge_vectors) * [-1.0, 1.0]
# find the projection of every hull point on every edge vector
# this does create a potentially gigantic n^2 array in memory,
# and there is the 'rotating calipers' algorithm which avoids this
# however, we have reduced n with a convex hull and numpy dot products
# are extremely fast so in practice this usually ends up being pretty
# reasonable
x = np.dot(edge_vectors, hull_points.T)
y = np.dot(perp_vectors, hull_points.T)
# reduce the projections to maximum and minimum per edge vector
bounds = np.column_stack((x.min(axis=1),
y.min(axis=1),
x.max(axis=1),
y.max(axis=1)))
# calculate the extents and area for each edge vector pair
extents = np.diff(bounds.reshape((-1, 2, 2)),
axis=1).reshape((-1, 2))
area = np.prod(extents, axis=1)
area_min = area.argmin()
# (2,) float of smallest rectangle size
rectangle = extents[area_min]
# find the (3,3) homogeneous transformation which moves the input
# points to have a bounding box centered at the origin
offset = -bounds[area_min][:2] - (rectangle * .5)
theta = np.arctan2(*edge_vectors[area_min][::-1])
transform = transformations.planar_matrix(offset,
theta)
# we would like to consistently return an OBB with
# the largest dimension along the X axis rather than
# the long axis being arbitrarily X or Y.
if np.less(*rectangle):
# a 90 degree rotation
flip = transformations.planar_matrix(theta=np.pi / 2)
# apply the rotation
transform = np.dot(flip, transform)
# switch X and Y in the OBB extents
rectangle = np.roll(rectangle, 1)
return transform, rectangle
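# A minimal usage sketch, assuming this function is in scope (in trimesh it
# is exposed as trimesh.bounds.oriented_bounds_2D).
import numpy as np

points = np.random.random((100, 2)) @ np.array([[2.0, 1.0], [0.0, 1.0]])
transform, rectangle = oriented_bounds_2D(points)
# transforming the points by `transform` centers the minimum-area rectangle
# at the origin with its long side along +X; `rectangle` is (width, height)
print(rectangle)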
|
python
|
{
"resource": ""
}
|
q23481
|
corners
|
train
|
def corners(bounds):
"""
Given a pair of axis aligned bounds, return all
8 corners of the bounding box.
Parameters
----------
bounds : (2,3) or (2,2) float
Axis aligned bounds
Returns
----------
corners : (8,3) float
Corner vertices of the cube
"""
bounds = np.asanyarray(bounds, dtype=np.float64)
if util.is_shape(bounds, (2, 2)):
bounds = np.column_stack((bounds, [0, 0]))
elif not util.is_shape(bounds, (2, 3)):
raise ValueError('bounds must be (2,2) or (2,3)!')
minx, miny, minz, maxx, maxy, maxz = np.arange(6)
corner_index = np.array([minx, miny, minz,
maxx, miny, minz,
maxx, maxy, minz,
minx, maxy, minz,
minx, miny, maxz,
maxx, miny, maxz,
maxx, maxy, maxz,
minx, maxy, maxz]).reshape((-1, 3))
corners = bounds.reshape(-1)[corner_index]
return corners
|
python
|
{
"resource": ""
}
|
q23482
|
contains
|
train
|
def contains(bounds, points):
"""
Do an axis aligned bounding box check on a list of points.
Parameters
-----------
bounds : (2, dimension) float
Axis aligned bounding box
points : (n, dimension) float
Points in space
Returns
-----------
points_inside : (n,) bool
True if points are inside the AABB
"""
# make sure we have correct input types
bounds = np.asanyarray(bounds, dtype=np.float64)
points = np.asanyarray(points, dtype=np.float64)
if len(bounds) != 2:
raise ValueError('bounds must be (2,dimension)!')
if not util.is_shape(points, (-1, bounds.shape[1])):
raise ValueError('bounds shape must match points!')
# run the simple check
points_inside = np.logical_and(
(points > bounds[0]).all(axis=1),
(points < bounds[1]).all(axis=1))
return points_inside
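# A minimal usage sketch, assuming corners and contains above are in scope
# (trimesh exposes both in trimesh.bounds).
import numpy as np

bounds = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
print(corners(bounds).shape)                    # (8, 3) box vertices
pts = np.array([[0.5, 0.5, 0.5], [2.0, 0.0, 0.0]])
print(contains(bounds, pts))                    # [ True False]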
|
python
|
{
"resource": ""
}
|
q23483
|
in_notebook
|
train
|
def in_notebook():
"""
Check to see if we are in an IPython or Jupyter notebook.
Returns
-----------
in_notebook : bool
Returns True if we are in a notebook
"""
try:
# function returns IPython context, but only in IPython
ipy = get_ipython() # NOQA
# we only want to render rich output in notebooks
# in terminals we definitely do not want to output HTML
name = str(ipy.__class__).lower()
terminal = 'terminal' in name
# spyder uses ZMQshell, and can appear to be a notebook
spyder = '_' in os.environ and 'spyder' in os.environ['_']
# assume we are in a notebook if we are not in
# a terminal and we haven't been run by spyder
notebook = (not terminal) and (not spyder)
return notebook
except BaseException:
return False
|
python
|
{
"resource": ""
}
|
q23484
|
_ScandinavianStemmer._r1_scandinavian
|
train
|
def _r1_scandinavian(self, word, vowels):
"""
Return the region R1 that is used by the Scandinavian stemmers.
R1 is the region after the first non-vowel following a vowel,
or is the null region at the end of the word if there is no
such non-vowel. But then R1 is adjusted so that the region
before it contains at least three letters.
:param word: The word whose region R1 is determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the region R1.
:type vowels: unicode
:return: the region R1 for the respective word.
:rtype: unicode
:note: This helper method is invoked by the respective stem method of
the subclasses DanishStemmer, NorwegianStemmer, and
SwedishStemmer. It is not to be invoked directly!
"""
r1 = ""
for i in range(1, len(word)):
if word[i] not in vowels and word[i-1] in vowels:
if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
r1 = word[3:]
elif len(word[:i+1]) >= 3:
r1 = word[i+1:]
else:
return word
break
return r1
|
python
|
{
"resource": ""
}
|
q23485
|
_StandardStemmer._r1r2_standard
|
train
|
def _r1r2_standard(self, word, vowels):
"""
Return the standard interpretations of the string regions R1 and R2.
R1 is the region after the first non-vowel following a vowel,
or is the null region at the end of the word if there is no
such non-vowel.
R2 is the region after the first non-vowel following a vowel
in R1, or is the null region at the end of the word if there
is no such non-vowel.
:param word: The word whose regions R1 and R2 are determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the regions R1 and R2.
:type vowels: unicode
:return: (r1,r2), the regions R1 and R2 for the respective word.
:rtype: tuple
:note: This helper method is invoked by the respective stem method of
the subclasses DutchStemmer, FinnishStemmer,
FrenchStemmer, GermanStemmer, ItalianStemmer,
PortugueseStemmer, RomanianStemmer, and SpanishStemmer.
It is not to be invoked directly!
:note: A detailed description of how to define R1 and R2
can be found at http://snowball.tartarus.org/texts/r1r2.html
"""
r1 = ""
r2 = ""
for i in range(1, len(word)):
if word[i] not in vowels and word[i-1] in vowels:
r1 = word[i+1:]
break
for i in range(1, len(r1)):
if r1[i] not in vowels and r1[i-1] in vowels:
r2 = r1[i+1:]
break
return (r1, r2)
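# A hedged worked example: with the vowel set "aeiouy" the word "beautiful"
# has R1 = "iful" and R2 = "ul", matching the Snowball definition linked
# above. Calling the protected helper through a subclass instance (for
# illustration only, assuming the NLTK packaging) reproduces this:
from nltk.stem.snowball import SpanishStemmer

r1, r2 = SpanishStemmer()._r1r2_standard("beautiful", "aeiouy")
print(r1, r2)  # iful ul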
|
python
|
{
"resource": ""
}
|
q23486
|
_StandardStemmer._rv_standard
|
train
|
def _rv_standard(self, word, vowels):
"""
Return the standard interpretation of the string region RV.
If the second letter is a consonant, RV is the region after the
next following vowel. If the first two letters are vowels, RV is
the region after the next following consonant. Otherwise, RV is
the region after the third letter.
:param word: The word whose region RV is determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the region RV.
:type vowels: unicode
:return: the region RV for the respective word.
:rtype: unicode
:note: This helper method is invoked by the respective stem method of
the subclasses ItalianStemmer, PortugueseStemmer,
RomanianStemmer, and SpanishStemmer. It is not to be
invoked directly!
"""
rv = ""
if len(word) >= 2:
if word[1] not in vowels:
for i in range(2, len(word)):
if word[i] in vowels:
rv = word[i+1:]
break
elif word[:2] in vowels:
for i in range(2, len(word)):
if word[i] not in vowels:
rv = word[i+1:]
break
else:
rv = word[3:]
return rv
|
python
|
{
"resource": ""
}
|
q23487
|
FrenchStemmer.__rv_french
|
train
|
def __rv_french(self, word, vowels):
"""
Return the region RV that is used by the French stemmer.
If the word begins with two vowels, RV is the region after
the third letter. Otherwise, it is the region after the first
vowel not at the beginning of the word, or the end of the word
if these positions cannot be found. (Exceptionally, u'par',
u'col' or u'tap' at the beginning of a word is also taken to
define RV as the region to their right.)
:param word: The French word whose region RV is determined.
:type word: str or unicode
:param vowels: The French vowels that are used to determine
the region RV.
:type vowels: unicode
:return: the region RV for the respective French word.
:rtype: unicode
:note: This helper method is invoked by the stem method of
the subclass FrenchStemmer. It is not to be invoked directly!
"""
rv = ""
if len(word) >= 2:
if (word.startswith(("par", "col", "tap")) or
(word[0] in vowels and word[1] in vowels)):
rv = word[3:]
else:
for i in range(1, len(word)):
if word[i] in vowels:
rv = word[i+1:]
break
return rv
|
python
|
{
"resource": ""
}
|
q23488
|
HungarianStemmer.__r1_hungarian
|
train
|
def __r1_hungarian(self, word, vowels, digraphs):
"""
Return the region R1 that is used by the Hungarian stemmer.
If the word begins with a vowel, R1 is defined as the region
after the first consonant or digraph (= two letters stand for
one phoneme) in the word. If the word begins with a consonant,
it is defined as the region after the first vowel in the word.
If the word does not contain both a vowel and consonant, R1
is the null region at the end of the word.
:param word: The Hungarian word whose region R1 is determined.
:type word: str or unicode
:param vowels: The Hungarian vowels that are used to determine
the region R1.
:type vowels: unicode
:param digraphs: The digraphs that are used to determine the
region R1.
:type digraphs: tuple
:return: the region R1 for the respective word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
HungarianStemmer. It is not to be invoked directly!
"""
r1 = ""
if word[0] in vowels:
for digraph in digraphs:
if digraph in word[1:]:
r1 = word[word.index(digraph[-1])+1:]
return r1
for i in range(1, len(word)):
if word[i] not in vowels:
r1 = word[i+1:]
break
else:
for i in range(1, len(word)):
if word[i] in vowels:
r1 = word[i+1:]
break
return r1
|
python
|
{
"resource": ""
}
|
q23489
|
RussianStemmer.__regions_russian
|
train
|
def __regions_russian(self, word):
"""
Return the regions RV and R2 which are used by the Russian stemmer.
In any word, RV is the region after the first vowel,
or the end of the word if it contains no vowel.
R2 is the region after the first non-vowel following
a vowel in R1, or the end of the word if there is no such non-vowel.
R1 is the region after the first non-vowel following a vowel,
or the end of the word if there is no such non-vowel.
:param word: The Russian word whose regions RV and R2 are determined.
:type word: str or unicode
:return: the regions RV and R2 for the respective Russian word.
:rtype: tuple
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly!
"""
r1 = ""
r2 = ""
rv = ""
vowels = ("A", "U", "E", "a", "e", "i", "o", "u", "y")
word = (word.replace("i^a", "A")
.replace("i^u", "U")
.replace("e`", "E"))
for i in range(1, len(word)):
if word[i] not in vowels and word[i-1] in vowels:
r1 = word[i+1:]
break
for i in range(1, len(r1)):
if r1[i] not in vowels and r1[i-1] in vowels:
r2 = r1[i+1:]
break
for i in range(len(word)):
if word[i] in vowels:
rv = word[i+1:]
break
r2 = (r2.replace("A", "i^a")
.replace("U", "i^u")
.replace("E", "e`"))
rv = (rv.replace("A", "i^a")
.replace("U", "i^u")
.replace("E", "e`"))
return (rv, r2)
|
python
|
{
"resource": ""
}
|
q23490
|
RussianStemmer.__cyrillic_to_roman
|
train
|
def __cyrillic_to_roman(self, word):
"""
Transliterate a Russian word into the Roman alphabet.
A Russian word whose letters consist of the Cyrillic
alphabet are transliterated into the Roman alphabet
in order to ease the forthcoming stemming process.
:param word: The word that is transliterated.
:type word: unicode
:return: the transliterated word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly!
"""
word = (word.replace("\u0410", "a").replace("\u0430", "a")
.replace("\u0411", "b").replace("\u0431", "b")
.replace("\u0412", "v").replace("\u0432", "v")
.replace("\u0413", "g").replace("\u0433", "g")
.replace("\u0414", "d").replace("\u0434", "d")
.replace("\u0415", "e").replace("\u0435", "e")
.replace("\u0401", "e").replace("\u0451", "e")
.replace("\u0416", "zh").replace("\u0436", "zh")
.replace("\u0417", "z").replace("\u0437", "z")
.replace("\u0418", "i").replace("\u0438", "i")
.replace("\u0419", "i`").replace("\u0439", "i`")
.replace("\u041A", "k").replace("\u043A", "k")
.replace("\u041B", "l").replace("\u043B", "l")
.replace("\u041C", "m").replace("\u043C", "m")
.replace("\u041D", "n").replace("\u043D", "n")
.replace("\u041E", "o").replace("\u043E", "o")
.replace("\u041F", "p").replace("\u043F", "p")
.replace("\u0420", "r").replace("\u0440", "r")
.replace("\u0421", "s").replace("\u0441", "s")
.replace("\u0422", "t").replace("\u0442", "t")
.replace("\u0423", "u").replace("\u0443", "u")
.replace("\u0424", "f").replace("\u0444", "f")
.replace("\u0425", "kh").replace("\u0445", "kh")
.replace("\u0426", "t^s").replace("\u0446", "t^s")
.replace("\u0427", "ch").replace("\u0447", "ch")
.replace("\u0428", "sh").replace("\u0448", "sh")
.replace("\u0429", "shch").replace("\u0449", "shch")
.replace("\u042A", "''").replace("\u044A", "''")
.replace("\u042B", "y").replace("\u044B", "y")
.replace("\u042C", "'").replace("\u044C", "'")
.replace("\u042D", "e`").replace("\u044D", "e`")
.replace("\u042E", "i^u").replace("\u044E", "i^u")
.replace("\u042F", "i^a").replace("\u044F", "i^a"))
return word
|
python
|
{
"resource": ""
}
|
q23491
|
RussianStemmer.__roman_to_cyrillic
|
train
|
def __roman_to_cyrillic(self, word):
"""
Transliterate a Russian word back into the Cyrillic alphabet.
A Russian word formerly transliterated into the Roman alphabet
in order to ease the stemming process, is transliterated back
into the Cyrillic alphabet, its original form.
:param word: The word that is transliterated.
:type word: str or unicode
:return: word, the transliterated word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly!
"""
word = (word.replace("i^u", "\u044E").replace("i^a", "\u044F")
.replace("shch", "\u0449").replace("kh", "\u0445")
.replace("t^s", "\u0446").replace("ch", "\u0447")
.replace("e`", "\u044D").replace("i`", "\u0439")
.replace("sh", "\u0448").replace("k", "\u043A")
.replace("e", "\u0435").replace("zh", "\u0436")
.replace("a", "\u0430").replace("b", "\u0431")
.replace("v", "\u0432").replace("g", "\u0433")
.replace("d", "\u0434").replace("e", "\u0435")
.replace("z", "\u0437").replace("i", "\u0438")
.replace("l", "\u043B").replace("m", "\u043C")
.replace("n", "\u043D").replace("o", "\u043E")
.replace("p", "\u043F").replace("r", "\u0440")
.replace("s", "\u0441").replace("t", "\u0442")
.replace("u", "\u0443").replace("f", "\u0444")
.replace("''", "\u044A").replace("y", "\u044B")
.replace("'", "\u044C"))
return word
|
python
|
{
"resource": ""
}
|
q23492
|
SwedishStemmer.stem
|
train
|
def stem(self, word):
"""
Stem a Swedish word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
r1 = self._r1_scandinavian(word, self.__vowels)
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix == "s":
if word[-2] in self.__s_ending:
word = word[:-1]
r1 = r1[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 2
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
word = word[:-1]
r1 = r1[:-1]
break
# STEP 3
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix in ("els", "lig", "ig"):
word = word[:-len(suffix)]
elif suffix in ("fullt", "l\xF6st"):
word = word[:-1]
break
return word
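# A minimal usage sketch, assuming the NLTK packaging of this stemmer.
from nltk.stem.snowball import SwedishStemmer

stemmer = SwedishStemmer()
print(stemmer.stem("jackor"))  # 'jack' -- step 1 strips the plural 'or'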
|
python
|
{
"resource": ""
}
|
q23493
|
deaccent
|
train
|
def deaccent(text):
"""
Remove accentuation from the given string.
"""
norm = unicodedata.normalize("NFD", text)
result = "".join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result)
|
python
|
{
"resource": ""
}
|
q23494
|
tokenize
|
train
|
def tokenize(text, lowercase=False, deacc=False):
"""
Iteratively yield tokens as unicode strings, optionally also lowercasing them
and removing accent marks.
"""
if lowercase:
text = text.lower()
if deacc:
text = deaccent(text)
for match in PAT_ALPHABETIC.finditer(text):
yield match.group()
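# A minimal usage sketch, assuming deaccent and tokenize above are in scope;
# the exact tokens depend on the module's PAT_ALPHABETIC pattern, which is
# assumed here to match alphabetic runs only.
print(deaccent("résumé café"))  # 'resume cafe'
print(list(tokenize("Hello, Wörld 42!", lowercase=True, deacc=True)))
# expected something like ['hello', 'world'], digits and punctuation dropped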
|
python
|
{
"resource": ""
}
|
q23495
|
clean_text_by_sentences
|
train
|
def clean_text_by_sentences(text, language="english", additional_stopwords=None):
""" Tokenizes a given text into sentences, applying filters and lemmatizing them.
Returns a SyntacticUnit list. """
init_textcleanner(language, additional_stopwords)
original_sentences = split_sentences(text)
filtered_sentences = filter_words(original_sentences)
return merge_syntactic_units(original_sentences, filtered_sentences)
|
python
|
{
"resource": ""
}
|
q23496
|
clean_text_by_word
|
train
|
def clean_text_by_word(text, language="english", deacc=False, additional_stopwords=None):
""" Tokenizes a given text into words, applying filters and lemmatizing them.
Returns a dict of word -> syntacticUnit. """
init_textcleanner(language, additional_stopwords)
text_without_acronyms = replace_with_separator(text, "", [AB_ACRONYM_LETTERS])
original_words = list(tokenize(text_without_acronyms, lowercase=True, deacc=deacc))
filtered_words = filter_words(original_words)
if HAS_PATTERN:
tags = tag(" ".join(original_words)) # tag needs the context of the words in the text
else:
tags = None
units = merge_syntactic_units(original_words, filtered_words, tags)
return { unit.text : unit for unit in units }
|
python
|
{
"resource": ""
}
|
q23497
|
_get_sentences_with_word_count
|
train
|
def _get_sentences_with_word_count(sentences, words):
""" Given a list of sentences, returns a list of sentences with a
total word count similar to the word count provided.
"""
word_count = 0
selected_sentences = []
# Loops until the word count is reached.
for sentence in sentences:
words_in_sentence = len(sentence.text.split())
# Checks if the inclusion of the sentence gives a better approximation
# to the word parameter.
if abs(words - word_count - words_in_sentence) > abs(words - word_count):
return selected_sentences
selected_sentences.append(sentence)
word_count += words_in_sentence
return selected_sentences
|
python
|
{
"resource": ""
}
|
q23498
|
pagerank_weighted
|
train
|
def pagerank_weighted(graph, initial_value=None, damping=0.85):
"""Calculates PageRank for an undirected graph"""
if initial_value is None: initial_value = 1.0 / len(graph.nodes())
scores = dict.fromkeys(graph.nodes(), initial_value)
iteration_quantity = 0
for iteration_number in range(100):
iteration_quantity += 1
convergence_achieved = 0
for i in graph.nodes():
rank = 1 - damping
for j in graph.neighbors(i):
neighbors_sum = sum(graph.edge_weight((j, k)) for k in graph.neighbors(j))
rank += damping * scores[j] * graph.edge_weight((j, i)) / neighbors_sum
if abs(scores[i] - rank) <= CONVERGENCE_THRESHOLD:
convergence_achieved += 1
scores[i] = rank
if convergence_achieved == len(graph.nodes()):
break
return scores
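# A hedged sketch: the graph class used by this module is not shown here, so
# this toy stub only mimics the minimal interface pagerank_weighted needs
# (nodes(), neighbors(), edge_weight()); CONVERGENCE_THRESHOLD is assumed to
# be defined alongside the function.
class ToyGraph(object):
    def __init__(self, weighted_edges):
        # weighted_edges: dict mapping (u, v) tuples to a float weight
        self._weights = {}
        self._adjacency = {}
        for (u, v), w in weighted_edges.items():
            self._weights[(u, v)] = w
            self._weights[(v, u)] = w
            self._adjacency.setdefault(u, set()).add(v)
            self._adjacency.setdefault(v, set()).add(u)

    def nodes(self):
        return list(self._adjacency)

    def neighbors(self, node):
        return list(self._adjacency[node])

    def edge_weight(self, pair):
        return self._weights[pair]

toy = ToyGraph({('a', 'b'): 1.0, ('b', 'c'): 2.0, ('a', 'c'): 0.5})
print(pagerank_weighted(toy))  # e.g. {'a': ..., 'b': ..., 'c': ...}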
|
python
|
{
"resource": ""
}
|
q23499
|
Env.db_url
|
train
|
def db_url(self, var=DEFAULT_DATABASE_ENV, default=NOTSET, engine=None):
"""Returns a config dictionary, defaulting to DATABASE_URL.
:rtype: dict
"""
return self.db_url_config(self.get_value(var, default=default), engine=engine)
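# A minimal usage sketch, assuming the django-environ package; the URL and
# credential values are placeholders.
import os
import environ

os.environ['DATABASE_URL'] = 'postgres://user:secret@db.example.com:5432/app'
env = environ.Env()
config = env.db_url()  # parsed into ENGINE/NAME/USER/PASSWORD/HOST/PORT keys
print(config['ENGINE'])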
|
python
|
{
"resource": ""
}
|