id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
4,680 | import random
import math
import numpy as np
import torch
from .spc.uint8 import uint8_to_bits
The provided code snippet includes necessary dependencies for implementing the `random_shape_per_tensor` function. Write a Python function `def random_shape_per_tensor(batch_size, min_shape=None, max_shape=None)` to solve the following problem:
Generate random :attr:`shape_per_tensor`. Args: batch_size (int): Batch size (first dimension) of the generated tensor. min_shape (list, tuple or torch.LongTensor): Minimum values for each dimension of generated shapes. Default: 1 for each dimension. max_shape (list, tuple or torch.LongTensor): maximum values for each dimension of generated shapes. Return: (torch.LongTensor): A shape_per_tensor (2D). Example: >>> _ = torch.random.manual_seed(1) >>> random_shape_per_tensor(3, min_shape=(4, 4), max_shape=(10, 10)) tensor([[ 4, 7], [ 7, 7], [ 8, 10]])
Here is the function:
def random_shape_per_tensor(batch_size, min_shape=None, max_shape=None):
    """Generate random :attr:`shape_per_tensor`.

    Args:
        batch_size (int): Batch size (first dimension) of the generated tensor.
        min_shape (list, tuple or torch.LongTensor):
            Minimum values for each dimension of generated shapes.
            Default: 1 for each dimension.
        max_shape (list, tuple or torch.LongTensor):
            Maximum values for each dimension of generated shapes.

    Return:
        (torch.LongTensor): A shape_per_tensor (2D).

    Example:
        >>> _ = torch.random.manual_seed(1)
        >>> random_shape_per_tensor(3, min_shape=(4, 4), max_shape=(10, 10))
        tensor([[ 4,  7],
                [ 7,  7],
                [ 8, 10]])
    """
    if min_shape is None:
        min_shape = [1] * len(max_shape)
    # Draw one column per dimension, each value uniform in [low, high] inclusive.
    columns = []
    for low, high in zip(min_shape, max_shape):
        columns.append(torch.randint(low, high + 1, size=(batch_size, 1)))
    return torch.cat(columns, dim=1)
4,681 | import random
import math
import numpy as np
import torch
from .spc.uint8 import uint8_to_bits
The provided code snippet includes necessary dependencies for implementing the `random_tensor` function. Write a Python function `def random_tensor(low, high, shape, dtype=torch.float, device='cpu')` to solve the following problem:
Generate a random tensor. Args: low (float): the lowest value to be drawn from the distribution. high (float): the highest value to be drawn from the distribution. shape (list, tuple or torch.LongTensor): the desired output shape. dtype (torch.dtype): the desired output dtype. Default: ``torch.float``. device (torch.device): the desired output device. Default: 'cpu' Return: (torch.Tensor): a random generated tensor. Example: >>> _ = torch.random.manual_seed(1) >>> random_tensor(4., 5., (3, 3), dtype=torch.float, device='cpu') tensor([[4.7576, 4.2793, 4.4031], [4.7347, 4.0293, 4.7999], [4.3971, 4.7544, 4.5695]])
Here is the function:
def random_tensor(low, high, shape, dtype=torch.float, device='cpu'):
    """Generate a random tensor.

    Args:
        low (float): the lowest value to be drawn from the distribution.
        high (float): the highest value to be drawn from the distribution.
        shape (list, tuple or torch.LongTensor): the desired output shape.
        dtype (torch.dtype): the desired output dtype. Default: ``torch.float``.
        device (torch.device): the desired output device. Default: 'cpu'

    Return:
        (torch.Tensor): a random generated tensor.

    Example:
        >>> _ = torch.random.manual_seed(1)
        >>> random_tensor(4., 5., (3, 3), dtype=torch.float, device='cpu')
        tensor([[4.7576, 4.2793, 4.4031],
                [4.7347, 4.0293, 4.7999],
                [4.3971, 4.7544, 4.5695]])
    """
    # Floating-point dtypes: draw uniform [0, 1) then rescale to [low, high).
    if dtype in (torch.half, torch.float, torch.double):
        result = torch.rand(shape, dtype=dtype, device=device)
        if low != 0. or high != 1.:
            result = result * (high - low) + low
        return result
    # Boolean dtype: only the full {0, 1} range is meaningful.
    if dtype == torch.bool:
        assert low is None or low == 0
        assert high is None or high == 1
        return torch.randint(0, 2, size=shape, dtype=dtype, device=device)
    # Integer dtypes: randint's upper bound is exclusive, hence high + 1.
    return torch.randint(low, high + 1, size=shape, dtype=dtype, device=device)
4,682 | import random
import math
import numpy as np
import torch
from .spc.uint8 import uint8_to_bits
def uint8_to_bits(uint8_t):
    """Convert uint8 ByteTensor to binary BoolTensor.

    Args:
        uint8_t (torch.ByteTensor): Tensor to convert.

    Returns:
        (BoolTensor):
            Converted tensor of same shape + last dimension 8
            and device than `uint8_t`.

    Examples:
        >>> uint8_t = torch.ByteTensor([[3, 5], [16, 2]])
        >>> uint8_to_bits(uint8_t)
        tensor([[[ True,  True, False, False, False, False, False, False],
                 [ True, False,  True, False, False, False, False, False]],
        <BLANKLINE>
                [[False, False, False, False,  True, False, False, False],
                 [False,  True, False, False, False, False, False, False]]])
    """
    device = uint8_t.device
    # Per-device cache of the 256x8 lookup table, stored on the function
    # itself: the original relied on a `_uint8_to_bits_luts` module global
    # and `itertools.product`, neither of which is defined in this file.
    luts = getattr(uint8_to_bits, '_luts', None)
    if luts is None:
        luts = {}
        uint8_to_bits._luts = luts
    lut = luts.get(device)
    if lut is None:
        # lut[value][bit] == (value >> bit) & 1, i.e. least-significant bit
        # first, matching the left-to-right binary layout of the output.
        values = torch.arange(256, device=device, dtype=torch.long)
        shifts = torch.arange(8, device=device, dtype=torch.long)
        lut = ((values.unsqueeze(1) >> shifts) & 1).bool().contiguous()
        luts[device] = lut
    return lut[uint8_t.long()]
The provided code snippet includes necessary dependencies for implementing the `random_spc_octrees` function. Write a Python function `def random_spc_octrees(batch_size, max_level, device='cpu')` to solve the following problem:
Generate random SPC octrees. Args: batch_size (int): The desired number of octrees. max_level (int): The desired max level of the octrees. device (torch.device): The desired output device. Default: 'cpu'. Return: (torch.ByteTensor, torch.IntTensor): - A batch of randomly generated octrees. - The length of each octree. Example: >>> _ = torch.random.manual_seed(1) >>> random_spc_octrees(2, 3, device='cpu') (tensor([ 71, 180, 220, 9, 134, 59, 42, 102, 210, 193, 204, 190, 107, 24, 104, 151, 13, 7, 18, 107, 16, 154, 57, 110, 19, 22, 230, 48, 135, 65, 69, 147, 148, 184, 203, 229, 114, 232, 18, 231, 241, 195], dtype=torch.uint8), tensor([19, 23], dtype=torch.int32))
Here is the function:
def random_spc_octrees(batch_size, max_level, device='cpu'):
    """Generate random SPC octrees.

    Args:
        batch_size (int): The desired number of octrees.
        max_level (int): The desired max level of the octrees.
        device (torch.device): The desired output device. Default: 'cpu'.

    Return:
        (torch.ByteTensor, torch.IntTensor):

            - A batch of randomly generated octrees.
            - The length of each octree.

    Example:
        >>> _ = torch.random.manual_seed(1)
        >>> random_spc_octrees(2, 3, device='cpu')
        (tensor([ 71, 180, 220,   9, 134,  59,  42, 102, 210, 193, 204, 190, 107,  24,
                104, 151,  13,   7,  18, 107,  16, 154,  57, 110,  19,  22, 230,  48,
                135,  65,  69, 147, 148, 184, 203, 229, 114, 232,  18, 231, 241, 195],
               dtype=torch.uint8), tensor([19, 23], dtype=torch.int32))
    """
    octrees = []
    lengths = []
    for _ in range(batch_size):
        octree_length = 0
        # Level 0 always consists of a single root node.
        cur_num_nodes = 1
        for _ in range(max_level):
            # Node bytes are drawn in [1, 255] so every node has >= 1 child.
            cur_nodes = torch.randint(1, 256, size=(cur_num_nodes,),
                                      dtype=torch.uint8, device=device)
            # Each set bit spawns one child node at the next level.
            # Cast to int: torch.sum returns a 0-dim tensor, which should not
            # be used directly as a size.
            cur_num_nodes = int(torch.sum(uint8_to_bits(cur_nodes)))
            octrees.append(cur_nodes)
            octree_length += cur_nodes.shape[0]
        lengths.append(octree_length)
    # NOTE(review): lengths is intentionally created on CPU (not `device`),
    # matching the documented example output.
    return torch.cat(octrees, dim=0), torch.tensor(lengths, dtype=torch.int32)
4,683 | import random
import math
import numpy as np
import torch
from .spc.uint8 import uint8_to_bits
The provided code snippet includes necessary dependencies for implementing the `sample_spherical_coords` function. Write a Python function `def sample_spherical_coords(shape, azimuth_low=0., azimuth_high=math.pi * 2., elevation_low=0., elevation_high=math.pi * 0.5, device='cpu', dtype=torch.float)` to solve the following problem:
Sample spherical coordinates with a uniform distribution. Args: shape (Sequence): shape of outputs. azimuth_low (float, optional): lower bound for azimuth, in radian. Default: 0. azimuth_high (float, optional): higher bound for azimuth, in radian. Default: 2 * pi. elevation_low (float, optional): lower bound for elevation, in radian. Default: 0. elevation_high (float, optional): higher bound for elevation, in radian. Default: pi / 2. device (torch.device, optional): device of the output tensor. Default: 'cpu'. dtype (torch.dtype, optional): dtype of the output tensor. Default: torch.float. Returns: (torch.Tensor, torch.Tensor): the azimuth and elevation, both of desired ``shape``.
Here is the function:
def sample_spherical_coords(shape,
                            azimuth_low=0., azimuth_high=math.pi * 2.,
                            elevation_low=0., elevation_high=math.pi * 0.5,
                            device='cpu', dtype=torch.float):
    """Sample spherical coordinates with a uniform distribution.

    Args:
        shape (Sequence): shape of outputs.
        azimuth_low (float, optional): lower bound for azimuth, in radian. Default: 0.
        azimuth_high (float, optional): higher bound for azimuth, in radian. Default: 2 * pi.
        elevation_low (float, optional): lower bound for elevation, in radian. Default: 0.
        elevation_high (float, optional): higher bound for elevation, in radian. Default: pi / 2.
        device (torch.device, optional): device of the output tensor. Default: 'cpu'.
        dtype (torch.dtype, optional): dtype of the output tensor. Default: torch.float.

    Returns:
        (torch.Tensor, torch.Tensor): the azimuth and elevation, both of desired ``shape``.
    """
    # Broadcastable bounds: azimuth in radians, elevation through its sine so
    # that the samples are uniform over the sphere surface, not over angles.
    extra_dims = [1 for _ in shape]
    bounds_low = torch.tensor(
        [[azimuth_low], [math.sin(elevation_low)]],
        device=device, dtype=dtype).reshape(2, *extra_dims)
    bounds_high = torch.tensor(
        [[azimuth_high], [math.sin(elevation_high)]],
        device=device, dtype=dtype).reshape(2, *extra_dims)
    uniform = torch.rand([2, *shape], dtype=dtype, device=device)
    samples = bounds_low + uniform * (bounds_high - bounds_low)
    azimuth = samples[0]
    elevation = torch.asin(samples[1])
    return azimuth, elevation
4,684 | import warnings
import numpy as np
import torch
from kaolin import _C
class InterpolateTrilinear(torch.autograd.Function):
    """Autograd function for trilinear interpolation on an SPC feature grid.

    Differentiable with respect to both the query coordinates and the
    features. Use through :func:`unbatched_interpolate_trilinear` rather
    than calling :meth:`apply` directly.
    """

    # NOTE: forward/backward of a torch.autograd.Function must be static;
    # without @staticmethod, `.apply` raises on modern PyTorch.
    @staticmethod
    def forward(ctx, coords, pidx, point_hierarchy, trinkets, feats, level):
        # The CUDA kernel expects contiguous inputs.
        feats_out = _C.ops.spc.interpolate_trilinear_cuda(coords.contiguous(), pidx.contiguous(),
                                                          point_hierarchy.contiguous(), trinkets.contiguous(),
                                                          feats.contiguous(), level)
        ctx.save_for_backward(coords, pidx, point_hierarchy, trinkets, feats)
        ctx.level = level
        ctx.feats_shape = feats.shape
        ctx.coords_shape = coords.shape
        return feats_out

    @staticmethod
    def backward(ctx, grad_output):
        coords, pidx, point_hierarchy, trinkets, feats = ctx.saved_tensors
        level = ctx.level

        # Only queries that hit a valid octree cell (pidx > -1) contribute.
        mask = pidx > -1
        selected_points = point_hierarchy.index_select(0, pidx[mask])
        selected_trinkets = trinkets.index_select(0, pidx[mask])

        is_needs_grad_by_coords = ctx.needs_input_grad[0]
        is_needs_grad_by_features = ctx.needs_input_grad[4]

        grad_feats = None
        if is_needs_grad_by_features:
            # TODO(ttakikawa): Write a fused kernel
            grad_feats = torch.zeros(ctx.feats_shape, device=grad_output.device, dtype=grad_output.dtype)
            # The gradient w.r.t. each corner feature is the interpolation
            # coefficient times the upstream gradient, scattered by trinket.
            coeffs = coords_to_trilinear_coeffs(coords[mask], selected_points[:, None].repeat(1, coords.shape[1], 1), level).type(grad_output.dtype)
            grad_per_corner = (coeffs[..., None] * grad_output[mask][..., None, :]).sum(1)
            grad_feats.index_add_(0, selected_trinkets.reshape(-1),
                                  grad_per_corner.reshape(-1, ctx.feats_shape[-1]).to(grad_feats.dtype))

        # TODO (operel): May want to reimplement with CUDA
        grad_coords = None
        if is_needs_grad_by_coords:
            # Let N be the number of intersected cells in a batch (e.g. pidx > -1)
            # Let D be the features dimensionality
            # Shape (N, 3), xyz coords of intersected cells in range [0, 2^lod]
            coords_ = (2 ** level) * (coords[mask].reshape(-1, 3) * 0.5 + 0.5)
            # Shape (N, 3), quantized xyz coords of intersected cells in range [0, 2^lod]
            points_ = selected_points[:, None].repeat(1, coords.shape[1], 1).reshape(-1, 3)
            # Shape (N, 3), local cell coordinates in range [0.0, 1.0]
            x_ = coords_ - points_
            # Shape (N, 3), 1.0 - local cell coordinates in range [0.0, 1.0]
            _x = 1.0 - x_
            # Shape (N, 8 x 3) tensor of @(coeffs)/@(xyz) where
            # coeffs is the tensor of c000, c001, .. c111, the trilinear interp coefficients
            # (see coords_to_trilinear_coeffs), and xyz is the coords
            grad_coeffs_by_xyz = torch.stack([
                -_x[:, 1] * _x[:, 2], -_x[:, 0] * _x[:, 2], -_x[:, 0] * _x[:, 1],
                -_x[:, 1] * x_[:, 2], -_x[:, 0] * x_[:, 2], _x[:, 0] * _x[:, 1],
                -x_[:, 1] * _x[:, 2], _x[:, 0] * _x[:, 2], -_x[:, 0] * x_[:, 1],
                -x_[:, 1] * x_[:, 2], _x[:, 0] * x_[:, 2], _x[:, 0] * x_[:, 1],
                _x[:, 1] * _x[:, 2], -x_[:, 0] * _x[:, 2], -x_[:, 0] * _x[:, 1],
                _x[:, 1] * x_[:, 2], -x_[:, 0] * x_[:, 2], x_[:, 0] * _x[:, 1],
                x_[:, 1] * _x[:, 2], x_[:, 0] * _x[:, 2], -x_[:, 0] * x_[:, 1],
                x_[:, 1] * x_[:, 2], x_[:, 0] * x_[:, 2], x_[:, 0] * x_[:, 1]
            ], dim=1).to(dtype=grad_output.dtype, device=grad_output.device)
            # Shape (N, 8, 3) tensor of @(coeffs)/@(xyz)
            grad_coeffs_by_xyz = grad_coeffs_by_xyz.reshape(-1, 8, 3)
            # Shape (N, D, 8) tensor of @(feats_out)/@(coeffs)
            grad_fout_by_coeffs = feats[selected_trinkets.long()].permute(0, 2, 1)
            # Shape (N, D, 3) tensor of @(feats_out)/@(xyz), after applying chain rule
            _grad_fout_by_xyz = grad_fout_by_coeffs @ grad_coeffs_by_xyz
            # Shape (N, 1, 3) tensor of @(out)/@(xyz) applying chain rule again.
            # Allocate on grad_output's device (was hard-coded to 'cuda',
            # which breaks for inputs on a non-default CUDA device).
            grad_fout_by_xyz = torch.zeros(
                (grad_output.shape[0], *_grad_fout_by_xyz.shape[1:]),
                device=grad_output.device, dtype=_grad_fout_by_xyz.dtype)
            grad_fout_by_xyz[mask] = _grad_fout_by_xyz
            grad_coords = grad_output @ grad_fout_by_xyz

        return grad_coords, None, None, None, grad_feats, None
The provided code snippet includes necessary dependencies for implementing the `unbatched_interpolate_trilinear` function. Write a Python function `def unbatched_interpolate_trilinear(coords, pidx, point_hierarchy, trinkets, feats, level)` to solve the following problem:
r"""Performs trilinear interpolation on a SPC feature grid. Args: coords (torch.FloatTensor): 3D coordinates of shape :math:`(\text{num_coords}, \text{num_samples}, 3)` in normalized space [-1, 1]. ``num_samples`` indicates the number of coordinates that are grouped inside the same SPC node for performance optimization purposes. In many cases the ``pidx`` is generated from :func:`kaolin.ops.spc.unbatched_query` and so the ``num_samples`` will be 1. pidx (torch.IntTensor): Index to the point hierarchy which contains the voxel which the coords exists in. Tensor of shape :math:`(\text{num_coords})`. This can be computed with :func:`kaolin.ops.spc.unbatched_query`. point_hierarchy (torch.ShortTensor): The point hierarchy of shape :math:`(\text{num_points}, 3)`. See :ref:`point_hierarchies <spc_points>` for a detailed description. trinkets (torch.IntTensor): An indirection pointer (in practice, an index) to the feature tensor of shape :math:`(\text{num_points}, 8)`. feats (torch.Tensor): Floating point feature vectors to interpolate of shape :math:`(\text{num_feats}, \text{feature_dim})`. level (int): The level of SPC to interpolate on. Returns: (torch.FloatTensor): Interpolated feature vectors of shape :math:`(\text{num_voxels}, \text{num_samples}, \text{feature_dim})`.
Here is the function:
def unbatched_interpolate_trilinear(coords, pidx, point_hierarchy, trinkets, feats, level):
    r"""Performs trilinear interpolation on a SPC feature grid.

    Args:
        coords (torch.FloatTensor): 3D coordinates of shape
                                    :math:`(\text{num_coords}, \text{num_samples}, 3)`
                                    in normalized space [-1, 1]. ``num_samples`` indicates the number of
                                    coordinates that are grouped inside the same SPC node for performance
                                    optimization purposes. In many cases the ``pidx`` is
                                    generated from :func:`kaolin.ops.spc.unbatched_query`
                                    and so the ``num_samples`` will be 1.
        pidx (torch.IntTensor): Index to the point hierarchy which contains the voxel
                                which the coords exists in. Tensor of shape
                                :math:`(\text{num_coords})`.
                                This can be computed with :func:`kaolin.ops.spc.unbatched_query`.
        point_hierarchy (torch.ShortTensor):
            The point hierarchy of shape :math:`(\text{num_points}, 3)`.
            See :ref:`point_hierarchies <spc_points>` for a detailed description.
        trinkets (torch.IntTensor): An indirection pointer (in practice, an index) to the feature
                                    tensor of shape :math:`(\text{num_points}, 8)`.
        feats (torch.Tensor): Floating point feature vectors to interpolate of shape
                              :math:`(\text{num_feats}, \text{feature_dim})`.
        level (int): The level of SPC to interpolate on.

    Returns:
        (torch.FloatTensor):
            Interpolated feature vectors of shape :math:`(\text{num_voxels}, \text{num_samples}, \text{feature_dim})`.
    """
    # Thin differentiable entry point; all work happens in the autograd Function.
    interpolated = InterpolateTrilinear.apply(
        coords, pidx, point_hierarchy, trinkets, feats, level)
    return interpolated
4,685 | import warnings
import numpy as np
import torch
from kaolin import _C
def coords_to_trilinear_coeffs(coords, points, level):
    r"""Calculates the coefficients for trilinear interpolation.

    This calculates coefficients with respect to the dual octree, which represent the corners of the octree
    where the features are stored.

    To interpolate with the coefficients, do:
    ``torch.sum(features * coeffs, dim=-1)``
    with ``features`` of shape :math:`(\text{num_points}, 8)`

    Args:
        coords (torch.FloatTensor): 3D coordinates of shape :math:`(\text{num_coords}, 3)`
                                    in normalized space [-1, 1].
        points (torch.ShortTensor): Quantized 3D points (the 0th bit of the voxel x is in),
                                    of shape :math:`(\text{num_points}, 3)`.
        level (int): The level of SPC to interpolate on.

    Returns:
        (torch.FloatTensor):
            The trilinear interpolation coefficients of shape :math:`(\text{num_coords}, 8)`.
    """
    # Output keeps the coords batch shape, with the last axis widened to 8 corners.
    out_shape = list(coords.shape)
    out_shape[-1] = 8
    flat_points = points.reshape(-1, 3)
    flat_coords = coords.reshape(-1, 3)
    # Map from normalized [-1, 1] space into grid space [0, 2^level].
    grid_coords = (2 ** level) * (flat_coords * 0.5 + 0.5)
    coeffs = _C.ops.spc.coords_to_trilinear_cuda(
        grid_coords.contiguous(), flat_points.contiguous())
    return coeffs.reshape(*out_shape)
The provided code snippet includes necessary dependencies for implementing the `coords_to_trilinear` function. Write a Python function `def coords_to_trilinear(coords, points, level)` to solve the following problem:
r"""Calculates the coefficients for trilinear interpolation. .. deprecated:: 0.11.0 This function is deprecated. Use :func:`coords_to_trilinear_coeffs`. This calculates coefficients with respect to the dual octree, which represent the corners of the octree where the features are stored. To interpolate with the coefficients, do: ``torch.sum(features * coeffs, dim=-1)`` with ``features`` of shape :math:`(\text{num_points}, 8)` Args: coords (torch.FloatTensor): 3D coordinates of shape :math:`(\text{num_coords}, 3)` in normalized space [-1, 1]. points (torch.ShortTensor): Quantized 3D points (the 0th bit of the voxel x is in), of shape :math:`(\text{num_points}, 3)`. level (int): The level of SPC to interpolate on. Returns: (torch.FloatTensor): The trilinear interpolation coefficients of shape :math:`(\text{num_points}, 8)`.
Here is the function:
def coords_to_trilinear(coords, points, level):
    r"""Calculates the coefficients for trilinear interpolation.

    .. deprecated:: 0.11.0
       This function is deprecated. Use :func:`coords_to_trilinear_coeffs`.

    This calculates coefficients with respect to the dual octree, which represent the corners of the octree
    where the features are stored.

    To interpolate with the coefficients, do:
    ``torch.sum(features * coeffs, dim=-1)``
    with ``features`` of shape :math:`(\text{num_points}, 8)`

    Args:
        coords (torch.FloatTensor): 3D coordinates of shape :math:`(\text{num_coords}, 3)`
                                    in normalized space [-1, 1].
        points (torch.ShortTensor): Quantized 3D points (the 0th bit of the voxel x is in),
                                    of shape :math:`(\text{num_points}, 3)`.
        level (int): The level of SPC to interpolate on.

    Returns:
        (torch.FloatTensor):
            The trilinear interpolation coefficients of shape :math:`(\text{num_coords}, 8)`.
    """
    # Deprecated alias kept for backward compatibility; warn, then delegate.
    warnings.warn("coords_to_trilinear is deprecated, "
                  "please use kaolin.ops.spc.coords_to_trilinear_coeffs instead",
                  DeprecationWarning, stacklevel=2)
    return coords_to_trilinear_coeffs(coords, points, level)
4,686 | import warnings
import numpy as np
import torch
from kaolin import _C
The provided code snippet includes necessary dependencies for implementing the `create_dense_spc` function. Write a Python function `def create_dense_spc(level, device)` to solve the following problem:
Creates a dense SPC model Args: level (int): The level at which the octree will be initialized to. device (torch.device): Torch device to keep the spc octree Returns: (torch.ByteTensor): the octree tensor
Here is the function:
def create_dense_spc(level, device):
    """Creates a dense SPC model.

    Args:
        level (int): The level at which the octree will be initialized to.
        device (torch.device): Torch device to keep the spc octree.

    Returns:
        (torch.ByteTensor, torch.IntTensor):
            The fully-occupied octree tensor (every byte is 255) and a
            1-element int32 tensor with the octree length.
    """
    # A dense octree has 8^0 + 8^1 + ... + 8^(level-1) node bytes.
    # Compute the count as a plain int: the original passed a 1-element
    # tensor inside torch.full's size tuple, which is not a valid size.
    num_nodes = sum(8 ** l for l in range(level))
    lengths = torch.tensor([num_nodes], dtype=torch.int32)
    octree = torch.full((num_nodes,), 255, device=device, dtype=torch.uint8)
    return octree, lengths
4,687 | import math
from torch.autograd import Function
import torch
from kaolin import _C
import math
from .uint8 import bits_to_uint8
from kaolin.rep import Spc
from .points import points_to_morton, points_to_corners
The provided code snippet includes necessary dependencies for implementing the `scan_octrees` function. Write a Python function `def scan_octrees(octrees, lengths)` to solve the following problem:
r"""Scan batch of octrees tensor. Scanning refers to processing the octrees to extract auxiliary information. There are two steps. First, a list is formed containing the number of set bits in each octree node/byte. Second, the exclusive sum of this list is taken. Args: octrees (torch.ByteTensor): Batched :ref:`packed<packed>` collection of octrees of shape :math:`(\text{num_node})`. lengths (torch.IntTensor): The number of byte per octree. of shape :math:`(\text{batch_size})`. Returns: (int, torch.IntTensor, torch.IntTensor): - max_level, an int containing the depth of the octrees. - :ref:`pyramids<spc_pyramids>`, a tensor containing structural information about the batch of structured point cloud hierarchies, of shape :math:`(\text{batch_size}, 2, \text{max_level + 1})`. See :ref:`the documentation <spc_pyramids>` for more details. - :ref:`exsum<spc_exsum>`, a 1D tensor containing the exclusive sum of the bit counts of each byte of the individual octrees within the batched input ``octrees`` tensor, of size :math:(\text{octree_num_bytes} + \text{batch_size})`. See :ref:`the documentation <spc_exsum>` for more details. .. note:: The returned tensor of exclusive sums is padded with an extra element for each item in the batch.
Here is the function:
def scan_octrees(octrees, lengths):
    r"""Scan batch of octrees tensor.

    Scanning refers to processing the octrees to extract auxiliary information.
    There are two steps. First, a list is formed
    containing the number of set bits in each octree node/byte. Second, the exclusive
    sum of this list is taken.

    Args:
        octrees (torch.ByteTensor):
            Batched :ref:`packed<packed>` collection of octrees of shape :math:`(\text{num_node})`.
        lengths (torch.IntTensor):
            The number of byte per octree. of shape :math:`(\text{batch_size})`.

    Returns:
        (int, torch.IntTensor, torch.IntTensor):

            - max_level, an int containing the depth of the octrees.

            - :ref:`pyramids<spc_pyramids>`, a tensor containing structural information about
              the batch of structured point cloud hierarchies,
              of shape :math:`(\text{batch_size}, 2, \text{max_level + 1})`.
              See :ref:`the documentation <spc_pyramids>` for more details.

            - :ref:`exsum<spc_exsum>`, a 1D tensor containing the exclusive sum of the bit
              counts of each byte of the individual octrees within the batched input ``octrees`` tensor,
              of size :math:(\text{octree_num_bytes} + \text{batch_size})`.
              See :ref:`the documentation <spc_exsum>` for more details.

    .. note::

        The returned tensor of exclusive sums is padded with an extra element for each
        item in the batch.
    """
    # The CUDA op requires contiguous inputs.
    packed_octrees = octrees.contiguous()
    bytes_per_octree = lengths.contiguous()
    return _C.ops.spc.scan_octrees_cuda(packed_octrees, bytes_per_octree)
4,688 | import math
from torch.autograd import Function
import torch
from kaolin import _C
import math
from .uint8 import bits_to_uint8
from kaolin.rep import Spc
from .points import points_to_morton, points_to_corners
The provided code snippet includes necessary dependencies for implementing the `generate_points` function. Write a Python function `def generate_points(octrees, pyramids, exsum)` to solve the following problem:
r"""Generate the point data for a structured point cloud. Decode batched octree into batch of structured point hierarchies, and batch of book keeping pyramids. Args: octrees (torch.ByteTensor): Batched (packed) collection of octrees of shape :math:`(\text{num_bytes})`. pyramids (torch.IntTensor): Batched tensor containing point hierarchy structural information of shape :math:`(\text{batch_size}, 2, \text{max_level}+2)` exsum (torch.IntTensor): Batched tensor containing the exclusive sum of the bit counts of individual octrees of shape :math:`(k + \text{batch_size})` Returns: (torch.ShortTensor): A tensor containing batched point hierachies derived from a batch of octrees, of shape :math:`(\text{num_points_at_all_levels}, 3)`. See :ref:`the documentation<spc_points>` for more details
Here is the function:
def generate_points(octrees, pyramids, exsum):
    r"""Generate the point data for a structured point cloud.

    Decode batched octree into batch of structured point hierarchies,
    and batch of book keeping pyramids.

    Args:
        octrees (torch.ByteTensor):
            Batched (packed) collection of octrees of shape :math:`(\text{num_bytes})`.
        pyramids (torch.IntTensor):
            Batched tensor containing point hierarchy structural information
            of shape :math:`(\text{batch_size}, 2, \text{max_level}+2)`
        exsum (torch.IntTensor):
            Batched tensor containing the exclusive sum of the bit
            counts of individual octrees of shape :math:`(k + \text{batch_size})`

    Returns:
        (torch.ShortTensor):
            A tensor containing batched point hierarchies derived from a batch of octrees,
            of shape :math:`(\text{num_points_at_all_levels}, 3)`.
            See :ref:`the documentation<spc_points>` for more details
    """
    # The CUDA op requires contiguous inputs; ensure that up front.
    packed_octrees = octrees.contiguous()
    structure_info = pyramids.contiguous()
    exclusive_sums = exsum.contiguous()
    return _C.ops.spc.generate_points_cuda(packed_octrees,
                                           structure_info,
                                           exclusive_sums)
4,689 | import math
from torch.autograd import Function
import torch
from kaolin import _C
import math
from .uint8 import bits_to_uint8
from kaolin.rep import Spc
from .points import points_to_morton, points_to_corners
class ToDenseFunction(Function):
    """Autograd bridge for scattering SPC point features into a dense grid.

    Wraps the ``_C.ops.spc.to_dense_forward`` / ``to_dense_backward`` kernels.
    ``forward`` and ``backward`` are declared ``@staticmethod`` as required by
    the modern ``torch.autograd.Function`` API (legacy non-static custom
    functions are rejected by recent PyTorch when invoked through ``.apply``).
    """

    @staticmethod
    def forward(ctx, point_hierarchies, level, pyramids, inputs):
        # The CUDA kernels require contiguous memory layouts.
        inputs = inputs.contiguous()
        pyramids = pyramids.contiguous()
        point_hierarchies = point_hierarchies.contiguous()
        # Stash the tensors (and the non-tensor `level`) needed by backward.
        ctx.save_for_backward(point_hierarchies, pyramids, inputs)
        ctx.level = level
        return _C.ops.spc.to_dense_forward(point_hierarchies, level, pyramids, inputs)

    @staticmethod
    def backward(ctx, grad_outputs):
        grad_outputs = grad_outputs.contiguous()
        point_hierarchies, pyramids, inputs = ctx.saved_tensors
        d_inputs = _C.ops.spc.to_dense_backward(point_hierarchies, ctx.level, pyramids,
                                                inputs, grad_outputs)
        # Gradients only flow back to `inputs`; the structural arguments
        # (point_hierarchies, level, pyramids) are non-differentiable.
        return None, None, None, d_inputs
The provided code snippet includes necessary dependencies for implementing the `to_dense` function. Write a Python function `def to_dense(point_hierarchies, pyramids, input, level=-1, **kwargs)` to solve the following problem:
r"""Convert batched structured point cloud to a batched dense feature grids. The size of the input should correspond to level :math:`l` within the structured point cloud hierarchy. A dense voxel grid of size :math:`(\text{batch_size}, 2^l, 2^l, 2^l, \text{input_channels})` is returned where (for a particular batch): .. math:: Y_{P_i} = X_i \quad\text{for}\; i \in 0,\ldots,|X|-1, where :math:`P_i` is used as a 3D index for dense array :math:`Y`, and :math:`X_i` is the input feature corresponding to to point :math:`P_i`. Locations in :math:`Y` without a correspondense in :math:`X` are set to zero. Args: point_hierarchies (torch.ShortTensor): :ref:`Packed <packed>` collection of point hierarchies, of shape :math:`(\text{num_points})`. See :ref:`point_hierarchies <spc_points>` for a detailed description. pyramids (torch.IntTensor): Batched tensor containing point hierarchy structural information of shape :math:`(\text{batch_size}, 2, \text{max_level}+2)`. See :ref:`pyramids <spc_pyramids>` for a detailed description. input (torch.FloatTensor): Batched tensor of input feature data, of shape :math:`(\text{num_inputs}, \text{feature_dim})`. With :math:`\text{num_inputs}` corresponding to a number of points in the batched point hierarchy at ``level``. level (int): The level at which the octree points are converted to feature grids. Returns: (torch.FloatTensor): The feature grids, of shape :math:`(\text{batch_size}, \text{feature_dim}, 8^\text{level}, 8^\text{level}, 8^\text{level})`.
Here is the function:
def to_dense(point_hierarchies, pyramids, input, level=-1, **kwargs):
    r"""Convert batched structured point cloud to a batched dense feature grids.

    The size of the input should correspond to level :math:`l` within the
    structured point cloud hierarchy. A dense voxel grid of size
    :math:`(\text{batch_size}, 2^l, 2^l, 2^l, \text{input_channels})` is
    returned where (for a particular batch):

    .. math::

        Y_{P_i} = X_i \quad\text{for}\; i \in 0,\ldots,|X|-1,

    where :math:`P_i` is used as a 3D index for dense array :math:`Y`, and :math:`X_i` is the
    input feature corresponding to point :math:`P_i`. Locations in :math:`Y` without a
    correspondence in :math:`X` are set to zero.

    Args:
        point_hierarchies (torch.ShortTensor):
            :ref:`Packed <packed>` collection of point hierarchies,
            of shape :math:`(\text{num_points})`.
            See :ref:`point_hierarchies <spc_points>` for a detailed description.
        pyramids (torch.IntTensor):
            Batched tensor containing point hierarchy structural information
            of shape :math:`(\text{batch_size}, 2, \text{max_level}+2)`.
            See :ref:`pyramids <spc_pyramids>` for a detailed description.
        input (torch.FloatTensor):
            Batched tensor of input feature data,
            of shape :math:`(\text{num_inputs}, \text{feature_dim})`.
            With :math:`\text{num_inputs}` corresponding to a number of points in the
            batched point hierarchy at ``level``.
        level (int):
            The level at which the octree points are converted to feature grids.
            Negative values index from the deepest level, Python style
            (default: -1, i.e. the deepest level).

    Returns:
        (torch.FloatTensor):
            The feature grids, of shape
            :math:`(\text{batch_size}, \text{feature_dim}, 2^\text{level}, 2^\text{level}, 2^\text{level})`.
    """
    # Tolerate extra keyword arguments only if they are Spc attribute names
    # (Spc.KEYS); anything else raises the usual unexpected-kwarg TypeError.
    remaining_kwargs = kwargs.keys() - Spc.KEYS
    if len(remaining_kwargs) > 0:
        raise TypeError("to_dense got an unexpected keyword argument "
                        f"{list(remaining_kwargs)[0]}")
    # Resolve a negative level against the deepest level encoded in the
    # pyramids (last dim has max_level + 2 entries), so -1 -> max_level.
    if level < 0:
        max_level = pyramids.shape[2] - 2
        level = max_level + 1 + level
    return ToDenseFunction.apply(point_hierarchies, level, pyramids, input)
4,690 | import math
from torch.autograd import Function
import torch
from kaolin import _C
import math
from .uint8 import bits_to_uint8
from kaolin.rep import Spc
from .points import points_to_morton, points_to_corners
def bits_to_uint8(bool_t):
    """Pack a binary BoolTensor (last dimension 8) into a uint8 ByteTensor.

    Bit ``i`` of the last dimension contributes :math:`2^i` to the output byte.

    Args:
        bool_t (torch.BoolTensor): Tensor to convert, of last dimension 8.

    Return:
        (torch.ByteTensor):
            Converted tensor of same shape[:-1] and device than `bool_t`.

    Examples:
        >>> bool_t = torch.tensor(
        ...     [[[1, 1, 0, 0, 0, 0, 0, 0],
        ...       [1, 0, 1, 0, 0, 0, 0, 0]],
        ...      [[0, 0, 0, 0, 1, 0, 0, 0],
        ...       [0, 1, 0, 0, 0, 0, 0, 0]]])
        >>> bits_to_uint8(bool_t)
        tensor([[ 3,  5],
                [16,  2]], dtype=torch.uint8)
    """
    # TODO(cfujitsang): This is a naive implementation
    global _bool_to_uint8_w
    device = bool_t.device
    # Per-device cache of the bit weights [1, 2, 4, ..., 128].
    if device in _bool_to_uint8_w:
        weights = _bool_to_uint8_w[device]
    else:
        weights = 2 ** torch.arange(8, device=device,
                                    dtype=torch.long)
        _bool_to_uint8_w[device] = weights
    # Broadcast the weights over every leading dimension, then reduce the
    # last (bit) dimension into a single byte value.
    broadcast_shape = [1] * (bool_t.dim() - 1) + [8]
    weighted_bits = bool_t * weights.reshape(broadcast_shape)
    return weighted_bits.sum(dim=-1).byte()
The provided code snippet includes necessary dependencies for implementing the `feature_grids_to_spc` function. Write a Python function `def feature_grids_to_spc(feature_grids, masks=None)` to solve the following problem:
r"""Convert sparse feature grids to Structured Point Cloud. Args: feature_grids (torch.Tensor): The sparse 3D feature grids, of shape :math:`(\text{batch_size}, \text{feature_dim}, X, Y, Z)` masks (optional, torch.BoolTensor): The masks showing where are the features, of shape :math:`(\text{batch_size}, X, Y, Z)`. Default: A feature is determined when not full of zeros. Returns: (torch.ByteTensor, torch.IntTensor, torch.Tensor): a tuple containing: - The octree, of size :math:`(\text{num_nodes})` - The lengths of each octree, of size :math:`(\text{batch_size})` - The coalescent features, of same dtype than ``feature_grids``, of shape :math:`(\text{num_features}, \text{feature_dim})`.
Here is the function:
def feature_grids_to_spc(feature_grids, masks=None):
    r"""Convert sparse feature grids to Structured Point Cloud.

    Args:
        feature_grids (torch.Tensor):
            The sparse 3D feature grids, of shape
            :math:`(\text{batch_size}, \text{feature_dim}, X, Y, Z)`
        masks (optional, torch.BoolTensor):
            The masks showing where are the features,
            of shape :math:`(\text{batch_size}, X, Y, Z)`.
            Default: A feature is determined when not full of zeros.

    Returns:
        (torch.ByteTensor, torch.IntTensor, torch.Tensor):
            a tuple containing:

                - The octree, of size :math:`(\text{num_nodes})`

                - The lengths of each octree, of size :math:`(\text{batch_size})`

                - The coalescent features, of same dtype as ``feature_grids``,
                  of shape :math:`(\text{num_features}, \text{feature_dim})`.
    """
    batch_size = feature_grids.shape[0]
    feature_dim = feature_grids.shape[1]
    x_dim = feature_grids.shape[2]
    y_dim = feature_grids.shape[3]
    z_dim = feature_grids.shape[4]
    dtype = feature_grids.dtype
    device = feature_grids.device
    # Channel-last layout so the spatial dims can be subdivided by reshaping.
    feature_grids = feature_grids.permute(0, 2, 3, 4, 1)
    level = math.ceil(math.log2(max(x_dim, y_dim, z_dim)))
    # We enforce a power of 2 size to make the subdivision easier
    max_dim = 2 ** level
    padded_feature_grids = torch.zeros(
        (batch_size, max_dim, max_dim, max_dim, feature_dim),
        device=device, dtype=dtype)
    padded_feature_grids[:, :x_dim, :y_dim, :z_dim] = feature_grids
    if masks is None:
        # A voxel is considered occupied iff any feature channel is non-zero.
        masks = torch.any(padded_feature_grids != 0, dim=-1)
    else:
        assert masks.shape == feature_grids.shape[:-1]
        padded_masks = torch.zeros(
            (batch_size, max_dim, max_dim, max_dim),
            device=device, dtype=torch.bool)
        padded_masks[:, :x_dim, :y_dim, :z_dim] = masks
        masks = padded_masks
    octrees = []
    coalescent_features = []
    lengths = []
    # NOTE(review): a 1x1x1 grid (level 0) leaves `octree` empty and the
    # torch.cat below would raise -- confirm the intended minimum grid size.
    # TODO(cfujitsang): vectorize for speedup
    for bs in range(batch_size):
        octree = []
        cur_mask = masks[bs:bs + 1]
        cur_feature_grid = padded_feature_grids[bs:bs + 1]
        cur_dim = max_dim
        # Top-down subdivision: halve the resolution each step, keeping only
        # the 8 children of occupied nodes (breadth-first octree order).
        while cur_dim > 1:
            cur_dim = cur_dim // 2
            cur_mask = cur_mask.reshape(-1, 2, cur_dim, 2, cur_dim, 2, cur_dim)
            cur_feature_grid = cur_feature_grid.reshape(
                -1, 2, cur_dim, 2, cur_dim, 2, cur_dim, feature_dim)
            cur_level_mask = torch.sum(cur_mask, dim=(2, 4, 6)) > 0
            # indexing by masking follow naturally the morton order
            cur_feature_grid = cur_feature_grid.permute(0, 1, 3, 5, 2, 4, 6, 7).reshape(
                -1, 8, cur_dim, cur_dim, cur_dim, feature_dim)[cur_level_mask.reshape(-1, 8)]
            cur_mask = cur_mask.permute(0, 1, 3, 5, 2, 4, 6).reshape(
                -1, 8, cur_dim, cur_dim, cur_dim)[cur_level_mask.reshape(-1, 8)]
            uint8_mask = bits_to_uint8(cur_level_mask.reshape(-1, 8))
            octree.append(uint8_mask)
        octree = torch.cat(octree, dim=0)
        octrees.append(octree)
        lengths.append(octree.shape[0])
        # After full subdivision each remaining cell is 1x1x1: flatten to
        # (num_occupied_leaves, feature_dim).
        coalescent_features.append(cur_feature_grid.reshape(-1, feature_dim))
    octrees = torch.cat(octrees, dim=0)
    lengths = torch.tensor(lengths, dtype=torch.int)
    coalescent_features = torch.cat(coalescent_features, dim=0)
    return octrees, lengths, coalescent_features
4,691 | import math
from torch.autograd import Function
import torch
from kaolin import _C
import math
from .uint8 import bits_to_uint8
from kaolin.rep import Spc
from .points import points_to_morton, points_to_corners
The provided code snippet includes necessary dependencies for implementing the `unbatched_query` function. Write a Python function `def unbatched_query(octree, exsum, query_coords, level, with_parents=False)` to solve the following problem:
r"""Query point indices from the octree. Given a :ref:`point hierarchy<spc_points>` (implicitly encoded in ``octree``) and some coordinates, this function will efficiently find the indices of the points in :ref:`point hierarchy<spc_points>` corresponding to the coordinates. Returns -1 if the point does not exist. Args: octree (torch.ByteTensor): The octree, of shape :math:`(\text{num_bytes})`. exsum (torch.IntTensor): The exclusive sum of the octree bytes, of shape :math:`(\text{num_bytes} + 1)`. See :ref:`spc_exsum` for more details. query_coords (torch.FloatTensor or torch.IntTensor): A tensor of locations to sample of shape :math:`(\text{num_query}, 3)`. If the tensor is a FloatTensor, assumes the coordinates are normalized in [-1, 1]. Otherwise if the tensor is an IntTensor, assumes the coordinates are in the [0, 2^level] space. level (int): The level of the octree to query from. with_parents (bool): If True, will return an array of indices up to the specified level as opposed to only a single level (default: False). Returns: pidx (torch.LongTensor): The indices into the point hierarchy of shape :math:`(\text{num_query})`. If with_parents is True, then the shape will be :math:`(\text{num_query, level+1})`. Examples: >>> import kaolin >>> points = torch.tensor([[3,2,0],[3,1,1],[3,3,3]], device='cuda', dtype=torch.short) >>> octree = kaolin.ops.spc.unbatched_points_to_octree(points, 2) >>> length = torch.tensor([len(octree)], dtype=torch.int32) >>> _, _, prefix = kaolin.ops.spc.scan_octrees(octree, length) >>> query_coords = torch.tensor([[3,2,0]], device='cuda', dtype=torch.short) >>> kaolin.ops.spc.unbatched_query(octree, prefix, query_coords, 2, with_parents=False) tensor([5], device='cuda:0') >>> kaolin.ops.spc.unbatched_query(octree, prefix, query_coords, 2, with_parents=True) tensor([[0, 2, 5]], device='cuda:0')
Here is the function:
def unbatched_query(octree, exsum, query_coords, level, with_parents=False):
    r"""Query point indices from the octree.

    Given an octree (which implicitly encodes a :ref:`point hierarchy<spc_points>`)
    and query coordinates, efficiently finds the indices of the corresponding
    points in the point hierarchy. Queries that hit empty space return -1.

    Args:
        octree (torch.ByteTensor): The octree, of shape :math:`(\text{num_bytes})`.
        exsum (torch.IntTensor): The exclusive sum of the octree bytes,
            of shape :math:`(\text{num_bytes} + 1)`.
            See :ref:`spc_exsum` for more details.
        query_coords (torch.FloatTensor or torch.IntTensor):
            Locations to sample, of shape :math:`(\text{num_query}, 3)`.
            Floating point coordinates are assumed normalized to [-1, 1];
            integer coordinates are assumed to live in the [0, 2^level] grid.
        level (int): The level of the octree to query from.
        with_parents (bool): If True, returns indices for every level up to
            ``level`` instead of a single level (default: False).

    Returns:
        pidx (torch.LongTensor):
            The indices into the point hierarchy of shape :math:`(\text{num_query})`,
            or :math:`(\text{num_query, level+1})` when ``with_parents`` is True.

    Examples:
        >>> import kaolin
        >>> points = torch.tensor([[3,2,0],[3,1,1],[3,3,3]], device='cuda', dtype=torch.short)
        >>> octree = kaolin.ops.spc.unbatched_points_to_octree(points, 2)
        >>> length = torch.tensor([len(octree)], dtype=torch.int32)
        >>> _, _, prefix = kaolin.ops.spc.scan_octrees(octree, length)
        >>> query_coords = torch.tensor([[3,2,0]], device='cuda', dtype=torch.short)
        >>> kaolin.ops.spc.unbatched_query(octree, prefix, query_coords, 2, with_parents=False)
        tensor([5], device='cuda:0')
        >>> kaolin.ops.spc.unbatched_query(octree, prefix, query_coords, 2, with_parents=True)
        tensor([[0, 2, 5]], device='cuda:0')
    """
    if query_coords.is_floating_point():
        coords = query_coords
    else:
        # Map integer grid coordinates from [0, 2^level] into normalized [-1, 1].
        coords = (query_coords.float() / (2 ** level)) * 2.0 - 1.0
    octree = octree.contiguous()
    exsum = exsum.contiguous()
    coords = coords.contiguous()
    if with_parents:
        return _C.ops.spc.query_multiscale_cuda(octree, exsum, coords, level).long()
    return _C.ops.spc.query_cuda(octree, exsum, coords, level).long()
4,692 | import math
from torch.autograd import Function
import torch
from kaolin import _C
import math
from .uint8 import bits_to_uint8
from kaolin.rep import Spc
from .points import points_to_morton, points_to_corners
def unbatched_get_level_points(point_hierarchy, pyramid, level):
    r"""Return the set of points stored at ``level`` of the point hierarchy.

    Args:
        point_hierarchy (torch.ShortTensor):
            The point hierarchy of shape :math:`(\text{num_points}, 3)`.
            See :ref:`point_hierarchies <spc_points>` for a detailed description.
        pyramid (torch.IntTensor):
            The pyramid of shape :math:`(2, \text{max_level}+2)`
            See :ref:`pyramids <spc_pyramids>` for a detailed description.
        level (int): The level of the point hierarchy to retrieve.

    Returns:
        (torch.ShortTensor): The pointset of shape :math:`(\text{num_points_on_level}, 3)`.
    """
    # Row 1 of the pyramid holds cumulative offsets: level `l` occupies the
    # half-open range [pyramid[1, l], pyramid[1, l + 1]) of the flat hierarchy.
    start = pyramid[1, level]
    end = pyramid[1, level + 1]
    return point_hierarchy[start:end]
def points_to_morton(points):
    r"""Convert (quantized) 3D points to their morton codes.

    Args:
        points (torch.ShortTensor):
            Quantized 3D points (coordinates for a single level, not a full
            SPC point hierarchy), of shape :math:`(\text{num_points}, 3)`.

    Returns:
        (torch.LongTensor):
            The morton code of each point, of shape :math:`(\text{num_points})`.

    Examples:
        >>> inputs = torch.tensor([
        ...     [0, 0, 0],
        ...     [0, 0, 1],
        ...     [0, 0, 2],
        ...     [0, 0, 3],
        ...     [0, 1, 0]], device='cuda', dtype=torch.int16)
        >>> points_to_morton(inputs)
        tensor([0, 1, 8, 9, 2], device='cuda:0')
    """
    # Flatten any leading batch dimensions, run the kernel, restore them.
    batch_dims = points.shape[:-1]
    flat_points = points.reshape(-1, 3).contiguous()
    codes = _C.ops.spc.points_to_morton_cuda(flat_points)
    return codes.reshape(batch_dims)
def points_to_corners(points):
    r"""Compute the 8 corners of each point's cell, the point itself being the 0th-bit corner.

    Args:
        points (torch.ShortTensor): Quantized 3D points,
            of shape :math:`(\text{num_points}, 3)`.

    Returns:
        (torch.ShortTensor):
            Quantized 3D new points,
            of shape :math:`(\text{num_points}, 8, 3)`.

    Examples:
        >>> inputs = torch.tensor([
        ...     [0, 0, 0],
        ...     [0, 2, 0]], device='cuda', dtype=torch.int16)
        >>> points_to_corners(inputs)
        tensor([[[0, 0, 0],
                 [0, 0, 1],
                 [0, 1, 0],
                 [0, 1, 1],
                 [1, 0, 0],
                 [1, 0, 1],
                 [1, 1, 0],
                 [1, 1, 1]],
        <BLANKLINE>
                [[0, 2, 0],
                 [0, 2, 1],
                 [0, 3, 0],
                 [0, 3, 1],
                 [1, 2, 0],
                 [1, 2, 1],
                 [1, 3, 0],
                 [1, 3, 1]]], device='cuda:0', dtype=torch.int16)
    """
    corners = _C.ops.spc.points_to_corners_cuda(points.contiguous())
    # Insert the corner dimension (8) just before the coordinate dimension.
    return corners.reshape(*points.shape[:-1], 8, points.shape[-1])
The provided code snippet includes necessary dependencies for implementing the `unbatched_make_dual` function. Write a Python function `def unbatched_make_dual(point_hierarchy, pyramid)` to solve the following problem:
r"""Creates the dual of the octree given the point hierarchy and pyramid. Each node of the primary octree (represented as the :ref:`point_hierarchies <spc_points>`) can be thought of as voxels with 8 corners. The dual of the octree represents the corners of the primary octree nodes as another tree of nodes with a hierarchy of points and a pyramid. The mapping from the primary octree nodes to the nodes in the dual tree can be obtained through trinkets which can be created from ``make_trinkets``. Args: point_hierarchy (torch.ShortTensor): The point hierarchy of shape :math:`(\text{num_points}, 3)`. See :ref:`point_hierarchies <spc_points>` for a detailed description. pyramid (torch.IntTensor): The pyramid of shape :math:`(2, \text{max_level}+2)` See :ref:`pyramids <spc_pyramids>` for a detailed description. Returns: (torch.ShortTensor, torch.IntTensor): - The point hierarchy of the dual octree of shape :math:`(\text{num_dual_points}, 3)`. - The dual pyramid of shape :math:`(2, \text{max_level}+2)` Examples: >>> import kaolin >>> points = torch.tensor([[0, 0, 0], [0, 0, 1], [0, 1, 0]], device='cuda', dtype=torch.int16) >>> level = 1 >>> octree = kaolin.ops.spc.unbatched_points_to_octree(points, level) >>> length = torch.tensor([len(octree)], dtype=torch.int32) >>> _, pyramid, prefix = kaolin.ops.spc.scan_octrees(octree, length) >>> point_hierarchy = kaolin.ops.spc.generate_points(octree, pyramid, prefix) >>> point_hierarchy_dual, pyramid_dual = kaolin.ops.spc.unbatched_make_dual(point_hierarchy, pyramid[0]) >>> kaolin.ops.spc.unbatched_get_level_points(point_hierarchy_dual, pyramid_dual, 0) # the corners of the root tensor([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]], device='cuda:0', dtype=torch.int16) >>> kaolin.ops.spc.unbatched_get_level_points(point_hierarchy_dual, pyramid_dual, 1) # the corners of the 1st level tensor([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], [0, 0, 2], 
[0, 1, 2], [1, 0, 2], [1, 1, 2], [0, 2, 0], [0, 2, 1], [1, 2, 0], [1, 2, 1]], device='cuda:0', dtype=torch.int16)
Here is the function:
def unbatched_make_dual(point_hierarchy, pyramid):
    r"""Creates the dual of the octree given the point hierarchy and pyramid.

    Each node of the primary octree (represented as the :ref:`point_hierarchies <spc_points>`)
    can be thought of as voxels with 8 corners. The dual of the octree represents the corners
    of the primary octree nodes as another tree of nodes with a hierarchy of points and a pyramid.
    The mapping from the primary octree nodes to the nodes in the dual tree can be obtained through
    trinkets which can be created from ``make_trinkets``.

    Args:
        point_hierarchy (torch.ShortTensor):
            The point hierarchy of shape :math:`(\text{num_points}, 3)`.
            See :ref:`point_hierarchies <spc_points>` for a detailed description.
        pyramid (torch.IntTensor):
            The pyramid of shape :math:`(2, \text{max_level}+2)`
            See :ref:`pyramids <spc_pyramids>` for a detailed description.

    Returns:
        (torch.ShortTensor, torch.IntTensor):
            - The point hierarchy of the dual octree of shape :math:`(\text{num_dual_points}, 3)`.
            - The dual pyramid of shape :math:`(2, \text{max_level}+2)`

    Examples:
        >>> import kaolin
        >>> points = torch.tensor([[0, 0, 0], [0, 0, 1], [0, 1, 0]], device='cuda', dtype=torch.int16)
        >>> level = 1
        >>> octree = kaolin.ops.spc.unbatched_points_to_octree(points, level)
        >>> length = torch.tensor([len(octree)], dtype=torch.int32)
        >>> _, pyramid, prefix = kaolin.ops.spc.scan_octrees(octree, length)
        >>> point_hierarchy = kaolin.ops.spc.generate_points(octree, pyramid, prefix)
        >>> point_hierarchy_dual, pyramid_dual = kaolin.ops.spc.unbatched_make_dual(point_hierarchy, pyramid[0])
        >>> kaolin.ops.spc.unbatched_get_level_points(point_hierarchy_dual, pyramid_dual, 0) # the corners of the root
        tensor([[0, 0, 0],
                [0, 0, 1],
                [0, 1, 0],
                [0, 1, 1],
                [1, 0, 0],
                [1, 0, 1],
                [1, 1, 0],
                [1, 1, 1]], device='cuda:0', dtype=torch.int16)
        >>> kaolin.ops.spc.unbatched_get_level_points(point_hierarchy_dual, pyramid_dual, 1) # the corners of the 1st level
        tensor([[0, 0, 0],
                [0, 0, 1],
                [0, 1, 0],
                [0, 1, 1],
                [1, 0, 0],
                [1, 0, 1],
                [1, 1, 0],
                [1, 1, 1],
                [0, 0, 2],
                [0, 1, 2],
                [1, 0, 2],
                [1, 1, 2],
                [0, 2, 0],
                [0, 2, 1],
                [1, 2, 0],
                [1, 2, 1]], device='cuda:0', dtype=torch.int16)
    """
    pyramid_dual = torch.zeros_like(pyramid)
    point_hierarchy_dual = []
    for i in range(pyramid.shape[1] - 1):
        # All 8 corners of every voxel at level i, flattened to (N * 8, 3).
        corners = points_to_corners(unbatched_get_level_points(point_hierarchy, pyramid, i)).reshape(-1, 3)
        # Corners shared between neighboring voxels appear multiple times;
        # deduplicate them to obtain the dual nodes of this level.
        points_dual = torch.unique(corners, dim=0)
        # Keep each level sorted in morton order, like the primary hierarchy.
        sort_idxes = points_to_morton(points_dual).argsort()
        points_dual = points_dual[sort_idxes]
        point_hierarchy_dual.append(points_dual)
        # Row 0 of the pyramid: per-level point counts.
        pyramid_dual[0, i] = len(point_hierarchy_dual[i])
        # Row 1: cumulative offsets (previous offset + previous count).
        if i > 0:
            pyramid_dual[1, i] += pyramid_dual[:, i - 1].sum()
    # Final sentinel offset: total number of dual points across all levels.
    pyramid_dual[1, pyramid.shape[1] - 1] += pyramid_dual[:, pyramid.shape[1] - 2].sum()
    point_hierarchy_dual = torch.cat(point_hierarchy_dual, dim=0)
    return point_hierarchy_dual, pyramid_dual
[1, 0, 1], [1, 1, 0], [1, 1, 1], [0, 0, 2], [0, 1, 2], [1, 0, 2], [1, 1, 2], [0, 2, 0], [0, 2, 1], [1, 2, 0], [1, 2, 1]], device='cuda:0', dtype=torch.int16) |
4,693 | import math
from torch.autograd import Function
import torch
from kaolin import _C
import math
from .uint8 import bits_to_uint8
from kaolin.rep import Spc
from .points import points_to_morton, points_to_corners
def unbatched_get_level_points(point_hierarchy, pyramid, level):
    r"""Extract the points belonging to one level of the point hierarchy.

    Args:
        point_hierarchy (torch.ShortTensor):
            The point hierarchy of shape :math:`(\text{num_points}, 3)`.
            See :ref:`point_hierarchies <spc_points>` for a detailed description.
        pyramid (torch.IntTensor):
            The pyramid of shape :math:`(2, \text{max_level}+2)`
            See :ref:`pyramids <spc_pyramids>` for a detailed description.
        level (int): The level of the point hierarchy to retrieve.

    Returns:
        (torch.ShortTensor): The pointset of shape :math:`(\text{num_points_on_level}, 3)`.
    """
    # pyramid[1] is the row of cumulative offsets into the flat hierarchy;
    # consecutive entries bracket the points of a single level.
    offsets = pyramid[1]
    return point_hierarchy[offsets[level]:offsets[level + 1]]
def points_to_morton(points):
    r"""Encode (quantized) 3D points as morton codes.

    Args:
        points (torch.ShortTensor):
            Quantized 3D points for a single level (not a full SPC point
            hierarchy), of shape :math:`(\text{num_points}, 3)`.

    Returns:
        (torch.LongTensor):
            The morton code of the points, of shape :math:`(\text{num_points})`.

    Examples:
        >>> inputs = torch.tensor([
        ...     [0, 0, 0],
        ...     [0, 0, 1],
        ...     [0, 0, 2],
        ...     [0, 0, 3],
        ...     [0, 1, 0]], device='cuda', dtype=torch.int16)
        >>> points_to_morton(inputs)
        tensor([0, 1, 8, 9, 2], device='cuda:0')
    """
    # Remember the leading (batch) dimensions so they can be restored after
    # the kernel, which only accepts a flat (N, 3) layout.
    prefix_shape = list(points.shape)[:-1]
    morton = _C.ops.spc.points_to_morton_cuda(points.reshape(-1, 3).contiguous())
    return morton.reshape(prefix_shape)
def points_to_corners(points):
    r"""Calculates the corners of the points assuming each point is the 0th bit corner.

    Args:
        points (torch.ShortTensor): Quantized 3D points,
            of shape :math:`(\text{num_points}, 3)`.

    Returns:
        (torch.ShortTensor):
            Quantized 3D new points,
            of shape :math:`(\text{num_points}, 8, 3)`.

    Examples:
        >>> inputs = torch.tensor([
        ...     [0, 0, 0],
        ...     [0, 2, 0]], device='cuda', dtype=torch.int16)
        >>> points_to_corners(inputs)
        tensor([[[0, 0, 0],
                 [0, 0, 1],
                 [0, 1, 0],
                 [0, 1, 1],
                 [1, 0, 0],
                 [1, 0, 1],
                 [1, 1, 0],
                 [1, 1, 1]],
        <BLANKLINE>
                [[0, 2, 0],
                 [0, 2, 1],
                 [0, 3, 0],
                 [0, 3, 1],
                 [1, 2, 0],
                 [1, 2, 1],
                 [1, 3, 0],
                 [1, 3, 1]]], device='cuda:0', dtype=torch.int16)
    """
    # Output shape is the input shape with an extra corner dimension (8)
    # inserted before the coordinate dimension.
    out_shape = points.shape[:-1] + (8, points.shape[-1])
    return _C.ops.spc.points_to_corners_cuda(points.contiguous()).reshape(out_shape)
The provided code snippet includes necessary dependencies for implementing the `unbatched_make_trinkets` function. Write a Python function `def unbatched_make_trinkets(point_hierarchy, pyramid, point_hierarchy_dual, pyramid_dual)` to solve the following problem:
r"""Creates the trinkets for the dual octree. The trinkets are indirection pointers (in practice, indices) from the nodes of the primary octree to the nodes of the dual octree. The nodes of the dual octree represent the corners of the voxels defined by the primary octree. The trinkets are useful for accessing values stored on the corners (like for example a signed distance function) and interpolating them from the nodes of the primary octree. Args: point_hierarchy (torch.ShortTensor): The point hierarchy of shape :math:`(\text{num_points}, 3)`. pyramid (torch.IntTensor): The pyramid of shape :math:`(2, \text{max_level}+2)` point_hierarchy_dual (torch.ShortTensor): The point hierarchy of the dual octree of shape :math:`(\text{num_dual_points}, 3)`. pyramid_dual (torch.IntTensor): The dual pyramid of shape :math:`(2, \text{max_level}+2)` Returns: (torch.IntTensor, torch.IntTensor): - The trinkets of shape :math:`(\text{num_points}, 8)`. - Indirection pointers to the parents of shape :math:`(\text{num_points})`.
Here is the function:
def unbatched_make_trinkets(point_hierarchy, pyramid, point_hierarchy_dual, pyramid_dual):
r"""Creates the trinkets for the dual octree.
The trinkets are indirection pointers (in practice, indices) from the nodes of the primary octree
to the nodes of the dual octree. The nodes of the dual octree represent the corners of the voxels
defined by the primary octree. The trinkets are useful for accessing values stored on the corners
(like for example a signed distance function) and interpolating them from the nodes of the primary
octree.
Args:
point_hierarchy (torch.ShortTensor): The point hierarchy of shape :math:`(\text{num_points}, 3)`.
pyramid (torch.IntTensor): The pyramid of shape :math:`(2, \text{max_level}+2)`
point_hierarchy_dual (torch.ShortTensor): The point hierarchy of the dual octree of shape
:math:`(\text{num_dual_points}, 3)`.
pyramid_dual (torch.IntTensor): The dual pyramid of shape :math:`(2, \text{max_level}+2)`
Returns:
(torch.IntTensor, torch.IntTensor):
- The trinkets of shape :math:`(\text{num_points}, 8)`.
- Indirection pointers to the parents of shape :math:`(\text{num_points})`.
"""
device = point_hierarchy.device
trinkets = []
parents = []
# At a high level... the goal of this algorithm is to create a table which maps from the primary
# octree of voxels to the dual octree of corners, while also keeping track of parents.
# It does so by constructing a lookup table which maps morton codes of the source octree corners
# to the index of the destination (dual), then using pandas to do table lookups. It's a silly
# solution that would be much faster with a GPU but works well enough.
for lvl in range(pyramid_dual.shape[1] - 1):
# The source (primary octree) is sorted in morton order by construction
points = unbatched_get_level_points(point_hierarchy, pyramid, lvl)
corners = points_to_corners(points)
mt_src = points_to_morton(corners.reshape(-1, 3))
# The destination (dual octree) needs to be sorted too
points_dual = unbatched_get_level_points(point_hierarchy_dual, pyramid_dual, lvl)
mt_dest = points_to_morton(points_dual)
# Uses arange to associate from the morton codes to the point index. The point index is indexed from 0.
lut = {k: i for i, k in enumerate(mt_dest.cpu().numpy())}
if lvl == 0:
parents.append(torch.tensor([-1], device='cuda', dtype=torch.int).to(device))
else:
# Dividing by 2 will yield the morton code of the parent
pc = torch.floor(points / 2.0).short()
# Morton of the parents (point_hierarchy_index -> parent_morton)
mt_pc_parent = points_to_morton(pc)
# Morton of the children (point_hierarchy_index -> self_morton)
mt_pc_child = points_to_morton(points)
points_parents = unbatched_get_level_points(point_hierarchy, pyramid, lvl - 1)
# point_hierarchy_index (i-1) -> parent_morton
mt_parents = points_to_morton(points_parents)
# parent_morton -> point_hierarchy_index
plut = {k: i for i, k in enumerate(mt_parents.cpu().numpy())}
pc_idx = [plut[i] for i in mt_pc_parent.cpu().numpy()]
parents.append(torch.tensor(pc_idx, device=device, dtype=torch.int) +
pyramid[1, lvl - 1])
idx = [lut[i] for i in mt_src.cpu().numpy()]
trinkets.extend(idx)
# Trinkets are relative to the beginning of each pyramid base
trinkets = torch.tensor(trinkets, device=device, dtype=torch.int).reshape(-1, 8)
parents = torch.cat(parents, dim=0)
return trinkets, parents | r"""Creates the trinkets for the dual octree. The trinkets are indirection pointers (in practice, indices) from the nodes of the primary octree to the nodes of the dual octree. The nodes of the dual octree represent the corners of the voxels defined by the primary octree. The trinkets are useful for accessing values stored on the corners (like for example a signed distance function) and interpolating them from the nodes of the primary octree. Args: point_hierarchy (torch.ShortTensor): The point hierarchy of shape :math:`(\text{num_points}, 3)`. pyramid (torch.IntTensor): The pyramid of shape :math:`(2, \text{max_level}+2)` point_hierarchy_dual (torch.ShortTensor): The point hierarchy of the dual octree of shape :math:`(\text{num_dual_points}, 3)`. pyramid_dual (torch.IntTensor): The dual pyramid of shape :math:`(2, \text{max_level}+2)` Returns: (torch.IntTensor, torch.IntTensor): - The trinkets of shape :math:`(\text{num_points}, 8)`. - Indirection pointers to the parents of shape :math:`(\text{num_points})`. |
4,694 | import math
from torch import nn
from torch.autograd import Function
import torch
from kaolin import _C
from kaolin.rep import Spc
class Conv3dFunction(Function):
    # Custom autograd Function wrapping the CUDA sparse-octree convolution
    # kernels exposed through kaolin's _C extension. Invoke via
    # Conv3dFunction.apply(...).
    # NOTE(review): forward/backward are declared without @staticmethod; they
    # still resolve correctly through Function.apply in Python 3, but modern
    # PyTorch style marks them explicitly -- confirm against the targeted
    # torch version.

    def forward(ctx, octrees, point_hierarchies, level, pyramids, exsum,
                inputs, params, kernel_vectors, jump):
        """Run the forward CUDA convolution and stash tensors for backward.

        Returns the convolved features and the output level wrapped in a
        1-element tensor that is marked non-differentiable.
        """
        # The CUDA kernels require contiguous memory layouts.
        octrees = octrees.contiguous()
        point_hierarchies = point_hierarchies.contiguous()
        pyramids = pyramids.contiguous()
        exsum = exsum.contiguous()
        inputs = inputs.contiguous()
        params = params.contiguous()
        kernel_vectors = kernel_vectors.contiguous()
        # Save exactly the tensors backward() unpacks -- order matters.
        ctx.save_for_backward(octrees, point_hierarchies, pyramids, exsum,
                              inputs, params, kernel_vectors)
        ctx.jump = jump  # jump is an int, not a tensor
        outputs, level = _C.ops.spc.Conv3d_forward(
            octrees, point_hierarchies, level, pyramids, exsum,
            inputs, params, kernel_vectors, jump)
        # Keep the plain int level on ctx for backward(); return it to the
        # caller as a tensor so it can be flagged non-differentiable.
        ctx.level = level
        level = torch.tensor([level])
        ctx.mark_non_differentiable(level)
        return outputs, level

    def backward(ctx, grad_outputs, grad_level):
        """Backpropagate through the CUDA convolution.

        Only the feature inputs and filter params receive gradients; all
        structural arguments (octrees, hierarchies, ...) get None.
        """
        grad_outputs = grad_outputs.contiguous()
        octrees, point_hierarchies, pyramids, exsum, inputs, params, kernel_vectors = ctx.saved_tensors
        d_inputs, d_params = _C.ops.spc.Conv3d_backward(
            octrees, point_hierarchies, ctx.level, pyramids, exsum, inputs,
            grad_outputs, params, kernel_vectors, ctx.jump)
        # One gradient slot per forward() argument (ctx excluded).
        return None, None, None, None, None, d_inputs, d_params, None, None
The provided code snippet includes necessary dependencies for implementing the `conv3d` function. Write a Python function `def conv3d(octrees, point_hierarchies, level, pyramids, exsum, input, weight, kernel_vectors, jump=0, bias=None, **kwargs)` to solve the following problem:
r"""Convolution over a structured point cloud. The inputs :math:`X` are mapped to outputs :math:`Y` by the following: .. math:: Y_i = \sum_k w_k \cdot X_{n(i,k)} + b \quad\text{for}\; i \in 0,\ldots,|Y|-1, where :math:`w_k` are weights associated with the kernel, and :math:`n(i,k)` is the neighborhood function described :ref:`here <neighborhood-text>`. Args: octrees (torch.ByteTensor): :ref:`packed` octrees of shape :math:`(\text{num_bytes})`. See :ref:`octree <spc_octree>`. point_hierarchies (torch.ShortTensor): :ref:`packed` point hierarchies of shape :math:`(\text{num_points})`. See :ref:`point_hierarchies <spc_points>`. level (int): level at which the ``input`` features are associated to. pyramids (torch.IntTensor): Batched tensor containing point hierarchy structural information of shape :math:`(\text{batch_size}, 2, \text{max_level}+2)`. See :ref:`pyramids <spc_pyramids>`. exsum (torch.IntTensor): Tensor containing the :ref:`packed` exclusive sum of the bit counts of individual octrees of shape :math:`(\text{num_bytes} + \text{batch_size})`. See :ref:`exsum <spc_exsum>`. input (torch.FloatTensor): :ref:`packed` input feature data of the octrees, of shape :math:`(\text{total_num_inputs}, \text{in_channels})`, where ``total_num_inputs`` correspond to the number of nodes of the octrees at ``level``, and ``in_channels`` is the input feature dimension (for instance 3 for RGB color). weight (torch.FloatTensor): filter of shape :math:`(\text{kernel_vectors.shape[0]}, \text{in_channels}, \text{self.out_channels})`. kernel_vectors (torch.ShortTensor): A tensor of 3D offsets that define the shape of the kernel, of shape :math:`(\text{num_weights}, 3)`. See :ref:`kernel creation <kernel-text>`. jump (int, optional): The difference between the input and output levels for the convolution. A non-zero value implies downsampling. Value must be positive and refer to a valid level of the structured point cloud. Default: 0. 
bias (torch.FloatTensor, optional): optional bias tensor of shape :math:`(\text{out_channel})`. Returns: (torch.FloatTensor, int): - Output of convolution. Number of outputs will correspond to level in the hierachy determined by **jump**. - the level associated to the output features.
Here is the function:
def conv3d(octrees, point_hierarchies, level, pyramids, exsum, input,
weight, kernel_vectors, jump=0, bias=None, **kwargs):
r"""Convolution over a structured point cloud. The inputs :math:`X` are mapped
to outputs :math:`Y` by the following:
.. math::
Y_i = \sum_k w_k \cdot X_{n(i,k)} + b \quad\text{for}\; i \in 0,\ldots,|Y|-1,
where :math:`w_k` are weights associated with the kernel, and :math:`n(i,k)` is the
neighborhood function described :ref:`here <neighborhood-text>`.
Args:
octrees (torch.ByteTensor):
:ref:`packed` octrees of shape :math:`(\text{num_bytes})`.
See :ref:`octree <spc_octree>`.
point_hierarchies (torch.ShortTensor):
:ref:`packed` point hierarchies of shape :math:`(\text{num_points})`.
See :ref:`point_hierarchies <spc_points>`.
level (int):
level at which the ``input`` features are associated to.
pyramids (torch.IntTensor):
Batched tensor containing point hierarchy structural information
of shape :math:`(\text{batch_size}, 2, \text{max_level}+2)`.
See :ref:`pyramids <spc_pyramids>`.
exsum (torch.IntTensor):
Tensor containing the :ref:`packed` exclusive sum of the bit
counts of individual octrees of shape :math:`(\text{num_bytes} + \text{batch_size})`.
See :ref:`exsum <spc_exsum>`.
input (torch.FloatTensor):
:ref:`packed` input feature data of the octrees,
of shape :math:`(\text{total_num_inputs}, \text{in_channels})`,
where ``total_num_inputs`` correspond to the number of nodes of the octrees at ``level``,
and ``in_channels`` is the input feature dimension (for instance 3 for RGB color).
weight (torch.FloatTensor):
filter of shape :math:`(\text{kernel_vectors.shape[0]}, \text{in_channels},
\text{self.out_channels})`.
kernel_vectors (torch.ShortTensor):
A tensor of 3D offsets that define the shape of the kernel,
of shape :math:`(\text{num_weights}, 3)`.
See :ref:`kernel creation <kernel-text>`.
jump (int, optional):
The difference between the input and output levels for the convolution.
A non-zero value implies downsampling. Value must be positive and refer to a valid level of
the structured point cloud. Default: 0.
bias (torch.FloatTensor, optional):
optional bias tensor of shape :math:`(\text{out_channel})`.
Returns:
(torch.FloatTensor, int):
- Output of convolution. Number of outputs will correspond
to level in the hierachy determined by **jump**.
- the level associated to the output features.
"""
remaining_kwargs = kwargs.keys() - Spc.KEYS
if len(remaining_kwargs) > 0:
raise TypeError("conv3d got an unexpected keyword argument "
f"{list(remaining_kwargs)[0]}")
if (weight.shape[0] == 1 and jump == 0):
outputs = input.mm(weight.squeeze(0))
else:
outputs, level = Conv3dFunction.apply(octrees, point_hierarchies, level,
pyramids, exsum, input, weight,
kernel_vectors, jump)
if bias is not None:
outputs += bias.unsqueeze(0)
return outputs, int(level) | r"""Convolution over a structured point cloud. The inputs :math:`X` are mapped to outputs :math:`Y` by the following: .. math:: Y_i = \sum_k w_k \cdot X_{n(i,k)} + b \quad\text{for}\; i \in 0,\ldots,|Y|-1, where :math:`w_k` are weights associated with the kernel, and :math:`n(i,k)` is the neighborhood function described :ref:`here <neighborhood-text>`. Args: octrees (torch.ByteTensor): :ref:`packed` octrees of shape :math:`(\text{num_bytes})`. See :ref:`octree <spc_octree>`. point_hierarchies (torch.ShortTensor): :ref:`packed` point hierarchies of shape :math:`(\text{num_points})`. See :ref:`point_hierarchies <spc_points>`. level (int): level at which the ``input`` features are associated to. pyramids (torch.IntTensor): Batched tensor containing point hierarchy structural information of shape :math:`(\text{batch_size}, 2, \text{max_level}+2)`. See :ref:`pyramids <spc_pyramids>`. exsum (torch.IntTensor): Tensor containing the :ref:`packed` exclusive sum of the bit counts of individual octrees of shape :math:`(\text{num_bytes} + \text{batch_size})`. See :ref:`exsum <spc_exsum>`. input (torch.FloatTensor): :ref:`packed` input feature data of the octrees, of shape :math:`(\text{total_num_inputs}, \text{in_channels})`, where ``total_num_inputs`` correspond to the number of nodes of the octrees at ``level``, and ``in_channels`` is the input feature dimension (for instance 3 for RGB color). weight (torch.FloatTensor): filter of shape :math:`(\text{kernel_vectors.shape[0]}, \text{in_channels}, \text{self.out_channels})`. kernel_vectors (torch.ShortTensor): A tensor of 3D offsets that define the shape of the kernel, of shape :math:`(\text{num_weights}, 3)`. See :ref:`kernel creation <kernel-text>`. jump (int, optional): The difference between the input and output levels for the convolution. A non-zero value implies downsampling. Value must be positive and refer to a valid level of the structured point cloud. Default: 0. 
bias (torch.FloatTensor, optional): optional bias tensor of shape :math:`(\text{out_channel})`. Returns: (torch.FloatTensor, int): - Output of convolution. Number of outputs will correspond to level in the hierachy determined by **jump**. - the level associated to the output features. |
4,695 | import math
from torch import nn
from torch.autograd import Function
import torch
from kaolin import _C
from kaolin.rep import Spc
class ConvTranspose3dFunction(Function):
    # Custom autograd Function wrapping the CUDA sparse-octree transposed
    # convolution kernels from kaolin's _C extension. Invoke via
    # ConvTranspose3dFunction.apply(...).
    # NOTE(review): forward/backward are declared without @staticmethod; they
    # still resolve correctly through Function.apply in Python 3, but modern
    # PyTorch style marks them explicitly -- confirm against the targeted
    # torch version.

    def forward(ctx, octrees, point_hierarchies, level, pyramids, exsum,
                inputs, params, kernel_vectors, jump):
        """Run the forward CUDA transposed convolution and stash tensors
        for backward.

        Returns the output features and the output level wrapped in a
        1-element tensor that is marked non-differentiable.
        """
        # The CUDA kernels require contiguous memory layouts.
        octrees = octrees.contiguous()
        point_hierarchies = point_hierarchies.contiguous()
        pyramids = pyramids.contiguous()
        exsum = exsum.contiguous()
        inputs = inputs.contiguous()
        params = params.contiguous()
        kernel_vectors = kernel_vectors.contiguous()
        # Save exactly the tensors backward() unpacks -- order matters.
        ctx.save_for_backward(octrees, point_hierarchies, pyramids, exsum, inputs,
                              params, kernel_vectors)
        ctx.jump = jump  # jump is an int, stored out-of-band of saved_tensors
        outputs, level = _C.ops.spc.ConvTranspose3d_forward(octrees, point_hierarchies,
                                                            level, pyramids, exsum,
                                                            inputs, params, kernel_vectors, jump)
        # Keep the plain int level on ctx for backward(); return it to the
        # caller as a tensor so it can be flagged non-differentiable.
        ctx.level = level
        level = torch.tensor([level])
        ctx.mark_non_differentiable(level)
        return outputs, level

    def backward(ctx, grad_outputs, grad_level):
        """Backpropagate through the CUDA transposed convolution.

        Only the feature inputs and filter params receive gradients; all
        structural arguments (octrees, hierarchies, ...) get None.
        """
        grad_outputs = grad_outputs.contiguous()
        octrees, point_hierarchies, pyramids, exsum, inputs, params, kernel_vectors = \
            ctx.saved_tensors
        d_inputs, d_params = _C.ops.spc.ConvTranspose3d_backward(
            octrees, point_hierarchies, ctx.level, pyramids, exsum, inputs,
            grad_outputs, params, kernel_vectors, ctx.jump)
        # One gradient slot per forward() argument (ctx excluded).
        return None, None, None, None, None, d_inputs, d_params, None, None
The provided code snippet includes necessary dependencies for implementing the `conv_transpose3d` function. Write a Python function `def conv_transpose3d(octrees, point_hierarchies, level, pyramids, exsum, input, weight, kernel_vectors, jump=0, bias=None, **kwargs)` to solve the following problem:
r"""Transposed convolution over a structured point cloud. The inputs :math:`X` are mapped to outputs :math:`Y` by the following: .. math:: Y_i = \sum_k w_k \cdot X_{n^T(i,k)} + b \quad\text{for}\; i \in 0,\ldots,|Y|-1, where :math:`w_k` are weights associated with the kernel, and :math:`n^T(i,k)` is the transpose neighborhood function described :ref:`here <neighborhood-text>`. Args: octrees (torch.ByteTensor): :ref:`packed` octrees of shape :math:`(\text{num_bytes})`. See :ref:`octree <spc_octree>`. point_hierarchies (torch.ShortTensor): :ref:`packed` point hierarchies of shape :math:`(\text{num_points})`. See :ref:`point_hierarchies <spc_points>`. level (int): level at which the ``input`` features are associated to. pyramids (torch.IntTensor): Batched tensor containing point hierarchy structural information of shape :math:`(\text{batch_size}, 2, \text{max_level}+2)`. See :ref:`pyramids <spc_pyramids>`. exsum (torch.IntTensor): Tensor containing the :ref:`packed` exclusive sum of the bit counts of individual octrees of shape :math:`(\text{num_bytes} + \text{batch_size})`. See :ref:`exsum <spc_exsum>`. input (torch.FloatTensor): :ref:`packed` input feature data of the octrees, of shape :math:`(\text{total_num_inputs}, \text{in_channels})`, where ``total_num_inputs`` correspond to the number of nodes of the octrees at ``level``, and ``in_channels`` is the input feature dimension (for instance 3 for RGB color). weight (torch.FloatTensor): filter of shape :math:`(\text{kernel_vectors.shape[0]}, \text{in_channels}, \text{self.out_channels})`. kernel_vectors (torch.ShortTensor): A tensor of 3D offsets that define the shape of the kernel, of shape :math:`(\text{num_weights}, 3)`. See :ref:`kernel creation <kernel-text>`. jump (int, optional): The difference between the input and output levels for the convolution. A non-zero value implies downsampling. Value must be positive and refer to a valid level of the structured point cloud. Default: 0. 
bias (torch.FloatTensor, optional): optional bias tensor of shape :math:`(\text{out_channel})`.
Here is the function:
def conv_transpose3d(octrees, point_hierarchies, level, pyramids, exsum,
input, weight, kernel_vectors, jump=0, bias=None, **kwargs):
r"""Transposed convolution over a structured point cloud. The inputs :math:`X` are mapped
to outputs :math:`Y` by the following:
.. math::
Y_i = \sum_k w_k \cdot X_{n^T(i,k)} + b \quad\text{for}\; i \in 0,\ldots,|Y|-1,
where :math:`w_k` are weights associated with the kernel, and :math:`n^T(i,k)` is the
transpose neighborhood function described :ref:`here <neighborhood-text>`.
Args:
octrees (torch.ByteTensor):
:ref:`packed` octrees of shape :math:`(\text{num_bytes})`.
See :ref:`octree <spc_octree>`.
point_hierarchies (torch.ShortTensor):
:ref:`packed` point hierarchies of shape :math:`(\text{num_points})`.
See :ref:`point_hierarchies <spc_points>`.
level (int):
level at which the ``input`` features are associated to.
pyramids (torch.IntTensor):
Batched tensor containing point hierarchy structural information
of shape :math:`(\text{batch_size}, 2, \text{max_level}+2)`.
See :ref:`pyramids <spc_pyramids>`.
exsum (torch.IntTensor):
Tensor containing the :ref:`packed` exclusive sum of the bit
counts of individual octrees of shape :math:`(\text{num_bytes} + \text{batch_size})`.
See :ref:`exsum <spc_exsum>`.
input (torch.FloatTensor):
:ref:`packed` input feature data of the octrees,
of shape :math:`(\text{total_num_inputs}, \text{in_channels})`,
where ``total_num_inputs`` correspond to the number of nodes of the octrees at ``level``,
and ``in_channels`` is the input feature dimension (for instance 3 for RGB color).
weight (torch.FloatTensor):
filter of shape :math:`(\text{kernel_vectors.shape[0]}, \text{in_channels},
\text{self.out_channels})`.
kernel_vectors (torch.ShortTensor):
A tensor of 3D offsets that define the shape of the kernel,
of shape :math:`(\text{num_weights}, 3)`.
See :ref:`kernel creation <kernel-text>`.
jump (int, optional):
The difference between the input and output levels for the convolution.
A non-zero value implies downsampling. Value must be positive and refer to a valid level of
the structured point cloud. Default: 0.
bias (torch.FloatTensor, optional):
optional bias tensor of shape :math:`(\text{out_channel})`.
"""
remaining_kwargs = kwargs.keys() - Spc.KEYS
if len(remaining_kwargs) > 0:
raise TypeError("conv_transpose3d got an unexpected keyword argument "
f"{list(remaining_kwargs)[0]}")
if (weight.shape[0] == 1 and jump == 0):
outputs = input.mm(weight.squeeze(0))
else:
outputs, level = ConvTranspose3dFunction.apply(octrees, point_hierarchies, level, pyramids,
exsum, input, weight, kernel_vectors, jump)
if bias is not None:
outputs += bias.unsqueeze(0)
return outputs, int(level) | r"""Transposed convolution over a structured point cloud. The inputs :math:`X` are mapped to outputs :math:`Y` by the following: .. math:: Y_i = \sum_k w_k \cdot X_{n^T(i,k)} + b \quad\text{for}\; i \in 0,\ldots,|Y|-1, where :math:`w_k` are weights associated with the kernel, and :math:`n^T(i,k)` is the transpose neighborhood function described :ref:`here <neighborhood-text>`. Args: octrees (torch.ByteTensor): :ref:`packed` octrees of shape :math:`(\text{num_bytes})`. See :ref:`octree <spc_octree>`. point_hierarchies (torch.ShortTensor): :ref:`packed` point hierarchies of shape :math:`(\text{num_points})`. See :ref:`point_hierarchies <spc_points>`. level (int): level at which the ``input`` features are associated to. pyramids (torch.IntTensor): Batched tensor containing point hierarchy structural information of shape :math:`(\text{batch_size}, 2, \text{max_level}+2)`. See :ref:`pyramids <spc_pyramids>`. exsum (torch.IntTensor): Tensor containing the :ref:`packed` exclusive sum of the bit counts of individual octrees of shape :math:`(\text{num_bytes} + \text{batch_size})`. See :ref:`exsum <spc_exsum>`. input (torch.FloatTensor): :ref:`packed` input feature data of the octrees, of shape :math:`(\text{total_num_inputs}, \text{in_channels})`, where ``total_num_inputs`` correspond to the number of nodes of the octrees at ``level``, and ``in_channels`` is the input feature dimension (for instance 3 for RGB color). weight (torch.FloatTensor): filter of shape :math:`(\text{kernel_vectors.shape[0]}, \text{in_channels}, \text{self.out_channels})`. kernel_vectors (torch.ShortTensor): A tensor of 3D offsets that define the shape of the kernel, of shape :math:`(\text{num_weights}, 3)`. See :ref:`kernel creation <kernel-text>`. jump (int, optional): The difference between the input and output levels for the convolution. A non-zero value implies downsampling. Value must be positive and refer to a valid level of the structured point cloud. Default: 0. 
bias (torch.FloatTensor, optional): optional bias tensor of shape :math:`(\text{out_channel})`. |
4,696 | from itertools import product
import torch
# Per-device cache of 256-entry popcount lookup tables (byte value -> number
# of set bits), filled lazily by uint8_bits_sum.
# NOTE(review): `global` at module scope is a no-op statement -- presumably a
# leftover; harmless, but could be removed.
global _uint8_bits_sum_luts
_uint8_bits_sum_luts = {}
The provided code snippet includes necessary dependencies for implementing the `uint8_bits_sum` function. Write a Python function `def uint8_bits_sum(uint8_t)` to solve the following problem:
Compute the bits sums for each byte in ByteTensor. Args: uint8_t (torch.ByteTensor): Tensor to process. Return: (torch.LongTensor): Output of same shape and device than `uint8_t`. Examples: >>> uint8_t = torch.ByteTensor([[255, 2], [3, 40]]) >>> uint8_bits_sum(uint8_t) tensor([[8, 1], [2, 2]])
Here is the function:
def uint8_bits_sum(uint8_t):
    """Compute the bits sums for each byte in ByteTensor.
    Args:
        uint8_t (torch.ByteTensor): Tensor to process.
    Return:
        (torch.LongTensor): Output of same shape and device than `uint8_t`.
    Examples:
        >>> uint8_t = torch.ByteTensor([[255, 2], [3, 40]])
        >>> uint8_bits_sum(uint8_t)
        tensor([[8, 1],
                [2, 2]])
    """
    global _uint8_bits_sum_luts
    device = uint8_t.device
    if device not in _uint8_bits_sum_luts:
        # Lazily build the 256-entry popcount table for this device:
        # product([False, True], repeat=8) enumerates every 8-bit pattern in
        # numeric order 0..255, so row i holds the bits of byte value i, and
        # summing each row yields its population count.
        base_bits = torch.tensor(list(product([False, True], repeat=8)),
                                 dtype=torch.bool, device=device)
        lut = torch.sum(base_bits, dim=1)
        _uint8_bits_sum_luts[device] = lut
    else:
        lut = _uint8_bits_sum_luts[device]
    # Index the table with the byte values (cast to long for indexing).
    return lut[uint8_t.long()] | Compute the bits sums for each byte in ByteTensor. Args: uint8_t (torch.ByteTensor): Tensor to process. Return: (torch.LongTensor): Output of same shape and device than `uint8_t`. Examples: >>> uint8_t = torch.ByteTensor([[255, 2], [3, 40]]) >>> uint8_bits_sum(uint8_t) tensor([[8, 1], [2, 2]])
4,697 | import torch
from kaolin.ops.spc.points import quantize_points, points_to_morton, morton_to_points, unbatched_points_to_octree
from kaolin.rep.spc import Spc
def _base_points_to_voxelgrids(points, resolution, return_sparse=False):
r"""Converts points to voxelgrids. This is the base function for both trianglemeshes_to_voxelgrids
and pointclouds_to_voxelgrids. Only points within range [0, 1] are used for voxelization. Points outside
of [0, 1] will be discarded.
Args:
points (torch.Tensor):
Exact batched points with shape
:math: `(\text{batch_size}, \text{P}, \text{3})
resolution (int):
Resolution of output voxelgrids
return_sparse (bool):
Whether to return a sparse voxelgrids or not.
Returns:
(torch.Tensor or torch.FloatTensor):
Exact batched voxelgrids with shape
:math:`(\text{batch_size}, \text{resolution}, \text{resolution}, \test{resolution})`.
If return_sparse == True, sparse tensor is returned.
"""
batch_size = points.shape[0]
num_p = points.shape[1]
device = points.device
dtype = points.dtype
vg_size = (batch_size, resolution, resolution, resolution)
mult = torch.ones(batch_size, device=device, dtype=dtype) * (resolution - 1) # size of (batch_size)
prefix_index = torch.arange(start=0, end=batch_size, device=device, dtype=torch.long).repeat(num_p, 1).T.reshape(-1, 1)
pc_index = torch.round(((points) * mult.view(-1, 1, 1))).long()
pc_index = torch.cat((prefix_index, pc_index.reshape(-1, 3)), dim=1)
pc_index = torch.unique(pc_index, dim=0)
# filter point that is outside of range 0 and resolution - 1
condition = pc_index[:, 1:] <= (resolution - 1)
condition = torch.logical_and(condition, pc_index[:, 1:] >= 0)
row_cond = condition.all(1)
pc_index = pc_index[row_cond, :]
pc_index = pc_index.reshape(-1, 4)
vg = torch.sparse.FloatTensor(
pc_index.T,
torch.ones(pc_index.shape[0], device=pc_index.device, dtype=dtype),
vg_size
)
if not return_sparse:
vg = vg.to_dense().to(dtype)
return vg
The provided code snippet includes necessary dependencies for implementing the `pointclouds_to_voxelgrids` function. Write a Python function `def pointclouds_to_voxelgrids(pointclouds, resolution, origin=None, scale=None, return_sparse=False)` to solve the following problem:
r"""Converts pointclouds to voxelgrids. It separates the 3D space into empty voxelgrid, and for each boxes, if there is a corresponding point, set that voxelgrid to be occupied. Will convert only points in the range `[0, 1]` after been shifted and scaled as following ``(pointclouds - origin) * scale``. Args: pointclouds (torch.Tensor): Exact batched pointclouds, of shape :math:`(\text{batch_size}, \text{num_points}, 3)`. resolution (int): Resolution of output voxelgrids. origin (optional, torch.Tensor): Origin of the voxelgrid in the pointcloud coordinates, of shape :math:`(\text{batch_size}, 3)`. Default: ``torch.min(pointcloud, dim=1)[0]``. scale (optional, torch.Tensor): Scale by which we divide the pointclouds' coordinates, of shape :math:`(\text{batch_size})`. Default: ``torch.max(torch.max(pointclouds, dim=1)[0] - origin, dim=1)[0]``. return_sparse (optional, bool): Whether to return a sparse voxelgrids or not. Default: False. Returns: (torch.Tensor or torch.FloatTensor): Exact batched voxelgrids, of shape :math:`(\text{batch_size}, \text{resolution}, \text{resolution}, \text{resolution})`. If return_sparse is ``True``, a sparse FloatTensor is returned. Example: >>> pointclouds = torch.tensor([[[0, 0, 0], ... [1, 1, 1], ... [2, 2, 2]]], dtype=torch.float) >>> pointclouds_to_voxelgrids(pointclouds, 3) tensor([[[[1., 0., 0.], [0., 0., 0.], [0., 0., 0.]], <BLANKLINE> [[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]], <BLANKLINE> [[0., 0., 0.], [0., 0., 0.], [0., 0., 1.]]]])
Here is the function:
def pointclouds_to_voxelgrids(pointclouds, resolution, origin=None, scale=None, return_sparse=False):
r"""Converts pointclouds to voxelgrids. It separates the 3D space into empty
voxelgrid, and for each boxes, if there is a corresponding point, set that voxelgrid
to be occupied.
Will convert only points in the range `[0, 1]` after been shifted and scaled as following ``(pointclouds - origin) * scale``.
Args:
pointclouds (torch.Tensor):
Exact batched pointclouds, of shape
:math:`(\text{batch_size}, \text{num_points}, 3)`.
resolution (int):
Resolution of output voxelgrids.
origin (optional, torch.Tensor):
Origin of the voxelgrid in the pointcloud coordinates,
of shape :math:`(\text{batch_size}, 3)`.
Default: ``torch.min(pointcloud, dim=1)[0]``.
scale (optional, torch.Tensor):
Scale by which we divide the pointclouds' coordinates,
of shape :math:`(\text{batch_size})`.
Default: ``torch.max(torch.max(pointclouds, dim=1)[0] - origin, dim=1)[0]``.
return_sparse (optional, bool):
Whether to return a sparse voxelgrids or not. Default: False.
Returns:
(torch.Tensor or torch.FloatTensor):
Exact batched voxelgrids, of shape
:math:`(\text{batch_size}, \text{resolution}, \text{resolution}, \text{resolution})`.
If return_sparse is ``True``, a sparse FloatTensor is returned.
Example:
>>> pointclouds = torch.tensor([[[0, 0, 0],
... [1, 1, 1],
... [2, 2, 2]]], dtype=torch.float)
>>> pointclouds_to_voxelgrids(pointclouds, 3)
tensor([[[[1., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
<BLANKLINE>
[[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]],
<BLANKLINE>
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 1.]]]])
"""
if not isinstance(resolution, int):
raise TypeError(f"Expected resolution to be int "
f"but got {type(resolution)}.")
if origin is None:
min_val = torch.min(pointclouds, dim=1)[0]
origin = min_val
if scale is None:
max_val = torch.max(pointclouds, dim=1)[0]
scale = torch.max(max_val - origin, dim=1)[0]
# Normalize pointcloud with origin and scale
pointclouds = (pointclouds - origin.unsqueeze(1)) / scale.view(-1, 1, 1)
vg = _base_points_to_voxelgrids(pointclouds, resolution, return_sparse=return_sparse)
return vg | r"""Converts pointclouds to voxelgrids. It separates the 3D space into empty voxelgrid, and for each boxes, if there is a corresponding point, set that voxelgrid to be occupied. Will convert only points in the range `[0, 1]` after been shifted and scaled as following ``(pointclouds - origin) * scale``. Args: pointclouds (torch.Tensor): Exact batched pointclouds, of shape :math:`(\text{batch_size}, \text{num_points}, 3)`. resolution (int): Resolution of output voxelgrids. origin (optional, torch.Tensor): Origin of the voxelgrid in the pointcloud coordinates, of shape :math:`(\text{batch_size}, 3)`. Default: ``torch.min(pointcloud, dim=1)[0]``. scale (optional, torch.Tensor): Scale by which we divide the pointclouds' coordinates, of shape :math:`(\text{batch_size})`. Default: ``torch.max(torch.max(pointclouds, dim=1)[0] - origin, dim=1)[0]``. return_sparse (optional, bool): Whether to return a sparse voxelgrids or not. Default: False. Returns: (torch.Tensor or torch.FloatTensor): Exact batched voxelgrids, of shape :math:`(\text{batch_size}, \text{resolution}, \text{resolution}, \text{resolution})`. If return_sparse is ``True``, a sparse FloatTensor is returned. Example: >>> pointclouds = torch.tensor([[[0, 0, 0], ... [1, 1, 1], ... [2, 2, 2]]], dtype=torch.float) >>> pointclouds_to_voxelgrids(pointclouds, 3) tensor([[[[1., 0., 0.], [0., 0., 0.], [0., 0., 0.]], <BLANKLINE> [[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]], <BLANKLINE> [[0., 0., 0.], [0., 0., 0.], [0., 0., 1.]]]]) |
4,698 | import torch
from kaolin.ops.spc.points import quantize_points, points_to_morton, morton_to_points, unbatched_points_to_octree
from kaolin.rep.spc import Spc
def quantize_points(x, level):
    r"""Map :math:`[-1, 1]` float coordinates onto the integer grid
    :math:`[0, 2^{\text{level}} - 1]`.

    Coordinates falling outside :math:`[-1, 1]` are clipped onto the grid
    boundary rather than discarded.

    Args:
        x (torch.Tensor): floating point coordinates; the last dimension
            must be 3.
        level (int): level of the grid.

    Returns:
        (torch.ShortTensor): quantized 3D points, same shape as ``x``.
    """
    grid_res = 2 ** level
    # Shift [-1, 1] to [0, 2], then stretch onto [0, grid_res].
    scaled = grid_res * (x + 1.0) / 2.0
    # Clip onto the valid cell range before truncating to integers.
    clipped = torch.clamp(scaled, 0, grid_res - 1.)
    return torch.floor(clipped).short()
def unbatched_points_to_octree(points, level, sorted=False):
    r"""Build an octree from (quantized) 3D points of a single level.

    All points are assumed to share the same frame of reference,
    :math:`[0, 2^\text{level}]`. Note that SPC.points does not satisfy this
    constraint.

    Args:
        points (torch.ShortTensor): quantized 3D points for one specific
            level (unlike full SPC point hierarchies), of shape
            :math:`(\text{num_points}, 3)`.
        level (int): max level of the octree, and the level of ``points``.
        sorted (bool): pass True when ``points`` are already unique and in
            morton order, which skips the dedup/sort pass. Default: False.

    Returns:
        (torch.ByteTensor): the generated octree.
    """
    if not sorted:
        # Canonicalize: drop duplicates, then order points by morton code
        # before handing them to the CUDA octree builder.
        deduped = torch.unique(points.contiguous(), dim=0).contiguous()
        codes = points_to_morton(deduped).contiguous()
        ordered_codes, _ = torch.sort(codes)
        points = morton_to_points(ordered_codes.contiguous())
    return _C.ops.spc.points_to_octree(points.contiguous(), level)
def points_to_morton(points):
    r"""Convert (quantized) 3D points to morton codes.

    Args:
        points (torch.ShortTensor):
            Quantized 3D points. This is not exactly like SPC points hierarchies
            as this is only the data for a specific level,
            of shape :math:`(\text{num_points}, 3)`.

    Returns:
        (torch.LongTensor):
            The morton code of the points, of shape :math:`(\text{num_points})`.

    Examples:
        >>> inputs = torch.tensor([
        ...     [0, 0, 0],
        ...     [0, 0, 1],
        ...     [0, 0, 2],
        ...     [0, 0, 3],
        ...     [0, 1, 0]], device='cuda', dtype=torch.int16)
        >>> points_to_morton(inputs)
        tensor([0, 1, 8, 9, 2], device='cuda:0')
    """
    # The CUDA kernel works on a flat (N, 3) layout; restore leading dims after.
    batch_shape = points.shape[:-1]
    flat_points = points.reshape(-1, 3)
    morton = _C.ops.spc.points_to_morton_cuda(flat_points.contiguous())
    return morton.reshape(batch_shape)
def morton_to_points(morton):
    r"""Convert morton codes to points.

    Args:
        morton (torch.LongTensor): The morton codes of quantized 3D points,
            of shape :math:`(\text{num_points})`.

    Returns:
        (torch.ShortInt):
            The points quantized coordinates,
            of shape :math:`(\text{num_points}, 3)`.

    Examples:
        >>> inputs = torch.tensor([0, 1, 8, 9, 2], device='cuda')
        >>> morton_to_points(inputs)
        tensor([[0, 0, 0],
                [0, 0, 1],
                [0, 0, 2],
                [0, 0, 3],
                [0, 1, 0]], device='cuda:0', dtype=torch.int16)
    """
    # Flatten for the CUDA kernel, then append the coordinate axis (3) to the
    # original shape on the way out.
    out_shape = list(morton.shape) + [3]
    flat_morton = morton.reshape(-1)
    points = _C.ops.spc.morton_to_points_cuda(flat_morton.contiguous())
    return points.reshape(out_shape)
class Spc(object):
    """Data class holding all :ref:`Structured Point Cloud (SPC)<spc>` information.

    This class supports batching through :ref:`packed<packed>` representation:
    a single Spc object can pack multiple SPC structures of variable sizes.

    SPC data structures are represented through the combination of various tensors
    detailed below:

    ``octrees`` compress the information required to build a full SPC.
    In practice, they are a low level structure which also constitute the
    :ref:`core part<spc_octree>` of the SPC data structure.

    ``octrees`` are kept as a torch.ByteTensor, where each byte represents a single
    octree parent cell, and each bit represents the occupancy of a child octree cell.
    e.g: 8 bits for 8 cells.

    Bits describe the octree cells in Morton Order::

        . . . . . . . .
        | . 3 . 7 | .                3 7
        | . . . . . . . .    ===>   1 5
        | | . 1 . | 5 .
        | | . . . . . . . .
        | | | | |                    2 6
        . .|. . | . . . |    ===>   0 4
         .| 2 |. 6 . |
        . . | . . . . . |
         . | 0 . 4 . |
          . . . . . . . .

    If a cell is occupied, an additional cell byte may be generated in the next
    level, up till the argument ``level``.

    For example, a ``SPC.octrees`` field may look as follows::

        tensor([255, 128, 64, 32, 16, 8, 4, 2, 23], dtype=torch.uint8)

    Here "octrees" represents an octree of 9 nodes.
    The binary representation should be interpreted as follows::

        Level #1, Path*,   11111111 (All cells are occupied, therefore 8 bytes are allocated for level 2)
        Level #2, Path*-1, 10000000
        Level #2, Path*-2, 01000000
        Level #2, Path*-3, 00100000
        Level #2, Path*-4, 00010000
        Level #2, Path*-5, 00001000
        Level #2, Path*-6, 00000100
        Level #2, Path*-7, 00000010
        Level #2, Path*-8, 00010111

    ``lengths`` is a tensor of integers required to support batching. Since we
    assume a packed representation, all octree cells are shaped as a single
    stacked 1D tensor. ``lengths`` specifies the number of cells (bytes) each
    octree uses.

    ``features`` represent an optional per-point feature vector.
    When ``features`` is not ``None``, a feature is kept for each point at the
    highest-resolution level in the octree.

    ``max_level`` is an integer which specifies how many recursive levels an
    octree should have.

    ``point_hierarchies``, ``pyramids``, ``exsum`` are auxiliary structures,
    which are generated lazily upon request and enable efficient indexing to
    SPC entries.
    """
    KEYS = {'octrees', 'lengths', 'max_level', 'pyramids', 'exsum', 'point_hierarchies'}

    def __init__(self, octrees, lengths, max_level=None, pyramids=None,
                 exsum=None, point_hierarchies=None, features=None):
        """Builds an Spc from packed octree bytes and per-octree lengths.

        Args:
            octrees (torch.ByteTensor): packed octree bytes, 1D.
            lengths (torch.IntTensor): bytes used by each octree in the pack, 1D.
            max_level (optional, int): depth of the octrees.
            pyramids (optional, torch.IntTensor): per-level point counts and offsets,
                of shape (batch_size, 2, max_level + 2); must be on CPU.
            exsum (optional, torch.IntTensor): exclusive sum of the octree bytes'
                set bits, of shape (num_bytes + batch_size); same device as octrees.
            point_hierarchies (optional, torch.ShortTensor): point hierarchy,
                of shape (num_nodes, 3); same device as octrees.
            features (optional, torch.Tensor): per-point features at the highest
                level of detail; same device as octrees.
        """
        assert (isinstance(octrees, torch.Tensor) and octrees.dtype == torch.uint8 and
                octrees.ndim == 1), "octrees must be a 1D ByteTensor."
        assert (isinstance(lengths, torch.Tensor) and lengths.dtype == torch.int and
                lengths.ndim == 1), "lengths must be a 1D IntTensor."
        assert (max_level is None) or (isinstance(max_level, int)), \
            "max_level must an int."
        if pyramids is not None:
            assert isinstance(pyramids, torch.Tensor) and pyramids.dtype == torch.int, \
                "pyramids must be an IntTensor."
            assert (pyramids.ndim == 3 and
                    pyramids.shape[0] == lengths.shape[0] and
                    pyramids.shape[1] == 2 and
                    ((max_level is None) or (pyramids.shape[2] == max_level + 2))), \
                "pyramids must be of shape (batch_size, 2, max_level + 2)."
            assert not pyramids.is_cuda, "pyramids must be on cpu."
        if exsum is not None:
            assert isinstance(exsum, torch.Tensor) and exsum.dtype == torch.int, \
                "exsum must be an IntTensor."
            assert (exsum.ndim == 1 and
                    exsum.shape[0] == octrees.shape[0] + lengths.shape[0]), \
                "exsum must be of shape (num_bytes + batch_size)."
            assert exsum.device == octrees.device, \
                "exsum must be on the same device than octrees."
        if point_hierarchies is not None:
            assert isinstance(point_hierarchies, torch.Tensor) and \
                point_hierarchies.dtype == torch.short, \
                "point_hierarchies must be a ShortTensor."
            assert (point_hierarchies.ndim == 2 and
                    point_hierarchies.shape[1] == 3), \
                "point_hierarchies must be of shape (num_nodes, 3)."
            assert point_hierarchies.device == octrees.device, \
                "point_hierarchies must be on the same device than octrees."
        if features is not None:
            assert isinstance(features, torch.Tensor), \
                "features must be a torch.Tensor"
            assert features.device == octrees.device, \
                "features must be on the same device as octrees."
        self.octrees = octrees
        self.lengths = lengths
        self._max_level = max_level
        self._pyramids = pyramids
        self._exsum = exsum
        self._point_hierarchies = point_hierarchies
        self.features = features

    @classmethod
    def make_dense(cls, level, device='cuda'):
        """Creates a dense, fully occupied Spc object.

        The Spc will have ``level`` levels of detail.

        Args:
            level (int): Number of levels to use for the dense Spc.
            device (torch.device): Torch device to keep the spc octree.

        Return:
            (kaolin.rep.Spc): a new fully occupied ``Spc``.
        """
        from ..ops.spc import create_dense_spc  # local import to break circular dependency
        octree, lengths = create_dense_spc(level, device)  # Create a single entry batch
        return cls(octrees=octree, lengths=lengths)

    @classmethod
    def from_features(cls, feature_grids, masks=None):
        r"""Creates a sparse Spc object from a feature grid.

        Args:
            feature_grids (torch.Tensor):
                The sparse 3D feature grids, of shape
                :math:`(\text{batch_size}, \text{feature_dim}, X, Y, Z)`
            masks (optional, torch.BoolTensor):
                The topology mask, showing where the features are,
                of shape :math:`(\text{batch_size}, X, Y, Z)`.
                Default: A feature is determined when not full of zeros.

        Return:
            (kaolin.rep.Spc): an ``Spc`` with lengths of shape
            :math:`(\text{batch_size})`, an octree of size :math:`(\text{num_nodes})`,
            and a features field of the same dtype as ``feature_grids`` and of shape
            :math:`(\text{num_features}, \text{feature_dim})`.
        """
        from ..ops.spc import feature_grids_to_spc  # local import to break circular dependency
        octrees, lengths, coalescent_features = feature_grids_to_spc(feature_grids, masks=masks)
        return cls(octrees=octrees, lengths=lengths, features=coalescent_features)

    # TODO(cfujitsang): could be interesting to separate into multiple functions
    def _apply_scan_octrees(self):
        """Fills the lazy ``max_level`` / ``pyramids`` / ``exsum`` caches."""
        from ..ops.spc import scan_octrees  # local import to break circular dependency
        max_level, pyramids, exsum = scan_octrees(self.octrees, self.lengths)
        self._max_level = max_level
        self._pyramids = pyramids
        self._exsum = exsum

    def _apply_generate_points(self):
        """Fills the lazy ``point_hierarchies`` cache."""
        from ..ops.spc import generate_points  # local import to break circular dependency
        self._point_hierarchies = generate_points(self.octrees, self.pyramids, self.exsum)

    # NOTE(review): the following accessors are properties — the rest of the
    # class reads them as attributes (e.g. self.pyramids in num_points and
    # _apply_generate_points, getattr in to_dict).
    @property
    def max_level(self):
        """int: depth of the octrees, computed lazily on first access."""
        if self._max_level is None:
            self._apply_scan_octrees()
        return self._max_level

    @property
    def pyramids(self):
        """torch.IntTensor: per-level point counts and offsets, computed lazily."""
        if self._pyramids is None:
            self._apply_scan_octrees()
        return self._pyramids

    @property
    def exsum(self):
        """torch.IntTensor: exclusive sum of octree bit counts, computed lazily."""
        if self._exsum is None:
            self._apply_scan_octrees()
        return self._exsum

    @property
    def point_hierarchies(self):
        """torch.ShortTensor: the point hierarchy, computed lazily."""
        if self._point_hierarchies is None:
            self._apply_generate_points()
        return self._point_hierarchies

    @classmethod
    def from_list(cls, octrees_list):
        """Generate an Spc from a list of octrees.

        Args:
            octrees_list (list of torch.ByteTensor):
                list containing multiple 1D torch.ByteTensor,
                each representing an octree.

        Return:
            (kaolin.rep.Spc): a new ``Spc``.
        """
        octrees, lengths = list_to_packed(
            [octree.reshape(-1, 1) for octree in octrees_list])
        return cls(octrees.reshape(-1).contiguous(), lengths.reshape(-1).int())

    def to(self, device, non_blocking=False,
           memory_format=torch.preserve_format):
        """Returns an Spc with all device-resident tensors moved to ``device``.

        ``pyramids`` always stays on CPU; cached auxiliary structures and
        ``features`` are carried over (previously ``features`` was dropped).
        """
        _octrees = self.octrees.to(device=device,
                                   non_blocking=non_blocking,
                                   memory_format=memory_format)
        # torch tensor.to() returns self if device / dtype already match
        if _octrees.data_ptr() == self.octrees.data_ptr():
            return self
        else:
            if self._exsum is not None:
                _exsum = self._exsum.to(device=device,
                                        non_blocking=non_blocking,
                                        memory_format=memory_format)
            else:
                _exsum = None
            if self._point_hierarchies is not None:
                _point_hierarchies = self._point_hierarchies.to(
                    device=device,
                    non_blocking=non_blocking,
                    memory_format=memory_format)
            else:
                _point_hierarchies = None
            if self.features is not None:
                _features = self.features.to(device=device,
                                             non_blocking=non_blocking,
                                             memory_format=memory_format)
            else:
                _features = None
            return Spc(_octrees, self.lengths, self._max_level, self._pyramids,
                       _exsum, _point_hierarchies, _features)

    def cuda(self, device='cuda', non_blocking=False,
             memory_format=torch.preserve_format):
        return self.to(device=device, non_blocking=non_blocking,
                       memory_format=memory_format)

    def cpu(self, memory_format=torch.preserve_format):
        return self.to(device='cpu', memory_format=memory_format)

    @property
    def batch_size(self):
        """int: number of SPC structures packed in this object."""
        return self.lengths.shape[0]

    def to_dict(self, keys=None):
        """Returns a dict of the fields named in ``keys`` (default: all of KEYS).

        Note: with ``keys=None`` the lazy fields (pyramids, exsum, ...) are
        materialized by the property accessors.
        """
        if keys is None:
            return {k: getattr(self, k) for k in self.KEYS}
        else:
            return {k: getattr(self, k) for k in keys}

    def num_points(self, lod):
        r"""Returns how many points the SPC holds at a given level of detail.

        Args:
            lod (int):
                Index of a level of detail.
                Level 0 is considered the root and always holds a single point,
                level 1 holds up to :math:`(\text{num_points}=8)` points,
                level 2 holds up to :math:`(\text{num_points}=8^{2})`, and so forth.

        Return:
            (torch.Tensor): The number of points each SPC entry holds for the given level of detail.
        """
        return self.pyramids[:, 0, lod]
The provided code snippet includes necessary dependencies for implementing the `unbatched_pointcloud_to_spc` function. Write a Python function `def unbatched_pointcloud_to_spc(pointcloud, level, features=None)` to solve the following problem:
r"""This function takes as input a single point-cloud - a set of continuous coordinates in 3D, and coverts it into a :ref:`Structured Point Cloud (SPC)<spc>`, a compressed octree representation where the point cloud coordinates are quantized to integer coordinates. Point coordinates are expected to be normalized to the range :math:`[-1, 1]`. If a point is out of the range :math:`[-1, 1]` it will be clipped to it. If ``features`` are specified, the current implementation will average features of points that inhabit the same quantized bucket. Args: pointclouds (torch.Tensor): An unbatched pointcloud, of shape :math:`(\text{num_points}, 3)`. Coordinates are expected to be normalized to the range :math:`[-1, 1]`. level (int): Maximum number of levels to use in octree hierarchy. features (optional, torch.Tensor): Feature vector containing information per point, of shape :math:`(\text{num_points}, \text{feat_dim})`. Returns: (kaolin.rep.Spc): A Structured Point Cloud (SPC) object, holding a single-item batch.
Here is the function:
def unbatched_pointcloud_to_spc(pointcloud, level, features=None):
    r"""This function takes as input a single point-cloud - a set of continuous coordinates in 3D,
    and converts it into a :ref:`Structured Point Cloud (SPC)<spc>`, a compressed octree representation where
    the point cloud coordinates are quantized to integer coordinates.

    Point coordinates are expected to be normalized to the range :math:`[-1, 1]`.
    If a point is out of the range :math:`[-1, 1]` it will be clipped to it.

    If ``features`` are specified, the current implementation will average features
    of points that inhabit the same quantized bucket.

    Args:
        pointcloud (torch.Tensor):
            An unbatched pointcloud, of shape :math:`(\text{num_points}, 3)`.
            Coordinates are expected to be normalized to the range :math:`[-1, 1]`.
        level (int):
            Maximum number of levels to use in octree hierarchy.
        features (optional, torch.Tensor):
            Feature vector containing information per point, of shape
            :math:`(\text{num_points}, \text{feat_dim})`.

    Returns:
        (kaolin.rep.Spc):
            A Structured Point Cloud (SPC) object, holding a single-item batch.
    """
    points = quantize_points(pointcloud.contiguous(), level)

    # Avoid duplications if cells occupy more than one point
    unique, unique_keys, unique_counts = torch.unique(points.contiguous(), dim=0,
                                                      return_inverse=True, return_counts=True)
    # Create octree hierarchy (requires unique points in morton order)
    morton, keys = torch.sort(points_to_morton(unique.contiguous()).contiguous())
    points = morton_to_points(morton.contiguous())
    octree = unbatched_points_to_octree(points, level, sorted=True)

    # Organize features for octree leaf nodes
    feat = None
    if features is not None:
        # Feature collision of multiple points sharing the same cell is consolidated here.
        # Assumes mean averaging
        feat_dtype = features.dtype
        is_fp = features.is_floating_point()
        # Promote to double precision dtype to avoid rounding errors
        feat = torch.zeros(unique.shape[0], features.shape[1], device=features.device).double()
        feat = feat.index_add_(0, unique_keys, features.double()) / unique_counts[..., None].double()
        if not is_fp:
            feat = torch.round(feat)
        # Cast back to the input dtype unconditionally: previously this happened
        # only for integer features, so float32 inputs leaked out as float64.
        feat = feat.to(feat_dtype)
        # Reorder features to match the morton-sorted points
        feat = feat[keys]

    # A full SPC requires octree hierarchy + auxiliary data structures
    lengths = torch.tensor([len(octree)], dtype=torch.int32)  # Single entry batch
    return Spc(octrees=octree, lengths=lengths, features=feat)
4,699 | import torch
from kaolin import _C
from ..mesh.trianglemesh import _unbatched_subdivide_vertices
from .pointcloud import _base_points_to_voxelgrids
def _unbatched_subdivide_vertices(vertices, faces, resolution):
r"""Subdivide the triangle mesh's vertices so that every existing edge's length is shorter
or equal to :math:`(\frac{resolution - 1}{(resolution^2)})^2`.
It creates a new vertex in the middle of an existing edge,
if the length of the edge is larger than :math:`(\frac{resolution - 1}{(resolution^2)})^2`.
Note: it does not add faces between newly added vertices.
It only addes new vertices. This function is mainly used in
:py:meth:`kaolin.ops.conversions.trianglemesh.trianglemesh_to_voxelgrid`.
Args:
vertices (torch.tensor): unbatched vertices of shape (V, 3) of mesh.
faces (torch.LongTensor): unbatched faces of shape (F, 3) of mesh.
resolution (int): target resolution to upsample to.
Returns:
(torch.Tensor): upsampled vertices.
Example:
>>> vertices = torch.tensor([[0, 0, 0],
... [1, 0, 0],
... [0, 0, 1]], dtype=torch.float)
>>> faces = torch.tensor([[0, 1, 2]], dtype=torch.long)
>>> _unbatched_subdivide_vertices(vertices, faces, 2)
tensor([[0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.1250],
[0.0000, 0.0000, 0.2500],
[0.0000, 0.0000, 0.3750],
[0.0000, 0.0000, 0.5000],
[0.0000, 0.0000, 0.6250],
[0.0000, 0.0000, 0.7500],
[0.0000, 0.0000, 0.8750],
[0.0000, 0.0000, 1.0000],
[0.1250, 0.0000, 0.0000],
[0.1250, 0.0000, 0.1250],
[0.1250, 0.0000, 0.2500],
[0.1250, 0.0000, 0.3750],
[0.1250, 0.0000, 0.5000],
[0.1250, 0.0000, 0.6250],
[0.1250, 0.0000, 0.7500],
[0.1250, 0.0000, 0.8750],
[0.2500, 0.0000, 0.0000],
[0.2500, 0.0000, 0.1250],
[0.2500, 0.0000, 0.2500],
[0.2500, 0.0000, 0.3750],
[0.2500, 0.0000, 0.5000],
[0.2500, 0.0000, 0.6250],
[0.2500, 0.0000, 0.7500],
[0.3750, 0.0000, 0.0000],
[0.3750, 0.0000, 0.1250],
[0.3750, 0.0000, 0.2500],
[0.3750, 0.0000, 0.3750],
[0.3750, 0.0000, 0.5000],
[0.3750, 0.0000, 0.6250],
[0.5000, 0.0000, 0.0000],
[0.5000, 0.0000, 0.1250],
[0.5000, 0.0000, 0.2500],
[0.5000, 0.0000, 0.3750],
[0.5000, 0.0000, 0.5000],
[0.6250, 0.0000, 0.0000],
[0.6250, 0.0000, 0.1250],
[0.6250, 0.0000, 0.2500],
[0.6250, 0.0000, 0.3750],
[0.7500, 0.0000, 0.0000],
[0.7500, 0.0000, 0.1250],
[0.7500, 0.0000, 0.2500],
[0.8750, 0.0000, 0.0000],
[0.8750, 0.0000, 0.1250],
[1.0000, 0.0000, 0.0000]])
"""
device = vertices.device
assert resolution > 1
min_edge_length = ((resolution - 1) / (resolution ** 2))**2
v1 = torch.index_select(vertices, 0, faces[:, 0]) # shape of (B, F, 3)
v2 = torch.index_select(vertices, 0, faces[:, 1])
v3 = torch.index_select(vertices, 0, faces[:, 2])
while True:
edge1_length = torch.sum((v1 - v2)**2, dim=1).unsqueeze(1) # shape (B, F, 1)
edge2_length = torch.sum((v2 - v3)**2, dim=1).unsqueeze(1)
edge3_length = torch.sum((v3 - v1)**2, dim=1).unsqueeze(1)
total_edges_length = torch.cat((edge1_length, edge2_length, edge3_length), dim=1)
max_edges_length = torch.max(total_edges_length, dim=1)[0]
# Choose the edges that is greater than the min_edge_length
keep = max_edges_length > min_edge_length
# if all the edges are smaller than the min_edge_length, stop upsampling
K = torch.sum(keep)
if K == 0:
break
V = vertices.shape[0]
v1 = v1[keep] # shape of (K, 3), where K is number of edges that has been kept
v2 = v2[keep]
v3 = v3[keep]
# New vertices is placed at the middle of the edge
v4 = (v1 + v3) / 2 # shape of (K, 3), where K is number of edges that has been kept
v5 = (v1 + v2) / 2
v6 = (v2 + v3) / 2
# update vertices
vertices = torch.cat((vertices, v4, v5, v6))
# Get rid of repeated vertices
vertices, unique_indices = torch.unique(vertices, return_inverse=True, dim=0)
# Update v1, v2, v3
v1 = torch.cat((v1, v2, v4, v3))
v2 = torch.cat((v4, v5, v5, v4))
v3 = torch.cat((v5, v6, v6, v6))
return vertices
def _base_points_to_voxelgrids(points, resolution, return_sparse=False):
r"""Converts points to voxelgrids. This is the base function for both trianglemeshes_to_voxelgrids
and pointclouds_to_voxelgrids. Only points within range [0, 1] are used for voxelization. Points outside
of [0, 1] will be discarded.
Args:
points (torch.Tensor):
Exact batched points with shape
:math: `(\text{batch_size}, \text{P}, \text{3})
resolution (int):
Resolution of output voxelgrids
return_sparse (bool):
Whether to return a sparse voxelgrids or not.
Returns:
(torch.Tensor or torch.FloatTensor):
Exact batched voxelgrids with shape
:math:`(\text{batch_size}, \text{resolution}, \text{resolution}, \test{resolution})`.
If return_sparse == True, sparse tensor is returned.
"""
batch_size = points.shape[0]
num_p = points.shape[1]
device = points.device
dtype = points.dtype
vg_size = (batch_size, resolution, resolution, resolution)
mult = torch.ones(batch_size, device=device, dtype=dtype) * (resolution - 1) # size of (batch_size)
prefix_index = torch.arange(start=0, end=batch_size, device=device, dtype=torch.long).repeat(num_p, 1).T.reshape(-1, 1)
pc_index = torch.round(((points) * mult.view(-1, 1, 1))).long()
pc_index = torch.cat((prefix_index, pc_index.reshape(-1, 3)), dim=1)
pc_index = torch.unique(pc_index, dim=0)
# filter point that is outside of range 0 and resolution - 1
condition = pc_index[:, 1:] <= (resolution - 1)
condition = torch.logical_and(condition, pc_index[:, 1:] >= 0)
row_cond = condition.all(1)
pc_index = pc_index[row_cond, :]
pc_index = pc_index.reshape(-1, 4)
vg = torch.sparse.FloatTensor(
pc_index.T,
torch.ones(pc_index.shape[0], device=pc_index.device, dtype=dtype),
vg_size
)
if not return_sparse:
vg = vg.to_dense().to(dtype)
return vg
The provided code snippet includes necessary dependencies for implementing the `trianglemeshes_to_voxelgrids` function. Write a Python function `def trianglemeshes_to_voxelgrids( vertices, faces, resolution, origin=None, scale=None, return_sparse=False )` to solve the following problem:
r"""Converts meshes to surface voxelgrids of a given resolution. It first upsamples triangle mesh's vertices to given resolution, then it performs a box test. If a voxel contains a triangle vertex, set that voxel to 1. Vertex will be offset and scaled as following: :math:`\text{normalized_vertices} = (\text{vertices} - \text{origin}) / \text{scale}` the voxelgrids will only be generated in the range [0, 1] of normalized_vertices. Args: vertices (torch.tensor): Batched vertices of the input meshes, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`. faces (torch.tensor): Unbatched faces of the meshes, of shape :math:`(\text{num_faces}, 3)`. resolution (int): desired resolution of generated voxelgrid. origin (torch.tensor): Origin of the voxelgrid in the mesh coordinates, of shape :math:`(\text{batch_size}, 3)`. Default: ``torch.min(vertices, dim=1)[0]``. scale (torch.tensor): The scale by which we divide the vertex position, of shape :math:`(\text{batch_size})`. Default: ``torch.max(torch.max(vertices, dim=1)[0] - origin, dim=1)[0]``. return_sparse (optional, bool): If True, sparse tensor is returned. Default: False. Returns: (torch.Tensor or torch.FloatTensor): Binary batched voxelgrids, of shape :math:`(\text{batch_size}, \text{resolution}, \text{resolution}, \text{resolution})`. If return_sparse is True, sparse tensor is returned. Example: >>> vertices = torch.tensor([[[0, 0, 0], ... [1, 0, 0], ... [0, 0, 1]]], dtype=torch.float) >>> faces = torch.tensor([[0, 1, 2]], dtype=torch.long) >>> origin = torch.zeros((1, 3)) >>> scale = torch.ones((1)) >>> trianglemeshes_to_voxelgrids(vertices, faces, 3, origin, scale) tensor([[[[1., 1., 1.], [0., 0., 0.], [0., 0., 0.]], <BLANKLINE> [[1., 1., 0.], [0., 0., 0.], [0., 0., 0.]], <BLANKLINE> [[1., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]])
Here is the function:
def trianglemeshes_to_voxelgrids(
    vertices,
    faces,
    resolution,
    origin=None,
    scale=None,
    return_sparse=False
):
    r"""Converts meshes to surface voxelgrids of a given resolution.

    The triangle mesh's vertices are first upsampled to the given resolution,
    then a box test is performed: any voxel containing a (subdivided) vertex is
    set to 1. Vertices are offset and scaled as follows:

    :math:`\text{normalized_vertices} = (\text{vertices} - \text{origin}) / \text{scale}`

    The voxelgrids are only generated in the range [0, 1] of normalized_vertices.

    Args:
        vertices (torch.tensor): Batched vertices of the input meshes, of shape
                                 :math:`(\text{batch_size}, \text{num_vertices}, 3)`.
        faces (torch.tensor): Unbatched faces of the meshes, of shape
                              :math:`(\text{num_faces}, 3)`.
        resolution (int): desired resolution of generated voxelgrid.
        origin (torch.tensor): Origin of the voxelgrid in the mesh coordinates,
                               of shape :math:`(\text{batch_size}, 3)`.
                               Default: ``torch.min(vertices, dim=1)[0]``.
        scale (torch.tensor): The scale by which we divide the vertex position,
                              of shape :math:`(\text{batch_size})`.
                              Default: ``torch.max(torch.max(vertices, dim=1)[0] - origin, dim=1)[0]``.
        return_sparse (optional, bool): If True, a sparse tensor is returned. Default: False.

    Returns:
        (torch.Tensor or torch.FloatTensor):
            Binary batched voxelgrids, of shape
            :math:`(\text{batch_size}, \text{resolution}, \text{resolution}, \text{resolution})`.
            If return_sparse is True, a sparse tensor is returned.

    Example:
        >>> vertices = torch.tensor([[[0, 0, 0],
        ...                           [1, 0, 0],
        ...                           [0, 0, 1]]], dtype=torch.float)
        >>> faces = torch.tensor([[0, 1, 2]], dtype=torch.long)
        >>> origin = torch.zeros((1, 3))
        >>> scale = torch.ones((1))
        >>> trianglemeshes_to_voxelgrids(vertices, faces, 3, origin, scale)
        tensor([[[[1., 1., 1.],
                  [0., 0., 0.],
                  [0., 0., 0.]],
        <BLANKLINE>
                 [[1., 1., 0.],
                  [0., 0., 0.],
                  [0., 0., 0.]],
        <BLANKLINE>
                 [[1., 0., 0.],
                  [0., 0., 0.],
                  [0., 0., 0.]]]])
    """
    if not isinstance(resolution, int):
        raise TypeError(f"Expected resolution to be int "
                        f"but got {type(resolution)}.")

    # Default origin / scale normalize each mesh into the unit cube
    if origin is None:
        origin = torch.min(vertices, dim=1)[0]
    if scale is None:
        scale = torch.max(torch.max(vertices, dim=1)[0] - origin, dim=1)[0]

    normalized = (vertices - origin.unsqueeze(1)) / scale.view(-1, 1, 1)

    voxelgrids = []
    for mesh_vertices in normalized:
        # Densify the surface so every voxel crossed by a face holds a vertex
        dense_points = _unbatched_subdivide_vertices(mesh_vertices, faces, resolution)
        voxelgrids.append(_base_points_to_voxelgrids(
            dense_points.unsqueeze(0), resolution, return_sparse=return_sparse))

    return torch.cat(voxelgrids)
4,700 | import torch
from kaolin import _C
from ..mesh.trianglemesh import _unbatched_subdivide_vertices
from .pointcloud import _base_points_to_voxelgrids
The provided code snippet includes necessary dependencies for implementing the `unbatched_mesh_to_spc` function. Write a Python function `def unbatched_mesh_to_spc(face_vertices, level)` to solve the following problem:
r"""Convert a mesh into a :ref:`Structured Point Cloud octree<spc_octree>`. The conversion is using a conservative rasterization process, the resulting octree is fully wrapping the mesh. .. note:: The mesh will be voxelized in the range :math:`[-1, 1]` of the vertices coordinate system. Args: face_vertices (torch.LongTensor): The vertices indexed by faces (see :func:`kaolin.ops.mesh.index_vertices_by_faces`), of shape :math:`(\text{num_faces}, 3, 3)`. level (int): number of levels in the returned SPC. Returns: (torch.ByteTensor, torch.LongTensor, torch.FloatTensor): - The generated octree, of size :math:`(\text{num_nodes})`, where :math:`\text{num_nodes}` depends on the geometry of the input mesh. - The indices of the face corresponding to each voxel at the highest level, of shape :math:`(\text{num_voxels})`. - The barycentric coordinates of the voxel with respect to corresponding face of shape :math:`(\text{num_vertices}, 2)`.
Here is the function:
def unbatched_mesh_to_spc(face_vertices, level):
    r"""Convert a mesh into a :ref:`Structured Point Cloud octree<spc_octree>`.

    The conversion uses a conservative rasterization process, so the resulting
    octree fully wraps the mesh.

    .. note::

        The mesh will be voxelized in the range :math:`[-1, 1]` of the vertices coordinate system.

    Args:
        face_vertices (torch.LongTensor):
            The vertices indexed by faces (see :func:`kaolin.ops.mesh.index_vertices_by_faces`),
            of shape :math:`(\text{num_faces}, 3, 3)`.
        level (int): number of levels in the returned SPC.

    Returns:
        (torch.ByteTensor, torch.LongTensor, torch.FloatTensor):

            - The generated octree, of size :math:`(\text{num_nodes})`,
              where :math:`\text{num_nodes}` depends on the geometry of the input mesh.
            - The indices of the face corresponding to each voxel at the highest level,
              of shape :math:`(\text{num_voxels})`.
            - The barycentric coordinates of the voxel with respect to the corresponding face,
              of shape :math:`(\text{num_vertices}, 2)`.
    """
    # Guard: only triangles are supported (last dim holds the 3 vertex coords
    # of each of the 3 triangle corners).
    vertices_per_face = face_vertices.shape[-1]
    if vertices_per_face != 3:
        raise NotImplementedError("unbatched_mesh_to_spc is only implemented for triangle meshes")
    contiguous_faces = face_vertices.contiguous()
    return _C.ops.conversions.mesh_to_spc_cuda(contiguous_faces, level)
4,701 | import torch
def _unbatched_marching_tetrahedra(vertices, tets, sdf, return_tet_idx):
    """unbatched marching tetrahedra.

    Extracts the triangle mesh of the SDF zero level-set from a single
    (unbatched) tetrahedral grid. Refer to :func:`marching_tetrahedra`.

    Relies on module-level lookup tables not defined in this block:
    ``base_tet_edges``, ``v_id``, ``num_triangles_table``, ``triangle_table``
    and the helper ``_sort_edges`` -- presumably the standard marching
    tetrahedra case tables; verify against their definitions.
    """
    device = vertices.device
    # Index bookkeeping is done without gradients; only the vertex
    # interpolation below (outside this block) stays differentiable.
    with torch.no_grad():
        # Per-vertex occupancy (inside/outside), then gathered per tet corner.
        occ_n = sdf > 0
        occ_fx4 = occ_n[tets.reshape(-1)].reshape(-1, 4)
        occ_sum = torch.sum(occ_fx4, -1)
        # A tet crosses the surface iff its corners are not all on one side.
        valid_tets = (occ_sum > 0) & (occ_sum < 4)
        occ_sum = occ_sum[valid_tets]

        # find all vertices
        # Each valid tet contributes its 6 edges (pairs of corner indices).
        all_edges = tets[valid_tets][:, base_tet_edges.to(device)].reshape(-1, 2)
        all_edges = _sort_edges(all_edges)
        unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)
        unique_edges = unique_edges.long()
        # Edges whose two endpoints straddle the surface get a new mesh vertex.
        mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1
        # Map every crossing edge to its output-vertex index (-1 for non-crossing).
        mapping = torch.ones((unique_edges.shape[0]), dtype=torch.long, device=device) * -1
        mapping[mask_edges] = torch.arange(mask_edges.sum(), dtype=torch.long, device=device)
        idx_map = mapping[idx_map]

        interp_v = unique_edges[mask_edges]
    # Linear interpolation of the crossing point along each edge, weighted by
    # the SDF magnitude at the opposite endpoint (differentiable w.r.t.
    # vertices and sdf).
    edges_to_interp = vertices[interp_v.reshape(-1)].reshape(-1, 2, 3)
    edges_to_interp_sdf = sdf[interp_v.reshape(-1)].reshape(-1, 2, 1)
    edges_to_interp_sdf[:, -1] *= -1
    denominator = edges_to_interp_sdf.sum(1, keepdim=True)
    edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator
    verts = (edges_to_interp * edges_to_interp_sdf).sum(1)

    # One row of 6 potential output vertices per valid tet.
    idx_map = idx_map.reshape(-1, 6)

    # Encode each tet's occupancy pattern as a table index.
    tetindex = (occ_fx4[valid_tets] * v_id.to(device).unsqueeze(0)).sum(-1)
    num_triangles = num_triangles_table.to(device)[tetindex]
    triangle_table_device = triangle_table.to(device)

    # Generate triangle indices
    # Tets emit either 1 or 2 triangles depending on their occupancy case.
    faces = torch.cat((
        torch.gather(input=idx_map[num_triangles == 1], dim=1,
                     index=triangle_table_device[tetindex[num_triangles == 1]][:, :3]).reshape(-1, 3),
        torch.gather(input=idx_map[num_triangles == 2], dim=1,
                     index=triangle_table_device[tetindex[num_triangles == 2]][:, :6]).reshape(-1, 3),
    ), dim=0)

    if return_tet_idx:
        # Source-tet index per output face, in the same order as `faces`
        # (1-triangle tets first, then each 2-triangle tet repeated twice).
        tet_idx = torch.arange(tets.shape[0], device=device)[valid_tets]
        tet_idx = torch.cat((tet_idx[num_triangles == 1], tet_idx[num_triangles ==
                            2].unsqueeze(-1).expand(-1, 2).reshape(-1)), dim=0)
        return verts, faces, tet_idx
    return verts, faces
The provided code snippet includes necessary dependencies for implementing the `marching_tetrahedra` function. Write a Python function `def marching_tetrahedra(vertices, tets, sdf, return_tet_idx=False)` to solve the following problem:
r"""Convert discrete signed distance fields encoded on tetrahedral grids to triangle meshes using marching tetrahedra algorithm as described in `An efficient method of triangulating equi-valued surfaces by using tetrahedral cells`_. The output surface is differentiable with respect to input vertex positions and the SDF values. For more details and example usage in learning, see `Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis`_ NeurIPS 2021. Args: vertices (torch.tensor): batched vertices of tetrahedral meshes, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`. tets (torch.tensor): unbatched tetrahedral mesh topology, of shape :math:`(\text{num_tetrahedrons}, 4)`. sdf (torch.tensor): batched SDFs which specify the SDF value of each vertex, of shape :math:`(\text{batch_size}, \text{num_vertices})`. return_tet_idx (optional, bool): if True, return index of tetrahedron where each face is extracted. Default: False. Returns: (list[torch.Tensor], list[torch.LongTensor], (optional) list[torch.LongTensor]): - the list of vertices for mesh converted from each tetrahedral grid. - the list of faces for mesh converted from each tetrahedral grid. - the list of indices that correspond to tetrahedra where faces are extracted. Example: >>> vertices = torch.tensor([[[0, 0, 0], ... [1, 0, 0], ... [0, 1, 0], ... [0, 0, 1]]], dtype=torch.float) >>> tets = torch.tensor([[0, 1, 2, 3]], dtype=torch.long) >>> sdf = torch.tensor([[-1., -1., 0.5, 0.5]], dtype=torch.float) >>> verts_list, faces_list, tet_idx_list = marching_tetrahedra(vertices, tets, sdf, True) >>> verts_list[0] tensor([[0.0000, 0.6667, 0.0000], [0.0000, 0.0000, 0.6667], [0.3333, 0.6667, 0.0000], [0.3333, 0.0000, 0.6667]]) >>> faces_list[0] tensor([[3, 0, 1], [3, 2, 0]]) >>> tet_idx_list[0] tensor([0, 0]) .. _An efficient method of triangulating equi-valued surfaces by using tetrahedral cells: https://search.ieice.org/bin/summary.php?id=e74-d_1_214 .. 
_Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis: https://arxiv.org/abs/2111.04276
Here is the function:
def marching_tetrahedra(vertices, tets, sdf, return_tet_idx=False):
    r"""Extract triangle meshes from SDFs sampled on batched tetrahedral grids.

    Implements the marching tetrahedra algorithm from `An efficient method of
    triangulating equi-valued surfaces by using tetrahedral cells`_. The output
    surface is differentiable with respect to both the input vertex positions and
    the SDF values; for details and example usage in learning, see
    `Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis`_
    (NeurIPS 2021).

    Args:
        vertices (torch.tensor): batched vertices of tetrahedral meshes, of shape
            :math:`(\text{batch_size}, \text{num_vertices}, 3)`.
        tets (torch.tensor): unbatched tetrahedral mesh topology, shared across the
            batch, of shape :math:`(\text{num_tetrahedrons}, 4)`.
        sdf (torch.tensor): batched SDF values, one per vertex, of shape
            :math:`(\text{batch_size}, \text{num_vertices})`.
        return_tet_idx (optional, bool): if True, also return, for each extracted
            face, the index of the tetrahedron it came from. Default: False.

    Returns:
        (list[torch.Tensor], list[torch.LongTensor], (optional) list[torch.LongTensor]):

            - the list of vertices for mesh converted from each tetrahedral grid.
            - the list of faces for mesh converted from each tetrahedral grid.
            - the list of indices that correspond to tetrahedra where faces are extracted.

    Example:
        >>> vertices = torch.tensor([[[0, 0, 0],
        ...                           [1, 0, 0],
        ...                           [0, 1, 0],
        ...                           [0, 0, 1]]], dtype=torch.float)
        >>> tets = torch.tensor([[0, 1, 2, 3]], dtype=torch.long)
        >>> sdf = torch.tensor([[-1., -1., 0.5, 0.5]], dtype=torch.float)
        >>> verts_list, faces_list, tet_idx_list = marching_tetrahedra(vertices, tets, sdf, True)
        >>> verts_list[0]
        tensor([[0.0000, 0.6667, 0.0000],
                [0.0000, 0.0000, 0.6667],
                [0.3333, 0.6667, 0.0000],
                [0.3333, 0.0000, 0.6667]])
        >>> faces_list[0]
        tensor([[3, 0, 1],
                [3, 2, 0]])
        >>> tet_idx_list[0]
        tensor([0, 0])

    .. _An efficient method of triangulating equi-valued surfaces by using tetrahedral cells:
        https://search.ieice.org/bin/summary.php?id=e74-d_1_214
    .. _Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis:
        https://arxiv.org/abs/2111.04276
    """
    # Run the unbatched extractor once per batch item, then transpose the list of
    # (verts, faces[, tet_idx]) tuples into parallel per-field lists.
    batch_size = vertices.shape[0]
    per_item = (
        _unbatched_marching_tetrahedra(vertices[b], tets, sdf[b], return_tet_idx)
        for b in range(batch_size)
    )
    return list(zip(*per_item))
_An efficient method of triangulating equi-valued surfaces by using tetrahedral cells: https://search.ieice.org/bin/summary.php?id=e74-d_1_214 .. _Deep Marching Tetrahedra\: a Hybrid Representation for High-Resolution 3D Shape Synthesis: https://arxiv.org/abs/2111.04276 |
4,702 | import torch
import numpy as np
from . import mise
The provided code snippet includes necessary dependencies for implementing the `sdf_to_voxelgrids` function. Write a Python function `def sdf_to_voxelgrids(sdf, bbox_center=0., bbox_dim=1., init_res=32, upsampling_steps=0)` to solve the following problem:
r"""Converts SDFs to voxelgrids. For each SDF returns a voxel grid with resolution :math:`init\_res * 2 ^ {upsampling\_steps} + 1` (so the underlying voxel resolution is :math:`init\_res * 2 ^ {upsampling\_steps}`) where each grid point holds a binary value determined by the sign of the SDF at the location of the grid point after normalizing the voxel grid to the bounding box defined by bbox_center and bbox_dim. This solution is largely borrowed from "Multiresolution IsoSurface Extraction (MISE)" proposed in the CVPR 2019 paper "Occupancy Networks: Learning 3D Reconstruction in Function Space": https://arxiv.org/abs/1906.02739. Instead of evaluating SDF values of all grid points at high resolution, this function incrementally builds an octree and only evaluate dense grid points around the surface. Args: sdf (list[callable]): A list of callable that takes 3D coordinates as a :class:`torch.Tensor`, of shape :math:`(\text{num_points}, 3)` and output the N corresponding SDF values as a :class:`torch.Tensor`, of shape :math:`(\text{num_points})`. bbox_center (optional, float): Center of the surface's bounding box. Default: 0. bbox_dim (optional, float): Largest dimension of the surface's bounding box. Default: 1. init_res (optional, int): The initial resolution of the voxelgrids, should be large enough to properly define the surface. Default: 32. upsampling_steps (optional, int): Number of times the initial resolution will be doubled. Default: 0. Returns: (torch.Tensor): Binary voxelgrids, of shape :math:`(\text{batch_size}, \text{init_res} * 2 ^ \text{upsampling_steps} + 1)`. Example: >>> def sphere(points): ... 
return torch.sum(points ** 2, 1) ** 0.5 - 0.5 >>> sdf_to_voxelgrids([sphere], init_res=4) tensor([[[[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 1., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]], <BLANKLINE> [[0., 0., 0., 0., 0.], [0., 1., 1., 1., 0.], [0., 1., 1., 1., 0.], [0., 1., 1., 1., 0.], [0., 0., 0., 0., 0.]], <BLANKLINE> [[0., 0., 1., 0., 0.], [0., 1., 1., 1., 0.], [1., 1., 1., 1., 1.], [0., 1., 1., 1., 0.], [0., 0., 1., 0., 0.]], <BLANKLINE> [[0., 0., 0., 0., 0.], [0., 1., 1., 1., 0.], [0., 1., 1., 1., 0.], [0., 1., 1., 1., 0.], [0., 0., 0., 0., 0.]], <BLANKLINE> [[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 1., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]]]])
Here is the function:
def sdf_to_voxelgrids(sdf, bbox_center=0., bbox_dim=1., init_res=32, upsampling_steps=0):
    r"""Converts SDFs to voxelgrids.

    For each SDF returns a voxel grid with resolution
    :math:`init\_res * 2 ^ {upsampling\_steps} + 1`
    (so the underlying voxel resolution is
    :math:`init\_res * 2 ^ {upsampling\_steps}`)
    where each grid point holds a binary value
    determined by the sign of the SDF at the location
    of the grid point after normalizing the voxel grid
    to the bounding box defined by bbox_center and bbox_dim.

    This solution is largely borrowed from "Multiresolution IsoSurface Extraction (MISE)"
    proposed in the CVPR 2019 paper "Occupancy Networks: Learning 3D Reconstruction in Function Space":
    https://arxiv.org/abs/1812.03828. Instead of evaluating SDF values of all grid points at high
    resolution, this function incrementally builds an octree and only evaluate dense grid points
    around the surface.

    Args:
        sdf (list[callable]):
            A list of callable that takes 3D coordinates as a :class:`torch.Tensor`, of shape
            :math:`(\text{num_points}, 3)` and output the N corresponding SDF values
            as a :class:`torch.Tensor`, of shape :math:`(\text{num_points})`.
        bbox_center (optional, float):
            Center of the surface's bounding box. Default: 0.
        bbox_dim (optional, float):
            Largest dimension of the surface's bounding box. Default: 1.
        init_res (optional, int):
            The initial resolution of the voxelgrids, should be
            large enough to properly define the surface. Default: 32.
        upsampling_steps (optional, int):
            Number of times the initial resolution will be doubled. Default: 0.

    Returns:
        (torch.Tensor):
            Binary voxelgrids, of shape
            :math:`(\text{batch_size}, \text{R}, \text{R}, \text{R})`
            where :math:`\text{R} = \text{init_res} * 2 ^ \text{upsampling_steps} + 1`.

    Raises:
        TypeError: if ``bbox_center``, ``bbox_dim``, ``init_res`` or
            ``upsampling_steps`` is of the wrong type, or if ``sdf`` is not a
            list of callables.

    Example:
        >>> def sphere(points):
        ...     return torch.sum(points ** 2, 1) ** 0.5 - 0.5
        >>> sdf_to_voxelgrids([sphere], init_res=4)
        tensor([[[[0., 0., 0., 0., 0.],
                  [0., 0., 0., 0., 0.],
                  [0., 0., 1., 0., 0.],
                  [0., 0., 0., 0., 0.],
                  [0., 0., 0., 0., 0.]],
        <BLANKLINE>
                 [[0., 0., 0., 0., 0.],
                  [0., 1., 1., 1., 0.],
                  [0., 1., 1., 1., 0.],
                  [0., 1., 1., 1., 0.],
                  [0., 0., 0., 0., 0.]],
        <BLANKLINE>
                 [[0., 0., 1., 0., 0.],
                  [0., 1., 1., 1., 0.],
                  [1., 1., 1., 1., 1.],
                  [0., 1., 1., 1., 0.],
                  [0., 0., 1., 0., 0.]],
        <BLANKLINE>
                 [[0., 0., 0., 0., 0.],
                  [0., 1., 1., 1., 0.],
                  [0., 1., 1., 1., 0.],
                  [0., 1., 1., 1., 0.],
                  [0., 0., 0., 0., 0.]],
        <BLANKLINE>
                 [[0., 0., 0., 0., 0.],
                  [0., 0., 0., 0., 0.],
                  [0., 0., 1., 0., 0.],
                  [0., 0., 0., 0., 0.],
                  [0., 0., 0., 0., 0.]]]])
    """
    # Fail fast with precise messages before touching the (potentially slow) SDFs.
    if not isinstance(bbox_center, (int, float)):
        raise TypeError(f"Expected bbox_center to be int or float "
                        f"but got {type(bbox_center)}.")
    if not isinstance(bbox_dim, (int, float)):
        raise TypeError(f"Expected bbox_dim to be int or float "
                        f"but got {type(bbox_dim)}.")
    if not isinstance(init_res, int):
        raise TypeError(f"Expected init_res to be int "
                        f"but got {type(init_res)}.")
    if not isinstance(upsampling_steps, int):
        raise TypeError(f"Expected upsampling_steps to be int "
                        f"but got {type(upsampling_steps)}.")
    if not isinstance(sdf, list):
        raise TypeError(f"Expected sdf to be list "
                        f"but got {type(sdf)}.")
    voxels = []
    for i_batch in range(len(sdf)):
        if not callable(sdf[i_batch]):
            raise TypeError(f"Expected sdf[{i_batch}] to be callable "
                            f"but got {type(sdf[i_batch])}.")
        # MISE incrementally refines an octree; 0.5 is the occupancy threshold
        # used to decide which cells need further subdivision.
        mesh_extractor = mise.MISE(
            init_res, upsampling_steps, .5)
        points = mesh_extractor.query()
        # Keep evaluating until the extractor has no more unresolved grid points.
        while points.shape[0] != 0:
            # Query points
            pointsf = torch.FloatTensor(points)
            # Normalize to bounding box: integer grid coords -> world coords.
            # NOTE(review): bbox_center is multiplied by bbox_dim here; the usual
            # mapping would be `bbox_dim * (pointsf - 0.5) + bbox_center`. The two
            # only agree when bbox_dim == 1 or bbox_center == 0 — confirm intent.
            pointsf = pointsf / (mesh_extractor.resolution)
            pointsf = bbox_dim * (pointsf - 0.5 + bbox_center)
            # Occupancy convention: inside (sdf <= 0) -> 1.0, outside -> 0.0.
            values = sdf[i_batch](pointsf) <= 0
            values = values.data.cpu().numpy().astype(np.float64)
            mesh_extractor.update(points, values)
            points = mesh_extractor.query()
        voxels.append(torch.FloatTensor(mesh_extractor.to_dense()))
    return torch.stack(voxels)
return torch.sum(points ** 2, 1) ** 0.5 - 0.5 >>> sdf_to_voxelgrids([sphere], init_res=4) tensor([[[[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 1., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]], <BLANKLINE> [[0., 0., 0., 0., 0.], [0., 1., 1., 1., 0.], [0., 1., 1., 1., 0.], [0., 1., 1., 1., 0.], [0., 0., 0., 0., 0.]], <BLANKLINE> [[0., 0., 1., 0., 0.], [0., 1., 1., 1., 0.], [1., 1., 1., 1., 1.], [0., 1., 1., 1., 0.], [0., 0., 1., 0., 0.]], <BLANKLINE> [[0., 0., 0., 0., 0.], [0., 1., 1., 1., 0.], [0., 1., 1., 1., 0.], [0., 1., 1., 1., 0.], [0., 0., 0., 0., 0.]], <BLANKLINE> [[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 1., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]]]]) |
4,703 | import torch
import torch.nn.functional as F
from kaolin import _C
faces_3x4x3 = verts_template[faces_template]
quad_face = torch.LongTensor([[0, 1, 3, 2]])
kernels = torch.cat([kernel, kernel.transpose(
2, 3), kernel.transpose(2, 4)], 0)
The provided code snippet includes necessary dependencies for implementing the `voxelgrids_to_cubic_meshes` function. Write a Python function `def voxelgrids_to_cubic_meshes(voxelgrids, is_trimesh=True)` to solve the following problem:
r"""Convert voxelgrids to meshes by replacing each occupied voxel with a cuboid mesh (unit cube). Each cube has 8 vertices and 6 (for quadmesh) or 12 faces (for triangular mesh). Internal faces are ignored. If `is_trimesh==True`, this function performs the same operation as "Cubify" defined in the ICCV 2019 paper "Mesh R-CNN": https://arxiv.org/abs/1906.02739. Args: voxelgrids (torch.Tensor): binary voxel array, of shape :math:`(\text{batch_size}, \text{X}, \text{Y}, \text{Z})`. is_trimesh (optional, bool): if True, the outputs are triangular meshes. Otherwise quadmeshes are returned. Default: True. Returns: (list[torch.Tensor], list[torch.LongTensor]): - The list of vertices for each mesh. - The list of faces for each mesh. Example: >>> voxelgrids = torch.ones((1, 1, 1, 1)) >>> verts, faces = voxelgrids_to_cubic_meshes(voxelgrids) >>> verts[0] tensor([[0., 0., 0.], [0., 0., 1.], [0., 1., 0.], [0., 1., 1.], [1., 0., 0.], [1., 0., 1.], [1., 1., 0.], [1., 1., 1.]]) >>> faces[0] tensor([[0, 1, 2], [5, 4, 7], [0, 4, 1], [6, 2, 7], [0, 2, 4], [3, 1, 7], [3, 2, 1], [6, 7, 4], [5, 1, 4], [3, 7, 2], [6, 4, 2], [5, 7, 1]])
Here is the function:
def voxelgrids_to_cubic_meshes(voxelgrids, is_trimesh=True):
    r"""Convert voxelgrids to meshes by replacing each occupied voxel with a cuboid mesh (unit cube).

    Each cube has 8 vertices and 6 (for quadmesh) or 12 faces
    (for triangular mesh). Internal faces are ignored.
    If `is_trimesh==True`, this function performs the same operation
    as "Cubify" defined in the ICCV 2019 paper "Mesh R-CNN":
    https://arxiv.org/abs/1906.02739.

    Args:
        voxelgrids (torch.Tensor): binary voxel array, of shape
            :math:`(\text{batch_size}, \text{X}, \text{Y}, \text{Z})`.
        is_trimesh (optional, bool): if True, the outputs are triangular meshes.
            Otherwise quadmeshes are returned. Default: True.

    Returns:
        (list[torch.Tensor], list[torch.LongTensor]):

            - The list of vertices for each mesh.
            - The list of faces for each mesh.

    Example:
        >>> voxelgrids = torch.ones((1, 1, 1, 1))
        >>> verts, faces = voxelgrids_to_cubic_meshes(voxelgrids)
        >>> verts[0]
        tensor([[0., 0., 0.],
                [0., 0., 1.],
                [0., 1., 0.],
                [0., 1., 1.],
                [1., 0., 0.],
                [1., 0., 1.],
                [1., 1., 0.],
                [1., 1., 1.]])
        >>> faces[0]
        tensor([[0, 1, 2],
                [5, 4, 7],
                [0, 4, 1],
                [6, 2, 7],
                [0, 2, 4],
                [3, 1, 7],
                [3, 2, 1],
                [6, 7, 4],
                [5, 1, 4],
                [3, 7, 2],
                [6, 4, 2],
                [5, 7, 1]])
    """
    device = voxelgrids.device
    # Add a channel dim so conv3d sees (B, 1, X, Y, Z).
    voxelgrids = voxelgrids.unsqueeze(1)
    batch_size = voxelgrids.shape[0]
    # `quad_face` is the module-level canonical vertex ordering of one quad.
    face = quad_face.to(device)
    # NOTE(review): half on CPU / float elsewhere looks inverted relative to the
    # usual convention (CPU conv3d historically lacks half support). Also,
    # `device == 'cpu'` compares a torch.device against a str — behavior of that
    # comparison has varied across PyTorch versions; verify on the supported range.
    if device == 'cpu':
        k = kernels.to(device).half()
        voxelgrids = voxelgrids.half()
    else:
        k = kernels.to(device).float()
        voxelgrids = voxelgrids.float()
    # `kernels` (module level) holds one +1/-1 difference kernel per axis, so a
    # nonzero response marks a voxel face adjacent to empty space; the sign
    # presumably encodes which side of the face is solid — see kernel definition.
    conv_results = torch.nn.functional.conv3d(
        voxelgrids, k, padding=1).round()  # (B, 3, r, r, r)
    # After transposing to (3, B, X, Y, Z), nonzero(as_tuple=True) yields a tuple
    # of 5 index tensors: (axis, batch, x, y, z), one entry per boundary face.
    indices = torch.nonzero(conv_results.transpose(
        0, 1), as_tuple=True)  # (N, 5)
    dim, batch, loc = indices[0], indices[1], torch.stack(
        indices[2:], -1)  # (N,) , (N, ), (N, 3)
    # Faces with response -1 get their winding flipped below so orientation is
    # consistent (presumably outward-facing normals — TODO confirm).
    invert = conv_results.transpose(0, 1)[indices] == -1
    # counts[i] = number of boundary faces perpendicular to axis i; indices are
    # sorted by axis, so repeat_interleave lines the templates up with `loc`.
    _, counts = torch.unique(dim, sorted=True, return_counts=True)
    # `faces_3x4x3` is the module-level per-axis quad template (4 corners, xyz);
    # offset each template by its voxel location to get world-space corners.
    faces_loc = (torch.repeat_interleave(faces_3x4x3.to(device), counts, dim=0) +
                 loc.unsqueeze(1).float())  # (N, 4, 3)
    faces_batch = []
    verts_batch = []
    for b in range(batch_size):
        # All quad corners belonging to this batch item, flattened to (4N, 3).
        verts = faces_loc[torch.nonzero(batch == b)].view(-1, 3)
        if verts.shape[0] == 0:
            # No occupied voxels: emit empty mesh with the right dtypes/shapes.
            faces_batch.append(torch.zeros((0, 3 if is_trimesh else 4), device=device, dtype=torch.long))
            verts_batch.append(torch.zeros((0, 3), device=device))
            continue
        invert_batch = torch.repeat_interleave(
            invert[batch == b], face.shape[0], dim=0)
        N = verts.shape[0] // 4
        # Each quad owns 4 consecutive vertices; shift indexes into that layout.
        shift = torch.arange(N, device=device).unsqueeze(1) * 4  # (N,1)
        faces = (face.unsqueeze(0) + shift.unsqueeze(1)
                 ).view(-1, face.shape[-1])  # (N, 4) or (2N, 3)
        # Reverse winding of faces flagged by `invert`.
        faces[invert_batch] = torch.flip(faces[invert_batch], [-1])
        if is_trimesh:
            # Split each quad into two triangles.
            faces = torch.cat(
                [faces[:, [0, 3, 1]], faces[:, [2, 1, 3]]], dim=0)
        # Deduplicate shared corner vertices and remap face indices accordingly.
        verts, v = torch.unique(
            verts, return_inverse=True, dim=0)
        faces = v[faces.reshape(-1)].reshape((-1, 3 if is_trimesh else 4))
        faces_batch.append(faces)
        verts_batch.append(verts)
    return verts_batch, faces_batch
4,704 | import torch
import torch.nn.functional as F
from kaolin import _C
for i in range(3):
faces_3x4x3[i, :, (i - 1) % 3] -= 1
faces_3x4x3[i, :, (i + 1) % 3] -= 1
class MarchingCubesLorensenCuda(torch.autograd.Function):
    """Autograd wrapper for the CUDA marching cubes (Lorensen) kernel.

    Forward extracts ``(vertices, faces)`` from a single unbatched voxelgrid at
    the given iso-value. Backward is not implemented yet and returns no
    gradients.
    """

    # Fix: torch.autograd.Function requires staticmethod forward/backward;
    # without the decorators, .apply() fails on modern PyTorch.
    @staticmethod
    def forward(ctx, voxelgrid, iso_value):
        # Runs entirely inside the C++/CUDA extension; nothing is saved for
        # backward since backward is currently a no-op.
        vertices, faces = _C.ops.conversions.unbatched_mcube_forward_cuda(voxelgrid, iso_value)
        return vertices, faces

    # Fix: forward returns two outputs, so backward receives two gradients
    # (the old single-argument signature would raise if backward were invoked).
    @staticmethod
    def backward(ctx, grad_vertices, grad_faces):
        # TODO: do a custom backward pass.
        return None, None
The provided code snippet includes necessary dependencies for implementing the `voxelgrids_to_trianglemeshes` function. Write a Python function `def voxelgrids_to_trianglemeshes(voxelgrids, iso_value=0.5)` to solve the following problem:
r"""Converts voxelgrids to triangle meshes using marching cube algorithm. Please refer to: *Lorensen, William E.; Cline, Harvey E.* in `Marching cubes, A high resolution 3D surface construction algorithm`_ Args: voxelgrids (torch.Tensor): Exact batched voxel array with shape :math:`(\text{batch_size}, \text{X}, \text{Y}, \text{Z})`. iso_value (optional, float): Value in the range :math:`[0, 1]` used to determine whether a voxel is inside the surface or not. Isovalue is also used to interpolate newly created triangle vertices. Defaults to 0.5 Returns: (list[torch.FloatTensor], list[torch.LongTensor]): - The list of vertices of each mesh. - The list of faces of each mesh. Example: >>> voxelgrid = torch.tensor([[[[1, 0], ... [0, 0]], ... [[0, 0], ... [0, 0]]]], device='cuda', dtype=torch.uint8) >>> vertices, faces = voxelgrids_to_trianglemeshes(voxelgrid) >>> vertices[0] tensor([[1.0000, 1.0000, 0.5000], [1.0000, 0.5000, 1.0000], [0.5000, 1.0000, 1.0000], [1.0000, 1.0000, 1.5000], [1.0000, 1.5000, 1.0000], [1.5000, 1.0000, 1.0000]], device='cuda:0') >>> faces[0] tensor([[0, 1, 2], [3, 2, 1], [4, 0, 2], [4, 2, 3], [0, 5, 1], [5, 3, 1], [4, 5, 0], [5, 4, 3]], device='cuda:0') .. _Marching cubes, A high resolution 3D surface construction algorithm: https://www.researchgate.net/publication/202232897_Marching_Cubes_A_High_Resolution_3D_Surface_Construction_Algorithm
Here is the function:
def voxelgrids_to_trianglemeshes(voxelgrids, iso_value=0.5):
    r"""Run marching cubes on every voxelgrid of a batch to get triangle meshes.

    Implements the classic algorithm by *Lorensen, William E.; Cline, Harvey E.*,
    `Marching cubes, A high resolution 3D surface construction algorithm`_.
    CUDA tensors only.

    Args:
        voxelgrids (torch.Tensor):
            Exact batched voxel array with shape
            :math:`(\text{batch_size}, \text{X}, \text{Y}, \text{Z})`.
        iso_value (optional, float):
            Value in the range :math:`[0, 1]` used to determine whether a voxel
            is inside the surface or not; also used to interpolate the positions
            of newly created triangle vertices. Defaults to 0.5.

    Returns:
        (list[torch.FloatTensor], list[torch.LongTensor]):

            - The list of vertices of each mesh.
            - The list of faces of each mesh.

    Raises:
        NotImplementedError: if ``voxelgrids`` is not a CUDA tensor.

    Example:
        >>> voxelgrid = torch.tensor([[[[1, 0],
        ...                             [0, 0]],
        ...                            [[0, 0],
        ...                             [0, 0]]]], device='cuda', dtype=torch.uint8)
        >>> vertices, faces = voxelgrids_to_trianglemeshes(voxelgrid)
        >>> vertices[0]
        tensor([[1.0000, 1.0000, 0.5000],
                [1.0000, 0.5000, 1.0000],
                [0.5000, 1.0000, 1.0000],
                [1.0000, 1.0000, 1.5000],
                [1.0000, 1.5000, 1.0000],
                [1.5000, 1.0000, 1.0000]], device='cuda:0')
        >>> faces[0]
        tensor([[0, 1, 2],
                [3, 2, 1],
                [4, 0, 2],
                [4, 2, 3],
                [0, 5, 1],
                [5, 3, 1],
                [4, 5, 0],
                [5, 4, 3]], device='cuda:0')

    .. _Marching cubes, A high resolution 3D surface construction algorithm:
        https://www.researchgate.net/publication/202232897_Marching_Cubes_A_High_Resolution_3D_Surface_Construction_Algorithm
    """
    # NOTE kept from original: pytorch 1.7 + cuda 11.0 had a bug where some cuda
    # ops would not write results back to the tensor (absent in 1.6 + cuda 10.0).
    if not voxelgrids.is_cuda:
        raise NotImplementedError("voxelgrids_to_trianglemeshes does not support CPU.")
    device = voxelgrids.device
    # TODO: support half and double.
    # Zero-pad one voxel on every side so cells on the boundary close properly.
    padded = F.pad(voxelgrids.float(), (1, 1, 1, 1, 1, 1), 'constant', 0)
    vertices_list = []
    faces_list = []
    for grid in padded:
        # Skip the CUDA kernel entirely for empty grids; emit empty buffers
        # with the expected dtypes and shapes instead.
        if (grid == 0).all():
            vertices_list.append(torch.zeros((0, 3), dtype=torch.float, device=device))
            faces_list.append(torch.zeros((0, 3), dtype=torch.long, device=device))
            continue
        verts, tri = MarchingCubesLorensenCuda.apply(grid, iso_value)
        vertices_list.append(verts)
        faces_list.append(tri.long())
    return vertices_list, faces_list
4,705 | import torch
from kaolin import _C
class _PackedSimpleSumCuda(torch.autograd.Function):
    """torch.autograd.Function wrapper for the :func:`packed_simple_sum` CUDA kernel."""

    # Fix: torch.autograd.Function requires staticmethod forward/backward;
    # without the decorators, .apply() fails on modern PyTorch. The original
    # docstring also wrongly referenced tile_to_packed.
    @staticmethod
    def forward(ctx, inputs, numel_per_tensor):
        inputs = inputs.contiguous()
        numel_per_tensor = numel_per_tensor.contiguous()
        output = _C.ops.packed_simple_sum_cuda(inputs, numel_per_tensor)
        # The kernel returns a wider dtype for half inputs (presumably to
        # accumulate safely); cast back so output dtype matches input dtype.
        if inputs.dtype == torch.half:
            output = output.to(torch.half)
        ctx.save_for_backward(numel_per_tensor)
        ctx.inputs_shape = inputs.shape
        ctx.inputs_dtype = inputs.dtype
        return output

    @staticmethod
    def backward(ctx, grad_output):
        grad_output = grad_output.contiguous()
        numel_per_tensor, = ctx.saved_tensors
        # d(sum)/d(input) = 1, so the gradient of each sub-tensor's sum is
        # broadcast ("tiled") over all of that sub-tensor's elements.
        grad_inputs = torch.empty(ctx.inputs_shape, dtype=ctx.inputs_dtype,
                                  device=grad_output.device)
        _C.ops.tile_to_packed_out_cuda(grad_output, numel_per_tensor, grad_inputs)
        # No gradient w.r.t. numel_per_tensor (integer layout metadata).
        return grad_inputs, None
The provided code snippet includes necessary dependencies for implementing the `packed_simple_sum` function. Write a Python function `def packed_simple_sum(tensor, numel_per_tensor)` to solve the following problem:
Sum of each subtensor in a packed tensor with last_dim=1. Args: tensor (torch.Tensor): The input :ref:`packed_tensor<packed>` numel_per_tensor (torch.LongTensor): Tensor containing the number of element per sub-tensor. Returns: (torch.Tensor): A 1D tensor of size ``tensor.shape[0]``, containing the sum of each sub-tensor in the input tensor.
Here is the function:
def packed_simple_sum(tensor, numel_per_tensor):
    """Sum each sub-tensor of a packed tensor whose last dimension is 1.

    Args:
        tensor (torch.Tensor): The input :ref:`packed_tensor<packed>`, with
            ``tensor.shape[-1] == 1``.
        numel_per_tensor (torch.LongTensor):
            Tensor containing the number of element per sub-tensor.

    Returns:
        (torch.Tensor):
            A 1D tensor with one entry per sub-tensor, each holding the sum of
            the corresponding sub-tensor's elements.
    """
    assert tensor.shape[-1] == 1
    use_cuda_kernel = (torch.cuda.is_available() and tensor.is_cuda
                       and not numel_per_tensor.is_cuda)
    if use_cuda_kernel:
        return _PackedSimpleSumCuda.apply(tensor, numel_per_tensor)
    # CPU fallback: walk the packed layout, summing each contiguous sub-tensor.
    sums = []
    offset = 0
    for numel in numel_per_tensor:
        end = offset + int(numel)
        sums.append(torch.sum(tensor[offset:end]))
        offset = end
    return torch.stack(sums, dim=0)
4,706 | import torch
import torch.nn.functional as F
from scipy import ndimage
def _force_float(input_tensor):
r""" Cast the tensor to the smallest floating point dtype if it's a torch.BoolTensor.
If it's a torch.BoolTensor on cpu then cast to torch.float,
If it's a torch.cuda.BoolTensor then cast to torch.half,
otherwise don't cast.
Args:
input_tensor (torch.Tensor)
Returns:
torch.Tensor: The cast tensor of either type torch.half or torch.float if input
is of type torch.bool, depending on the device. Else, voxelgrids
type is unchanged.
"""
input_dtype = input_tensor.dtype
if input_dtype == torch.bool:
output_dtype = torch.half if input_tensor.is_cuda else torch.float
input_tensor = input_tensor.type(output_dtype)
return input_tensor
The provided code snippet includes necessary dependencies for implementing the `downsample` function. Write a Python function `def downsample(voxelgrids, scale)` to solve the following problem:
r"""Downsamples a voxelgrids, given a (down)scaling factor for each dimension. .. Note:: The voxelgrids output is not thresholded. Args: voxelgrids (torch.Tensor): voxelgrids to be downsampled, of shape :math:`(\text{batch_size}, \text{X}, \text{Y}, \text{Z})`. scale (list or tuple or int): List or tuple of int of length 3 to scale each dimension down. or an int to scale down for every dimension. Returns: (torch.Tensor): Downsampled voxelgrids. Example: >>> voxelgrids2 = torch.zeros((1, 4, 4, 4)) >>> voxelgrids2[0, 0] = 1 >>> voxelgrids2[0, 1] = 0.4 >>> voxelgrids2[0, 3] = 0.8 >>> downsample(voxelgrids2, 2) tensor([[[[0.7000, 0.7000], [0.7000, 0.7000]], <BLANKLINE> [[0.4000, 0.4000], [0.4000, 0.4000]]]])
Here is the function:
def downsample(voxelgrids, scale):
    r"""Downsamples a voxelgrids, given a (down)scaling factor for each
    dimension.

    .. Note::
        The voxelgrids output is not thresholded.

    Args:
        voxelgrids (torch.Tensor): voxelgrids to be downsampled, of shape
            :math:`(\text{batch_size}, \text{X}, \text{Y}, \text{Z})`.
        scale (list or tuple or int): List or tuple of int of length 3 to scale each dimension down.
            or an int to scale down for every dimension.

    Returns:
        (torch.Tensor): Downsampled voxelgrids.

    Raises:
        ValueError: if ``scale`` does not have 3 entries, is smaller than 1, or
            exceeds the corresponding voxelgrids dimension, or if ``voxelgrids``
            is not 4D.
        TypeError: if ``scale`` is not a list, tuple or int.

    Example:
        >>> voxelgrids2 = torch.zeros((1, 4, 4, 4))
        >>> voxelgrids2[0, 0] = 1
        >>> voxelgrids2[0, 1] = 0.4
        >>> voxelgrids2[0, 3] = 0.8
        >>> downsample(voxelgrids2, 2)
        tensor([[[[0.7000, 0.7000],
                  [0.7000, 0.7000]],
        <BLANKLINE>
                 [[0.4000, 0.4000],
                  [0.4000, 0.4000]]]])
    """
    # Boolean grids are cast to float so avg_pool3d can average them.
    voxelgrids = _force_float(voxelgrids)
    try:
        # Average pooling with kernel == stride == scale computes the mean
        # occupancy of each scale^3 cell; the channel dim is added/removed
        # around the call because avg_pool3d expects (B, C, X, Y, Z).
        output = F.avg_pool3d(voxelgrids.unsqueeze(1), kernel_size=scale,
                              stride=scale, padding=0)
    except RuntimeError as err:
        # Diagnose the common misuse patterns to re-raise with friendlier
        # messages; anything unrecognized propagates unchanged.
        # Fix: tuples are documented as supported, so diagnose them like lists
        # (they previously fell through to `raise err` or to the wrong TypeError).
        if isinstance(scale, (list, tuple)) and len(scale) != 3:
            scale_length = len(scale)
            raise ValueError(f"Expected scale to have 3 dimensions "
                             f"but got {scale_length} dimensions.")
        if voxelgrids.ndim != 4:
            voxelgrids_dim = voxelgrids.ndim
            raise ValueError(f"Expected voxelgrids to have 4 dimensions "
                             f"but got {voxelgrids_dim} dimensions.")
        # Fix: an int scale applies to every dimension; previously this path
        # crashed with "'int' object is not subscriptable" instead of raising
        # the intended ValueError.
        per_dim_scale = [scale] * 3 if isinstance(scale, int) else scale
        for i in range(3):
            if per_dim_scale[i] < 1:
                scale_value = per_dim_scale[i]
                raise ValueError(f"Downsample ratio must be at least 1 "
                                 f"along every dimension but got {scale_value} at "
                                 f"index {i}.")
            if per_dim_scale[i] > voxelgrids.shape[i + 1]:
                voxelgrids_shape_val = voxelgrids.shape[i + 1]
                scale_val = per_dim_scale[i]
                raise ValueError(f"Downsample ratio must be less than voxelgrids "
                                 f"shape of {voxelgrids_shape_val} at index {i}, but got {scale_val}.")
        raise err  # unknown error
    except TypeError as err:
        if not isinstance(scale, (list, tuple, int)):
            scale_type = type(scale)
            raise TypeError(f"Expected scale to be type list or int "
                            f"but got {scale_type}.")
        raise err  # unknown error
    return output.squeeze(1)
4,707 | import torch
import torch.nn.functional as F
from scipy import ndimage
def _force_float(input_tensor):
r""" Cast the tensor to the smallest floating point dtype if it's a torch.BoolTensor.
If it's a torch.BoolTensor on cpu then cast to torch.float,
If it's a torch.cuda.BoolTensor then cast to torch.half,
otherwise don't cast.
Args:
input_tensor (torch.Tensor)
Returns:
torch.Tensor: The cast tensor of either type torch.half or torch.float if input
is of type torch.bool, depending on the device. Else, voxelgrids
type is unchanged.
"""
input_dtype = input_tensor.dtype
if input_dtype == torch.bool:
output_dtype = torch.half if input_tensor.is_cuda else torch.float
input_tensor = input_tensor.type(output_dtype)
return input_tensor
The provided code snippet includes necessary dependencies for implementing the `extract_surface` function. Write a Python function `def extract_surface(voxelgrids, mode="wide")` to solve the following problem:
r"""Removes any internal structure(s) from a voxelgrids. Args: voxelgrids (torch.Tensor): Binary voxelgrids of shape (N, X, Y ,Z) from which to extract surface mode (str): Either "wide" or "thin". Each voxel can be seen as a cube in a grid. "wide" mode keeps each filled voxel with at least one vertex in contact with an empty voxel. "thin" mode keeps each filled voxel with at least one face in contact with an empty voxel. Returns: torch.BoolTensor: binary surface voxelgrids tensor Example: >>> voxelgrids = torch.ones((1, 3, 3, 3)) >>> output = extract_surface(voxelgrids) >>> output[0] tensor([[[ True, True, True], [ True, True, True], [ True, True, True]], <BLANKLINE> [[ True, True, True], [ True, False, True], [ True, True, True]], <BLANKLINE> [[ True, True, True], [ True, True, True], [ True, True, True]]])
Here is the function:
def extract_surface(voxelgrids, mode="wide"):
    r"""Removes any internal structure(s) from a voxelgrids.

    Args:
        voxelgrids (torch.Tensor): Binary voxelgrids of shape (N, X, Y, Z)
            from which to extract surface
        mode (str): Either "wide" or "thin". Each voxel can be seen as a cube in a grid.
            "wide" mode keeps each filled voxel with at least one vertex in contact
            with an empty voxel. "thin" mode keeps each filled voxel with at least
            one face in contact with an empty voxel.

    Returns:
        torch.BoolTensor: binary surface voxelgrids tensor

    Raises:
        ValueError: if ``voxelgrids`` is not 4D or ``mode`` is unknown.

    Example:
        >>> voxelgrids = torch.ones((1, 3, 3, 3))
        >>> output = extract_surface(voxelgrids)
        >>> output[0]
        tensor([[[ True,  True,  True],
                 [ True,  True,  True],
                 [ True,  True,  True]],
        <BLANKLINE>
                [[ True,  True,  True],
                 [ True, False,  True],
                 [ True,  True,  True]],
        <BLANKLINE>
                [[ True,  True,  True],
                 [ True,  True,  True],
                 [ True,  True,  True]]])
    """
    # avg_pool3d requires a floating dtype; bool grids are cast first.
    voxelgrids = _force_float(voxelgrids)
    if voxelgrids.ndim != 4:
        voxelgrids_dim = voxelgrids.ndim
        raise ValueError(f"Expected voxelgrids to have 4 dimensions "
                         f"but got {voxelgrids_dim} dimensions.")
    if mode == "wide":
        # 3x3x3 mean over the 26-connected neighborhood: the mean is < 1 iff at
        # least one neighbor (or zero-padding at the border) is empty.
        output = F.avg_pool3d(voxelgrids.unsqueeze(1), kernel_size=(3, 3, 3), padding=1, stride=1).squeeze(1)
        output = (output < 1) * voxelgrids.bool()
    elif mode == "thin":
        # One 1-D mean per axis: a filled voxel survives iff at least one
        # face-adjacent (6-connected) neighbor along some axis is empty.
        output_x = F.avg_pool3d(voxelgrids.unsqueeze(1), kernel_size=(3, 1, 1), padding=(1, 0, 0), stride=1).squeeze(1)
        output_y = F.avg_pool3d(voxelgrids.unsqueeze(1), kernel_size=(1, 3, 1), padding=(0, 1, 0), stride=1).squeeze(1)
        output_z = F.avg_pool3d(voxelgrids.unsqueeze(1), kernel_size=(1, 1, 3), padding=(0, 0, 1), stride=1).squeeze(1)
        output = ((output_x < 1) | (output_y < 1) | (output_z < 1)) * voxelgrids.bool()
    else:
        raise ValueError(f'mode "{mode}" is not supported.')
    return output
4,708 | import torch
import torch.nn.functional as F
from scipy import ndimage
The provided code snippet includes necessary dependencies for implementing the `fill` function. Write a Python function `def fill(voxelgrids)` to solve the following problem:
r""" Fills the internal structures in a voxelgrids grid. Used to fill holes and 'solidify' objects. .. Note:: This function is not differentiable. Args: voxelgrids (torch.Tensor): binary voxelgrids of size (N, X, Y, Z) to be filled. Returns: torch.BoolTensor: filled, binary voxelgrids array Example: >>> voxelgrids = torch.Tensor( ... [[[[0., 0., 0., 0., 0.], ... [0., 1., 1., 1., 1.], ... [0., 1., 1., 1., 1.], ... [0., 1., 1., 1., 1.]], ... [[0., 0., 0., 0., 0.], ... [0., 1., 1., 1., 1.], ... [0., 1., 0., 0., 1.], ... [0., 1., 1., 1., 1.]], ... [[0., 0., 0., 0., 0.], ... [0., 1., 1., 1., 1.], ... [0., 1., 1., 1., 1.], ... [0., 1., 1., 1., 1.]]]]) >>> fill(voxelgrids) tensor([[[[False, False, False, False, False], [False, True, True, True, True], [False, True, True, True, True], [False, True, True, True, True]], <BLANKLINE> [[False, False, False, False, False], [False, True, True, True, True], [False, True, True, True, True], [False, True, True, True, True]], <BLANKLINE> [[False, False, False, False, False], [False, True, True, True, True], [False, True, True, True, True], [False, True, True, True, True]]]])
Here is the function:
def fill(voxelgrids):
    r""" Fills the internal structures in a voxelgrids grid. Used to fill holes
    and 'solidify' objects.

    .. Note::
        This function is not differentiable.

    Args:
        voxelgrids (torch.Tensor): binary voxelgrids of size (N, X, Y, Z) to be filled.

    Returns:
        torch.BoolTensor: filled, binary voxelgrids array

    Raises:
        ValueError: if ``voxelgrids`` is not 4D.
        NotImplementedError: if ``voxelgrids`` lives on a CUDA device.

    Example:
        >>> voxelgrids = torch.Tensor(
        ...     [[[[0., 0., 0., 0., 0.],
        ...        [0., 1., 1., 1., 1.],
        ...        [0., 1., 1., 1., 1.],
        ...        [0., 1., 1., 1., 1.]],
        ...       [[0., 0., 0., 0., 0.],
        ...        [0., 1., 1., 1., 1.],
        ...        [0., 1., 0., 0., 1.],
        ...        [0., 1., 1., 1., 1.]],
        ...       [[0., 0., 0., 0., 0.],
        ...        [0., 1., 1., 1., 1.],
        ...        [0., 1., 1., 1., 1.],
        ...        [0., 1., 1., 1., 1.]]]])
        >>> fill(voxelgrids)
        tensor([[[[False, False, False, False, False],
                  [False,  True,  True,  True,  True],
                  [False,  True,  True,  True,  True],
                  [False,  True,  True,  True,  True]],
        <BLANKLINE>
                 [[False, False, False, False, False],
                  [False,  True,  True,  True,  True],
                  [False,  True,  True,  True,  True],
                  [False,  True,  True,  True,  True]],
        <BLANKLINE>
                 [[False, False, False, False, False],
                  [False,  True,  True,  True,  True],
                  [False,  True,  True,  True,  True],
                  [False,  True,  True,  True,  True]]]])
    """
    if voxelgrids.ndim != 4:
        voxelgrids_dim = voxelgrids.ndim
        raise ValueError(f"Expected voxelgrids to have 4 dimensions "
                         f"but got {voxelgrids_dim} dimensions.")
    dtype = voxelgrids.dtype  # NOTE(review): unused; output is always torch.bool.
    device = voxelgrids.device
    if voxelgrids.is_cuda:
        raise NotImplementedError("Fill function is not supported on GPU yet.")
    # Detach from autograd and ensure a CPU copy for scipy (already CPU here).
    voxelgrids = voxelgrids.data.cpu()
    output = []
    for i in range(voxelgrids.shape[0]):
        # binary_fill_holes flood-fills from the border, closing enclosed cavities.
        on = ndimage.binary_fill_holes(voxelgrids[i])
        output.append(on)
    output = torch.tensor(output, dtype=torch.bool, device=device)
    return output
4,709 | import torch
import torch.nn.functional as F
from scipy import ndimage
The provided code snippet includes necessary dependencies for implementing the `extract_odms` function. Write a Python function `def extract_odms(voxelgrids)` to solve the following problem:
r"""Extracts orthographic depth maps from voxelgrids. Args: voxelgrids (torch.Tensor): Binary voxelgrids of shape (N, dim, dim, dim) from which odms are extracted. Returns: (torch.LongTensor): Batched ODMs of shape (N, 6, dim, dim) from the 6 primary viewing angles. The face order is z_neg, z_pos, y_neg, y_pos, x_neg, x_pos, denoting the axis and direction we are looking at. Example: >>> voxelgrids = torch.ones((2, 2, 2, 2)) >>> voxelgrids[0, :, 0, :] = 0 # Set the front face to be zeros >>> output = extract_odms(voxelgrids) >>> output tensor([[[[2, 0], [2, 0]], <BLANKLINE> [[2, 0], [2, 0]], <BLANKLINE> [[0, 0], [0, 0]], <BLANKLINE> [[1, 1], [1, 1]], <BLANKLINE> [[2, 2], [0, 0]], <BLANKLINE> [[2, 2], [0, 0]]], <BLANKLINE> <BLANKLINE> [[[0, 0], [0, 0]], <BLANKLINE> [[0, 0], [0, 0]], <BLANKLINE> [[0, 0], [0, 0]], <BLANKLINE> [[0, 0], [0, 0]], <BLANKLINE> [[0, 0], [0, 0]], <BLANKLINE> [[0, 0], [0, 0]]]])
Here is the function:
def extract_odms(voxelgrids):
    r"""Extracts orthographic depth maps from voxelgrids.

    Args:
        voxelgrids (torch.Tensor): Binary voxelgrids of shape (N, dim, dim, dim) from
            which odms are extracted.

    Returns:
        (torch.LongTensor): Batched ODMs of shape (N, 6, dim, dim) from the 6 primary viewing angles.
            The face order is z_neg, z_pos, y_neg, y_pos, x_neg, x_pos, denoting the axis and direction
            we are looking at. A value of ``dim`` means the ray crosses the whole
            grid without hitting any filled voxel.

    Example:
        >>> voxelgrids = torch.ones((2, 2, 2, 2))
        >>> voxelgrids[0, :, 0, :] = 0  # Set the front face to be zeros
        >>> output = extract_odms(voxelgrids)
        >>> output
        tensor([[[[2, 0],
                  [2, 0]],
        <BLANKLINE>
                 [[2, 0],
                  [2, 0]],
        <BLANKLINE>
                 [[0, 0],
                  [0, 0]],
        <BLANKLINE>
                 [[1, 1],
                  [1, 1]],
        <BLANKLINE>
                 [[2, 2],
                  [0, 0]],
        <BLANKLINE>
                 [[2, 2],
                  [0, 0]]],
        <BLANKLINE>
        <BLANKLINE>
                [[[0, 0],
                  [0, 0]],
        <BLANKLINE>
                 [[0, 0],
                  [0, 0]],
        <BLANKLINE>
                 [[0, 0],
                  [0, 0]],
        <BLANKLINE>
                 [[0, 0],
                  [0, 0]],
        <BLANKLINE>
                 [[0, 0],
                  [0, 0]],
        <BLANKLINE>
                 [[0, 0],
                  [0, 0]]]])
    """
    # Cast input to torch.bool to make it run faster.
    voxelgrids = voxelgrids.bool()
    device = voxelgrids.device
    dtype = voxelgrids.dtype  # NOTE(review): unused.
    dim = voxelgrids.shape[-1]
    batch_num = voxelgrids.shape[0]  # NOTE(review): unused.
    # Ranking trick: weight each voxel by its (1-based) position along the axis,
    # once ascending and once descending. The max of (occupancy * weight) along
    # the axis locates the filled voxel closest to each side; dim - max is the
    # depth seen from that side (dim when the column is empty, since max == 0).
    multiplier = torch.arange(1, dim + 1, device=device)
    reverse_multiplier = torch.arange(dim, 0, step=-1, device=device)
    # Channel 0 = negative direction, channel 1 = positive direction.
    full_multiplier = torch.cat([multiplier, reverse_multiplier], dim=0)
    # z_axis
    z_axis = voxelgrids.unsqueeze(1) * full_multiplier.view(1, 2, 1, 1, -1)
    z_axis_values, _ = torch.max(z_axis, dim=4)
    # y_axis
    y_axis = voxelgrids.unsqueeze(1) * full_multiplier.view(1, 2, 1, -1, 1)
    y_axis_values, _ = torch.max(y_axis, dim=3)
    # x_axis
    x_axis = voxelgrids.unsqueeze(1) * full_multiplier.view(1, 2, -1, 1, 1)
    x_axis_values, _ = torch.max(x_axis, dim=2)
    # Stack as (z_neg, z_pos, y_neg, y_pos, x_neg, x_pos) and convert rank -> depth.
    return dim - torch.cat([z_axis_values, y_axis_values, x_axis_values], dim=1)
4,710 | import torch
import torch.nn.functional as F
from scipy import ndimage
The provided code snippet includes necessary dependencies for implementing the `project_odms` function. Write a Python function `def project_odms(odms, voxelgrids=None, votes=1)` to solve the following problem:
r"""Projects orthographic depth map onto voxelgrids. .. Note:: If no voxelgrids is provided, we project onto a completely filled grids. Args: odms (torch.Tensor): Batched ODMs of shape (N, 6, dim, dim) from the 6 primary viewing angles. The face order is z_neg, z_pos, y_neg, y_pos, x_neg, x_pos, denoting the axis and direction we are looking at. voxelgrids (torch.Tensor): Binary voxelgrids onto which ODMs are projected. votes (int): int from range(0, 7). Votes needed to substract a voxel to 0. Returns: (torch.BoolTensor): Updated binary voxel grid. Example: >>> odms = torch.zeros((1, 6, 2, 2)) # empty odms >>> odms[0, 1, 1, 1] = 2 # Change z_pos surface >>> project_odms(odms) tensor([[[[ True, True], [ True, True]], <BLANKLINE> [[ True, True], [False, False]]]]) >>> project_odms(odms, votes=2) tensor([[[[True, True], [True, True]], <BLANKLINE> [[True, True], [True, True]]]])
Here is the function:
def project_odms(odms, voxelgrids=None, votes=1):
    r"""Projects orthographic depth map onto voxelgrids.

    .. Note::
        If no voxelgrids is provided, we project onto a completely filled grids.

    Args:
        odms (torch.Tensor): Batched ODMs of shape (N, 6, dim, dim) from the 6 primary viewing angles.
            The face order is z_neg, z_pos, y_neg, y_pos, x_neg, x_pos, denoting the axis and direction
            we are looking at.
        voxelgrids (torch.Tensor): Binary voxelgrids onto which ODMs are projected.
        votes (int): int from range(0, 7). Votes needed to subtract a voxel to 0.

    Returns:
        (torch.BoolTensor): Updated binary voxel grid.

    Raises:
        ValueError: if ``odms`` does not have 6 channels, or if ``voxelgrids``
            does not match the batch size / dimension of ``odms``.

    Example:
        >>> odms = torch.zeros((1, 6, 2, 2))  # empty odms
        >>> odms[0, 1, 1, 1] = 2  # Change z_pos surface
        >>> project_odms(odms)
        tensor([[[[ True,  True],
                  [ True,  True]],
        <BLANKLINE>
                 [[ True,  True],
                  [False, False]]]])
        >>> project_odms(odms, votes=2)
        tensor([[[[True, True],
                  [True, True]],
        <BLANKLINE>
                 [[True, True],
                  [True, True]]]])
    """
    # Check the second dimension of odms
    if odms.shape[1] != 6:
        raise ValueError(f"Expected odms' second dimension to be 6, "
                         f"but got {odms.shape[1]} instead.")
    device = odms.device
    dtype = odms.dtype  # NOTE(review): unused.
    batch_size = odms.shape[0]
    dim = odms.shape[-1]
    if voxelgrids is None:
        voxelgrids = torch.ones((batch_size, dim, dim, dim), dtype=torch.bool, device=device)
    else:
        voxel_batch = voxelgrids.shape[0]
        if batch_size != voxelgrids.shape[0]:
            raise ValueError(f"Expected voxelgrids and odms' batch size to be the same, "
                             f"but got {batch_size} for odms and {voxel_batch} for voxelgrid.")
        for i in voxelgrids.shape[1:]:
            if i != dim:
                raise ValueError(f"Expected voxelgrids and odms' dimension size to be the same, "
                                 f"but got {dim} for odms and {i} for voxelgrid.")
    # Convert negative-direction depths (channel 0 of each axis pair) into
    # threshold indices counted from the grid origin: depth d -> index dim - d.
    updated_odms = odms.clone()
    updated_odms = updated_odms.view(batch_size, 3, 2, dim, dim)
    updated_odms[:, :, 0] = dim - updated_odms[:, :, 0]
    updated_odms = updated_odms.view(batch_size, 6, dim, dim)
    base_idx = torch.arange(dim, device=device)
    pos0 = updated_odms[:, 0]
    pos1 = updated_odms[:, 1]
    pos2 = updated_odms[:, 2]
    pos3 = updated_odms[:, 3]
    pos4 = updated_odms[:, 4]
    pos5 = updated_odms[:, 5]
    # Each mask marks the voxels carved away when looking from one of the six
    # sides; all masks have shape (batch_size, dim, dim, dim).
    z_neg_mask = (base_idx.view(1, 1, 1, -1) >= pos0.unsqueeze(-1)).byte()
    z_pos_mask = (base_idx.view(1, 1, 1, -1) < pos1.unsqueeze(-1)).byte()
    y_neg_mask = (base_idx.view(1, 1, -1, 1) >= pos2.unsqueeze(-2)).byte()
    y_pos_mask = (base_idx.view(1, 1, -1, 1) < pos3.unsqueeze(-2)).byte()
    x_neg_mask = (base_idx.view(1, -1, 1, 1) >= pos4.unsqueeze(-3)).byte()
    x_pos_mask = (base_idx.view(1, -1, 1, 1) < pos5.unsqueeze(-3)).byte()
    # Each view contributes one vote; a voxel is emptied only when at least
    # `votes` views agree on carving it.
    sum_of_mask = z_neg_mask + z_pos_mask + y_neg_mask + y_pos_mask + x_neg_mask + x_pos_mask
    voxelgrids = (voxelgrids * votes - sum_of_mask) > 0
    return voxelgrids
4,711 | from abc import abstractmethod
from collections.abc import Sequence
from io import BytesIO
import math
import traceback
import warnings
from PIL import Image as PILImage
import torch
from ..render.camera import CameraExtrinsics
from ..ops.coords import spherical2cartesian, cartesian2spherical
def update_canvas(canvas, image):
    """Replace the content of ``canvas`` with ``image``.

    The uint8 image tensor is PNG-encoded in memory, wrapped in an image
    widget, and drawn stretched to the full canvas size.

    Args:
        canvas (Canvas): target canvas widget.
        image (torch.Tensor): uint8 image tensor; moved to CPU before encoding.
            # NOTE(review): layout is whatever PIL.Image.fromarray accepts,
            # presumably (H, W) or (H, W, C) — confirm at call sites.
    """
    # NOTE(review): ImageWidget, hold_canvas and Canvas are not imported in the
    # visible header; presumably provided by ipywidgets/ipycanvas elsewhere.
    assert isinstance(image, torch.Tensor) and image.dtype == torch.uint8, \
        "image must be a torch.Tensor of uint8 "
    assert isinstance(canvas, Canvas)
    f = BytesIO()
    # PNG is lossless; quality is accepted by PIL but has no visual effect here.
    PILImage.fromarray(image.cpu().numpy()).save(
        f, "PNG", quality=100)
    image = ImageWidget(value=f.getvalue())
    # hold_canvas batches the clear + draw into a single frontend update
    # to avoid flicker.
    with hold_canvas(canvas):
        canvas.clear_rect(0, 0, canvas.width, canvas.height)
        canvas.draw_image(image, 0, 0, canvas.width, canvas.height)
4,712 | from abc import abstractmethod
from collections.abc import Sequence
from io import BytesIO
import math
import traceback
import warnings
from PIL import Image as PILImage
import torch
from ..render.camera import CameraExtrinsics
from ..ops.coords import spherical2cartesian, cartesian2spherical
The provided code snippet includes necessary dependencies for implementing the `_print_item_pixel_info` function. Write a Python function `def _print_item_pixel_info(canvas, item, x, y)` to solve the following problem:
helper function to print info of items produced by render
Here is the function:
def _print_item_pixel_info(canvas, item, x, y):
    """helper function to print info of items produced by render

    For tensor items, prints the value at canvas pixel (x, y); if the tensor's
    spatial size differs from the canvas, the coordinates are scaled
    proportionally first. Non-tensor items are printed as-is.
    """
    if torch.is_tensor(item):
        # Only (H, W) or (H, W, C) tensors make sense here.
        assert len(item.shape) in [2, 3], f"item is of shape {item.shape}"
        item_height = item.shape[0]
        item_width = item.shape[1]
        if item_height == canvas.height and item_width == canvas.width:
            print(f"{item[y, x]}")
        else:
            # Map canvas coordinates onto the item's own resolution.
            scaled_x = int(x * item_width / canvas.width)
            scaled_y = int(y * item_height / canvas.height)
            print(f"{item[scaled_y, scaled_x]} (coords scaled to {scaled_x, scaled_y})")
    else:
        print(f"{item}")
4,713 | from abc import abstractmethod
from collections.abc import Sequence
from io import BytesIO
import math
import traceback
import warnings
from PIL import Image as PILImage
import torch
from ..render.camera import CameraExtrinsics
from ..ops.coords import spherical2cartesian, cartesian2spherical
def make_quaternion_rotation(angle: float, vec: torch.Tensor):
    r"""Build the quaternion for a rotation of ``angle`` radians around each axis.

    Quaternions use the (x, y, z, w) layout.

    Args:
        angle (float): angle of rotation, in radians.
        vec (torch.Tensor):
            rotation axes, of shape :math:`(\text{batch_size}, 3)`.

    Returns:
        (torch.Tensor): quaternions of shape :math:`(\text{batch_size}, 4)`.
    """
    s = math.sin(angle / 2)
    c = math.cos(angle / 2)
    # Vector part: axis scaled by sin(angle/2); scalar part: cos(angle/2).
    axis_part = vec[:, :3] * s
    scalar_part = torch.full((vec.shape[0], 1), c, dtype=vec.dtype, device=vec.device)
    return torch.cat([axis_part, scalar_part], dim=-1)
def conjugate(quat: torch.Tensor):
    r"""Return the conjugate of a quaternion (vector part negated).

    Args:
        quat (torch.Tensor): quaternions of shape :math:`(\text{batch_size}, 4)`,
            laid out as (x, y, z, w).

    Returns:
        (torch.Tensor): the conjugates, of shape :math:`(\text{batch_size}, 4)`.
    """
    return torch.cat([-quat[:, :3], quat[:, 3:4]], dim=-1)
def mulqv(q: torch.Tensor, v: torch.Tensor):
    r"""Return the product of a quaternion with a 3D vector.

    The vector is treated as a pure quaternion (zero scalar part).
    Supports broadcasting over the batch dimension.

    Args:
        q (torch.Tensor): quaternions of shape :math:`(\text{batch_size}, 4)`,
            laid out as (x, y, z, w).
        v (torch.Tensor): vectors of shape :math:`(\text{batch_size}, 3)`.

    Return:
        (torch.Tensor): quaternions of shape :math:`(\text{batch_size}, 4)`.
    """
    qx, qy, qz, qw = q[:, 0], q[:, 1], q[:, 2], q[:, 3]
    vx, vy, vz = v[:, 0], v[:, 1], v[:, 2]
    # Vector part: qw * v + cross(q_xyz, v); scalar part: -dot(q_xyz, v).
    return torch.stack([
        qw * vx + qy * vz - qz * vy,
        qw * vy + qz * vx - qx * vz,
        qw * vz + qx * vy - qy * vx,
        -(qx * vx + qy * vy + qz * vz),
    ], dim=-1)
def mulqq(l: torch.Tensor, r: torch.Tensor):
    r"""Return the (Hamilton) product of two quaternions.

    Supports broadcasting over the batch dimension.

    Args:
        l (torch.Tensor): left quaternions, of shape :math:`(\text{batch_size}, 4)`,
            laid out as (x, y, z, w).
        r (torch.Tensor): right quaternions, of shape :math:`(\text{batch_size}, 4)`.

    Returns:
        (torch.Tensor): product quaternions, of shape :math:`(\text{batch_size}, 4)`.
    """
    output = torch.stack([
        l[:, 0] * r[:, 3] + l[:, 3] * r[:, 0] + l[:, 1] * r[:, 2] - l[:, 2] * r[:, 1],
        l[:, 1] * r[:, 3] + l[:, 3] * r[:, 1] + l[:, 2] * r[:, 0] - l[:, 0] * r[:, 2],
        l[:, 2] * r[:, 3] + l[:, 3] * r[:, 2] + l[:, 0] * r[:, 1] - l[:, 1] * r[:, 0],
        # Scalar part: lw*rw - lx*rx - ly*ry - lz*rz. Fixed: the last term
        # previously read l[:, 2] * l[:, 2] (l.z squared) instead of l.z * r.z.
        l[:, 3] * r[:, 3] - l[:, 0] * r[:, 0] - l[:, 1] * r[:, 1] - l[:, 2] * r[:, 2],
    ], dim=-1)
    return output
The provided code snippet includes necessary dependencies for implementing the `rotate_around_axis` function. Write a Python function `def rotate_around_axis(point: torch.Tensor, angle: float, axis: torch.Tensor)` to solve the following problem:
r"""Compute the rotation of a point around an axis. Args: point (torch.Tensor): The point to be rotated, of shape :math:`(\text{batch_size}, 3)`. angle (float): The angle of rotation axis (torch.Tensor): The axis around which the point is revolving, of shape :math:`(\text{batch_size}, 3)`. Returns: (torch.Tensor): The rotated point, of shape :math:`(\text{batch_size}, 3)`.
Here is the function:
def rotate_around_axis(point: torch.Tensor, angle: float, axis: torch.Tensor):
    r"""Compute the rotation of a point around an axis.

    Args:
        point (torch.Tensor): The point to be rotated, of shape :math:`(\text{batch_size}, 3)`.
        angle (float): The angle of rotation
        axis (torch.Tensor): The axis around which the point is revolving,
            of shape :math:`(\text{batch_size}, 3)`.
            # NOTE(review): presumably expected to be unit-length (the inverse
            # rotation is taken as the conjugate) — confirm callers normalize.

    Returns:
        (torch.Tensor): The rotated point, of shape :math:`(\text{batch_size}, 3)`.
    """
    # Quaternion "sandwich" product: p' = q * p * conj(q).
    rot_q = make_quaternion_rotation(angle, axis)
    conj_q = conjugate(rot_q)
    w = mulqq(mulqv(rot_q, point), conj_q)
    # Drop the scalar component; only the (x, y, z) part is the rotated point.
    return w[:, :-1]
4,714 | import glob
import logging
import os
import re
import posixpath
import warnings
from kaolin import io
The provided code snippet includes necessary dependencies for implementing the `_get_timestamps` function. Write a Python function `def _get_timestamps(filenames)` to solve the following problem:
Returns the timestamps of all filenames as a dictionary keyed by filename. Will throw error if files do not exits.
Here is the function:
def _get_timestamps(filenames):
"""
Returns the timestamps of all filenames as a dictionary keyed by
filename. Will throw error if files do not exits.
"""
res = {}
for f in filenames:
res[f] = os.stat(f).st_mtime_ns
return res | Returns the timestamps of all filenames as a dictionary keyed by filename. Will throw error if files do not exits. |
4,715 | from abc import abstractmethod
from collections.abc import Callable, Mapping
import logging
import inspect
import os
from pathlib import Path
from PIL import Image
import PIL
import posixpath
import torch
import warnings
from .usd.utils import create_stage
def _get_shader_parameters(shader, time):
    # Get shader parameters
    """Collect a USD shader's input parameters at a given time code.

    Returns a dict keyed by input base name; each entry is a dict that, when a
    value is found, carries 'value', 'type' and 'docs' keys (entries for inputs
    without an authored value remain empty dicts).
    """
    # NOTE(review): UsdShade is not imported in the visible header; presumably
    # ``from pxr import UsdShade`` appears elsewhere in the file.
    params = {}
    inputs = shader.GetInputs()
    for i in inputs:
        name = i.GetBaseName()
        params.setdefault(i.GetBaseName(), {})
        if UsdShade.ConnectableAPI.HasConnectedSource(i):
            # Follow the chain of connected sources (depth-first via the stack)
            # until reaching inputs with an authored value at ``time``.
            connected_source = UsdShade.ConnectableAPI.GetConnectedSource(i)
            connected_inputs = connected_source[0].GetInputs()
            while connected_inputs:
                connected_input = connected_inputs.pop()
                if UsdShade.ConnectableAPI.HasConnectedSource(connected_input):
                    new_inputs = UsdShade.ConnectableAPI.GetConnectedSource(connected_input)[0].GetInputs()
                    connected_inputs.extend(new_inputs)
                elif connected_input.Get(time=time) is not None:
                    params[name].setdefault(connected_input.GetBaseName(), {}).update({
                        'value': connected_input.Get(time=time),
                        'type': connected_input.GetTypeName().type,
                        'docs': connected_input.GetDocumentation(),
                    })
        else:
            # Direct (unconnected) input: read the value off the input itself.
            params[name].update({
                'value': i.Get(time=time),
                'type': i.GetTypeName().type,
                'docs': i.GetDocumentation(),
            })
    return params
4,716 | from abc import abstractmethod
from collections.abc import Callable, Mapping
import logging
import inspect
import os
from pathlib import Path
from PIL import Image
import PIL
import posixpath
import torch
import warnings
from .usd.utils import create_stage
def _to_1d_tensor(data):
if isinstance(data, torch.Tensor):
return data.reshape(-1)
elif data is None:
return None
else:
return torch.tensor(data).reshape(-1) | null |
4,717 | import torch
import warnings
The provided code snippet includes necessary dependencies for implementing the `heterogeneous_mesh_handler_skip` function. Write a Python function `def heterogeneous_mesh_handler_skip(*args, **kwargs)` to solve the following problem:
r"""Skip heterogeneous meshes.
Here is the function:
def heterogeneous_mesh_handler_skip(*args, **kwargs):
    r"""Handler that skips heterogeneous meshes entirely.

    Accepts and ignores any arguments; always returns ``None``.
    """
    return None
4,718 | import torch
import warnings
def mesh_handler_naive_triangulate(vertices, face_vertex_counts, *features, face_assignments=None):
    r"""Triangulate a list of faces containing polygons of varying number of edges using naive fan
    triangulation.

    Args:
        vertices (torch.FloatTensor): Vertices with shape ``(N, 3)``.
        face_vertex_counts (torch.LongTensor): Number of vertices for each face with shape ``(M)``
            for ``M`` faces.
        features: Variable length features that need to be handled as 1D Tensor ``(num_face_vertices)``,
            with one feature per face vertex. For example, faces as a tensor
            ``[face0_vertex0_id, face0_vertex1_id, face0_vertex2_id, face1_vertex0_id...]`` or as UV indices:
            ``[face0_vertex0_uv_idx, face0_vertex1_uv_idx, ...]``.
        face_assignments (dict): mapping from key to torch.LongTensor, where each value of the tensor corresponds
            to a face index. These indices will be expanded and rewritten to include triangulated face indices.
            Two modes are supported for face_assignments:
            1) if 1D tensor, each face idx will be replaced with indices of faces it was split into
            2) if 2D tensor, expects shape (K, 2), where [x, i] will be replaced with index of the first face
            [x, i] was split into, effectively supporting tensors containing (start,end].

    Returns:
        (tuple):
            Homogeneous list of attributes with exactly same type and number as function inputs.

            - **vertices** (torch.Tensor): unchanged `vertices` of shape ``(N, 3)``
            - **face_vertex_counts** (torch.LongTensor): tensor of length ``new_num_faces`` filled with 3.
            - **features** (torch.Tensor): of same type as input and shape ``(new_num_faces, 3)``
            - **face_assignments** (dict): returned only if face_assignments is set, with each value containing
              new face indices equivalent to the prior assignments (see two modes for ``face_assignments``)
    """
    def _homogenize(attr, face_vertex_counts):
        # Fan-triangulate one per-face-vertex feature stream. Popping index 1
        # after emitting the first 3 entries keeps entry 0 as the fan apex and
        # slides the next polygon edge into view, yielding triangles
        # (0, 1, 2), (0, 2, 3), ... for each polygon.
        if attr is not None:
            attr = attr if isinstance(attr, list) else attr.tolist()
            idx = 0
            new_attr = []
            for face_vertex_count in face_vertex_counts:
                attr_face = attr[idx:(idx + face_vertex_count)]
                idx += face_vertex_count
                while len(attr_face) >= 3:
                    new_attr.append(attr_face[:3])
                    attr_face.pop(1)
            return torch.tensor(new_attr)
        else:
            return None

    def _homogenize_counts(face_vertex_counts, compute_face_id_mappings=False):
        # Mirror _homogenize's iteration to count triangles per polygon and,
        # optionally, record which new triangle indices each old face became.
        mappings = []  # mappings[i] = [new face ids that i was split into]
        num_faces = 0
        for face_vertex_count in face_vertex_counts:
            attr_face = list(range(0, face_vertex_count))
            new_indices = []
            while len(attr_face) >= 3:
                if compute_face_id_mappings:
                    new_indices.append(num_faces)
                num_faces += 1
                attr_face.pop(1)
            if compute_face_id_mappings:
                mappings.append(new_indices)
        return torch.full((num_faces,), 3, dtype=torch.long), mappings

    new_attrs = [_homogenize(a, face_vertex_counts) for a in features]
    # Only compute face-id mappings when there are assignments to rewrite.
    new_counts, face_idx_mappings = _homogenize_counts(face_vertex_counts,
                                                       face_assignments is not None and len(face_assignments) > 0)
    if face_assignments is None:
        # Note: for python > 3.8 can do "return vertices, new_counts, *new_attrs"
        return tuple([vertices, new_counts] + new_attrs)
    # TODO: this is inefficient and could be improved
    new_assignments = {}
    for k, v in face_assignments.items():
        if len(v.shape) == 1:
            # Mode 1: expand each old face index to all triangles it produced.
            new_idx = []
            for old_idx in v:
                new_idx.extend(face_idx_mappings[old_idx])
            new_idx = torch.LongTensor(new_idx)
        else:
            # We support this (start, end] mode for efficiency of OBJ readers
            assert len(v.shape) == 2 and v.shape[1] == 2, 'Expects shape (K,) or (K, 2) for face_assignments'
            new_idx = torch.zeros_like(v)
            for row in range(v.shape[0]):
                old_idx_start = v[row, 0]
                old_idx_end = v[row, 1] - 1
                new_idx[row, 0] = face_idx_mappings[old_idx_start][0]
                new_idx[row, 1] = face_idx_mappings[old_idx_end][-1] + 1
        new_assignments[k] = new_idx
    # Note: for python > 3.8 can do "return vertices, new_counts, *new_attrs, new_assignments"
    return tuple([vertices, new_counts] + new_attrs + [new_assignments])
The provided code snippet includes necessary dependencies for implementing the `heterogeneous_mesh_handler_naive_homogenize` function. Write a Python function `def heterogeneous_mesh_handler_naive_homogenize(*args, **kwargs)` to solve the following problem:
r"""Same as :func:`mesh_handler_naive_triangulate`, see docs. .. deprecated:: 0.14.0
Here is the function:
def heterogeneous_mesh_handler_naive_homogenize(*args, **kwargs):
    r"""Same as :func:`mesh_handler_naive_triangulate`, see docs.

    .. deprecated:: 0.14.0
    """
    # Deprecated alias kept for backward compatibility; warns then delegates.
    warnings.warn("heterogeneous_mesh_handler_naive_homogenize is deprecated, "
                  "please use kaolin.io.utils.mesh_handler_naive_triangulate instead",
                  DeprecationWarning, stacklevel=2)
    return mesh_handler_naive_triangulate(*args, **kwargs)
4,719 | from pygltflib import GLTF2, Scene, ImageFormat, BufferFormat, \
SHORT, UNSIGNED_SHORT, UNSIGNED_INT, \
BYTE, UNSIGNED_BYTE, FLOAT, SCALAR, VEC2, VEC3, VEC4
import os
import copy
import warnings
import time
import torch
import numpy as np
from PIL import Image
import base64
from io import BytesIO
from .materials import PBRMaterial
from ..rep import SurfaceMesh
def _make_rotation_mat(quat):
x2 = quat[0] + quat[0]
y2 = quat[1] + quat[1]
z2 = quat[2] + quat[2]
xx2 = quat[0] * x2
xy2 = quat[0] * y2
xz2 = quat[0] * z2
yy2 = quat[1] * y2
yz2 = quat[1] * z2
zz2 = quat[2] * z2
sx2 = quat[3] * x2
sy2 = quat[3] * y2
sz2 = quat[3] * z2
return torch.tensor([
[1 - (yy2 + zz2), xy2 + sz2, xz2 - sy2, 0],
[xy2 - sz2, 1 - (xx2 + zz2), yz2 + sx2, 0],
[xz2 + sy2, yz2 - sx2, 1 - (xx2 + yy2), 0],
[0, 0, 0, 1]
], dtype=torch.double)
def _get_materials(gltf):
    """Get all materials from a pygltflib.GLTF2.

    Arguments:
        gltf (pygltflib.GLTF2): the file to import from.

    Returns:
        (list of :class:`PBRMaterial`): The materials, in the same order as
        ``gltf.materials`` so that material indices stay valid.
    """
    # Make every texture reachable through a buffer view instead of external
    # files / data URIs.  NOTE: this mutates `gltf` in place.
    gltf.convert_images(ImageFormat.BUFFERVIEW)
    materials = []
    for mat in gltf.materials:
        d = {}
        # TODO(cfujitsang): add occlusion map
        # Prioritize the Khronos extension for the specular-glossiness workflow.
        # Some materials contain both metallic-roughness and specular-glossiness,
        # but specular-glossiness can contain more information.
        if 'KHR_materials_pbrSpecularGlossiness' in mat.extensions:
            d.update(_load_specular_workflow_material(
                gltf, mat.extensions['KHR_materials_pbrSpecularGlossiness']))
        elif mat.pbrMetallicRoughness is not None:
            d.update(_load_metallic_workflow_material(
                gltf, mat.pbrMetallicRoughness))
        if mat.normalTexture is not None:
            # Decode the normal map from [0, 255] values to [-1, 1] vectors and
            # apply the glTF `scale` factor to the X/Y components only
            # (assumes _load_img returns an HxWx3 tensor of byte values — TODO confirm).
            d['normals_texture'] = (
                _load_img(gltf, mat.normalTexture.index, False) * (2. / 255.) - 1.
            ) * torch.tensor(
                [mat.normalTexture.scale, mat.normalTexture.scale, 1.]
            ).reshape(1, 1, 3)
        pbr_mat = PBRMaterial(**d)
        materials.append(pbr_mat)
    return materials
def _join_meshes(meshes):
    """Concatenate several :class:`SurfaceMesh` objects into a single mesh.

    Vertex (and UV) indices of each mesh are shifted by the number of vertices
    (UVs) accumulated so far, so faces keep pointing at their own data.  When
    only some meshes carry tangents / UVs / normals, the missing attributes are
    padded (zero tangents, empty UVs with -1 face UV indices) so the
    concatenated tensors stay consistent.

    Args:
        meshes (sequence of SurfaceMesh): meshes to merge.

    Returns:
        (SurfaceMesh): the merged mesh.
    """
    has_tangents = False
    has_uvs = False
    has_normals = False
    # First pass: find which optional attributes exist on at least one mesh,
    # to know whether placeholders must be generated for the others.
    for mesh in meshes:
        if mesh.has_attribute('vertex_tangents'):
            has_tangents = True
        if mesh.has_attribute('uvs'):
            has_uvs = True
        if mesh.has_attribute('normals'):
            has_normals = True
    cur_num_vertices = 0
    cur_num_uvs = 0
    faces = []
    vertices = []
    face_uvs_idx = [] if has_uvs else None
    uvs = [] if has_uvs else None
    tangents = [] if has_tangents else None
    normals = [] if has_normals else None
    material_assignments = []
    for mesh_idx, mesh in enumerate(meshes):
        # Shift face indices *before* adding this mesh's vertex count.
        faces.append(mesh.faces + cur_num_vertices)
        cur_num_vertices += mesh.vertices.shape[0]
        vertices.append(mesh.vertices)
        if has_uvs:
            _face_uvs_idx = mesh.face_uvs_idx
            if _face_uvs_idx is None:
                # -1 is the "no UV" sentinel for meshes without UVs.
                _face_uvs_idx = torch.full_like(mesh.faces, -1)
            else:
                # Fix: only shift valid (non-negative) entries.  The previous
                # implementation also shifted the -1 sentinel by `cur_num_uvs`,
                # which made it alias a valid UV of an earlier mesh.
                _face_uvs_idx = torch.where(
                    _face_uvs_idx >= 0,
                    _face_uvs_idx + cur_num_uvs,
                    _face_uvs_idx)
            face_uvs_idx.append(_face_uvs_idx)
            _uvs = mesh.uvs
            if _uvs is None:
                _uvs = torch.empty((0, 2))
            cur_num_uvs += _uvs.shape[0]
            uvs.append(_uvs)
        if has_tangents:
            _tangents = mesh.vertex_tangents
            if _tangents is None:
                _tangents = torch.zeros_like(mesh.vertices)
            tangents.append(_tangents)
        if has_normals:
            normals.append(mesh.vertex_normals)
        material_assignments.append(mesh.material_assignments)
    faces = torch.cat(faces, dim=0)
    vertices = torch.cat(vertices, dim=0)
    if has_tangents:
        tangents = torch.cat(tangents, dim=0)
    if has_normals:
        normals = torch.cat(normals, dim=0)
    if has_uvs:
        face_uvs_idx = torch.cat(face_uvs_idx, dim=0)
        uvs = torch.cat(uvs, dim=0)
    material_assignments = torch.cat(material_assignments, dim=0)
    return SurfaceMesh(
        faces=faces, vertices=vertices, vertex_tangents=tangents,
        face_uvs_idx=face_uvs_idx, uvs=uvs, vertex_normals=normals,
        material_assignments=material_assignments
    )
def _get_meshes(gltf):
    """Get all meshes from a pygltflib.GLTF2.

    Arguments:
        gltf (pygltflib.GLTF2): the file to import from.

    Returns:
        (list of :class:`SurfaceMesh` or None): One entry per prim in
        ``gltf.meshes`` (``None`` when a prim has no triangle primitive),
        keeping list indices aligned with the glTF file.
    """
    meshes = []
    for mesh_idx, mesh in enumerate(gltf.meshes):
        sub_meshes = []
        # Stays True until at least one triangle primitive is found.
        skip_mesh = True
        for j, primitive in enumerate(mesh.primitives):
            # glTF mode 4 is TRIANGLES; other topologies (points, lines,
            # strips, fans) get an empty face list.
            if primitive.mode != 4:
                warnings.warn(f"mode {primitive.mode} is currently not supported",
                              UserWarning)
                faces = torch.empty((0, 3), dtype=torch.long)
            else:
                skip_mesh = False
                faces = _get_tensor(gltf, primitive.indices).reshape(-1, 3).long()
            # -1 marks "no material" for every face of this primitive.
            material_idx = primitive.material if primitive.material is not None else -1
            material_assignments = torch.full(
                (faces.shape[0],), material_idx, dtype=torch.short)
            vertices = _get_tensor(gltf, primitive.attributes.POSITION)
            if primitive.attributes.COLOR_0 is not None:
                warnings.warn(
                    "gltf loader don't support vertex color yet. " +
                    "Please make a github request if needed.",
                    UserWarning
                )
            if primitive.attributes.TANGENT is not None:
                # glTF tangents are (x, y, z, w); w encodes handedness
                # (presumably +/-1 per the spec — TODO confirm) and is baked
                # into the sign of the 3D vector here.
                tangents = _get_tensor(gltf, primitive.attributes.TANGENT)
                tangents = tangents[..., :3] * tangents[..., -1:]
            else:
                tangents = None
            if primitive.attributes.TEXCOORD_0 is not None:
                # UVs are indexed per-vertex in glTF, so the face UV indices
                # coincide with the face vertex indices.
                uvs = _get_tensor(gltf, primitive.attributes.TEXCOORD_0)
                face_uvs_idx = faces
            else:
                uvs = None
                face_uvs_idx = None
            if primitive.attributes.NORMAL is not None:
                normals = _get_tensor(gltf, primitive.attributes.NORMAL)
            else:
                normals = None
            if primitive.attributes.JOINTS_0 is not None:
                warnings.warn(
                    "gltf loader don't support vertex skinning yet. " +
                    "This mesh might appear in canonical pose. " +
                    "Please make a github request if needed.",
                    UserWarning
                )
            sub_meshes.append(SurfaceMesh(
                faces=faces, vertices=vertices, vertex_tangents=tangents,
                face_uvs_idx=face_uvs_idx, uvs=uvs, vertex_normals=normals,
                material_assignments=material_assignments
            ))
        if skip_mesh:
            meshes.append(None)
        else:
            meshes.append(sub_meshes)
    # Flatten each prim's primitives into a single mesh, preserving the None
    # placeholders so mesh indices still match the glTF file.
    output = []
    for m_group in meshes:
        if m_group is None:
            output.append(None)
        else:
            output.append(_join_meshes(m_group))
    return output
The provided code snippet includes necessary dependencies for implementing the `import_mesh` function. Write a Python function `def import_mesh(path)` to solve the following problem:
Import mesh from a gltf (.glb or .gltf) file. Arguments: path (str): path to the gltf file. Returns: (kaolin.rep.SurfaceMesh): The imported mesh.
Here is the function:
def import_mesh(path):
    """Import mesh from a gltf (.glb or .gltf) file.

    All meshes of the default scene are transformed by their node hierarchy
    and joined into a single mesh.

    Arguments:
        path (str): path to the gltf file.

    Returns:
        (kaolin.rep.SurfaceMesh): The imported mesh.
    """
    gltf = GLTF2.load(path)
    gltf.convert_buffers(BufferFormat.BINARYBLOB)
    materials = _get_materials(gltf)
    meshes = _get_meshes(gltf)
    for sampler in gltf.samplers:
        # 10497 is the glTF constant for REPEAT wrapping.
        if sampler.wrapS != 10497 or sampler.wrapT != 10497:
            warnings.warn(
                "wrapping mode is not support yet. Please make a github request if needed.",
                UserWarning
            )
    default_scene = gltf.scenes[gltf.scene]
    scene_meshes = []
    has_tangents = False
    has_uvs = False
    has_normals = False
    for mesh in meshes:
        # Fix: _get_meshes() returns None for prims without any triangle
        # primitive; the previous scan crashed on those entries with an
        # AttributeError (the traversal below already guards against None).
        if mesh is None:
            continue
        if mesh.has_attribute('vertex_tangents'):
            has_tangents = True
        if mesh.has_attribute('uvs'):
            has_uvs = True
        if mesh.has_attribute('normals'):
            has_normals = True

    def _traverse_scene(node_idx, cur_transform):
        # Accumulate this node's local transform into `cur_transform` and
        # recurse into children, collecting transformed meshes in
        # `scene_meshes`.  NOTE(review): `scale` overwrites `matrix` when both
        # are authored — the glTF spec makes them mutually exclusive, but this
        # is worth confirming for malformed files.
        node = gltf.nodes[node_idx]
        if node.matrix is not None:
            node_transform = torch.tensor(node.matrix, dtype=torch.double).reshape(4, 4)
        else:
            node_transform = None
        if node.scale is not None:
            node_transform = torch.tensor([
                [node.scale[0], 0., 0., 0.],
                [0., node.scale[1], 0., 0.],
                [0., 0., node.scale[2], 0.],
                [0., 0., 0., 1.]
            ], dtype=torch.double)
        if node.rotation is not None:
            rotation_mat = _make_rotation_mat(node.rotation)
            if node_transform is None:
                node_transform = rotation_mat
            else:
                node_transform = node_transform @ rotation_mat
        if node.translation is not None:
            # Row-vector convention: translation lives in the bottom row.
            translation_mat = torch.tensor([
                [1., 0., 0., 0.],
                [0., 1., 0., 0.],
                [0., 0., 1., 0.],
                [node.translation[0], node.translation[1], node.translation[2], 1.]
            ], dtype=torch.double)
            if node_transform is None:
                node_transform = translation_mat
            else:
                node_transform = node_transform @ translation_mat
        if node_transform is not None:
            cur_transform = node_transform @ cur_transform
        if node.mesh is not None:
            # Shallow copy: per-instance transformed attributes are reassigned
            # below without touching the shared original.
            mesh = copy.copy(meshes[node.mesh])
            if mesh is not None:
                # Transform vertices as homogeneous row vectors.
                vertices = torch.nn.functional.pad(
                    mesh.vertices, (0, 1), value=1., mode='constant')
                vertices = vertices @ cur_transform.float()
                mesh.vertices = vertices[..., :3]
                if has_tangents:
                    tangents = mesh.vertex_tangents @ cur_transform[:3, :3].float()
                    mesh.vertex_tangents = torch.nn.functional.normalize(
                        tangents, dim=-1)
                if has_normals:
                    # Normals transform with the inverse-transpose of the linear
                    # part so they stay perpendicular under non-uniform scale.
                    inv_cur_transform = torch.linalg.inv(cur_transform[:3, :3]).T.float()
                    normals = mesh.vertex_normals @ inv_cur_transform
                    mesh.vertex_normals = torch.nn.functional.normalize(
                        normals, dim=-1)
                scene_meshes.append(mesh)
        for next_node_idx in node.children:
            _traverse_scene(next_node_idx, cur_transform)

    for node_idx in default_scene.nodes:
        _traverse_scene(node_idx, torch.eye(4, dtype=torch.double))
    outputs = _join_meshes(scene_meshes)
    outputs.materials = materials
    return outputs
4,720 | from pygltflib import GLTF2, Scene, ImageFormat, BufferFormat, \
SHORT, UNSIGNED_SHORT, UNSIGNED_INT, \
BYTE, UNSIGNED_BYTE, FLOAT, SCALAR, VEC2, VEC3, VEC4
import os
import copy
import warnings
import time
import torch
import numpy as np
from PIL import Image
import base64
from io import BytesIO
from .materials import PBRMaterial
from ..rep import SurfaceMesh
def _get_materials(gltf):
    """Get all materials from a pygltflib.GLTF2.

    Arguments:
        gltf (pygltflib.GLTF2): the file to import from.

    Returns:
        (list of :class:`PBRMaterial`): The materials, in the same order as
        ``gltf.materials`` so that material indices stay valid.
    """
    # Make every texture reachable through a buffer view instead of external
    # files / data URIs.  NOTE: this mutates `gltf` in place.
    gltf.convert_images(ImageFormat.BUFFERVIEW)
    materials = []
    for mat in gltf.materials:
        d = {}
        # TODO(cfujitsang): add occlusion map
        # Prioritize the Khronos extension for the specular-glossiness workflow.
        # Some materials contain both metallic-roughness and specular-glossiness,
        # but specular-glossiness can contain more information.
        if 'KHR_materials_pbrSpecularGlossiness' in mat.extensions:
            d.update(_load_specular_workflow_material(
                gltf, mat.extensions['KHR_materials_pbrSpecularGlossiness']))
        elif mat.pbrMetallicRoughness is not None:
            d.update(_load_metallic_workflow_material(
                gltf, mat.pbrMetallicRoughness))
        if mat.normalTexture is not None:
            # Decode the normal map from [0, 255] values to [-1, 1] vectors and
            # apply the glTF `scale` factor to the X/Y components only
            # (assumes _load_img returns an HxWx3 tensor of byte values — TODO confirm).
            d['normals_texture'] = (
                _load_img(gltf, mat.normalTexture.index, False) * (2. / 255.) - 1.
            ) * torch.tensor(
                [mat.normalTexture.scale, mat.normalTexture.scale, 1.]
            ).reshape(1, 1, 3)
        pbr_mat = PBRMaterial(**d)
        materials.append(pbr_mat)
    return materials
def _get_meshes(gltf):
    """Get all meshes from a pygltflib.GLTF2.

    Arguments:
        gltf (pygltflib.GLTF2): the file to import from.

    Returns:
        (list of :class:`SurfaceMesh` or None): One entry per prim in
        ``gltf.meshes`` (``None`` when a prim has no triangle primitive),
        keeping list indices aligned with the glTF file.
    """
    meshes = []
    for mesh_idx, mesh in enumerate(gltf.meshes):
        sub_meshes = []
        # Stays True until at least one triangle primitive is found.
        skip_mesh = True
        for j, primitive in enumerate(mesh.primitives):
            # glTF mode 4 is TRIANGLES; other topologies (points, lines,
            # strips, fans) get an empty face list.
            if primitive.mode != 4:
                warnings.warn(f"mode {primitive.mode} is currently not supported",
                              UserWarning)
                faces = torch.empty((0, 3), dtype=torch.long)
            else:
                skip_mesh = False
                faces = _get_tensor(gltf, primitive.indices).reshape(-1, 3).long()
            # -1 marks "no material" for every face of this primitive.
            material_idx = primitive.material if primitive.material is not None else -1
            material_assignments = torch.full(
                (faces.shape[0],), material_idx, dtype=torch.short)
            vertices = _get_tensor(gltf, primitive.attributes.POSITION)
            if primitive.attributes.COLOR_0 is not None:
                warnings.warn(
                    "gltf loader don't support vertex color yet. " +
                    "Please make a github request if needed.",
                    UserWarning
                )
            if primitive.attributes.TANGENT is not None:
                # glTF tangents are (x, y, z, w); w encodes handedness
                # (presumably +/-1 per the spec — TODO confirm) and is baked
                # into the sign of the 3D vector here.
                tangents = _get_tensor(gltf, primitive.attributes.TANGENT)
                tangents = tangents[..., :3] * tangents[..., -1:]
            else:
                tangents = None
            if primitive.attributes.TEXCOORD_0 is not None:
                # UVs are indexed per-vertex in glTF, so the face UV indices
                # coincide with the face vertex indices.
                uvs = _get_tensor(gltf, primitive.attributes.TEXCOORD_0)
                face_uvs_idx = faces
            else:
                uvs = None
                face_uvs_idx = None
            if primitive.attributes.NORMAL is not None:
                normals = _get_tensor(gltf, primitive.attributes.NORMAL)
            else:
                normals = None
            if primitive.attributes.JOINTS_0 is not None:
                warnings.warn(
                    "gltf loader don't support vertex skinning yet. " +
                    "This mesh might appear in canonical pose. " +
                    "Please make a github request if needed.",
                    UserWarning
                )
            sub_meshes.append(SurfaceMesh(
                faces=faces, vertices=vertices, vertex_tangents=tangents,
                face_uvs_idx=face_uvs_idx, uvs=uvs, vertex_normals=normals,
                material_assignments=material_assignments
            ))
        if skip_mesh:
            meshes.append(None)
        else:
            meshes.append(sub_meshes)
    # Flatten each prim's primitives into a single mesh, preserving the None
    # placeholders so mesh indices still match the glTF file.
    output = []
    for m_group in meshes:
        if m_group is None:
            output.append(None)
        else:
            output.append(_join_meshes(m_group))
    return output
The provided code snippet includes necessary dependencies for implementing the `import_meshes` function. Write a Python function `def import_meshes(path)` to solve the following problem:
Import meshes from a gltf (.glb or .gltf) file without them being composed in a scene. Arguments: path (str): path to the gltf file. Returns: (list of kaolin.rep.SurfaceMesh): The imported meshes.
Here is the function:
def import_meshes(path):
    """Import meshes from a gltf (.glb or .gltf) file without them being composed in a scene.

    Arguments:
        path (str): path to the gltf file.

    Returns:
        (list of kaolin.rep.SurfaceMesh): The imported meshes.
    """
    gltf = GLTF2.load(path)
    gltf.convert_buffers(BufferFormat.BINARYBLOB)
    materials = _get_materials(gltf)
    meshes = _get_meshes(gltf)
    for m in meshes:
        # Fix: _get_meshes() may return None entries (prims without any
        # triangle primitive); the previous loop crashed on them with an
        # AttributeError.  They are left as-is in the returned list.
        if m is None:
            continue
        # Keep only the materials actually referenced by this mesh and remap
        # the per-face assignments to indices into that reduced list.
        # NOTE(review): a -1 ("no material") assignment selects materials[-1]
        # here, i.e. the last referenced material — confirm this is intended.
        global_assignments, local_assignments = torch.unique(m.material_assignments, return_inverse=True)
        m.materials = [materials[idx] for idx in global_assignments]
        m.material_assignments = local_assignments
    return meshes
4,721 | from collections import namedtuple
import torch
return_type = namedtuple('return_type',
['vertices', 'faces', 'face_colors'])
def _is_void(splitted_str):
return len(splitted_str) == 0 or splitted_str[0].startswith('#')
The provided code snippet includes necessary dependencies for implementing the `import_mesh` function. Write a Python function `def import_mesh(path, with_face_colors=False)` to solve the following problem:
r"""Load data from an off file as a single mesh. Args: path (str): path to the obj file (with extension). with_face_colors (bool): if True, load face colors. Default: False. Returns: (off.return_type): nametuple of: - **vertices** (torch.FloatTensor): of shape :math:`(\text{num_vertices}, 3)`. - **faces** (torch.LongTensor): of shape :math:`(\text{num_faces}, \text{face_size})`. - **face_colors** (torch.LongTensor): in the range :math:`[0, 255]`, of shape :math:`(\text{num_faces}, 3)`.
Here is the function:
def import_mesh(path, with_face_colors=False):
    r"""Load data from an off file as a single mesh.

    Args:
        path (str): path to the off file (with extension).
        with_face_colors (bool): if True, load face colors. Default: False.

    Returns:
        (off.return_type):
            nametuple of:

            - **vertices** (torch.FloatTensor): of shape :math:`(\text{num_vertices}, 3)`.
            - **faces** (torch.LongTensor): of shape :math:`(\text{num_faces}, \text{face_size})`.
            - **face_colors** (torch.LongTensor):
              in the range :math:`[0, 255]`, of shape :math:`(\text{num_faces}, 3)`.
    """
    vertices = []
    # `with` guarantees the handle is closed even when parsing raises
    # (the previous version leaked the file on malformed input).
    with open(path, 'r', encoding='utf-8') as f:
        # Get metadata (number of vertices / faces (/ edges))
        for line in f:
            data = line.split()
            if _is_void(data):
                continue
            if data[0].startswith('OFF'):
                # ModelNet40 has some headers fused as "OFF<num_vertices> <num_faces>"
                if len(data[0][3:]) > 0:
                    num_vertices = int(data[0][3:])
                    num_faces = int(data[1])
                    break
                elif len(data) > 1:
                    num_vertices = int(data[1])
                    num_faces = int(data[2])
                    break
                # Bare "OFF" keyword: counts are on a following line.
                continue
            num_vertices = int(data[0])
            num_faces = int(data[1])
            break
        # Get vertices (the file iterator resumes where the header scan stopped)
        for line in f:
            data = line.split()
            if _is_void(data):
                continue
            vertices.append([float(d) for d in data[:3]])
            if len(vertices) == num_vertices:
                break
        vertices = torch.FloatTensor(vertices)
        # Get faces; each line is "<face_size> <idx...> [<r> <g> <b>]"
        faces = []
        face_colors = []
        for line in f:
            data = line.split()
            if _is_void(data):
                continue
            face_size = int(data[0])
            faces.append([int(d) for d in data[1:face_size + 1]])
            if with_face_colors:
                face_colors.append([
                    int(d) for d in data[face_size + 1:face_size + 4]
                ])
            if len(faces) == num_faces:
                break
    faces = torch.LongTensor(faces)
    if with_face_colors:
        face_colors = torch.LongTensor(face_colors)
    else:
        face_colors = None
    return return_type(vertices, faces, face_colors)
4,722 | import itertools
import os
import re
import torch
def _get_stage_from_maybe_file(file_path_or_stage):
    """Return a stage from a file path, or the input itself if it is already a Usd.Stage.

    Args:
        file_path_or_stage (str or Usd.Stage):
            Path to usd file (/*.usd, /*.usda) or :class:`Usd.Stage`.

    Returns:
        (Usd.Stage): The output stage.

    Raises:
        FileNotFoundError: if a path was given and no file exists there.
    """
    if isinstance(file_path_or_stage, Usd.Stage):
        return file_path_or_stage
    # Explicit check instead of `assert`: asserts are stripped under
    # `python -O`, which would turn a missing file into an opaque USD error.
    if not os.path.exists(file_path_or_stage):
        raise FileNotFoundError(f'No USD file found at {file_path_or_stage}')
    return Usd.Stage.Open(file_path_or_stage)
def get_scene_paths(file_path_or_stage, scene_path_regex=None, prim_types=None,
                    conditional=lambda x: True):
    r"""Return all scene paths contained in specified file or stage. Filter paths with regular
    expression in `scene_path_regex` if provided.
    Args:
        file_path_or_stage (str or Usd.Stage):
            Path to usd file (\*.usd, \*.usda) or :class:`Usd.Stage`.
        scene_path_regex (str, optional):
            Optional regular expression used to select returned scene paths.
        prim_types (list of str, str, optional):
            Optional list of valid USD Prim types used to select scene paths, or a single USD Prim type string.
        conditional (function path: Bool): Custom conditionals to check
    Returns:
        (list of str): List of filtered scene paths.
    Example:
        >>> # Create a stage with some meshes
        >>> vertices_list = [torch.rand(3, 3) for _ in range(3)]
        >>> faces_list = [torch.tensor([[0, 1, 2]]) for _ in range(3)]
        >>> stage = export_meshes('./new_stage.usd', vertices=vertices_list, faces=faces_list)
        >>> # Retrieve scene paths
        >>> get_scene_paths('./new_stage.usd', prim_types=['Mesh'])
        [Sdf.Path('/World/Meshes/mesh_0'), Sdf.Path('/World/Meshes/mesh_1'), Sdf.Path('/World/Meshes/mesh_2')]
        >>> get_scene_paths('./new_stage.usd', scene_path_regex=r'.*_0', prim_types=['Mesh'])
        [Sdf.Path('/World/Meshes/mesh_0')]
    """
    stage = _get_stage_from_maybe_file(file_path_or_stage)
    pattern = '.*' if scene_path_regex is None else scene_path_regex
    if prim_types is not None:
        if isinstance(prim_types, str):
            prim_types = [prim_types]
        # Prim type comparison is case-insensitive.
        prim_types = [t.lower() for t in prim_types]

    def _keep(prim):
        # Evaluate all three criteria (including the user conditional) for
        # every prim, exactly like the original implementation.
        in_types = prim_types is None or prim.GetTypeName().lower() in prim_types
        matches_regex = re.match(pattern, str(prim.GetPath()))
        passes_conditional = conditional(prim)
        return in_types and bool(matches_regex) and passes_conditional

    return [prim.GetPath() for prim in stage.Traverse() if _keep(prim)]
The provided code snippet includes necessary dependencies for implementing the `get_authored_time_samples` function. Write a Python function `def get_authored_time_samples(file_path_or_stage)` to solve the following problem:
r""" Returns *all* authored time samples within the USD, aggregated across all primitives. Args: file_path_or_stage (str or Usd.Stage): Path to usd file (\*.usd, \*.usda) or :class:`Usd.Stage`. Returns: (list)
Here is the function:
def get_authored_time_samples(file_path_or_stage):
    r"""
    Returns *all* authored time samples within the USD, aggregated across all primitives.
    Args:
        file_path_or_stage (str or Usd.Stage):
            Path to usd file (\*.usd, \*.usda) or :class:`Usd.Stage`.
    Returns:
        (list)
    """
    stage = _get_stage_from_maybe_file(file_path_or_stage)
    # Collect the union of time samples over every attribute of every prim.
    samples = set()
    for scene_path in get_scene_paths(stage):
        prim = stage.GetPrimAtPath(scene_path)
        for attribute in prim.GetAttributes():
            samples.update(attribute.GetTimeSamples())
    return sorted(samples)
4,723 | import os
import warnings
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import torch
from kaolin.io import materials as usd_materials
from kaolin.io import utils
from kaolin.rep import SurfaceMesh
from .utils import _get_stage_from_maybe_file, get_scene_paths, create_stage
def import_meshes(file_path_or_stage, scene_paths=None, with_materials=False, with_normals=False,
                  heterogeneous_mesh_handler=None, times=None, triangulate=False):
    r"""Import one or more meshes from a USD file or Stage in an unbatched representation.
    Supports homogeneous meshes (meshes with consistent numbers of vertices per face). Custom handling of
    heterogeneous meshes can be achieved by passing a function through the ``heterogeneous_mesh_handler`` argument.
    The following interpolation types are supported for UV coordinates: `vertex`, `varying` and `faceVarying`.
    For each scene path specified in `scene_paths`, sub-meshes (if any) are flattened to a single mesh.
    Prims with no meshes or with heterogenous faces are skipped. Returns an unbatched attributes as CPU torch
    tensors in a list of easy-to-manage :class:`kaolin.rep.SurfaceMesh` containers.
    Args:
        file_path_or_stage (str or Usd.Stage):
            Path to usd file (`\*.usd`, `\*.usda`) or :class:`Usd.Stage`.
        scene_paths (list of str, optional): Scene path(s) within the USD file indicating which primitive(s)
            to import. If None, all prims of type `Mesh` will be imported.
        with_materials (bool): if True, load materials. Default: False.
        with_normals (bool): if True, load vertex normals. Default: False.
        heterogeneous_mesh_handler (Callable, optional):
            function that handles a heterogeneous mesh, homogenizing, returning None or throwing error,
            with the following signature:
            ``heterogeneous_mesh_handler(vertices, face_vertex_counts, *args, face_assignments)``
            for example, see :func:`mesh_handler_naive_triangulate <kaolin.io.utils.mesh_handler_naive_triangulate>`
            and :func:`heterogeneous_mesh_handler_skip <kaolin.io.utils.heterogeneous_mesh_handler_skip>`.
            Default: will raise a NonHomogeneousMeshError.
        times (list of int): Positive integers indicating the time at which to retrieve parameters.
        triangulate: if True, will triangulate all non-triangular meshes using same logic as
            :func:`mesh_handler_naive_triangulate <kaolin.io.utils.mesh_handler_naive_triangulate>`.
            If `heterogeneous_mesh_handler` is not set, this flag will cause non-homogeneous meshes to
            be triangulated and loaded without error; otherwise triangulation executes after `heterogeneous_mesh_handler`,
            which may skip or throw an error.
    Returns:
        (a list of SurfaceMesh):
            a list of unbatched instances of :class:`kaolin.rep.SurfaceMesh`, where:

            * **normals** and **face_normals_idx** will only be filled if `with_normals=True`
            * **materials** will be a list
              of :class:`kaolin.io.materials.Material` sorted by their `material_name`;
              filled only if `with_materials=True`.
            * **material_assignments** will be a tensor
              of shape ``(num_faces,)`` containing the index
              of the material (in the `materials` list) assigned to the corresponding face,
              or `-1` if no material was assigned; filled only if `with_materials=True`.

    .. rubric:: Examples

    To export and then import USD meshes::

        >>> # Create a stage with some meshes
        >>> vertices_list = [torch.rand(3, 3) for _ in range(3)]
        >>> faces_list = [torch.tensor([[0, 1, 2]]) for _ in range(3)]
        >>> stage = export_meshes('./new_stage.usd', vertices=vertices_list, faces=faces_list)
        >>> # Import meshes
        >>> meshes = import_meshes('./new_stage.usd')
        >>> len(meshes)
        3
        >>> meshes[0].vertices.shape
        torch.Size([3, 3])
        >>> [m.faces for m in meshes]
        [tensor([[0, 1, 2]]), tensor([[0, 1, 2]]), tensor([[0, 1, 2]])]

    To load multiple meshes from file, including materials and normals, while homongenizing and triangulating::

        >>> from kaolin.io.usd.mesh import import_meshes
        >>> from kaolin.io.utils import mesh_handler_naive_triangulate
        >>> meshes = import_meshes('sample_data/meshes/amsterdam.usda',
                                   with_normals=True, with_materials=True,
                                   heterogeneous_mesh_handler=mesh_handler_naive_triangulate,
                                   triangulate=True)
        >>> len(meshes)
        18
        >>> print(meshes[0])
        SurfaceMesh object with batching strategy NONE
        vertices: [4, 3] (torch.float32)[cpu]
        face_normals: [2, 3, 3] (torch.float32)[cpu]
        uvs: [4, 2] (torch.float32)[cpu]
        faces: [2, 3] (torch.int64)[cpu]
        face_uvs_idx: [2, 3] (torch.int64)[cpu]
        material_assignments: [2] (torch.int16)[cpu]
        materials: list of length 1
        face_vertices: if possible, computed on access from: (faces, vertices)
        vertex_normals: if possible, computed on access from: (faces, face_normals)
        face_uvs: if possible, computed on access from: (uvs, face_uvs_idx)
        >>> # If needed, concatenate meshes into a batch
        >>> from kaolin.rep import SurfaceMesh
        >>> mesh = SurfaceMesh.cat(meshes, fixed_topology=False)
        >>> print(mesh)
        SurfaceMesh object with batching strategy LIST
        vertices: [
        0: [4, 3] (torch.float32)[cpu]
        1: [98, 3] (torch.float32)[cpu]
        ...
        17: [4, 3] (torch.float32)[cpu]
        ]
        face_normals: [
        0: [2, 3, 3] (torch.float32)[cpu]
        ...
    """
    # With triangulate=True and no explicit handler, triangulation doubles as
    # the heterogeneous-mesh handler.
    triangulate_handler = None if not triangulate else utils.mesh_handler_naive_triangulate
    if heterogeneous_mesh_handler is None:
        heterogeneous_mesh_handler = triangulate_handler
    # TODO add arguments to selectively import UVs and normals
    stage = _get_stage_from_maybe_file(file_path_or_stage)
    # Remove `instanceable` flags
    # USD Scene Instances are an optimization to avoid duplicating mesh data in memory
    # Removing the instanceable flag allows for easy retrieval of mesh data
    # NOTE: this mutates the stage in place.
    for p in stage.Traverse():
        p.SetInstanceable(False)
    if scene_paths is None:
        scene_paths = get_scene_paths(stage, prim_types=['Mesh'])
    if times is None:
        times = [Usd.TimeCode.Default()] * len(scene_paths)
    results = []
    silence_tqdm = len(scene_paths) < 10  # Silence tqdm if fewer than 10 paths are found
    for scene_path, time in zip(tqdm(scene_paths, desc="Importing from USD", unit="mesh", disable=silence_tqdm), times):
        # Returns (any may be None):
        # vertices, faces, face_sizes, face_normals, uvs, face_uvs_idx, materials_dict, material_assignments_dict
        mesh_attr = _get_flattened_mesh_attributes(stage, scene_path, with_materials, with_normals, time=time)
        vertices = mesh_attr['vertices']
        faces = mesh_attr['faces']
        face_sizes = mesh_attr['face_sizes']
        face_normals = mesh_attr['face_normals']
        uvs = mesh_attr['uvs']
        face_uvs_idx = mesh_attr['face_uvs_idx']
        materials_dict = mesh_attr['materials_dict'] or {}
        material_assignments_dict = mesh_attr['material_assignments_dict'] or {}
        # Handle attributes that require faces
        nfaces = 0
        facesize = 0
        if faces is not None:
            if face_sizes is not None and face_sizes.shape[0] > 0:
                facesize = face_sizes[0]
                # Non-homogeneous face sizes: delegate to the handler, which
                # may homogenize, skip (by returning None) or raise.
                if not torch.all(face_sizes == facesize):
                    if heterogeneous_mesh_handler is None:
                        raise utils.NonHomogeneousMeshError(
                            f'Mesh at {scene_path} is non-homogeneous '
                            f'and cannot be imported from {repr(file_path_or_stage)}.')
                    else:
                        mesh = heterogeneous_mesh_handler(vertices, face_sizes, faces, face_uvs_idx, face_normals,
                                                          face_assignments=material_assignments_dict)
                        if mesh is None:
                            continue
                        vertices, face_sizes, faces, face_uvs_idx, face_normals, material_assignments_dict = mesh
                        facesize = faces.shape[-1]
            # Triangulation runs after the heterogeneous handler (faces may now
            # be homogeneous but still non-triangular).
            if triangulate_handler is not None and not torch.all(face_sizes == 3):
                mesh = triangulate_handler(vertices, face_sizes, faces, face_uvs_idx, face_normals,
                                           face_assignments=material_assignments_dict)
                if mesh is None:
                    continue
                vertices, face_sizes, faces, face_uvs_idx, face_normals, material_assignments_dict = mesh
                facesize = 3
            faces = faces.view(-1 if len(faces) > 0 else 0, facesize)  # Nfaces x facesize
            nfaces = faces.shape[0]
        # Process face-related attributes, correctly handling absence of face information
        if face_uvs_idx is not None and face_uvs_idx.size(0) > 0:
            # NOTE(review): assumes `uvs` is non-None whenever `face_uvs_idx`
            # is non-empty — confirm the upstream guarantee.
            uvs = uvs.reshape(-1, 2)
            face_uvs_idx = face_uvs_idx.reshape(-1, max(1, facesize))
        else:
            uvs = None
            face_uvs_idx = None
        if face_normals is not None and face_normals.size(0) > 0:
            face_normals = face_normals.reshape((nfaces, -1, 3) if nfaces > 0 else (-1, 1, 3))
        else:
            face_normals = None
        materials = None
        material_assignments = None
        if with_materials and nfaces > 0:
            # TODO: add support for custom material error_handler
            def _default_error_handler(error, **kwargs):
                raise error
            materials, material_assignments = usd_materials.process_materials_and_assignments(
                materials_dict, material_assignments_dict, _default_error_handler, nfaces,
                error_context_str=scene_path)
        results.append(SurfaceMesh(
            vertices=vertices, faces=faces, uvs=uvs, face_uvs_idx=face_uvs_idx, face_normals=face_normals,
            material_assignments=material_assignments, materials=materials,
            unset_attributes_return_none=True))  # for greater backward compatibility
    return results
def _get_stage_from_maybe_file(file_path_or_stage):
    """Return a stage from a file path, or the input itself if it is already a Usd.Stage.

    Args:
        file_path_or_stage (str or Usd.Stage):
            Path to usd file (/*.usd, /*.usda) or :class:`Usd.Stage`.

    Returns:
        (Usd.Stage): The output stage.

    Raises:
        FileNotFoundError: if a path was given and no file exists there.
    """
    if isinstance(file_path_or_stage, Usd.Stage):
        return file_path_or_stage
    # Explicit check instead of `assert`: asserts are stripped under
    # `python -O`, which would turn a missing file into an opaque USD error.
    if not os.path.exists(file_path_or_stage):
        raise FileNotFoundError(f'No USD file found at {file_path_or_stage}')
    return Usd.Stage.Open(file_path_or_stage)
The provided code snippet includes necessary dependencies for implementing the `import_mesh` function. Write a Python function `def import_mesh(file_path_or_stage, scene_path=None, with_materials=False, with_normals=False, heterogeneous_mesh_handler=None, time=None, triangulate=False)` to solve the following problem:
r"""Import a single mesh from a USD file of Stage in an unbatched representation. Supports homogeneous meshes (meshes with consistent numbers of vertices per face). All sub-meshes found under the `scene_path` are flattened to a single mesh. The following interpolation types are supported for UV coordinates: `vertex`, `varying` and `faceVarying`. Returns an unbatched attributes as CPU torch tensors in an easy-to-manage :class:`kaolin.rep.SurfaceMesh` container. Args: file_path_or_stage (str, Usd.Stage): Path to usd file (`\*.usd`, `\*.usda`) or :class:`Usd.Stage`. scene_path (str, optional): Scene path within the USD file indicating which primitive to import. If not specified, the all meshes in the scene will be imported and flattened into a single mesh. with_materials (bool): if True, load materials. Default: False. with_normals (bool): if True, load vertex normals. Default: False. heterogeneous_mesh_handler (Callable, optional): function that handles a heterogeneous mesh, homogenizing, returning None or throwing error, with the following signature: ``heterogeneous_mesh_handler(vertices, face_vertex_counts, *args, face_assignments)`` for example, see :func:`mesh_handler_naive_triangulate <kaolin.io.utils.mesh_handler_naive_triangulate>` and :func:`heterogeneous_mesh_handler_skip <kaolin.io.utils.heterogeneous_mesh_handler_skip>`. Default: will raise a NonHomogeneousMeshError. time (convertible to float, optional): Positive integer indicating the time at which to retrieve parameters. triangulate: if True, will triangulate all non-triangular meshes using same logic as :func:`mesh_handler_naive_triangulate <kaolin.io.utils.mesh_handler_naive_triangulate>`. If `heterogeneous_mesh_handler` is not set, this flag will cause non-homogeneous meshes to be triangulated and loaded without error; otherwise triangulation executes after `heterogeneous_mesh_handler`, which may skip or throw an error. 
Returns: (SurfaceMesh): an unbatched instance of :class:`kaolin.rep.SurfaceMesh`, where: * **normals** and **face_normals_idx** will only be filled if `with_normals=True` * **materials** will be a list of :class:`kaolin.io.materials.Material` sorted by their `material_name`; filled only if `with_materials=True`. * **material_assignments** will be a tensor of shape ``(num_faces,)`` containing the index of the material (in the `materials` list) assigned to the corresponding face, or `-1` if no material was assigned; filled only if `with_materials=True`. .. rubric:: Examples To load a mesh without loading normals or materials:: >>> from kaolin.io.usd.mesh import import_mesh >>> mesh = import_mesh("sample_data/meshes/pizza.usda") >>> print(mesh) SurfaceMesh object with batching strategy NONE vertices: [482, 3] (torch.float32)[cpu] uvs: [2880, 2] (torch.float32)[cpu] faces: [960, 3] (torch.int64)[cpu] face_uvs_idx: [960, 3] (torch.int64)[cpu] face_vertices: if possible, computed on access from: (faces, vertices) face_normals: if possible, computed on access from: (normals, face_normals_idx) or (vertices, faces) vertex_normals: if possible, computed on access from: (faces, face_normals) face_uvs: if possible, computed on access from: (uvs, face_uvs_idx) >>> mesh.face_normals # Causes face_normals and any attributes required to compute it to be auto-computed >>> mesh.to_batched() # Apply fixed topology batching, unsqueezing most attributes >>> mesh = mesh.cuda(attributes=["vertices"]) # Moves just vertices to GPU >>> print(mesh) SurfaceMesh object with batching strategy FIXED vertices: [1, 482, 3] (torch.float32)[cuda:0] face_vertices: [1, 960, 3, 3] (torch.float32)[cpu] face_normals: [1, 960, 3, 3] (torch.float32)[cpu] uvs: [1, 2880, 2] (torch.float32)[cpu] faces: [960, 3] (torch.int64)[cpu] face_uvs_idx: [1, 960, 3] (torch.int64)[cpu] vertex_normals: if possible, computed on access from: (faces, face_normals) face_uvs: if possible, computed on access from: (uvs, 
face_uvs_idx) To load a mesh with normals and materials, while triangulating and homogenizing if needed:: >>> from kaolin.io.usd.mesh import import_mesh >>> from kaolin.io.utils import mesh_handler_naive_triangulate >>> mesh = import_mesh("sample_data/meshes/pizza.usda", with_normals=True, with_materials=True, heterogeneous_mesh_handler=mesh_handler_naive_triangulate, triangulate=True) >>> print(mesh) SurfaceMesh object with batching strategy NONE vertices: [482, 3] (torch.float32)[cpu] face_normals: [960, 3, 3] (torch.float32)[cpu] uvs: [2880, 2] (torch.float32)[cpu] faces: [960, 3] (torch.int64)[cpu] face_uvs_idx: [960, 3] (torch.int64)[cpu] material_assignments: [960] (torch.int16)[cpu] materials: list of length 2 face_vertices: if possible, computed on access from: (faces, vertices) vertex_normals: if possible, computed on access from: (faces, face_normals) face_uvs: if possible, computed on access from: (uvs, face_uvs_idx)
Here is the function:
def import_mesh(file_path_or_stage, scene_path=None, with_materials=False, with_normals=False,
                heterogeneous_mesh_handler=None, time=None, triangulate=False):
    r"""Import a single mesh from a USD file or stage as an unbatched :class:`kaolin.rep.SurfaceMesh`.

    All sub-meshes found under ``scene_path`` are flattened into one mesh. Only homogeneous
    meshes (consistent number of vertices per face) are supported, unless a
    ``heterogeneous_mesh_handler`` or ``triangulate`` is given. The UV interpolation types
    `vertex`, `varying` and `faceVarying` are supported. All attributes are returned as CPU
    torch tensors.

    Args:
        file_path_or_stage (str, Usd.Stage):
            Path to a usd file (`\*.usd`, `\*.usda`) or a :class:`Usd.Stage`.
        scene_path (str, optional): Scene path of the primitive to import. If not specified,
            all meshes in the scene are imported and flattened into a single mesh.
        with_materials (bool): If True, load materials. Default: False.
        with_normals (bool): If True, load vertex normals. Default: False.
        heterogeneous_mesh_handler (Callable, optional):
            Function invoked on a heterogeneous mesh, which may homogenize it, return None to
            skip, or raise; signature
            ``heterogeneous_mesh_handler(vertices, face_vertex_counts, *args, face_assignments)``.
            See :func:`mesh_handler_naive_triangulate <kaolin.io.utils.mesh_handler_naive_triangulate>`
            and :func:`heterogeneous_mesh_handler_skip <kaolin.io.utils.heterogeneous_mesh_handler_skip>`.
            Default: raise a NonHomogeneousMeshError.
        time (convertible to float, optional): Time at which to retrieve parameters;
            defaults to ``Usd.TimeCode.Default()``.
        triangulate (bool): If True, triangulate all non-triangular meshes (after any
            ``heterogeneous_mesh_handler`` has run).

    Returns:
        (SurfaceMesh): an unbatched mesh, where:

            * **normals** and **face_normals_idx** are filled only if ``with_normals=True``
            * **materials** (list of :class:`kaolin.io.materials.Material`, sorted by
              ``material_name``) and **material_assignments** (``(num_faces,)`` tensor of
              material indices, ``-1`` meaning unassigned) are filled only if
              ``with_materials=True``.
    """
    # TODO add arguments to selectively import UVs
    stage = _get_stage_from_maybe_file(file_path_or_stage)
    # Fall back to the whole scene / default time code when not specified.
    target_path = stage.GetPseudoRoot().GetPath() if scene_path is None else scene_path
    sample_time = Usd.TimeCode.Default() if time is None else time
    return import_meshes(stage, [target_path],
                         heterogeneous_mesh_handler=heterogeneous_mesh_handler,
                         with_materials=with_materials,
                         with_normals=with_normals,
                         times=[sample_time],
                         triangulate=triangulate)[0]
Returns: (SurfaceMesh): an unbatched instance of :class:`kaolin.rep.SurfaceMesh`, where: * **normals** and **face_normals_idx** will only be filled if `with_normals=True` * **materials** will be a list of :class:`kaolin.io.materials.Material` sorted by their `material_name`; filled only if `with_materials=True`. * **material_assignments** will be a tensor of shape ``(num_faces,)`` containing the index of the material (in the `materials` list) assigned to the corresponding face, or `-1` if no material was assigned; filled only if `with_materials=True`. .. rubric:: Examples To load a mesh without loading normals or materials:: >>> from kaolin.io.usd.mesh import import_mesh >>> mesh = import_mesh("sample_data/meshes/pizza.usda") >>> print(mesh) SurfaceMesh object with batching strategy NONE vertices: [482, 3] (torch.float32)[cpu] uvs: [2880, 2] (torch.float32)[cpu] faces: [960, 3] (torch.int64)[cpu] face_uvs_idx: [960, 3] (torch.int64)[cpu] face_vertices: if possible, computed on access from: (faces, vertices) face_normals: if possible, computed on access from: (normals, face_normals_idx) or (vertices, faces) vertex_normals: if possible, computed on access from: (faces, face_normals) face_uvs: if possible, computed on access from: (uvs, face_uvs_idx) >>> mesh.face_normals # Causes face_normals and any attributes required to compute it to be auto-computed >>> mesh.to_batched() # Apply fixed topology batching, unsqueezing most attributes >>> mesh = mesh.cuda(attributes=["vertices"]) # Moves just vertices to GPU >>> print(mesh) SurfaceMesh object with batching strategy FIXED vertices: [1, 482, 3] (torch.float32)[cuda:0] face_vertices: [1, 960, 3, 3] (torch.float32)[cpu] face_normals: [1, 960, 3, 3] (torch.float32)[cpu] uvs: [1, 2880, 2] (torch.float32)[cpu] faces: [960, 3] (torch.int64)[cpu] face_uvs_idx: [1, 960, 3] (torch.int64)[cpu] vertex_normals: if possible, computed on access from: (faces, face_normals) face_uvs: if possible, computed on access from: (uvs, 
face_uvs_idx) To load a mesh with normals and materials, while triangulating and homogenizing if needed:: >>> from kaolin.io.usd.mesh import import_mesh >>> from kaolin.io.utils import mesh_handler_naive_triangulate >>> mesh = import_mesh("sample_data/meshes/pizza.usda", with_normals=True, with_materials=True, heterogeneous_mesh_handler=mesh_handler_naive_triangulate, triangulate=True) >>> print(mesh) SurfaceMesh object with batching strategy NONE vertices: [482, 3] (torch.float32)[cpu] face_normals: [960, 3, 3] (torch.float32)[cpu] uvs: [2880, 2] (torch.float32)[cpu] faces: [960, 3] (torch.int64)[cpu] face_uvs_idx: [960, 3] (torch.int64)[cpu] material_assignments: [960] (torch.int16)[cpu] materials: list of length 2 face_vertices: if possible, computed on access from: (faces, vertices) vertex_normals: if possible, computed on access from: (faces, face_normals) face_uvs: if possible, computed on access from: (uvs, face_uvs_idx) |
4,724 | import os
import warnings
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import torch
from kaolin.io import materials as usd_materials
from kaolin.io import utils
from kaolin.rep import SurfaceMesh
from .utils import _get_stage_from_maybe_file, get_scene_paths, create_stage
def add_mesh(stage, scene_path, vertices=None, faces=None, uvs=None, face_uvs_idx=None, face_normals=None,
             material_assignments=None, materials=None, time=None):
    r"""Add a mesh prim to an existing USD stage. The stage is modified but not saved to disk.

    Args:
        stage (Usd.Stage): Stage onto which to add the mesh.
        scene_path (str): Absolute path of mesh within the USD file scene. Must be a valid ``Sdf.Path``.
        vertices (torch.FloatTensor, optional): Vertices with shape ``(num_vertices, 3)``.
        faces (torch.LongTensor, optional): Vertex indices for each face with shape ``(num_faces, face_size)``.
            Mesh must be homogenous (consistent number of vertices per face).
        uvs (torch.FloatTensor, optional): of shape ``(num_uvs, 2)``.
        face_uvs_idx (torch.LongTensor, optional): of shape ``(num_faces, face_size)``. If provided, ``uvs``
            must also be specified (otherwise only a warning is emitted).
        face_normals (torch.Tensor, optional): of shape ``(num_faces, face_size, 3)``.
        material_assignments (torch.ShortTensor, optional): of shape ``(num_faces,)`` containing index of the
            material (in ``materials``) assigned to the corresponding face, or `-1` if no material was assigned.
        materials (list of Material, optional): material objects referenced by ``material_assignments``.
        time (convertible to float, optional): Time at which the supplied parameters correspond to;
            defaults to ``Usd.TimeCode.Default()``.

    Returns:
        (Usd.Prim): the created mesh prim.

    Example:
        >>> vertices = torch.rand(3, 3)
        >>> faces = torch.tensor([[0, 1, 2]])
        >>> stage = create_stage('./new_stage.usd')
        >>> mesh = add_mesh(stage, '/World/mesh', vertices, faces)
        >>> stage.Save()
    """
    if time is None:
        time = Usd.TimeCode.Default()
    usd_mesh = UsdGeom.Mesh.Define(stage, scene_path)
    if faces is not None:
        num_faces = faces.size(0)
        # Homogeneous mesh: every face holds faces.size(1) vertices.
        face_vertex_counts = [faces.size(1)] * num_faces
        faces_list = faces.view(-1).cpu().long().numpy()
        usd_mesh.GetFaceVertexCountsAttr().Set(face_vertex_counts, time=time)
        usd_mesh.GetFaceVertexIndicesAttr().Set(faces_list, time=time)
    if vertices is not None:
        vertices_list = vertices.detach().cpu().float().numpy()
        usd_mesh.GetPointsAttr().Set(Vt.Vec3fArray.FromNumpy(vertices_list), time=time)
    if uvs is not None:
        uvs_list = uvs.view(-1, 2).detach().cpu().float().numpy()
        pv = UsdGeom.PrimvarsAPI(usd_mesh.GetPrim()).CreatePrimvar(
            'st', Sdf.ValueTypeNames.Float2Array)
        pv.Set(uvs_list, time=time)
        # Choose the primvar interpolation from how many UVs were supplied:
        # one per vertex -> 'vertex'; one per face -> 'uniform'; otherwise per face-vertex.
        if vertices is not None and uvs.size(0) == vertices.size(0):
            pv.SetInterpolation('vertex')
        elif faces is not None and uvs.view(-1, 2).size(0) == faces.size(0):
            pv.SetInterpolation('uniform')
        else:
            pv.SetInterpolation('faceVarying')
    if face_uvs_idx is not None:
        if uvs is not None:
            pv.SetIndices(Vt.IntArray.FromNumpy(face_uvs_idx.view(-1).cpu().long().numpy()), time=time)
        else:
            warnings.warn('If providing "face_uvs_idx", "uvs" must also be provided.')
    if face_normals is not None:
        # Note: normals are stored as (num_faces * face_sizes) x 3 array
        face_normals = face_normals.view(-1, 3).cpu().float().numpy()
        usd_mesh.GetNormalsAttr().Set(face_normals, time=time)
        UsdGeom.PointBased(usd_mesh).SetNormalsInterpolation('faceVarying')
    if faces is not None and material_assignments is not None and materials is not None:
        stage.DefinePrim(f'{scene_path}/Looks', 'Scope')
        # Create submeshes: one GeomSubset per material, listing the faces assigned to it.
        for i, material in enumerate(materials):
            # Note: without int(x) for ... fails in Set with type mismatch
            face_idx = [int(x) for x in list((material_assignments == i).nonzero().squeeze().numpy())]
            subset_prim = stage.DefinePrim(f'{scene_path}/subset_{i}', 'GeomSubset')
            subset_prim.GetAttribute('indices').Set(face_idx)
            if isinstance(material, usd_materials.Material):
                # TODO: should be write_to_usd
                material._write_usd_preview_surface(stage, f'{scene_path}/Looks/material_{i}',
                                                    [subset_prim], time, texture_dir=f'material_{i}',
                                                    texture_file_prefix='')  # TODO allow users to pass root path to save textures to
    return usd_mesh.GetPrim()
def create_stage(file_path, up_axis='Y'):
    r"""Create a new USD file and return its empty stage.

    Args:
        file_path (str): Path to usd file (\*.usd, \*.usda); its parent directory must exist.
        up_axis (['Y', 'Z']): Specify the stage up axis. Choose from ``['Y', 'Z']``.

    Returns:
        (Usd.Stage)

    Example:
        >>> stage = create_stage('./new_stage.usd', up_axis='Z')
        >>> type(stage)
        <class 'pxr.Usd.Stage'>
    """
    assert os.path.exists(os.path.dirname(file_path)), f'Directory {os.path.dirname(file_path)} not found.'
    stage = Usd.Stage.CreateNew(str(file_path))
    UsdGeom.SetStageUpAxis(stage, up_axis)
    # Root Xform prim acts as the stage's default prim.
    root_prim = stage.DefinePrim('/World', 'Xform')
    stage.SetDefaultPrim(root_prim)
    return stage
The provided code snippet includes necessary dependencies for implementing the `export_mesh` function. Write a Python function `def export_mesh(file_path, scene_path='/World/Meshes/mesh_0', vertices=None, faces=None, uvs=None, face_uvs_idx=None, face_normals=None, material_assignments=None, materials=None, up_axis='Y', time=None)` to solve the following problem:
r"""Export a single mesh to USD and save the stage to disk. Args: file_path (str): Path to usd file (\*.usd, \*.usda). scene_path (str, optional): Absolute path of mesh within the USD file scene. Must be a valid ``Sdf.Path``. If no path is provided, a default path is used. vertices (torch.FloatTensor, optional): Vertices with shape ``(num_vertices, 3)``. faces (torch.LongTensor, optional): Vertex indices for each face with shape ``(num_faces, face_size)``. Mesh must be homogenous (consistent number of vertices per face). uvs (torch.FloatTensor, optional): of shape ``(num_uvs, 2)``. face_uvs_idx (torch.LongTensor, optional): of shape ``(num_faces, face_size)``. If provided, `uvs` must also be specified. face_normals (torch.Tensor, optional): of shape ``(num_vertices, num_faces, 3)``. materials (list of Material): list of material objects material_assignments (torch.ShortTensor): of shape ``(num_faces,)`` containing index of the material (in the above list) assigned to the corresponding face, or `-1` if no material was assigned up_axis (str, optional): Specifies the scene's up axis. Choose from ``['Y', 'Z']`` time (convertible to float, optional): Positive integer defining the time at which the supplied parameters correspond to. Returns: (Usd.Stage) Example: >>> vertices = torch.rand(3, 3) >>> faces = torch.tensor([[0, 1, 2]]) >>> stage = export_mesh('./new_stage.usd', vertices=vertices, faces=faces)
Here is the function:
def export_mesh(file_path, scene_path='/World/Meshes/mesh_0', vertices=None, faces=None,
                uvs=None, face_uvs_idx=None, face_normals=None, material_assignments=None, materials=None,
                up_axis='Y', time=None):
    r"""Export a single mesh to USD and save the stage to disk.

    If ``file_path`` already exists, the mesh is added to the existing stage; otherwise a
    new stage is created.

    Args:
        file_path (str): Path to usd file (\*.usd, \*.usda).
        scene_path (str, optional): Absolute path of mesh within the USD file scene.
            Must be a valid ``Sdf.Path``. If no path is provided, a default path is used.
        vertices (torch.FloatTensor, optional): Vertices with shape ``(num_vertices, 3)``.
        faces (torch.LongTensor, optional): Vertex indices for each face with shape
            ``(num_faces, face_size)``. Mesh must be homogenous (consistent number of
            vertices per face).
        uvs (torch.FloatTensor, optional): of shape ``(num_uvs, 2)``.
        face_uvs_idx (torch.LongTensor, optional): of shape ``(num_faces, face_size)``.
            If provided, ``uvs`` must also be specified.
        face_normals (torch.Tensor, optional): of shape ``(num_faces, face_size, 3)``.
        material_assignments (torch.ShortTensor, optional): of shape ``(num_faces,)``
            containing index of the material (in ``materials``) assigned to each face,
            or `-1` if no material was assigned.
        materials (list of Material, optional): material objects.
        up_axis (str, optional): Specifies the scene's up axis. Choose from ``['Y', 'Z']``.
        time (convertible to float, optional): Time at which the supplied parameters
            correspond to; defaults to ``Usd.TimeCode.Default()``.

    Returns:
        (Usd.Stage)

    Example:
        >>> vertices = torch.rand(3, 3)
        >>> faces = torch.tensor([[0, 1, 2]])
        >>> stage = export_mesh('./new_stage.usd', vertices=vertices, faces=faces)
    """
    assert isinstance(scene_path, str)
    sample_time = Usd.TimeCode.Default() if time is None else time
    if not os.path.exists(file_path):
        stage = create_stage(file_path, up_axis)
    else:
        stage = Usd.Stage.Open(file_path)
        UsdGeom.SetStageUpAxis(stage, up_axis)
    add_mesh(stage, scene_path, vertices=vertices, faces=faces, uvs=uvs,
             face_uvs_idx=face_uvs_idx, face_normals=face_normals,
             material_assignments=material_assignments, materials=materials,
             time=sample_time)
    stage.Save()
    return stage
4,725 | import os
import warnings
from collections import namedtuple
import numpy as np
from tqdm import tqdm
import torch
from kaolin.io import materials as usd_materials
from kaolin.io import utils
from kaolin.rep import SurfaceMesh
from .utils import _get_stage_from_maybe_file, get_scene_paths, create_stage
def add_mesh(stage, scene_path, vertices=None, faces=None, uvs=None, face_uvs_idx=None, face_normals=None,
             material_assignments=None, materials=None, time=None):
    r"""Add a mesh prim to an existing USD stage. The stage is modified but not saved to disk.

    Args:
        stage (Usd.Stage): Stage onto which to add the mesh.
        scene_path (str): Absolute path of mesh within the USD file scene. Must be a valid ``Sdf.Path``.
        vertices (torch.FloatTensor, optional): Vertices with shape ``(num_vertices, 3)``.
        faces (torch.LongTensor, optional): Vertex indices for each face with shape ``(num_faces, face_size)``.
            Mesh must be homogenous (consistent number of vertices per face).
        uvs (torch.FloatTensor, optional): of shape ``(num_uvs, 2)``.
        face_uvs_idx (torch.LongTensor, optional): of shape ``(num_faces, face_size)``. If provided, ``uvs``
            must also be specified (otherwise only a warning is emitted).
        face_normals (torch.Tensor, optional): of shape ``(num_faces, face_size, 3)``.
        material_assignments (torch.ShortTensor, optional): of shape ``(num_faces,)`` containing index of the
            material (in ``materials``) assigned to the corresponding face, or `-1` if no material was assigned.
        materials (list of Material, optional): material objects referenced by ``material_assignments``.
        time (convertible to float, optional): Time at which the supplied parameters correspond to;
            defaults to ``Usd.TimeCode.Default()``.

    Returns:
        (Usd.Prim): the created mesh prim.

    Example:
        >>> vertices = torch.rand(3, 3)
        >>> faces = torch.tensor([[0, 1, 2]])
        >>> stage = create_stage('./new_stage.usd')
        >>> mesh = add_mesh(stage, '/World/mesh', vertices, faces)
        >>> stage.Save()
    """
    if time is None:
        time = Usd.TimeCode.Default()
    usd_mesh = UsdGeom.Mesh.Define(stage, scene_path)
    if faces is not None:
        num_faces = faces.size(0)
        # Homogeneous mesh: every face holds faces.size(1) vertices.
        face_vertex_counts = [faces.size(1)] * num_faces
        faces_list = faces.view(-1).cpu().long().numpy()
        usd_mesh.GetFaceVertexCountsAttr().Set(face_vertex_counts, time=time)
        usd_mesh.GetFaceVertexIndicesAttr().Set(faces_list, time=time)
    if vertices is not None:
        vertices_list = vertices.detach().cpu().float().numpy()
        usd_mesh.GetPointsAttr().Set(Vt.Vec3fArray.FromNumpy(vertices_list), time=time)
    if uvs is not None:
        uvs_list = uvs.view(-1, 2).detach().cpu().float().numpy()
        pv = UsdGeom.PrimvarsAPI(usd_mesh.GetPrim()).CreatePrimvar(
            'st', Sdf.ValueTypeNames.Float2Array)
        pv.Set(uvs_list, time=time)
        # Choose the primvar interpolation from how many UVs were supplied:
        # one per vertex -> 'vertex'; one per face -> 'uniform'; otherwise per face-vertex.
        if vertices is not None and uvs.size(0) == vertices.size(0):
            pv.SetInterpolation('vertex')
        elif faces is not None and uvs.view(-1, 2).size(0) == faces.size(0):
            pv.SetInterpolation('uniform')
        else:
            pv.SetInterpolation('faceVarying')
    if face_uvs_idx is not None:
        if uvs is not None:
            pv.SetIndices(Vt.IntArray.FromNumpy(face_uvs_idx.view(-1).cpu().long().numpy()), time=time)
        else:
            warnings.warn('If providing "face_uvs_idx", "uvs" must also be provided.')
    if face_normals is not None:
        # Note: normals are stored as (num_faces * face_sizes) x 3 array
        face_normals = face_normals.view(-1, 3).cpu().float().numpy()
        usd_mesh.GetNormalsAttr().Set(face_normals, time=time)
        UsdGeom.PointBased(usd_mesh).SetNormalsInterpolation('faceVarying')
    if faces is not None and material_assignments is not None and materials is not None:
        stage.DefinePrim(f'{scene_path}/Looks', 'Scope')
        # Create submeshes: one GeomSubset per material, listing the faces assigned to it.
        for i, material in enumerate(materials):
            # Note: without int(x) for ... fails in Set with type mismatch
            face_idx = [int(x) for x in list((material_assignments == i).nonzero().squeeze().numpy())]
            subset_prim = stage.DefinePrim(f'{scene_path}/subset_{i}', 'GeomSubset')
            subset_prim.GetAttribute('indices').Set(face_idx)
            if isinstance(material, usd_materials.Material):
                # TODO: should be write_to_usd
                material._write_usd_preview_surface(stage, f'{scene_path}/Looks/material_{i}',
                                                    [subset_prim], time, texture_dir=f'material_{i}',
                                                    texture_file_prefix='')  # TODO allow users to pass root path to save textures to
    return usd_mesh.GetPrim()
def create_stage(file_path, up_axis='Y'):
    r"""Create a new USD file and return its empty stage.

    Args:
        file_path (str): Path to usd file (\*.usd, \*.usda); its parent directory must exist.
        up_axis (['Y', 'Z']): Specify the stage up axis. Choose from ``['Y', 'Z']``.

    Returns:
        (Usd.Stage)

    Example:
        >>> stage = create_stage('./new_stage.usd', up_axis='Z')
        >>> type(stage)
        <class 'pxr.Usd.Stage'>
    """
    assert os.path.exists(os.path.dirname(file_path)), f'Directory {os.path.dirname(file_path)} not found.'
    stage = Usd.Stage.CreateNew(str(file_path))
    UsdGeom.SetStageUpAxis(stage, up_axis)
    # Root Xform prim acts as the stage's default prim.
    root_prim = stage.DefinePrim('/World', 'Xform')
    stage.SetDefaultPrim(root_prim)
    return stage
The provided code snippet includes necessary dependencies for implementing the `export_meshes` function. Write a Python function `def export_meshes(file_path, scene_paths=None, vertices=None, faces=None, uvs=None, face_uvs_idx=None, face_normals=None, material_assignments=None, materials=None, up_axis='Y', times=None)` to solve the following problem:
r"""Export multiple meshes to a new USD stage. Export multiple meshes defined by lists vertices and faces and save the stage to disk. Args: file_path (str): Path to usd file (\*.usd, \*.usda). scene_paths (list of str, optional): Absolute paths of meshes within the USD file scene. Must have the same number ofpaths as the number of meshes ``N``. Must be a valid Sdf.Path. If no path is provided, a default path is used. vertices (list of torch.FloatTensor, optional): Vertices with shape ``(num_vertices, 3)``. faces (list of torch.LongTensor, optional): Vertex indices for each face with shape ``(num_faces, face_size)``. Mesh must be homogenous (consistent number of vertices per face). uvs (list of torch.FloatTensor, optional): of shape ``(num_uvs, 2)``. face_uvs_idx (list of torch.LongTensor, optional): of shape ``(num_faces, face_size)``. If provided, `uvs` must also be specified. face_normals (list of torch.Tensor, optional): of shape ``(num_faces, face_size, 3)``. materials (list of list of Material): list of material objects material_assignments (list of torch.ShortTensor): of shape `(\text{num_faces},)` containing index of the material (in the above list) assigned to the corresponding face, or `-1` if no material was assigned up_axis (str, optional): Specifies the scene's up axis. Choose from ``['Y', 'Z']``. times (list of int, optional): Positive integers defining the time at which the supplied parameters correspond to. Returns: (Usd.Stage) Example: >>> vertices_list = [torch.rand(3, 3) for _ in range(3)] >>> faces_list = [torch.tensor([[0, 1, 2]]) for _ in range(3)] >>> stage = export_meshes('./new_stage.usd', vertices=vertices_list, faces=faces_list)
Here is the function:
def export_meshes(file_path, scene_paths=None, vertices=None, faces=None,
                  uvs=None, face_uvs_idx=None, face_normals=None, material_assignments=None, materials=None,
                  up_axis='Y', times=None):
    r"""Export multiple meshes to a new USD stage.

    Export multiple meshes defined by lists of vertices and faces and save the stage to disk.
    All list arguments must have the same length ``N`` (the number of meshes).

    Args:
        file_path (str): Path to usd file (\*.usd, \*.usda).
        scene_paths (list of str, optional): Absolute paths of meshes within the USD file scene.
            Must have the same number of paths as the number of meshes ``N``. Each must be a valid
            Sdf.Path. If no paths are provided, default paths under ``/World/Meshes`` are used.
        vertices (list of torch.FloatTensor, optional): Vertices with shape ``(num_vertices, 3)``.
        faces (list of torch.LongTensor, optional): Vertex indices for each face with shape
            ``(num_faces, face_size)``. Mesh must be homogenous (consistent number of vertices per face).
        uvs (list of torch.FloatTensor, optional): of shape ``(num_uvs, 2)``.
        face_uvs_idx (list of torch.LongTensor, optional): of shape ``(num_faces, face_size)``.
            If provided, `uvs` must also be specified.
        face_normals (list of torch.Tensor, optional): of shape ``(num_faces, face_size, 3)``.
        material_assignments (list of torch.ShortTensor, optional): of shape ``(num_faces,)``
            containing index of the material (in ``materials``) assigned to the corresponding face,
            or `-1` if no material was assigned.
        materials (list of list of Material, optional): material objects per mesh.
        up_axis (str, optional): Specifies the scene's up axis. Choose from ``['Y', 'Z']``.
        times (list of int, optional): Positive integers defining the time at which the supplied
            parameters correspond to.

    Returns:
        (Usd.Stage)

    Raises:
        TypeError: if any per-mesh argument is not a list.

    Example:
        >>> vertices_list = [torch.rand(3, 3) for _ in range(3)]
        >>> faces_list = [torch.tensor([[0, 1, 2]]) for _ in range(3)]
        >>> stage = export_meshes('./new_stage.usd', vertices=vertices_list, faces=faces_list)
    """
    stage = create_stage(file_path, up_axis)
    num_meshes = -1
    # TODO: might want to consider sharing materials
    # Validate that every provided per-mesh argument is a list of equal length.
    for param in [scene_paths, vertices, faces, uvs, face_uvs_idx,
                  face_normals, material_assignments, materials, times]:
        if param is not None:
            if not isinstance(param, list):
                raise TypeError(f'Unexpected type {type(param)} input to export_meshes (list expected)')
            if num_meshes == -1:
                num_meshes = len(param)
            else:
                assert len(param) == num_meshes, 'All list inputs to export_meshes must have same length'
    if scene_paths is None:
        if not stage.GetPrimAtPath('/World/Meshes'):
            stage.DefinePrim('/World/Meshes', 'Xform')
        # Derive the mesh count from any provided list (num_meshes), not just `vertices`,
        # so default paths can be generated even when vertices are omitted.
        scene_paths = [f'/World/Meshes/mesh_{i}' for i in range(max(num_meshes, 0))]
    if times is None:
        times = [Usd.TimeCode.Default()] * len(scene_paths)
    for i, scene_path in enumerate(tqdm(scene_paths, desc="Exporting to USD", unit="mesh")):
        # Note: we make parameters explicit to ensure tests catch any API changes reliably
        add_mesh(stage, scene_path,
                 vertices=None if vertices is None else vertices[i],
                 faces=None if faces is None else faces[i],
                 uvs=None if uvs is None else uvs[i],
                 face_uvs_idx=None if face_uvs_idx is None else face_uvs_idx[i],
                 face_normals=None if face_normals is None else face_normals[i],
                 material_assignments=None if material_assignments is None else material_assignments[i],
                 materials=None if materials is None else materials[i],
                 time=times[i])
    stage.Save()
    return stage
4,726 | from collections import namedtuple
import numpy as np
import torch
from .utils import _get_stage_from_maybe_file, get_scene_paths, create_stage
The provided code snippet includes necessary dependencies for implementing the `get_pointcloud_bracketing_time_samples` function. Write a Python function `def get_pointcloud_bracketing_time_samples(stage, scene_path, target_time)` to solve the following problem:
Returns two time samples that bracket ``target_time`` for point cloud attributes at a specified scene_path. Args: stage (Usd.Stage) scene_path (str) target_time (Number) Returns: (iterable of 2 numbers)
Here is the function:
def get_pointcloud_bracketing_time_samples(stage, scene_path, target_time):
    """Return the two time samples that bracket ``target_time`` for the
    point cloud attributes at ``scene_path``.

    Args:
        stage (Usd.Stage): stage to query.
        scene_path (str): path of a UsdGeomPoints or UsdGeomPointInstancer prim.
        target_time (Number): the time to bracket.

    Returns:
        (iterable of 2 numbers): the bracketing time samples.

    Raises:
        TypeError: if the prim at ``scene_path`` is neither a UsdGeomPoints
            nor a UsdGeomPointInstancer.
    """
    # Note: can also get usd_attr.GetTimeSamples()
    prim = stage.GetPrimAtPath(scene_path)
    as_points = UsdGeom.Points(prim)
    if as_points:
        return as_points.GetPointsAttr().GetBracketingTimeSamples(target_time)
    as_instancer = UsdGeom.PointInstancer(prim)
    if as_instancer:
        return as_instancer.GetPositionsAttr().GetBracketingTimeSamples(target_time)
    raise TypeError('The prim is neither UsdGeomPoints nor UsdGeomPointInstancer.')
4,727 | from collections import namedtuple
import numpy as np
import torch
from .utils import _get_stage_from_maybe_file, get_scene_paths, create_stage
pointcloud_return_type = namedtuple('pointcloud_return_type', ['points', 'colors', 'normals'])
def import_pointclouds(file_path_or_stage, scene_paths=None, times=None):
    r"""Import one or more pointclouds from a USD file or stage.

    Pointclouds are expected to be encoded either as UsdGeomPoints prims or as
    PointInstancer prims; the coordinates of each point instance become one
    point of the output pointcloud.

    Args:
        file_path_or_stage (str or Usd.Stage):
            Path to usd file (\*.usd, \*.usda) or :class:`Usd.Stage`.
        scene_paths (list of str, optional): Scene path(s) within the USD file indicating which
            primitive(s) to import. If None, returns all pointclouds found based on PointInstancer
            or UsdGeomPoints prims with `kaolin_type` primvar set to `PointCloud`.
        times (list of int): Positive integers indicating the time at which to retrieve parameters.

    Returns:
        list of namedtuple of:
            - **points** (list of torch.FloatTensor): of shape (num_points, 3)
            - **colors** (list of torch.FloatTensor): display colors when present, else None
            - **normals** (list of torch.FloatTensor): not yet implemented, always None

    Example:
        >>> points = torch.rand(100, 3)
        >>> stage = export_pointclouds('./new_stage.usd', [points, points, points])
        >>> pointclouds = import_pointclouds(file_path='./new_stage.usd')[0]
        >>> len(pointclouds)
        3
        >>> pointclouds[0].shape
        torch.Size([100, 3])
    """
    stage = _get_stage_from_maybe_file(file_path_or_stage)
    if scene_paths is None:
        scene_paths = get_pointcloud_scene_paths(stage)
    if times is None:
        times = [Usd.TimeCode.Default()] * len(scene_paths)
    results = []
    for scene_path, time in zip(scene_paths, times):
        prim = stage.GetPrimAtPath(scene_path)
        assert prim, f'The prim at {scene_path} does not exist.'
        geom_points = UsdGeom.Points(prim)
        instancer = UsdGeom.PointInstancer(prim)
        if geom_points:
            points = torch.tensor(geom_points.GetPointsAttr().Get(time=time))
            raw_color = geom_points.GetDisplayColorAttr().Get(time=time)
            color = raw_color if raw_color is None else torch.tensor(raw_color)
        elif instancer:
            points = torch.tensor(instancer.GetPositionsAttr().Get(time=time))
            color = None
        else:
            raise TypeError('The prim is neither UsdGeomPoints nor UsdGeomPointInstancer.')
        # TODO: normals import is not implemented yet; keep a None placeholder.
        results.append(pointcloud_return_type(points, color, None))
    return results
The provided code snippet includes necessary dependencies for implementing the `import_pointcloud` function. Write a Python function `def import_pointcloud(file_path_or_stage, scene_path, time=None)` to solve the following problem:
r"""Import a single pointcloud from a USD file or stage. Assumes that the USD pointcloud is interpreted using a point instancer or UsdGeomPoints. Converts the coordinates of each point instance to a point within the output pointcloud. Args: file_path_or_stage (str or Usd.Stage): Path to usd file (\*.usd, \*.usda) or :class:`Usd.Stage`. scene_path (str): Scene path within the USD file indicating which primitive to import. time (convertible to float, optional): Positive integer indicating the time at which to retrieve parameters. Returns: namedtuple of: - **points** (torch.FloatTensor): of shape (num_points, 3) - **colors** (torch.FloatTensor): of shape (num_points, 3) - **normals** (torch.FloatTensor): of shape (num_points, 3) (not yet implemented) Example: >>> points = torch.rand(100, 3) >>> stage = export_pointcloud('./new_stage.usd', points, scene_path='/World/pointcloud') >>> points_imp = import_pointcloud(file_path='./new_stage.usd', ... scene_path='/World/pointcloud')[0] >>> points_imp.shape torch.Size([100, 3])
Here is the function:
def import_pointcloud(file_path_or_stage, scene_path, time=None):
    r"""Import a single pointcloud from a USD file or stage.

    Thin wrapper over :func:`import_pointclouds` for one scene path.
    Assumes the USD pointcloud is a point instancer or UsdGeomPoints prim;
    each point instance coordinate becomes one point of the output pointcloud.

    Args:
        file_path_or_stage (str or Usd.Stage):
            Path to usd file (\*.usd, \*.usda) or :class:`Usd.Stage`.
        scene_path (str): Scene path within the USD file indicating which primitive to import.
        time (convertible to float, optional): Positive integer indicating the time at which
            to retrieve parameters.

    Returns:
        namedtuple of:
            - **points** (torch.FloatTensor): of shape (num_points, 3)
            - **colors** (torch.FloatTensor): of shape (num_points, 3)
            - **normals** (torch.FloatTensor): not yet implemented, always None

    Example:
        >>> points = torch.rand(100, 3)
        >>> stage = export_pointcloud('./new_stage.usd', points, scene_path='/World/pointcloud')
        >>> points_imp = import_pointcloud(file_path='./new_stage.usd',
        ...                                scene_path='/World/pointcloud')[0]
        >>> points_imp.shape
        torch.Size([100, 3])
    """
    time = Usd.TimeCode.Default() if time is None else time
    imported = import_pointclouds(file_path_or_stage, [scene_path], times=[time])[0]
    return pointcloud_return_type(*imported)
4,728 | from collections import namedtuple
import numpy as np
import torch
from .utils import _get_stage_from_maybe_file, get_scene_paths, create_stage
def export_pointclouds(file_path, pointclouds, scene_paths=None, colors=None, times=None,
                       points_type='point_instancer'):
    r"""Export one or more pointclouds to a USD scene.

    Each pointcloud is added to a freshly created USD stage and the stage is
    saved to disk.

    Args:
        file_path (str): Path to usd file (\*.usd, \*.usda).
        pointclouds (list of torch.FloatTensor): List of ``N`` pointcloud tensors.
        scene_paths (list of str, optional): Absolute path(s) of pointcloud(s) within the USD
            file scene. Must be valid Sdf.Path values. Defaults are generated when omitted.
        colors (list of tensors, optional): List of ``N`` RGB color tensors, one per
            pointcloud. Colors only take effect when points_type is 'usd_geom_points'.
        times (list of int): Positive integers defining the time at which the supplied
            parameters correspond to.
        points_type (str): Either 'usd_geom_points' (UsdGeomPoints) or 'point_instancer'
            (PointInstancer). Default: 'point_instancer'.

    Returns:
        (Usd.Stage)

    Example:
        >>> points = torch.rand(100, 3)
        >>> stage = export_pointcloud('./new_stage.usd', points)
    """
    if scene_paths is None:
        scene_paths = [f'/World/PointClouds/pointcloud_{i}' for i in range(len(pointclouds))]
    num_paths = len(scene_paths)
    if times is None:
        times = [Usd.TimeCode.Default()] * num_paths
    if colors is None:
        colors = [None] * num_paths
    assert len(pointclouds) == num_paths
    stage = create_stage(file_path)
    for path, points, color, time in zip(scene_paths, pointclouds, colors, times):
        add_pointcloud(stage, points, path, color, time=time, points_type=points_type)
    stage.Save()
    return stage
The provided code snippet includes necessary dependencies for implementing the `export_pointcloud` function. Write a Python function `def export_pointcloud(file_path, pointcloud, scene_path='/World/PointClouds/pointcloud_0', color=None, time=None, points_type='point_instancer')` to solve the following problem:
r"""Export a single pointcloud to a USD scene. Export a single pointclouds to USD. The pointcloud will be added to the USD stage and represented by point instances of a sphere centered at each point coordinate. The stage is then saved to disk. Args: file_path (str): Path to usd file (\*.usd, \*.usda). pointcloud (torch.FloatTensor): Pointcloud tensor containing ``N`` points of shape ``(N, 3)``. scene_path (str, optional): Absolute path of pointcloud within the USD file scene. Must be a valid Sdf.Path. If no path is provided, a default path is used. color (torch.FloatTensor, optional): Color tensor corresponding each point in the pointcloud tensor of shape ``(N, 3)``. colors only works if points_type is 'usd_geom_points'. time (convertible to float): Positive integer defining the time at which the supplied parameters correspond to. points_type (str): String that indicates whether to save pointcloud as UsdGeomPoints or PointInstancer. 'usd_geom_points' indicates UsdGeomPoints and 'point_instancer' indicates PointInstancer. Please refer here for UsdGeomPoints: https://graphics.pixar.com/usd/docs/api/class_usd_geom_points.html and here for PointInstancer https://graphics.pixar.com/usd/docs/api/class_usd_geom_point_instancer.html. Default: 'point_instancer'. Returns: (Usd.Stage) Example: >>> points = torch.rand(100, 3) >>> stage = export_pointcloud('./new_stage.usd', points)
Here is the function:
def export_pointcloud(file_path, pointcloud, scene_path='/World/PointClouds/pointcloud_0',
                      color=None, time=None, points_type='point_instancer'):
    r"""Export a single pointcloud to a USD scene.

    Thin wrapper over :func:`export_pointclouds` for a single pointcloud.
    The pointcloud is added to the USD stage, which is then saved to disk.

    Args:
        file_path (str): Path to usd file (\*.usd, \*.usda).
        pointcloud (torch.FloatTensor): Pointcloud tensor of shape ``(N, 3)``.
        scene_path (str, optional): Absolute path of the pointcloud within the USD file scene.
            Must be a valid Sdf.Path. A default path is used when omitted.
        color (torch.FloatTensor, optional): Per-point RGB colors of shape ``(N, 3)``.
            Colors only take effect when points_type is 'usd_geom_points'.
        time (convertible to float): Positive integer defining the time at which the
            supplied parameters correspond to.
        points_type (str): Either 'usd_geom_points' (UsdGeomPoints) or 'point_instancer'
            (PointInstancer). Default: 'point_instancer'.

    Returns:
        (Usd.Stage)

    Example:
        >>> points = torch.rand(100, 3)
        >>> stage = export_pointcloud('./new_stage.usd', points)
    """
    return export_pointclouds(file_path, [pointcloud], [scene_path], colors=[color],
                              times=[time], points_type=points_type)
4,729 | import torch
import numpy as np
from .utils import _get_stage_from_maybe_file, get_scene_paths, create_stage
def import_voxelgrids(file_path_or_stage, scene_paths=None, times=None):
    r"""Import one or more voxelgrids from a USD file.

    Assumes that the USD voxelgrid is defined by a point instancer. Converts the coordinates
    of each point instance to an occupied voxel. The output grid size is determined from the
    `grid_size` primvar. If not specified, the grid size is the largest occupied index plus
    one, so that every occupied voxel fits. The output voxelgrid will be of shape
    ``[grid_size, grid_size, grid_size]``.

    Args:
        file_path_or_stage (str or Usd.Stage):
            Path to usd file (\*.usd, \*.usda) or :class:`Usd.Stage`.
        scene_paths (list of str, optional): Scene path(s) within the USD file indicating which
            PointInstancer primitive(s) to import. If None, will return all voxelgrids found
            based on PointInstancer prims with `kaolin_type` primvar set to `VoxelGrid`.
        times (list of int): Positive integers indicating the time at which to retrieve parameters.

    Returns:
        (list of torch.BoolTensor)

    Example:
        >>> voxelgrid_1 = torch.rand(32, 32, 32) > 0.5
        >>> voxelgrid_2 = torch.rand(32, 32, 32) > 0.5
        >>> stage = export_voxelgrids('./new_stage.usd', [voxelgrid_1, voxelgrid_2])
        >>> voxelgrid_imp = import_voxelgrids('./new_stage.usd')
        >>> len(voxelgrid_imp)
        2
        >>> voxelgrid_imp[0].shape
        torch.Size([32, 32, 32])
    """
    stage = _get_stage_from_maybe_file(file_path_or_stage)
    # If scene paths not specified, find all point instancers tagged as voxelgrids.
    if scene_paths is None:
        scene_paths = []
        for p in stage.Traverse():
            is_point_instancer = UsdGeom.PointInstancer(p)
            if is_point_instancer and p.GetAttribute('primvars:kaolin_type').Get() == 'VoxelGrid':
                scene_paths.append(p.GetPath())
    if times is None:
        times = [Usd.TimeCode.Default()] * len(scene_paths)
    voxelgrids = []
    for scene_path, time in zip(scene_paths, times):
        prim = stage.GetPrimAtPath(scene_path)
        assert prim, f'The prim at {scene_path} does not exist.'
        instancer = UsdGeom.PointInstancer(prim)
        assert instancer  # Currently only support voxelgrids from point instancers
        voxel_indices = torch.from_numpy(np.array(instancer.GetPositionsAttr().Get(time=time), dtype=np.int64))
        max_bound = voxel_indices.max(dim=0)[0].max()
        grid_size = prim.GetAttribute('primvars:grid_size').Get(time=time)
        if grid_size is not None:
            assert max_bound < grid_size
        else:
            # BUGFIX: indices are 0-based, so a grid containing index `max_bound`
            # needs `max_bound + 1` cells per axis. The previous fallback of
            # `grid_size = max_bound` made the scatter below raise an
            # out-of-bounds IndexError whenever the primvar was missing.
            grid_size = int(max_bound) + 1
        voxelgrid = torch.zeros([grid_size, grid_size, grid_size], dtype=torch.bool)
        voxelgrid[voxel_indices[:, 0], voxel_indices[:, 1], voxel_indices[:, 2]] = True
        voxelgrids.append(voxelgrid)
    return voxelgrids
The provided code snippet includes necessary dependencies for implementing the `import_voxelgrid` function. Write a Python function `def import_voxelgrid(file_path_or_stage, scene_path, time=None)` to solve the following problem:
r"""Import a single voxelgrid from a USD file or stage. Assumes that the USD voxelgrid is defined by a point instancer. Converts the coordinates of each point instance to an occupied voxel. The output grid size is determined by the `grid_size` primvar. If not specified, grid size will be determined by the axis with the largest number of occupied voxels. The output voxelgrid will be of shape ``[grid_size, grid_size, grid_size]``. Args: file_path_or_stage (str or Usd.Stage): Path to usd file (\*.usd, \*.usda) or :class:`Usd.Stage`. scene_path (str): Scene path within the USD file indicating which PointInstancer primitive to import as a voxelgrid. time (convertible to float, optional): Positive integer indicating the time at which to retrieve parameters. Returns: torch.BoolTensor Example: >>> voxelgrid = torch.rand(32, 32, 32) > 0.5 >>> stage = export_voxelgrid('./new_stage.usd', voxelgrid, scene_path='/World/voxelgrid') >>> voxelgrid_imp = import_voxelgrid('./new_stage.usd', ... scene_path='/World/voxelgrid') >>> voxelgrid_imp.shape torch.Size([32, 32, 32])
Here is the function:
def import_voxelgrid(file_path_or_stage, scene_path, time=None):
    r"""Import a single voxelgrid from a USD file or stage.

    Thin wrapper over :func:`import_voxelgrids` for one scene path.
    Assumes the USD voxelgrid is defined by a point instancer; each point
    instance coordinate becomes an occupied voxel. The output voxelgrid is of
    shape ``[grid_size, grid_size, grid_size]``, where the grid size comes
    from the `grid_size` primvar when present.

    Args:
        file_path_or_stage (str or Usd.Stage):
            Path to usd file (\*.usd, \*.usda) or :class:`Usd.Stage`.
        scene_path (str): Scene path within the USD file indicating which PointInstancer
            primitive to import as a voxelgrid.
        time (convertible to float, optional): Positive integer indicating the time at which
            to retrieve parameters.

    Returns:
        torch.BoolTensor

    Example:
        >>> voxelgrid = torch.rand(32, 32, 32) > 0.5
        >>> stage = export_voxelgrid('./new_stage.usd', voxelgrid, scene_path='/World/voxelgrid')
        >>> voxelgrid_imp = import_voxelgrid('./new_stage.usd',
        ...                                  scene_path='/World/voxelgrid')
        >>> voxelgrid_imp.shape
        torch.Size([32, 32, 32])
    """
    time = Usd.TimeCode.Default() if time is None else time
    return import_voxelgrids(file_path_or_stage, [scene_path], times=[time])[0]
4,730 | import torch
import numpy as np
from .utils import _get_stage_from_maybe_file, get_scene_paths, create_stage
def export_voxelgrids(file_path, voxelgrids, scene_paths=None, times=None):
    r"""Export one or more voxelgrids to a USD scene.

    Occupied voxels are the non-zero entries of each binary voxelgrid. Each
    voxelgrid is added to a freshly created stage, which is saved to disk.

    Args:
        file_path (str): Path to usd file (\*.usd, \*.usda).
        voxelgrids (list of torch.BoolTensor): List of binary voxelgrid(s) of shape ``(N, N, N)``.
        scene_paths (list of str, optional): Absolute path(s) of voxelgrids within the USD file
            scene. Must be valid Sdf.Path values. Defaults are generated when omitted.
        times (list of int): Positive integers defining the time at which the supplied
            parameters correspond to.

    Returns:
        (Usd.Stage)

    Example:
        >>> voxelgrid_1 = torch.rand(32, 32, 32) > 0.5
        >>> voxelgrid_2 = torch.rand(32, 32, 32) > 0.5
        >>> stage = export_voxelgrids('./new_stage.usd', [voxelgrid_1, voxelgrid_2])
    """
    if scene_paths is None:
        scene_paths = [f'/World/VoxelGrids/voxelgrid_{i}' for i in range(len(voxelgrids))]
    if times is None:
        times = [Usd.TimeCode.Default()] * len(scene_paths)
    assert len(voxelgrids) == len(scene_paths)
    stage = create_stage(file_path)
    for path, grid, time in zip(scene_paths, voxelgrids, times):
        add_voxelgrid(stage, grid, path, time=time)
    stage.Save()
    return stage
The provided code snippet includes necessary dependencies for implementing the `export_voxelgrid` function. Write a Python function `def export_voxelgrid(file_path, voxelgrid, scene_path='/World/VoxelGrids/voxelgrid_0', time=None)` to solve the following problem:
r"""Export a single voxelgrid to a USD scene. Export a binary voxelgrid where occupied voxels are defined by non-zero values. The voxelgrid is represented by point instances of a cube centered at each occupied index coordinates. The voxelgrid will be scaled so that it fits within a unit cube. The stage is then saved to disk. Args: file_path (str): Path to usd file (\*.usd, \*.usda). voxelgrid (torch.BoolTensor): Binary voxelgrid of shape ``(N, N, N)``. scene_path (str, optional): Absolute path of voxelgrid within the USD file scene. Must be a valid Sdf.Path. If no path is provided, a default path is used. time (convertible to float, optional): Positive integer defining the time at which the supplied parameters correspond to. Returns: (Usd.Stage) Example: >>> voxelgrid = torch.rand(32, 32, 32) > 0.5 >>> stage = export_voxelgrid('./new_stage.usd', voxelgrid)
Here is the function:
def export_voxelgrid(file_path, voxelgrid, scene_path='/World/VoxelGrids/voxelgrid_0', time=None):
    r"""Export a single voxelgrid to a USD scene.

    Thin wrapper over :func:`export_voxelgrids` for a single voxelgrid.
    Occupied voxels are the non-zero entries of the binary voxelgrid; the
    resulting stage is saved to disk.

    Args:
        file_path (str): Path to usd file (\*.usd, \*.usda).
        voxelgrid (torch.BoolTensor): Binary voxelgrid of shape ``(N, N, N)``.
        scene_path (str, optional): Absolute path of the voxelgrid within the USD file scene.
            Must be a valid Sdf.Path. A default path is used when omitted.
        time (convertible to float, optional): Positive integer defining the time at which
            the supplied parameters correspond to.

    Returns:
        (Usd.Stage)

    Example:
        >>> voxelgrid = torch.rand(32, 32, 32) > 0.5
        >>> stage = export_voxelgrid('./new_stage.usd', voxelgrid)
    """
    time = Usd.TimeCode.Default() if time is None else time
    return export_voxelgrids(file_path, [voxelgrid], [scene_path], times=[time])
4,731 | import json
import math
import os
import torch
import numpy as np
from PIL import Image
from ..render.camera import generate_perspective_projection
The provided code snippet includes necessary dependencies for implementing the `import_synthetic_view` function. Write a Python function `def import_synthetic_view(root_dir, idx, rgb=True, depth_linear=False, semantic=False, instance=False, normals=False, bbox_2d_tight=False, bbox_2d_loose=False)` to solve the following problem:
Import views of synthetic data simulating sensors on 3D models, following the format output by the Data Generator extension in the `Omniverse Kaolin App`_. Args: root_dir (str): path to the root directory containin the views. idx (int): index of the view selected. rgb (bool, optional): if True, load RGB image. Default: True. depth_linear (bool, optional): if True, load depth map with linear scaling. Default: False. semantic (bool, optional): if True, load semantic segmentation map. Default: False. instance (bool, optional): if True, load instance segmentation map. Default: False. normals (bool, optional): if True, load normals map. Default: False. bbox_2d_tight (bool, optional): if True, load tight 2d bounding box. Default: False. bbox_2d_loose (bool, optional): if True, load loose 2d bounding box. Default: False. Returns: (dict): A dictionary of all the sensors selected depending on the arguments: - **rgb** (torch.FloatTensor): the RGB image, of shape :math:`(B, H, W, 3)`. - **depth_linear** (torch.FloatTensor): the depth map with linear scaling, of shape :math:`(B, H, W)`. - **semantic** (torch.IntTensor): the semantic segmentation map, of shape :math:`(B, H, W)`. - **instance** (torch.IntTensor): the instance segmentation map, of shape :math:`(B, H, W)`. - **bbox_2d_tight** (dict): the bounding box, as 4 floats (xmin, xmax, ymin, ymax). - **normals** (torch.FloatTensor): the normals map, of shape :math:`(B, H, W, 3)`. - And **metadata**, a dictionary containing: - **assets_transform** (torch.FloatTensor): the transformation matrix of the combined assets transformations. - **cam_transform** (torch.FloatTensor): the transformation matrix, of shape :math:`(4, 3)`. - **cam_proj** (torch.FloatTensor): the projection matrix, of shape :math:`(3, 1)`. - **clipping_range** (list of float): the range at which the object are seen, as a list of (min, max). .. _Omniverse Kaolin App: https://docs.omniverse.nvidia.com/app_kaolin/app_kaolin/user_manual.html#data-generator
Here is the function:
def import_synthetic_view(root_dir, idx, rgb=True, depth_linear=False,
                          semantic=False, instance=False, normals=False,
                          bbox_2d_tight=False, bbox_2d_loose=False):
    """Import views of synthetic data simulating sensors on 3D models,
    following the format output by the Data Generator extension in the
    `Omniverse Kaolin App`_.

    Args:
        root_dir (str): path to the root directory containing the views.
        idx (int): index of the view selected.
        rgb (bool, optional): if True, load RGB image. Default: True.
        depth_linear (bool, optional): if True, load depth map with linear scaling. Default: False.
        semantic (bool, optional): if True, load semantic segmentation map. Default: False.
        instance (bool, optional): if True, load instance segmentation map. Default: False.
        normals (bool, optional): if True, load normals map. Default: False.
        bbox_2d_tight (bool, optional): if True, load tight 2d bounding box. Default: False.
        bbox_2d_loose (bool, optional): if True, load loose 2d bounding box. Default: False.

    Returns:
        (dict):
            A dictionary of the selected sensors (a selected sensor whose file is
            missing maps to None):

            - **rgb** (torch.FloatTensor): the RGB image, of shape :math:`(H, W, 3)`.
            - **depth_linear** (torch.FloatTensor): the depth map with linear scaling.
            - **semantic** (torch.IntTensor): the semantic segmentation map.
            - **instance** (torch.IntTensor): the instance segmentation map.
            - **normals** (torch.FloatTensor): the normals map, of shape :math:`(H, W, 3)`.
            - **bbox_2d_tight** / **bbox_2d_loose** (dict): bounding boxes, when requested.
            - And **metadata**, a dictionary containing:

              - **asset_transforms** (torch.FloatTensor): the combined assets transformation matrix.
              - **cam_transform** (torch.FloatTensor): the camera transformation matrix.
              - **cam_proj** (torch.FloatTensor): the camera projection matrix.
              - **clipping_range** (list of float): the (min, max) range at which objects are seen.

    .. _Omniverse Kaolin App:
        https://docs.omniverse.nvidia.com/app_kaolin/app_kaolin/user_manual.html#data-generator
    """
    output = {}

    def _load_npy(cat):
        # Raw .npy sensor maps are loaded as-is (missing file -> None).
        path = os.path.join(root_dir, f'{idx}_{cat}.npy')
        output[cat] = torch.from_numpy(np.load(path)) if os.path.exists(path) else None

    def _load_png(cat):
        # PNGs become float RGB in [0, 1]; any alpha channel is dropped.
        path = os.path.join(root_dir, f'{idx}_{cat}.png')
        if os.path.exists(path):
            img = np.array(Image.open(path))
            output[cat] = torch.from_numpy(img)[:, :, :3].float() / 255.
        else:
            output[cat] = None

    if rgb:
        _load_png('rgb')
    if depth_linear:
        _load_npy('depth_linear')
    if semantic:
        _load_npy('semantic')
    if instance:
        _load_npy('instance')
    if normals:
        _load_png('normals')

    with open(os.path.join(root_dir, f'{idx}_metadata.json'), 'r') as f:
        fmetadata = json.load(f)
    cam_props = fmetadata['camera_properties']
    asset_transforms = torch.FloatTensor(fmetadata['asset_transforms'][0][1])
    cam_transform = torch.FloatTensor(cam_props['tf_mat'])
    aspect_ratio = (cam_props['resolution']['width'] /
                    cam_props['resolution']['height'])
    # Horizontal field of view from the pinhole camera intrinsics.
    fov = 2 * math.atan(cam_props['horizontal_aperture'] / (2 * cam_props['focal_length']))
    output['metadata'] = {
        'cam_transform': cam_transform[:, :3],
        'asset_transforms': asset_transforms,
        'cam_proj': generate_perspective_projection(fov, aspect_ratio),
        'clipping_range': cam_props['clipping_range']
    }
    if bbox_2d_tight:
        output['bbox_2d_tight'] = fmetadata['bbox_2d_tight']
    if bbox_2d_loose:
        output['bbox_2d_loose'] = fmetadata['bbox_2d_loose']
    return output
_Omniverse Kaolin App: https://docs.omniverse.nvidia.com/app_kaolin/app_kaolin/user_manual.html#data-generator |
4,732 | import os
import hashlib
import warnings
import copy
from collections.abc import Sequence
from abc import abstractmethod
from collections import namedtuple
from pathlib import Path
import shutil
from tqdm import tqdm
import torch
from torch.multiprocessing import Pool
from torch.utils.data import Dataset
from ..utils.testing import contained_torch_equal
def _save_task(cache_dir, idx, getitem, to_save_on_disk, to_not_save):
    """Fetch item ``idx``, persist selected fields under ``cache_dir/idx``
    and return the rest.

    Fields named in ``to_save_on_disk`` are written to ``cache_dir/idx/<key>.pt``;
    fields named in ``to_not_save`` are dropped entirely; every other field is
    returned in a dict (to be kept in RAM by the caller).
    """
    with torch.no_grad():
        # Only create the per-item directory when something will be written.
        if to_save_on_disk:
            item_dir = cache_dir / str(idx)
            item_dir.mkdir(exist_ok=True)
        data = getitem(idx)
        kept = {}
        for key, value in data.items():
            if key in to_save_on_disk:
                torch.save(value, item_dir / f'{key}.pt')
            elif key not in to_not_save:
                kept[key] = value
        return kept
def _parallel_save_task(args):
    """Multiprocessing-friendly wrapper around :func:`_save_task`.

    Pins the worker to one intra-op thread, then unpacks the argument tuple
    (pool workers receive a single pickled argument).
    """
    cache_dir, idx, getitem, to_save_on_disk, to_not_save = args
    torch.set_num_threads(1)
    return _save_task(cache_dir, idx, getitem, to_save_on_disk, to_not_save)
4,733 | import os
import hashlib
import warnings
import copy
from collections.abc import Sequence
from abc import abstractmethod
from collections import namedtuple
from pathlib import Path
import shutil
from tqdm import tqdm
import torch
from torch.multiprocessing import Pool
from torch.utils.data import Dataset
from ..utils.testing import contained_torch_equal
def contained_torch_equal(elem, other, approximate=False, print_error_context=None, **allclose_args):
    """Check for equality (or allclose if approximate) of two objects potentially containing tensors.

    :func:`torch.equal` does not support data structures like dictionaries / arrays
    and `==` is ambiguous on :class:`torch.Tensor`.
    This function recurses through :class:`collections.abc.Mapping`,
    :class:`collections.abc.Sequence` and namedtuples, uses :func:`torch.equal`
    (or :func:`torch.allclose`) on :class:`torch.Tensor` leaves, and falls back
    to the `==` operator otherwise.

    Args:
        elem (object, dict, list, tuple): The first object
        other (object, dict, list, tuple): The other object to compare to ``elem``
        approximate (bool): if requested will use allclose for comparison instead (default=False)
        print_error_context (str): set to any string value to print the context for the first nested failed match
        allclose_args: arguments to `torch.allclose` if approximate comparison requested

    Return (bool): the comparison result
    """
    def _maybe_print(val, extra_context='', prefix_string='Failed match for '):
        # Print the path of the innermost failing comparison when requested.
        if not val and print_error_context is not None:  # match failed
            print(f'{prefix_string}{print_error_context}{extra_context}')
        return val

    elem_type = type(elem)
    if elem_type != type(other):
        return _maybe_print(False)

    def _tensor_compare(a, b):
        if not approximate:
            return torch.equal(a, b)
        else:
            return torch.allclose(a, b, **allclose_args)

    def _number_compare(a, b):
        # Route scalars through tensors so allclose_args apply uniformly.
        return _tensor_compare(torch.tensor([a]), torch.tensor([b]))

    def _attrs_to_dict(a, attrs):
        return {k: getattr(a, k) for k in attrs if hasattr(a, k)}

    def _recursive_error_context(append_context):
        if print_error_context is None:
            return None
        return f'{print_error_context}{append_context}'

    recursive_args = copy.copy(allclose_args)
    recursive_args['approximate'] = approximate
    if isinstance(elem, torch.Tensor):
        return _maybe_print(_tensor_compare(elem, other))
    elif isinstance(elem, str):
        return _maybe_print(elem == other, extra_context=f': {elem} vs {other}')
    elif isinstance(elem, float):
        return _maybe_print(_number_compare(elem, other), extra_context=f': {elem} vs {other}')
    elif isinstance(elem, collections.abc.Mapping):
        if elem.keys() != other.keys():
            return _maybe_print(False, f': {elem.keys()} vs {other.keys()}', 'Different keys for ')
        return all(contained_torch_equal(
            elem[key], other[key],
            print_error_context=_recursive_error_context(f'[{key}]'), **recursive_args) for key in elem)
    elif isinstance(elem, tuple) and hasattr(elem, '_fields'):  # namedtuple
        if set(elem._fields) != set(other._fields):
            return _maybe_print(False, f': {elem._fields} vs {other._fields}', 'Different fields for ')
        return all(contained_torch_equal(
            getattr(elem, f), getattr(other, f),
            print_error_context=_recursive_error_context(f'.{f}'), **recursive_args) for f in elem._fields)
    elif isinstance(elem, collections.abc.Sequence):
        if len(elem) != len(other):
            # Bug fix: this message was missing its f-string prefix and printed
            # the literal text '{len(elem)} vs {len(other)}'.
            return _maybe_print(False, f': {len(elem)} vs {len(other)}', 'Different length for ')
        return all(contained_torch_equal(
            a, b, print_error_context=_recursive_error_context(f'[{i}]'), **recursive_args)
            for i, (a, b) in enumerate(zip(elem, other)))
    elif hasattr(elem, '__slots__'):
        return contained_torch_equal(_attrs_to_dict(elem, elem.__slots__), _attrs_to_dict(other, other.__slots__),
                                     print_error_context=print_error_context, **recursive_args)
    else:
        return _maybe_print(elem == other)
def _get_saving_actions(dataset, cache_dir, save_on_disk=False,
                        force_overwrite=False, ignore_diff_error=False):
    """Decide, for each output field of ``dataset``, how the cache should treat it.

    Probes ``dataset[0]`` (which must return a dict) and splits its keys into
    three disjoint groups, after validating any pre-existing cache in ``cache_dir``.

    Args:
        dataset: indexable dataset whose ``__getitem__`` returns a dict.
        cache_dir (Path): root directory of the on-disk cache; must be provided
            whenever anything is to be saved on disk.
        save_on_disk (bool or Sequence of str): True to save every field on disk,
            or an explicit sequence of field names to save on disk.
        force_overwrite (bool): if True, an inconsistent existing cache is wiped
            and fully rewritten.
        ignore_diff_error (bool): if True, skip the consistency check between
            already-cached files and freshly computed values.

    Returns:
        (tuple) of three sets of field names:
            - **to_save_on_disk**: fields that must be written to disk.
            - **to_save_on_ram**: fields to be kept in memory only.
            - **to_not_save**: fields already cached on disk, to leave untouched.

    Raises:
        TypeError: ``save_on_disk`` has the wrong type or ``dataset[0]`` is not a dict.
        ValueError: ``cache_dir`` missing, or ``save_on_disk`` names a non-existent field.
        RuntimeError: existing cache has a mismatched number of entries.
    """
    # NOTE(review): 'size' is unused below (len(dataset) is re-queried directly);
    # candidate for removal.
    size = len(dataset)
    # Is there anything to save on disk?
    if isinstance(save_on_disk, bool):
        any_save_on_disk = save_on_disk
    elif isinstance(save_on_disk, Sequence):
        any_save_on_disk = len(save_on_disk) > 0
    else:
        raise TypeError("save_on_disk must be a boolean or a sequence of str")
    # We need to query the data from dataset[0] for sanity check
    # of saved files and arguments such as `save_on_disk`
    _data = dataset[0]
    if not isinstance(_data, dict):
        raise TypeError("the dataset.__getitem__ must output a dictionary")
    # Convert save_on_disk to a set of strings
    if isinstance(save_on_disk, bool):
        save_on_disk = set(_data.keys()) if save_on_disk else set()
    else:
        save_on_disk = set(save_on_disk)
    to_save_on_ram = set(_data.keys()).difference(save_on_disk)
    to_not_save = set() # Values that are already saved on disk
    to_save_on_disk = set() # Values that will be force stored on disk
    if any_save_on_disk:
        if cache_dir is None:
            raise ValueError("cache_dir should be provided with save_on_disk")
        cache_dir.mkdir(parents=True, exist_ok=True)
        # One subdirectory per cached item; stems are the item indices.
        cached_ids = list(p.stem for p in cache_dir.glob(r'*'))
        # Check that the keys on save_on_disk are actual outputs from preprocessing
        for k in save_on_disk:
            if k not in _data.keys():
                raise ValueError(f"the dataset doesn't provide an output field '{k}'")
        if force_overwrite:
            # Wipe only if the cache is inconsistent with the dataset size.
            if len(cached_ids) != len(dataset):
                shutil.rmtree(cache_dir)
                cache_dir.mkdir(parents=True, exist_ok=True)
            to_save_on_disk = save_on_disk
        else:
            # If the number of folder (len(dataset)) is different,
            # something is probably wrong with the existing data
            # note: reporting error directory with POSIX path to ease regex matching without raising encoding errors due to Windows backslashes
            if len(cached_ids) > 0 and len(cached_ids) != len(dataset):
                raise RuntimeError(f"{len(cached_ids)} files already exist on "
                                   f"{cache_dir.resolve().as_posix()} this dataset as {len(dataset)} files "
                                   "so caching is too ambiguous and error-prone "
                                   "please force rewriting by setting 'force_overwrite'")
            # We accept that the cache has partial values defined,
            # for instance if we add/remove a key from save_on_disk
            # TODO(cfujitsang): need to check if avoiding store isn't over-optimization
            #                   at the cost of potential user errors
            #                   since we are not avoiding to run the preprocessing
            for k, v in _data.items():
                if k in save_on_disk:
                    # Only item 0's files are checked; assumes the rest of the
                    # cache was written consistently with item 0.
                    path:Path = cache_dir / '0' / f'{k}.pt'
                    # note: reporting error directory with POSIX path to ease regex matching without raising encoding errors due to Windows backslashes
                    path_str = path.resolve().as_posix()
                    if path.exists(): # There is already a file for a given key
                        # Is the value stored the same than the one from the data?
                        assert ignore_diff_error or contained_torch_equal(v, torch.load(path)), \
                            f"file '{path_str}' is different than " \
                            "its matching field from the input dataset, set 'force_overwriting' " \
                            "to True to overwrite the files cached."
                        to_not_save.add(k)
                    else:
                        to_save_on_disk.add(k)
    return to_save_on_disk, to_save_on_ram, to_not_save
4,734 | import os
import hashlib
import warnings
import copy
from collections.abc import Sequence
from abc import abstractmethod
from collections import namedtuple
from pathlib import Path
import shutil
from tqdm import tqdm
import torch
from torch.multiprocessing import Pool
from torch.utils.data import Dataset
from ..utils.testing import contained_torch_equal
The provided code snippet includes necessary dependencies for implementing the `_get_hash` function. Write a Python function `def _get_hash(x)` to solve the following problem:
Generate a hash from a string, or dictionary.
Here is the function:
def _get_hash(x):
    """Return an MD5 hex digest for a string or dict (order-insensitive for dicts)."""
    if isinstance(x, dict):
        # Canonicalize so that key insertion order does not affect the hash.
        x = tuple(sorted(x.items()))
    return hashlib.md5(repr(x).encode('utf-8')).hexdigest()
4,735 | import os
import hashlib
import warnings
import copy
from collections.abc import Sequence
from abc import abstractmethod
from collections import namedtuple
from pathlib import Path
import shutil
from tqdm import tqdm
import torch
from torch.multiprocessing import Pool
from torch.utils.data import Dataset
from ..utils.testing import contained_torch_equal
def _preprocess_task(args):
    """Worker task: compute one item's cache key and data, then hand both to the cache.

    ``args`` is a tuple ``(idx, get_data, get_cache_key, cache_transform)``,
    packed so that pool workers receive a single pickled argument.
    """
    idx, get_data, get_cache_key, cache_transform = args
    torch.set_num_threads(1)
    with torch.no_grad():
        # Left-to-right evaluation keeps the original ordering:
        # key is computed before the (potentially expensive) data fetch.
        cache_transform(get_cache_key(idx), get_data(idx))
4,736 | import os
import hashlib
import warnings
import copy
from collections.abc import Sequence
from abc import abstractmethod
from collections import namedtuple
from pathlib import Path
import shutil
from tqdm import tqdm
import torch
from torch.multiprocessing import Pool
from torch.utils.data import Dataset
from ..utils.testing import contained_torch_equal
def _get_data(dataset, index):
    """Fetch raw data, preferring the dataset's ``get_data`` hook over plain indexing."""
    if hasattr(dataset, 'get_data'):
        return dataset.get_data(index)
    return dataset[index]
4,737 | import os
import hashlib
import warnings
import copy
from collections.abc import Sequence
from abc import abstractmethod
from collections import namedtuple
from pathlib import Path
import shutil
from tqdm import tqdm
import torch
from torch.multiprocessing import Pool
from torch.utils.data import Dataset
from ..utils.testing import contained_torch_equal
def _get_attributes(dataset, index):
    """Fetch item attributes via the dataset's ``get_attributes`` hook, else an empty dict."""
    if hasattr(dataset, 'get_attributes'):
        return dataset.get_attributes(index)
    return {}
4,738 | import os
import hashlib
import warnings
import copy
from collections.abc import Sequence
from abc import abstractmethod
from collections import namedtuple
from pathlib import Path
import shutil
from tqdm import tqdm
import torch
from torch.multiprocessing import Pool
from torch.utils.data import Dataset
from ..utils.testing import contained_torch_equal
def _get_cache_key(dataset, index):
    """Cache key via the dataset's ``get_cache_key`` hook, else the stringified index."""
    if hasattr(dataset, 'get_cache_key'):
        return dataset.get_cache_key(index)
    return str(index)
4,739 | import os
from typing import Callable
import warnings
from pathlib import Path
from torch.utils.data import Dataset
from kaolin.io.dataset import KaolinDataset, KaolinDatasetItem
from kaolin.io.obj import import_mesh, ignore_error_handler
label_to_synset = {label: synset for synset, labels in synset_to_labels.items() for label in labels}
def _convert_categories(categories):
    """Convert a list of category names to synset ids.

    Args:
        categories (Sequence[str]): category names; each entry may be a
            human-readable label (translated through ``label_to_synset``)
            or already a synset id (passed through unchanged).

    Returns:
        (list of str): the categories as synset ids.
    """
    # Bug fix: the original wrote ``if not (<generator expression>)`` — a
    # generator object is always truthy, so the warning could never fire
    # (and the ``dict_keys + dict_keys`` inside it would raise TypeError if
    # ever evaluated). Use all() with explicit membership tests instead.
    if not all(c in synset_to_label or c in label_to_synset for c in categories):
        warnings.warn('Some or all of the categories requested are not part of '
                      'Shrec16. Data loading may fail if these categories are not avaliable.')
    synsets = [label_to_synset[c] if c in label_to_synset.keys()
               else c for c in categories]
    return synsets
4,740 | import os
import warnings
from collections import namedtuple
import numpy as np
import torch
from PIL import Image
from kaolin.io.materials import MaterialLoadError, MaterialFileError, MaterialNotFoundError, \
process_materials_and_assignments
from kaolin.io import utils
from kaolin.rep import SurfaceMesh
The provided code snippet includes necessary dependencies for implementing the `ignore_error_handler` function. Write a Python function `def ignore_error_handler(error, **kwargs)` to solve the following problem:
Simple error handler to use in :func:`load_obj` that ignores all errors
Here is the function:
def ignore_error_handler(error, **kwargs):
    """Error handler for :func:`load_obj` that silently discards every error."""
    return None
4,741 | import os
import warnings
from collections import namedtuple
import numpy as np
import torch
from PIL import Image
from kaolin.io.materials import MaterialLoadError, MaterialFileError, MaterialNotFoundError, \
process_materials_and_assignments
from kaolin.io import utils
from kaolin.rep import SurfaceMesh
The provided code snippet includes necessary dependencies for implementing the `skip_error_handler` function. Write a Python function `def skip_error_handler(error, **kwargs)` to solve the following problem:
Simple error handler to use in :func:`load_obj` that skips all errors and logs them as warnings.
Here is the function:
def skip_error_handler(error, **kwargs):
    """Error handler for :func:`load_obj` that downgrades every error to a UserWarning."""
    message = error.args[0]
    warnings.warn(message, UserWarning)
4,742 | import os
import warnings
from collections import namedtuple
import numpy as np
import torch
from PIL import Image
from kaolin.io.materials import MaterialLoadError, MaterialFileError, MaterialNotFoundError, \
process_materials_and_assignments
from kaolin.io import utils
from kaolin.rep import SurfaceMesh
class MaterialLoadError(MaterialError):
    # Raised when a material definition exists but its contents fail to load
    # (e.g. a texture map path referenced by the .mtl file is invalid).
    pass
class MaterialFileError(MaterialError):
    # Raised when the material (.mtl) file itself cannot be opened.
    pass
class MaterialNotFoundError(MaterialError):
    # Raised when a mesh references a material that was never defined.
    pass
The provided code snippet includes necessary dependencies for implementing the `create_missing_materials_error_handler` function. Write a Python function `def create_missing_materials_error_handler(error, **kwargs)` to solve the following problem:
Error handler to be provided to obj.read_mesh that can handle a MaterialNotFound error, returning a dummy material with a random ambient color instead. The material will contain an additional "error" field. MaterialFileError and MaterialLoadError will print a warning and be ignored.
Here is the function:
def create_missing_materials_error_handler(error, **kwargs):
    """Error handler for obj.import_mesh that recovers from missing materials.

    For :class:`MaterialNotFoundError`, warns and returns a dummy material dict
    with a random ``'Ka'`` color and an extra ``'error'`` field describing the
    problem. :class:`MaterialFileError` and :class:`MaterialLoadError` are
    downgraded to warnings (returning None). Any other error is re-raised.
    """
    # Use isinstance rather than exact type() comparison so that subclasses
    # of the handled errors are treated the same way.
    if isinstance(error, MaterialNotFoundError):
        warnings.warn(f'{error.args[0]}, creating dummy material instead', UserWarning)
        return {'Ka': torch.rand((3,)), 'error': f'Dummy material created for missing material: {error}'}
    elif isinstance(error, (MaterialFileError, MaterialLoadError)):
        warnings.warn(error.args[0], UserWarning)
    else:
        raise error
4,743 | import os
import warnings
from collections import namedtuple
import numpy as np
import torch
from PIL import Image
from kaolin.io.materials import MaterialLoadError, MaterialFileError, MaterialNotFoundError, \
process_materials_and_assignments
from kaolin.io import utils
from kaolin.rep import SurfaceMesh
def default_error_handler(error, **kwargs):
    """Strictest error handler for :func:`load_obj`: re-raises every error it is given."""
    raise error
def flatten_feature(feature):
    """Collapse one level of nesting in ``feature``.

    Returns None when ``feature`` is None or empty; otherwise a flat list
    containing the items of every sublist, in order.
    """
    if not feature:
        return None
    flat = []
    for sublist in feature:
        flat.extend(sublist)
    return flat
def load_mtl(mtl_path, error_handler):
    """Load and parse a Material file.
    Followed format described in: https://people.sc.fsu.edu/~jburkardt/data/mtl/mtl.html.
    Currently only support diffuse, ambient and specular parameters (Kd, Ka, Ks)
    through single RGB values or texture maps.
    Args:
        mtl_path (str): Path to the mtl file.
        error_handler (Callable): handler invoked with a MaterialFileError or
            MaterialLoadError (plus context kwargs); may raise, warn or ignore.
    Returns:
        (dict):
            Dictionary of materials, which are dictionary of properties with optional torch.Tensor values:
            - **Kd**: diffuse color of shape (3)
            - **map_Kd**: diffuse texture map of shape (H, W, 3)
            - **Ks**: specular color of shape (3)
            - **map_Ks**: specular texture map of shape (H', W', 3)
            - **Ka**: ambient color of shape (3)
            - **map_Ka**: ambient texture map of shape (H'', W'', 3)
            - **material_name**: string name of the material
    Raises:
        MaterialFileError:
            Failed to open material path.
        MaterialLoadError:
            Failed to load material, very often due to path to map_Kd/map_Ka/map_Ks being invalid.
    """
    mtl_data = {}
    # Texture map paths in the .mtl file are relative to the .mtl's directory.
    root_dir = os.path.dirname(mtl_path)
    try:
        f = open(mtl_path, 'r', encoding='utf-8')
    except Exception as e:
        # Delegate: the handler decides whether a missing/unreadable file is fatal.
        error_handler(MaterialFileError(
            f"Failed to load material at path '{mtl_path}':\n{e}"),
            mtl_path=mtl_path, mtl_data=mtl_data)
    else:
        for line in f.readlines():
            data = line.split()
            if len(data) == 0:
                continue
            try:
                if data[0] == 'newmtl':
                    # Start of a new material block; subsequent properties
                    # attach to this name (NameError if a property precedes
                    # the first 'newmtl' — caught and routed to the handler).
                    material_name = data[1]
                    mtl_data[material_name] = {'material_name': material_name}
                elif data[0] in {'map_Kd', 'map_Ka', 'map_Ks'}:
                    texture_path = os.path.join(root_dir, data[1])
                    img = Image.open(texture_path)
                    # Normalize to 3-channel RGB (drops alpha, expands grayscale).
                    if img.mode != 'RGB':
                        img = img.convert('RGB')
                    mtl_data[material_name][data[0]] = torch.from_numpy(
                        np.array(img))
                elif data[0] in {'Kd', 'Ka', 'Ks'}:
                    mtl_data[material_name][data[0]] = torch.tensor(
                        [float(val) for val in data[1:]])
            except Exception as e:
                error_handler(MaterialLoadError(
                    f"Failed to load material at path '{mtl_path}':\n{e}"),
                    data=data, mtl_data=mtl_data)
        # NOTE(review): if readlines() itself raised, this close is skipped;
        # a 'with' block would be safer — left unchanged here.
        f.close()
    return mtl_data
def process_materials_and_assignments(materials_dict, material_assignments_dict, error_handler, num_faces,
                                      error_context_str=''):
    # Raw docstring: the LaTeX '\text' below would otherwise contain a '\t' escape.
    r"""Converts dictionary style materials and assignments to final format (see args/return values).
    Args:
        materials_dict (dict of str to dict): mapping from material name to material parameters
        material_assignments_dict (dict of str to torch.LongTensor): mapping from material name to either
            1) a K x 2 tensor with start and end face indices of the face ranges assigned to that material or
            2) a K, tensor with face indices assigned to that material
        error_handler: handler able to handle MaterialNotFound error - error can be thrown, ignored, or the
            handler can return a dummy material for material not found (if this is not the case, assignments to
            non-existent materials will be lost), e.g. obj.create_missing_materials_error_handler.
        num_faces: total number of faces in the model
        error_context_str (str): any extra info to attach to thrown errors
    Returns:
        (tuple) of:
            - **materials** (list): list of material parameters, sorted alphabetically by their name
            - **material_assignments** (torch.ShortTensor): of shape :math:`(\text{num_faces},)` containing index of the
              material (in the above list) assigned to the corresponding face, or `-1` if no material was assigned.
    """
    def _try_to_set_name(generated_material, material_name):
        # Best-effort: dummy materials from the handler may be dicts or objects.
        if isinstance(generated_material, Mapping):
            generated_material['material_name'] = material_name
        else:
            try:
                generated_material.material_name = material_name
            except Exception as e:
                warnings.warn(f'Cannot set dummy material_name: {e}')
    # Check that all assigned materials exist and if they don't we create a dummy material
    missing_materials = []
    for mat_name in material_assignments_dict.keys():
        if mat_name not in materials_dict:
            dummy_material = error_handler(
                MaterialNotFoundError(f"'Material {mat_name}' not found, but referenced. {error_context_str}"))
            # Either create dummy material or remove assignment
            if dummy_material is not None:
                _try_to_set_name(dummy_material, mat_name)
                materials_dict[mat_name] = dummy_material
            else:
                missing_materials.append(mat_name)
    # Ignore assignments to missing materials (unless handler created dummy material)
    for mat_name in missing_materials:
        del material_assignments_dict[mat_name]
    material_names = sorted(materials_dict.keys())
    materials = [materials_dict[name] for name in material_names]  # Alphabetically ordered materials
    # -1 marks faces with no material assigned.
    material_assignments = torch.zeros((num_faces,), dtype=torch.int16) - 1
    # Process material assignments to use material indices instead
    for name, values in material_assignments_dict.items():
        mat_idx = material_names.index(name)  # Alphabetically sorted material
        if len(values.shape) == 1:
            # Already a flat list of face indices.
            indices = values
        else:
            assert len(values.shape) == 2 and values.shape[-1] == 2, \
                f'Unxpected shape {values.shape} for material assignments for material {name} ' \
                f'(expected (K,) or (K, 2)). {error_context_str}'
            # Rewrite (K, 2) tensor of (face_idx_start, face_idx_end] to (M,) tensor of face_idx
            indices = torch.cat(
                [torch.arange(values[r, 0], values[r, 1], dtype=torch.long) for r in range(values.shape[0])])
        # Use face indices as index to set material_id in face-aligned material assignments
        material_assignments[indices] = mat_idx
    return materials, material_assignments
The provided code snippet includes necessary dependencies for implementing the `import_mesh` function. Write a Python function `def import_mesh(path, with_materials=False, with_normals=False, error_handler=None, heterogeneous_mesh_handler=None, triangulate=False)` to solve the following problem:
r"""Load data from an obj file as a single mesh, and return data as CPU pytorch tensors in an easy-to-manage :class:`kaolin.rep.SurfaceMesh` container. .. note:: Currently has limited materials support for Kd, Ka, Ks, map_Kd, map_Ka and map_Ks, following the format described in: http://paulbourke.net/dataformats/obj/ Args: path (str): path to the obj file (with extension). with_materials (bool): if True, load materials. Default: False. with_normals (bool): if True, load normals. Default: False. error_handler (Callable, optional): function that handles errors that can be raised (see raised errors, except `NonHomogeneousMeshError` handled separately), with the signature ``error_handler(error: Exception, **kwargs)``. Handler can provide special treatment of :class:`MaterialNotFoundError`, returning a dummy material dictionary instead (if this is not the case, assignments to non-existent materials will be lost). For options see: :func:`create_missing_materials_error_handler`, :func:`skip_error_handler`, :func:`ignore_error_handler`, and :func:`default_error_handler` (**Default** is to raise all errors). heterogeneous_mesh_handler (Callable, optional): function that handles a heterogeneous mesh, homogenizing, returning None or throwing error, with the following signature: ``heterogeneous_mesh_handler(vertices, face_vertex_counts, *args, face_assignments)`` for example, see :func:`mesh_handler_naive_triangulate <kaolin.io.utils.mesh_handler_naive_triangulate>` and :func:`heterogeneous_mesh_handler_skip <kaolin.io.utils.heterogeneous_mesh_handler_skip>`. Default: will raise a NonHomogeneousMeshError. triangulate: if True, will triangulate all non-triangular meshes using same logic as :func:`mesh_handler_naive_triangulate <kaolin.io.utils.mesh_handler_naive_triangulate>`. 
If `heterogeneous_mesh_handler` is not set, this flag will cause non-homogeneous meshes to be triangulated and loaded without error; otherwise triangulation executes after `heterogeneous_mesh_handler`, which may skip or throw an error. Returns: (SurfaceMesh): an unbatched instance of :class:`kaolin.rep.SurfaceMesh`, where: * **normals** and **face_normals_idx** will only be filled if `with_normals=True` * **materials** will be a list of materials (see return values of :func:`load_mtl`) sorted by their `material_name`; filled only if `with_materials=True`. * **material_assignments** will be a tensor of shape ``(num_faces,)`` containing the index of the material (in the `materials` list) assigned to the corresponding face, or `-1` if no material was assigned; filled only if `with_materials=True`. Raises: MaterialNotFoundError: The .obj is using a material not parsed from material libraries (set `error_handler` to skip). MaterialFileError: From :func:`load_mtl`: Failed to open material path (set `error_handler` to skip). MaterialLoadError: From :func:`load_mtl`: Failed to load material, very often due to path to map_Kd/map_Ka/map_ks being invalid (set `error_handler` to skip). NonHomogeneousMeshError: The number of vertices were not equal for all faces (set `heterogeneous_mesh_handler` to handle). .. 
rubric:: Examples To load a mesh without loading normals, materials or UVs:: >>> from kaolin.io.obj import import_mesh >>> mesh = import_mesh("sample_data/meshes/pizza.obj") >>> print(mesh) SurfaceMesh object with batching strategy NONE vertices: [482, 3] (torch.float32)[cpu] faces: [960, 3] (torch.int64)[cpu] face_vertices: if possible, computed on access from: (faces, vertices) face_normals: if possible, computed on access from: (normals, face_normals_idx) or (vertices, faces) vertex_normals: if possible, computed on access from: (faces, face_normals) face_uvs: if possible, computed on access from: (uvs, face_uvs_idx) >>> mesh.face_normals # Causes face_normals and any attributes required to compute it to be auto-computed >>> mesh.to_batched() # Apply fixed topology batching, unsqueezing most attributes >>> mesh = mesh.cuda(attributes=["vertices"]) # Moves just vertices to GPU >>> print(mesh) SurfaceMesh object with batching strategy FIXED vertices: [1, 482, 3] (torch.float32)[cuda:0] face_vertices: [1, 960, 3, 3] (torch.float32)[cpu] face_normals: [1, 960, 3, 3] (torch.float32)[cpu] faces: [960, 3] (torch.int64)[cpu] vertex_normals: if possible, computed on access from: (faces, face_normals) face_uvs: if possible, computed on access from: (uvs, face_uvs_idx) To load a mesh with normals, materials and UVs, while triangulating and homogenizing if needed:: >>> from kaolin.io.obj import import_mesh >>> from kaolin.io.utils import mesh_handler_naive_triangulate >>> mesh = import_mesh("sample_data/meshes/pizza.obj", with_normals=True, with_materials=True, heterogeneous_mesh_handler=mesh_handler_naive_triangulate, triangulate=True) >>> print(mesh) SurfaceMesh object with batching strategy NONE vertices: [482, 3] (torch.float32)[cpu] normals: [482, 3] (torch.float32)[cpu] uvs: [514, 2] (torch.float32)[cpu] faces: [960, 3] (torch.int64)[cpu] face_normals_idx: [960, 3] (torch.int64)[cpu] face_uvs_idx: [960, 3] (torch.int64)[cpu] material_assignments: [960] 
(torch.int16)[cpu] materials: list of length 2 face_vertices: if possible, computed on access from: (faces, vertices) face_normals: if possible, computed on access from: (normals, face_normals_idx) or (vertices, faces) vertex_normals: if possible, computed on access from: (faces, face_normals) face_uvs: if possible, computed on access from: (uvs, face_uvs_idx)
Here is the function:
def import_mesh(path, with_materials=False, with_normals=False,
error_handler=None, heterogeneous_mesh_handler=None,
triangulate=False):
r"""Load data from an obj file as a single mesh, and return data as CPU pytorch tensors in an easy-to-manage
:class:`kaolin.rep.SurfaceMesh` container.
.. note::
Currently has limited materials support for Kd, Ka, Ks, map_Kd, map_Ka and map_Ks,
following the format described in: http://paulbourke.net/dataformats/obj/
Args:
path (str): path to the obj file (with extension).
with_materials (bool): if True, load materials. Default: False.
with_normals (bool): if True, load normals. Default: False.
error_handler (Callable, optional):
function that handles errors that can be raised (see raised errors, except `NonHomogeneousMeshError`
handled separately), with the signature ``error_handler(error: Exception, **kwargs)``.
Handler can provide special treatment of :class:`MaterialNotFoundError`,
returning a dummy material dictionary instead (if this is not the case, assignments to
non-existent materials will be lost). For options see:
:func:`create_missing_materials_error_handler`, :func:`skip_error_handler`, :func:`ignore_error_handler`,
and :func:`default_error_handler` (**Default** is to raise all errors).
heterogeneous_mesh_handler (Callable, optional):
function that handles a heterogeneous mesh, homogenizing, returning None or throwing error,
with the following signature:
``heterogeneous_mesh_handler(vertices, face_vertex_counts, *args, face_assignments)``
for example, see :func:`mesh_handler_naive_triangulate <kaolin.io.utils.mesh_handler_naive_triangulate>`
and :func:`heterogeneous_mesh_handler_skip <kaolin.io.utils.heterogeneous_mesh_handler_skip>`.
Default: will raise a NonHomogeneousMeshError.
triangulate: if True, will triangulate all non-triangular meshes using same logic as
:func:`mesh_handler_naive_triangulate <kaolin.io.utils.mesh_handler_naive_triangulate>`.
If `heterogeneous_mesh_handler` is not set, this flag will cause non-homogeneous meshes to
be triangulated and loaded without error; otherwise triangulation executes after `heterogeneous_mesh_handler`,
which may skip or throw an error.
Returns:
(SurfaceMesh):
an unbatched instance of :class:`kaolin.rep.SurfaceMesh`, where:
* **normals** and **face_normals_idx** will only be filled if `with_normals=True`
* **materials** will be a list
of materials (see return values of :func:`load_mtl`) sorted by their `material_name`;
filled only if `with_materials=True`.
* **material_assignments** will be a tensor
of shape ``(num_faces,)`` containing the index
of the material (in the `materials` list) assigned to the corresponding face,
or `-1` if no material was assigned; filled only if `with_materials=True`.
Raises:
MaterialNotFoundError:
The .obj is using a material not parsed from material libraries (set `error_handler` to skip).
MaterialFileError:
From :func:`load_mtl`: Failed to open material path (set `error_handler` to skip).
MaterialLoadError:
From :func:`load_mtl`: Failed to load material, very often due to path to
map_Kd/map_Ka/map_ks being invalid (set `error_handler` to skip).
NonHomogeneousMeshError:
The number of vertices were not equal for all faces (set `heterogeneous_mesh_handler` to handle).
.. rubric:: Examples
To load a mesh without loading normals, materials or UVs::
>>> from kaolin.io.obj import import_mesh
>>> mesh = import_mesh("sample_data/meshes/pizza.obj")
>>> print(mesh)
SurfaceMesh object with batching strategy NONE
vertices: [482, 3] (torch.float32)[cpu]
faces: [960, 3] (torch.int64)[cpu]
face_vertices: if possible, computed on access from: (faces, vertices)
face_normals: if possible, computed on access from: (normals, face_normals_idx) or (vertices, faces)
vertex_normals: if possible, computed on access from: (faces, face_normals)
face_uvs: if possible, computed on access from: (uvs, face_uvs_idx)
>>> mesh.face_normals # Causes face_normals and any attributes required to compute it to be auto-computed
>>> mesh.to_batched() # Apply fixed topology batching, unsqueezing most attributes
>>> mesh = mesh.cuda(attributes=["vertices"]) # Moves just vertices to GPU
>>> print(mesh)
SurfaceMesh object with batching strategy FIXED
vertices: [1, 482, 3] (torch.float32)[cuda:0]
face_vertices: [1, 960, 3, 3] (torch.float32)[cpu]
face_normals: [1, 960, 3, 3] (torch.float32)[cpu]
faces: [960, 3] (torch.int64)[cpu]
vertex_normals: if possible, computed on access from: (faces, face_normals)
face_uvs: if possible, computed on access from: (uvs, face_uvs_idx)
To load a mesh with normals, materials and UVs, while triangulating and homogenizing if needed::
>>> from kaolin.io.obj import import_mesh
>>> from kaolin.io.utils import mesh_handler_naive_triangulate
>>> mesh = import_mesh("sample_data/meshes/pizza.obj",
with_normals=True, with_materials=True,
heterogeneous_mesh_handler=mesh_handler_naive_triangulate,
triangulate=True)
>>> print(mesh)
SurfaceMesh object with batching strategy NONE
vertices: [482, 3] (torch.float32)[cpu]
normals: [482, 3] (torch.float32)[cpu]
uvs: [514, 2] (torch.float32)[cpu]
faces: [960, 3] (torch.int64)[cpu]
face_normals_idx: [960, 3] (torch.int64)[cpu]
face_uvs_idx: [960, 3] (torch.int64)[cpu]
material_assignments: [960] (torch.int16)[cpu]
materials: list of length 2
face_vertices: if possible, computed on access from: (faces, vertices)
face_normals: if possible, computed on access from: (normals, face_normals_idx) or (vertices, faces)
vertex_normals: if possible, computed on access from: (faces, face_normals)
face_uvs: if possible, computed on access from: (uvs, face_uvs_idx)
"""
triangulate_handler = None if not triangulate else utils.mesh_handler_naive_triangulate
if heterogeneous_mesh_handler is None:
heterogeneous_mesh_handler = triangulate_handler
if error_handler is None:
error_handler = default_error_handler
vertices = []
faces = []
uvs = []
# 3 values per face
face_uvs_idx = []
normals = []
# 3 values per face
face_normals_idx = []
# materials_dict contains:
# {material_name: {properties dict}}
materials_dict = {}
# material_assignments contain:
# {material_name: [(face_idx_start, face_idx_end], (face_idx_start, face_idx_end])
material_assignments_dict = {}
material_faceidx_start = None
active_material_name = None
def _maybe_complete_material_assignment():
if active_material_name is not None:
if material_faceidx_start != len(face_uvs_idx): # Only add if at least one face is assigned
material_assignments_dict.setdefault(active_material_name, []).append(
torch.LongTensor([material_faceidx_start, len(face_uvs_idx)]))
with open(path, 'r', encoding='utf-8') as f:
for line in f:
data = line.split()
if len(data) == 0:
continue
if data[0] == 'v':
vertices.append(data[1:4])
elif with_materials and data[0] == 'vt':
uvs.append(data[1:3])
elif with_normals and data[0] == 'vn':
normals.append(data[1:])
elif data[0] == 'f':
data = [da.split('/') for da in data[1:]]
faces.append([int(d[0]) for d in data])
if with_materials:
if len(data[1]) > 1 and data[1][1] != '':
face_uvs_idx.append([int(d[1]) for d in data])
else:
face_uvs_idx.append([0] * len(data))
if with_normals:
if len(data[1]) > 2:
face_normals_idx.append([int(d[2]) for d in data])
else:
face_normals_idx.append([0] * len(data))
elif with_materials and data[0] == 'usemtl':
_maybe_complete_material_assignment()
active_material_name = data[1]
material_faceidx_start = len(face_uvs_idx)
elif with_materials and data[0] == 'mtllib':
mtl_path = os.path.join(os.path.dirname(path), data[1])
materials_dict.update(load_mtl(mtl_path, error_handler))
_maybe_complete_material_assignment()
vertices = torch.FloatTensor([float(el) for sublist in vertices for el in sublist]).view(-1, 3)
face_vertex_counts = torch.IntTensor([len(f) for f in faces])
# key: (Nx2) tensor of (start, end faceidx]
material_assignments_dict = {k: torch.stack(v) for k, v in material_assignments_dict.items()}
def _apply_handler(handler):
all_features = [faces, face_uvs_idx, face_normals_idx]
# Flatten all features
all_features = [flatten_feature(f) for f in all_features]
return handler(vertices, face_vertex_counts, *all_features, face_assignments=material_assignments_dict)
# Handle non-homogeneous meshes
is_heterogeneous = not torch.all(face_vertex_counts == face_vertex_counts[0])
if is_heterogeneous:
if heterogeneous_mesh_handler is None:
raise utils.NonHomogeneousMeshError(f'Mesh is non-homogeneous '
f'and cannot be imported from {path}.'
f'User can set heterogeneous_mesh_handler.'
f'See kaolin.io.utils for the available options')
mesh = _apply_handler(heterogeneous_mesh_handler)
if mesh is None:
warnings.warn(f'Heterogeneous mesh at path {path} not converted by the handler; returning None.')
return None
vertices, face_vertex_counts, faces, face_uvs_idx, face_normals_idx, material_assignments_dict = mesh
if triangulate_handler is not None and not torch.all(face_vertex_counts == 3):
mesh = _apply_handler(triangulate_handler)
if mesh is None:
warnings.warn(f'Non-triangular mesh at path {path} not triangulated; returning None.')
return None
vertices, face_vertex_counts, faces, face_uvs_idx, face_normals_idx, material_assignments_dict = mesh
faces = torch.LongTensor(faces) - 1
if with_materials:
uvs = torch.FloatTensor([float(el) for sublist in uvs
for el in sublist]).view(-1, 2)
face_uvs_idx = torch.LongTensor(face_uvs_idx) - 1
materials, material_assignments = process_materials_and_assignments(
materials_dict, material_assignments_dict, error_handler, faces.shape[0], error_context_str=path)
else:
uvs = None
face_uvs_idx = None
materials = None
material_assignments = None
if with_normals:
normals = torch.FloatTensor(
[float(el) for sublist in normals
for el in sublist]).view(-1, 3)
face_normals_idx = torch.LongTensor(face_normals_idx) - 1
else:
normals = None
face_normals_idx = None
return SurfaceMesh(vertices=vertices, faces=faces, uvs=uvs, face_uvs_idx=face_uvs_idx, materials=materials,
material_assignments=material_assignments, normals=normals, face_normals_idx=face_normals_idx,
unset_attributes_return_none=True) # for greater backward compatibility | r"""Load data from an obj file as a single mesh, and return data as CPU pytorch tensors in an easy-to-manage :class:`kaolin.rep.SurfaceMesh` container. .. note:: Currently has limited materials support for Kd, Ka, Ks, map_Kd, map_Ka and map_Ks, following the format described in: http://paulbourke.net/dataformats/obj/ Args: path (str): path to the obj file (with extension). with_materials (bool): if True, load materials. Default: False. with_normals (bool): if True, load normals. Default: False. error_handler (Callable, optional): function that handles errors that can be raised (see raised errors, except `NonHomogeneousMeshError` handled separately), with the signature ``error_handler(error: Exception, **kwargs)``. Handler can provide special treatment of :class:`MaterialNotFoundError`, returning a dummy material dictionary instead (if this is not the case, assignments to non-existent materials will be lost). For options see: :func:`create_missing_materials_error_handler`, :func:`skip_error_handler`, :func:`ignore_error_handler`, and :func:`default_error_handler` (**Default** is to raise all errors). heterogeneous_mesh_handler (Callable, optional): function that handles a heterogeneous mesh, homogenizing, returning None or throwing error, with the following signature: ``heterogeneous_mesh_handler(vertices, face_vertex_counts, *args, face_assignments)`` for example, see :func:`mesh_handler_naive_triangulate <kaolin.io.utils.mesh_handler_naive_triangulate>` and :func:`heterogeneous_mesh_handler_skip <kaolin.io.utils.heterogeneous_mesh_handler_skip>`. Default: will raise a NonHomogeneousMeshError. triangulate: if True, will triangulate all non-triangular meshes using same logic as :func:`mesh_handler_naive_triangulate <kaolin.io.utils.mesh_handler_naive_triangulate>`. 
If `heterogeneous_mesh_handler` is not set, this flag will cause non-homogeneous meshes to be triangulated and loaded without error; otherwise triangulation executes after `heterogeneous_mesh_handler`, which may skip or throw an error. Returns: (SurfaceMesh): an unbatched instance of :class:`kaolin.rep.SurfaceMesh`, where: * **normals** and **face_normals_idx** will only be filled if `with_normals=True` * **materials** will be a list of materials (see return values of :func:`load_mtl`) sorted by their `material_name`; filled only if `with_materials=True`. * **material_assignments** will be a tensor of shape ``(num_faces,)`` containing the index of the material (in the `materials` list) assigned to the corresponding face, or `-1` if no material was assigned; filled only if `with_materials=True`. Raises: MaterialNotFoundError: The .obj is using a material not parsed from material libraries (set `error_handler` to skip). MaterialFileError: From :func:`load_mtl`: Failed to open material path (set `error_handler` to skip). MaterialLoadError: From :func:`load_mtl`: Failed to load material, very often due to path to map_Kd/map_Ka/map_ks being invalid (set `error_handler` to skip). NonHomogeneousMeshError: The number of vertices were not equal for all faces (set `heterogeneous_mesh_handler` to handle). .. 
rubric:: Examples To load a mesh without loading normals, materials or UVs:: >>> from kaolin.io.obj import import_mesh >>> mesh = import_mesh("sample_data/meshes/pizza.obj") >>> print(mesh) SurfaceMesh object with batching strategy NONE vertices: [482, 3] (torch.float32)[cpu] faces: [960, 3] (torch.int64)[cpu] face_vertices: if possible, computed on access from: (faces, vertices) face_normals: if possible, computed on access from: (normals, face_normals_idx) or (vertices, faces) vertex_normals: if possible, computed on access from: (faces, face_normals) face_uvs: if possible, computed on access from: (uvs, face_uvs_idx) >>> mesh.face_normals # Causes face_normals and any attributes required to compute it to be auto-computed >>> mesh.to_batched() # Apply fixed topology batching, unsqueezing most attributes >>> mesh = mesh.cuda(attributes=["vertices"]) # Moves just vertices to GPU >>> print(mesh) SurfaceMesh object with batching strategy FIXED vertices: [1, 482, 3] (torch.float32)[cuda:0] face_vertices: [1, 960, 3, 3] (torch.float32)[cpu] face_normals: [1, 960, 3, 3] (torch.float32)[cpu] faces: [960, 3] (torch.int64)[cpu] vertex_normals: if possible, computed on access from: (faces, face_normals) face_uvs: if possible, computed on access from: (uvs, face_uvs_idx) To load a mesh with normals, materials and UVs, while triangulating and homogenizing if needed:: >>> from kaolin.io.obj import import_mesh >>> from kaolin.io.utils import mesh_handler_naive_triangulate >>> mesh = import_mesh("sample_data/meshes/pizza.obj", with_normals=True, with_materials=True, heterogeneous_mesh_handler=mesh_handler_naive_triangulate, triangulate=True) >>> print(mesh) SurfaceMesh object with batching strategy NONE vertices: [482, 3] (torch.float32)[cpu] normals: [482, 3] (torch.float32)[cpu] uvs: [514, 2] (torch.float32)[cpu] faces: [960, 3] (torch.int64)[cpu] face_normals_idx: [960, 3] (torch.int64)[cpu] face_uvs_idx: [960, 3] (torch.int64)[cpu] material_assignments: [960] 
(torch.int16)[cpu] materials: list of length 2 face_vertices: if possible, computed on access from: (faces, vertices) face_normals: if possible, computed on access from: (normals, face_normals_idx) or (vertices, faces) vertex_normals: if possible, computed on access from: (faces, face_normals) face_uvs: if possible, computed on access from: (uvs, face_uvs_idx) |
4,744 | import os
import warnings
from pathlib import Path
from torch.utils.data import Dataset
from kaolin.io.dataset import KaolinDataset, KaolinDatasetItem
from kaolin.io.obj import import_mesh, ignore_error_handler
synset_to_labels = {
'04379243': ['table'],
'03211117': ['display', 'video display'],
'04401088': ['telephone', 'phone', 'telephone set'],
'04530566': ['vessel', 'watercraft'],
'03001627': ['chair'],
'03636649': ['lamp'],
'03691459': ['loudspeaker', 'speaker', 'speaker unit', 'loudspeaker system', 'speaker system'],
'02828884': ['bench'],
'02691156': ['airplane', 'aeroplane', 'plane'],
'02808440': ['bathtub', 'bathing tub', 'bath', 'tub'],
'02871439': ['bookshelf'],
'02773838': ['bag', 'traveling bag', 'travelling bag', 'grip', 'suitcase'],
'02801938': ['basket', 'handbasket'],
'02880940': ['bowl'],
'02924116': ['bus', 'autobus', 'coach', 'charabanc', 'double-decker', 'jitney',
'motorbus', 'motorcoach', 'omnibus', 'passenger vehi'],
'02933112': ['cabinet'],
'02942699': ['camera', 'photographic camera'],
'02958343': ['car', 'auto', 'automobile', 'machine', 'motorcar'],
'03207941': ['dishwasher', 'dish washer', 'dishwashing machine'],
'03337140': ['file', 'file cabinet', 'filing cabinet'],
'03624134': ['knife'],
'03642806': ['laptop', 'laptop computer'],
'03710193': ['mailbox', 'letter box'],
'03761084': ['microwave', 'microwave oven'],
'03928116': ['piano', 'pianoforte', 'forte-piano'],
'03938244': ['pillow'],
'03948459': ['pistol', 'handgun', 'side arm', 'shooting iron'],
'04004475': ['printer', 'printing machine'],
'04099429': ['rocket', 'projectile'],
'04256520': ['sofa', 'couch', 'lounge'],
'04554684': ['washer', 'automatic washer', 'washing machine'],
'04090263': ['rifle'],
'02946921': ['can', 'tin', 'tin can'],
'04330267': ['stove'],
'02843684': ['birdhouse'],
'03513137': ['helmet'],
'02992529': ['cellular telephone', 'cellular phone', 'cellphone', 'cell', 'mobile phone'],
'03991062': ['pot', 'flowerpot'],
'04074963': ['remote control', 'remote'],
'03790512': ['motorcycle', 'bike'],
'04225987': ['skateboard'],
'03593526': ['jar'],
'02954340': ['cap'],
'03467517': ['guitar'],
'04460130': ['tower'],
'03759954': ['microphone', 'mike'],
'03325088': ['faucet', 'spigot'],
'03797390': ['mug'],
'03046257': ['clock'],
'02747177': ['ashcan', 'trash can', 'garbage can', 'wastebin', 'ash bin', 'ash-bin',
'ashbin', 'dustbin', 'trash barrel', 'trash bin'],
'02818832': ['bed'],
'03085013': ['computer keyboard', 'keypad'],
'02876657': ['bottle'],
'04468005': ['train', 'railroad train'],
'03261776': ['earphone', 'earpiece', 'headphone', 'phone'],
'02834778': ['bicycle', 'bike', 'wheel', 'cycle'],
'02858304': ['boat']
}
label_to_synset = {label: synset for synset, labels in synset_to_labels.items() for label in labels}
def _convert_categories(categories):
for c in categories:
if c not in synset_to_labels.keys() and c not in label_to_synset.keys():
warnings.warn('Some or all of the categories requested are not part of \
ShapeNetCore. Data loading may fail if these categories are not avaliable.')
synsets = [label_to_synset[c] if c in label_to_synset.keys()
else c for c in categories]
return synsets | null |
4,745 | import json
import logging
import numpy as np
from pxr import Usd, UsdGeom
from tornado.websocket import WebSocketHandler
import tornado.gen
import kaolin.io.usd
from kaolin.visualize import TimelapseParser
The provided code snippet includes necessary dependencies for implementing the `meshes_to_binary` function. Write a Python function `def meshes_to_binary(vertices_list, faces_list)` to solve the following problem:
Encodes meshes in a binary format for transferring over the network. Args: vertices_list: list of numpy array V x 3 (float32; will convert to this) faces_list: list of numpy array F x 3 (int32; will convert to this) Returns: bytes
Here is the function:
def meshes_to_binary(vertices_list, faces_list):
"""Encodes meshes in a binary format for transferring over the network.
Args:
vertices_list: list of numpy array V x 3 (float32; will convert to this)
faces_list: list of numpy array F x 3 (int32; will convert to this)
Returns:
bytes
"""
nmeshes = len(vertices_list)
if len(faces_list) != nmeshes:
raise RuntimeError(
'Expected equal number of vertex and face lists, got: {}, {}'.format(
nmeshes, len(faces_list)))
# TODO: if needed, specify consistent order in tobytes()
texture_mode = 0 # TODO: use to extend support with backward compatibility
tbd_info0 = 0
tbd_info1 = 0
binstr = np.array([nmeshes, texture_mode, tbd_info0, tbd_info1], dtype=np.int32).tobytes()
for i in range(nmeshes):
vertices = vertices_list[i]
faces = faces_list[i]
# TODO: better to assume we always reshape USD data consistently before calling this
nvertices = vertices.size // 3
nfaces = faces.size // 3
binstr += np.array([nvertices, nfaces], dtype=np.int32).tobytes()
# TODO: ideally stream raw USD chunk without even parsing
binstr += vertices.astype(np.float32).tobytes()
binstr += faces.astype(np.int32).tobytes()
return binstr | Encodes meshes in a binary format for transferring over the network. Args: vertices_list: list of numpy array V x 3 (float32; will convert to this) faces_list: list of numpy array F x 3 (int32; will convert to this) Returns: bytes |
4,746 | import json
import logging
import numpy as np
from pxr import Usd, UsdGeom
from tornado.websocket import WebSocketHandler
import tornado.gen
import kaolin.io.usd
from kaolin.visualize import TimelapseParser
The provided code snippet includes necessary dependencies for implementing the `point_clouds_to_binary` function. Write a Python function `def point_clouds_to_binary(positions_list)` to solve the following problem:
Encodes point clouds in a binary format for transferring over the network. Args: positions_list (list of numpy arrays): P x 3 (float32; will convert to this) Returns: bytes
Here is the function:
def point_clouds_to_binary(positions_list):
"""Encodes point clouds in a binary format for transferring over the network.
Args:
positions_list (list of numpy arrays): P x 3 (float32; will convert to this)
Returns:
bytes
"""
nclouds = len(positions_list)
texture_mode = 0 # TODO: use to extend support with backward compatibility
tbd_info0 = 0
tbd_info1 = 0
binstr = np.array([nclouds, texture_mode, tbd_info0, tbd_info1], dtype=np.int32).tobytes()
for i in range(nclouds):
positions = positions_list[i]
# TODO: better to assume we always reshape USD data consistently before calling this
npts = positions.size // 3
binstr += np.array([npts, 0], dtype=np.int32).tobytes()
# Also include bounding box, mins, then maxes
binstr += np.min(positions, axis=0).astype(np.float32).tobytes()
binstr += np.max(positions, axis=0).astype(np.float32).tobytes()
# TODO: avoid going through numpy; ideally stream raw USD chunk without even parsing
binstr += positions.astype(np.float32).tobytes()
return binstr | Encodes point clouds in a binary format for transferring over the network. Args: positions_list (list of numpy arrays): P x 3 (float32; will convert to this) Returns: bytes |
4,747 | from __future__ import print_function
import argparse
import logging
import os
import sys
import flask
from flask import Flask, render_template
from tornado.wsgi import WSGIContainer
from tornado.web import Application, FallbackHandler
from tornado.ioloop import IOLoop
from kaolin.experimental.dash3d.util import StreamingGeometryHelper, GeometryWebSocketHandler
def create_server(logdir):
    """Build the Tornado application that serves Dash3D.

    Websocket traffic is handled natively by Tornado, while every other
    HTTP request falls back to a WSGI-wrapped Flask app that renders the
    templates and serves static assets.

    Args:
        logdir (str): directory where Timelapse data is written.

    Returns:
        tornado.web.Application: the configured (but not yet listening) server.
    """
    # Geometry-streaming helper bound to the requested log directory.
    helper = StreamingGeometryHelper(logdir)

    # Flask handles plain HTTP (templates + static files).
    base_dir = os.path.dirname(__file__)
    app = Flask('kaolin_dash3d',
                template_folder=os.path.join(base_dir, 'templates'),
                static_url_path='/static',
                static_folder=os.path.join(base_dir, 'static'))

    # NOTE(review): this view function is never registered with Flask here;
    # it most likely lost an @app.route('/') decorator in extraction — confirm
    # against the original dash3d source.
    def index():
        helper.parser.check_for_updates()
        urlargs = dict(flask.request.args)
        max_viewports = get_max_viewports(urlargs)
        return render_template(
            'home.html', logdir=helper.logdir,
            nmeshes=min(helper.parser.num_mesh_items(), max_viewports),
            npointclouds=min(helper.parser.num_pointcloud_items(), max_viewports),
            urlargs=urlargs)

    # Tornado owns the event loop: websockets are served directly, and any
    # other URL is delegated to the Flask app through the WSGI fallback.
    return Application([
        (r'/websocket/', GeometryWebSocketHandler, dict(helper=helper)),
        (r'.*', FallbackHandler, dict(fallback=WSGIContainer(app))),
    ])
def run_main():
aparser = argparse.ArgumentParser(
description='NVIDIA Kaolin Tensorboard 3d visualizer for USD files generated during training.')
aparser.add_argument('--logdir', action='store', type=str, required=True,
help='The vis folder generated by the Timelapse module.')
aparser.add_argument('--log_level', action='store', type=int, default=logging.INFO,
help='Logging level, DEBUG: 10, INFO: 20, WARN: 30, ERROR: 40.')
aparser.add_argument('--port', action='store', default=8080)
args = aparser.parse_args()
logging.basicConfig(level=args.log_level,
format='%(asctime)s|%(levelname)8s|%(name)15s| %(message)s',
handlers=[logging.StreamHandler(sys.stdout)])
print(f'Dash3D server starting. Go to: http://localhost:{args.port}')
server = create_server(args.logdir)
server.listen(args.port)
IOLoop.instance().start() | null |
4,748 | import argparse
import pefile
import glob
import os
import shutil
def parseArgs():
parser = argparse.ArgumentParser( description="Disable ASLR and make .nv_fatb sections read-only", formatter_class=argparse.ArgumentDefaultsHelpFormatter )
parser.add_argument('--input', help="Glob to parse", default="*.dll")
parser.add_argument('--backup', help="Backup modified files", default=True, required=False)
parser.add_argument('--recursive', '-r', default=False, action='store_true', help="Recurse into subdirectories")
return parser.parse_args() | null |
4,749 | from string import printable
from pathvalidate import sanitize_filename, sanitize_filepath
ALLOWED_CHARS = set(printable)
def clean_filename(fn: str, restrict: bool = False) -> str:
path = str(sanitize_filename(fn))
if restrict:
path = "".join(c for c in path if c in ALLOWED_CHARS)
return path | null |
4,750 | from string import printable
from pathvalidate import sanitize_filename, sanitize_filepath
ALLOWED_CHARS = set(printable)
def clean_filepath(fn: str, restrict: bool = False) -> str:
path = str(sanitize_filepath(fn))
if restrict:
path = "".join(c for c in path if c in ALLOWED_CHARS)
return path | null |
4,751 | import functools
from typing import Optional, Type, TypeVar
def get_album_track_ids(source: str, resp) -> list[str]:
tracklist = resp["tracks"]
if source == "qobuz":
tracklist = tracklist["items"]
return [track["id"] for track in tracklist] | null |
4,752 | import functools
from typing import Optional, Type, TypeVar
def safe_get(dictionary, *keys, default=None):
return functools.reduce(
lambda d, key: d.get(key, default) if isinstance(d, dict) else default,
keys,
dictionary,
) | null |
4,753 | import functools
from typing import Optional, Type, TypeVar
T = TypeVar("T")
def typed(thing, expected_type: Type[T]) -> T:
assert isinstance(thing, expected_type)
return thing | null |
4,754 | import functools
from typing import Optional, Type, TypeVar
The provided code snippet includes necessary dependencies for implementing the `get_quality_id` function. Write a Python function `def get_quality_id( bit_depth: Optional[int], sampling_rate: Optional[int | float], ) -> int` to solve the following problem:
Get the universal quality id from bit depth and sampling rate. :param bit_depth: :type bit_depth: Optional[int] :param sampling_rate: In kHz :type sampling_rate: Optional[int]
Here is the function:
def get_quality_id(
bit_depth: Optional[int],
sampling_rate: Optional[int | float],
) -> int:
"""Get the universal quality id from bit depth and sampling rate.
:param bit_depth:
:type bit_depth: Optional[int]
:param sampling_rate: In kHz
:type sampling_rate: Optional[int]
"""
# XXX: Should `0` quality be supported?
if bit_depth is None or sampling_rate is None: # is lossy
return 1
if bit_depth == 16:
return 2
if bit_depth == 24:
if sampling_rate <= 96:
return 3
return 4
raise Exception(f"Invalid {bit_depth = }") | Get the universal quality id from bit depth and sampling rate. :param bit_depth: :type bit_depth: Optional[int] :param sampling_rate: In kHz :type sampling_rate: Optional[int] |
import logging
import os
from dataclasses import dataclass
from enum import Enum

import aiofiles
from mutagen import id3
from mutagen.flac import FLAC, Picture
from mutagen.id3 import (
    APIC,  # type: ignore
    ID3,
    ID3NoHeaderError,
)
from mutagen.mp4 import MP4, MP4Cover

from .track import TrackMetadata
logger = logging.getLogger("streamrip")
class Container(Enum):
    """Audio container formats, each carrying its own tagging logic.

    The enum members double as a dispatch point: given track metadata they
    know how to open the matching mutagen file type, build tag pairs,
    embed cover art, and save the result.

    NOTE(review): ``FLAC_KEY``, ``MP3_KEY``, ``MP4_KEY`` and
    ``FLAC_MAX_BLOCKSIZE`` are module-level tables defined elsewhere in
    this module.
    """

    FLAC = 1
    AAC = 2
    MP3 = 3

    def get_mutagen_class(self, path: str):
        """Open *path* with the mutagen class matching this container."""
        if self == Container.FLAC:
            return FLAC(path)
        elif self == Container.AAC:
            return MP4(path)
        elif self == Container.MP3:
            # An MP3 with no existing ID3 header still gets a fresh tag object.
            try:
                return ID3(path)
            except ID3NoHeaderError:
                return ID3()
        # unreachable
        return {}

    def get_tag_pairs(self, meta) -> list[tuple]:
        """Build the (key, value) tag pairs appropriate for this container."""
        dispatch = {
            Container.FLAC: self._tag_flac,
            Container.MP3: self._tag_mp3,
            Container.AAC: self._tag_mp4,
        }
        handler = dispatch.get(self)
        if handler is None:
            # unreachable
            return []
        return handler(meta)

    def _tag_flac(self, meta: TrackMetadata) -> list[tuple]:
        """Vorbis-comment pairs for FLAC files."""
        pairs = []
        zero_padded = {"tracknumber", "discnumber", "tracktotal", "disctotal"}
        for meta_key, flac_key in FLAC_KEY.items():
            value = self._attr_from_meta(meta, meta_key)
            if not value:
                continue
            if meta_key in zero_padded:
                # Two-digit zero padding for track/disc counters.
                value = f"{int(value):02}"
            pairs.append((flac_key, str(value)))
        return pairs

    def _tag_mp3(self, meta: TrackMetadata):
        """ID3 frames for MP3 files; values are mutagen frame instances."""
        pairs = []
        for meta_key, frame_cls in MP3_KEY.items():
            if meta_key == "tracknumber":
                text = f"{meta.tracknumber}/{meta.album.tracktotal}"
            elif meta_key == "discnumber":
                text = f"{meta.discnumber}/{meta.album.disctotal}"
            else:
                text = self._attr_from_meta(meta, meta_key)
            if text is not None and frame_cls is not None:
                pairs.append((frame_cls.__name__, frame_cls(encoding=3, text=text)))
        return pairs

    def _tag_mp4(self, meta: TrackMetadata):
        """MP4 atom pairs for AAC files."""
        pairs = []
        for meta_key, atom in MP4_KEY.items():
            if meta_key == "tracknumber":
                text = [(meta.tracknumber, meta.album.tracktotal)]
            elif meta_key == "discnumber":
                text = [(meta.discnumber, meta.album.disctotal)]
            elif meta_key == "isrc" and meta.isrc is not None:
                # ISRC is an MP4 freeform value (not supported natively), so
                # mutagen needs the raw bytes. See mutagen.MP4Tags.__render_freeform.
                text = meta.isrc.encode("utf-8")
            else:
                text = self._attr_from_meta(meta, meta_key)
            if atom is not None and text is not None:
                pairs.append((atom, text))
        return pairs

    def _attr_from_meta(self, meta: TrackMetadata, attr: str) -> str | None:
        """Look up *attr* on the track itself, falling back to its album."""
        # TODO: verify this works
        track_level = {
            "title", "album", "artist", "tracknumber",
            "discnumber", "composer", "isrc",
        }
        if attr in track_level:
            if attr == "album":
                return meta.album.album
            value = getattr(meta, attr)
        elif attr == "genre":
            return meta.album.get_genres()
        elif attr == "copyright":
            return meta.album.get_copyright()
        else:
            value = getattr(meta.album, attr)
        return None if value is None else str(value)

    def tag_audio(self, audio, tags: list[tuple]):
        """Copy every (key, value) pair in *tags* onto the mutagen object."""
        for key, value in tags:
            audio[key] = value

    async def embed_cover(self, audio, cover_path):
        """Attach the JPEG at *cover_path* to *audio* as front cover art."""
        if self == Container.FLAC:
            # FLAC pictures live in metadata blocks with a hard size cap.
            if os.path.getsize(cover_path) > FLAC_MAX_BLOCKSIZE:
                raise Exception("Cover art too big for FLAC")
            picture = Picture()
            picture.type = 3  # picture type 3: front cover
            picture.mime = "image/jpeg"
            async with aiofiles.open(cover_path, "rb") as img:
                picture.data = await img.read()
            audio.add_picture(picture)
        elif self == Container.MP3:
            frame = APIC()
            frame.type = 3  # picture type 3: front cover
            frame.mime = "image/jpeg"
            async with aiofiles.open(cover_path, "rb") as img:
                frame.data = await img.read()
            audio.add(frame)
        elif self == Container.AAC:
            async with aiofiles.open(cover_path, "rb") as img:
                art = MP4Cover(await img.read(), imageformat=MP4Cover.FORMAT_JPEG)
            audio["covr"] = [art]

    def save_audio(self, audio, path):
        """Write pending tags to disk."""
        if self == Container.MP3:
            # ID3 needs an explicit target path and the v2.3 flag.
            audio.save(path, "v2_version=3")
        else:
            # FLAC / MP4 objects save back to the path they were opened with.
            audio.save()
@dataclass
class TrackMetadata:
    """Normalized metadata for a single track, plus its stream info.

    Instances are created through the ``from_*`` classmethods (one per
    streaming source), which translate the source's raw API response into
    this common shape.

    NOTE(review): the ``@dataclass`` and ``@classmethod`` decorators were
    missing here although the class has annotated fields (one with a
    default) and its factories take ``cls`` and call ``cls(...)`` /
    ``cls.from_qobuz(...)`` — they have been restored; confirm against the
    upstream source.
    """

    info: TrackInfo
    title: str
    album: AlbumMetadata
    artist: str
    tracknumber: int
    discnumber: int
    composer: str | None
    isrc: str | None = None

    @classmethod
    def from_qobuz(cls, album: AlbumMetadata, resp: dict) -> TrackMetadata | None:
        """Parse a Qobuz track response; returns None for unstreamable tracks."""
        title = typed(resp["title"].strip(), str)
        isrc = typed(resp["isrc"], str)
        streamable = typed(resp.get("streamable", False), bool)
        if not streamable:
            return None
        version = typed(resp.get("version"), str | None)
        work = typed(resp.get("work"), str | None)
        # Fold version/work qualifiers into the title unless already present.
        if version is not None and version not in title:
            title = f"{title} ({version})"
        if work is not None and work not in title:
            title = f"{work}: {title}"
        composer = typed(resp.get("composer", {}).get("name"), str | None)
        tracknumber = typed(resp.get("track_number", 1), int)
        discnumber = typed(resp.get("media_number", 1), int)
        artist = typed(
            safe_get(
                resp,
                "performer",
                "name",
            ),
            str,
        )
        track_id = str(resp["id"])
        bit_depth = typed(resp.get("maximum_bit_depth"), int | None)
        sampling_rate = typed(resp.get("maximum_sampling_rate"), int | float | None)
        # Is the info included?
        explicit = False
        info = TrackInfo(
            id=track_id,
            quality=album.info.quality,
            bit_depth=bit_depth,
            explicit=explicit,
            sampling_rate=sampling_rate,
            work=work,
        )
        return cls(
            info=info,
            title=title,
            album=album,
            artist=artist,
            tracknumber=tracknumber,
            discnumber=discnumber,
            composer=composer,
            isrc=isrc,
        )

    @classmethod
    def from_deezer(cls, album: AlbumMetadata, resp) -> TrackMetadata | None:
        """Parse a Deezer track response (always CD-quality: 16 bit / 44.1 kHz)."""
        track_id = str(resp["id"])
        isrc = typed(resp["isrc"], str)
        bit_depth = 16
        sampling_rate = 44.1
        explicit = typed(resp["explicit_lyrics"], bool)
        work = None
        title = typed(resp["title"], str)
        artist = typed(resp["artist"]["name"], str)
        tracknumber = typed(resp["track_position"], int)
        discnumber = typed(resp["disk_number"], int)
        composer = None
        info = TrackInfo(
            id=track_id,
            quality=album.info.quality,
            bit_depth=bit_depth,
            explicit=explicit,
            sampling_rate=sampling_rate,
            work=work,
        )
        return cls(
            info=info,
            title=title,
            album=album,
            artist=artist,
            tracknumber=tracknumber,
            discnumber=discnumber,
            composer=composer,
            isrc=isrc,
        )

    @classmethod
    def from_soundcloud(cls, album: AlbumMetadata, resp: dict) -> TrackMetadata:
        """Parse a SoundCloud track response (no bit depth / sample rate info)."""
        track = resp
        track_id = track["id"]
        isrc = typed(safe_get(track, "publisher_metadata", "isrc"), str | None)
        bit_depth, sampling_rate = None, None
        explicit = typed(
            safe_get(track, "publisher_metadata", "explicit", default=False),
            bool,
        )

        title = typed(track["title"].strip(), str)
        artist = typed(track["user"]["username"], str)
        tracknumber = 1

        info = TrackInfo(
            id=track_id,
            quality=album.info.quality,
            bit_depth=bit_depth,
            explicit=explicit,
            sampling_rate=sampling_rate,
            work=None,
        )
        return cls(
            info=info,
            title=title,
            album=album,
            artist=artist,
            tracknumber=tracknumber,
            discnumber=0,
            composer=None,
            isrc=isrc,
        )

    @classmethod
    def from_tidal(cls, album: AlbumMetadata, track) -> TrackMetadata:
        """Parse a Tidal track response, mapping Tidal quality names to ints."""
        title = typed(track["title"], str).strip()
        item_id = str(track["id"])
        isrc = typed(track["isrc"], str)
        version = track.get("version")
        explicit = track.get("explicit", False)
        if version:
            title = f"{title} ({version})"

        tracknumber = typed(track.get("trackNumber", 1), int)
        discnumber = typed(track.get("volumeNumber", 1), int)

        artists = track.get("artists")
        if len(artists) > 0:
            artist = ", ".join(a["name"] for a in artists)
        else:
            artist = track["artist"]["name"]

        quality_map: dict[str, int] = {
            "LOW": 0,
            "HIGH": 1,
            "LOSSLESS": 2,
            "HI_RES": 3,
        }

        tidal_quality = track.get("audioQuality")
        if tidal_quality is not None:
            quality = quality_map[tidal_quality]
        else:
            quality = 0

        # Lossless tiers imply CD-or-better parameters; lossy tiers report none.
        if quality >= 2:
            sampling_rate = 44100
            if quality == 3:
                bit_depth = 24
            else:
                bit_depth = 16
        else:
            sampling_rate = bit_depth = None

        info = TrackInfo(
            id=item_id,
            quality=quality,
            bit_depth=bit_depth,
            explicit=explicit,
            sampling_rate=sampling_rate,
            work=None,
        )
        return cls(
            info=info,
            title=title,
            album=album,
            artist=artist,
            tracknumber=tracknumber,
            discnumber=discnumber,
            composer=None,
            isrc=isrc,
        )

    @classmethod
    def from_resp(cls, album: AlbumMetadata, source, resp) -> TrackMetadata | None:
        """Dispatch to the source-specific parser.

        Raises:
            Exception: if *source* is not one of the supported services.
        """
        if source == "qobuz":
            return cls.from_qobuz(album, resp)
        if source == "tidal":
            return cls.from_tidal(album, resp)
        if source == "soundcloud":
            return cls.from_soundcloud(album, resp)
        if source == "deezer":
            return cls.from_deezer(album, resp)
        raise Exception(f"Invalid source: {source}")

    def format_track_path(self, format_string: str) -> str:
        """Fill *format_string* with this track's fields to build a file path."""
        # Available keys: "tracknumber", "artist", "albumartist", "composer", "title",
        # and "explicit", "albumcomposer"
        none_text = "Unknown"
        info = {
            "title": self.title,
            "tracknumber": self.tracknumber,
            "artist": self.artist,
            "albumartist": self.album.albumartist,
            "albumcomposer": self.album.albumcomposer or none_text,
            "composer": self.composer or none_text,
            "explicit": " (Explicit) " if self.info.explicit else "",
        }
        return format_string.format(**info)
async def tag_file(path: str, meta: TrackMetadata, cover_path: str | None):
ext = path.split(".")[-1].lower()
if ext == "flac":
container = Container.FLAC
elif ext == "m4a":
container = Container.AAC
elif ext == "mp3":
container = Container.MP3
else:
raise Exception(f"Invalid extension {ext}")
audio = container.get_mutagen_class(path)
tags = container.get_tag_pairs(meta)
logger.debug("Tagging with %s", tags)
container.tag_audio(audio, tags)
if cover_path is not None:
await container.embed_cover(audio, cover_path)
container.save_audio(audio, path) | null |
4,756 | import os
import re
import textwrap
from abc import ABC, abstractmethod
from dataclasses import dataclass
def clean(s: str, trunc=True) -> str:
s = s.replace("|", "").replace("\n", "")
if trunc:
max_chars = 50
return s[:max_chars]
return s | null |
4,757 | import logging
from dataclasses import dataclass
from .album import AlbumMetadata
from .track import TrackMetadata
from .util import typed
NON_STREAMABLE = "_non_streamable"
ORIGINAL_DOWNLOAD = "_original_download"
NOT_RESOLVED = "_not_resolved"
def get_soundcloud_id(resp: dict) -> str:
item_id = resp["id"]
if "media" not in resp:
return f"{item_id}|{NOT_RESOLVED}"
if not resp["streamable"] or resp["policy"] == "BLOCK":
return f"{item_id}|{NON_STREAMABLE}"
if resp["downloadable"] and resp["has_downloads_left"]:
return f"{item_id}|{ORIGINAL_DOWNLOAD}"
url = None
for tc in resp["media"]["transcodings"]:
fmt = tc["format"]
if fmt["protocol"] == "hls" and fmt["mime_type"] == "audio/mpeg":
url = tc["url"]
break
assert url is not None
return f"{item_id}|{url}" | null |
4,758 | import logging
from dataclasses import dataclass
from .album import AlbumMetadata
from .track import TrackMetadata
from .util import typed
def parse_soundcloud_id(item_id: str) -> tuple[str, str]:
info = item_id.split("|")
assert len(info) == 2
return tuple(info) | null |
4,759 | import copy
import logging
import os
import shutil
from dataclasses import dataclass, fields
from pathlib import Path
import click
from tomlkit.api import dumps, parse
from tomlkit.toml_document import TOMLDocument
def update_toml_section_from_config(toml_section, config):
    """Copy every dataclass field of `config` into `toml_section` by name."""
    for f in fields(config):
        toml_section[f.name] = getattr(config, f.name)
4,760 | import asyncio
import logging
import os
import shutil
import aiohttp
from PIL import Image
from ..client import BasicDownloadable
from ..config import ArtworkConfig
from ..metadata import Covers
# Temporary directories created to hold embedded artwork; registered here so
# they can all be removed at the end of the session.
_artwork_tempdirs: set[str] = set()
logger = logging.getLogger("streamrip")
def remove_artwork_tempdirs():
    """Delete every registered temporary artwork directory."""
    logger.debug("Removing dirs %s", _artwork_tempdirs)
    for path in _artwork_tempdirs:
        try:
            shutil.rmtree(path)
        except FileNotFoundError:
            # Directory already gone; nothing to clean up.
            pass
4,761 | import asyncio
import logging
import os
import shutil
import aiohttp
from PIL import Image
from ..client import BasicDownloadable
from ..config import ArtworkConfig
from ..metadata import Covers
# Temporary directories created to hold embedded artwork; cleaned up at the
# end of the session.
_artwork_tempdirs: set[str] = set()
def downscale_image(input_image_path: str, max_dimension: int):
    """Downscale an image in place given a maximum allowed dimension.

    The aspect ratio is preserved; the longer side is capped at
    `max_dimension`. No-op when the image already fits.

    Args:
    ----
        input_image_path (str): Path to image
        max_dimension (int): Maximum dimension allowed

    Returns:
    -------
        None
    """
    # Use a context manager so the file handle is closed even when we
    # return early without resizing (the original leaked it).
    with Image.open(input_image_path) as image:
        width, height = image.size

        if max_dimension >= max(width, height):
            return

        # Calculate the new dimensions while maintaining the aspect ratio
        if width > height:
            new_width = max_dimension
            new_height = int(height * (max_dimension / width))
        else:
            new_height = max_dimension
            new_width = int(width * (max_dimension / height))

        resized_image = image.resize((new_width, new_height))

    # Overwrite the original file after the source handle is closed.
    resized_image.save(input_image_path)
class ArtworkConfig:
    """Config section controlling embedded and saved cover art.

    NOTE(review): fields-only class; presumably decorated with @dataclass at
    the original definition site -- confirm.
    """

    # Write the image to the audio file
    embed: bool
    # The size of the artwork to embed. Options: thumbnail, small, large, original.
    # "original" images can be up to 30MB, and may fail embedding.
    # Using "large" is recommended.
    embed_size: str
    # Both of these options limit the size of the embedded artwork. If their values
    # are larger than the actual dimensions of the image, they will be ignored.
    # If either value is -1, the image is left untouched.
    embed_max_width: int
    # Save the cover image at the highest quality as a separate jpg file
    save_artwork: bool
    # If artwork is saved, downscale it to these dimensions, or ignore if -1
    saved_max_width: int
The provided code snippet includes necessary dependencies for implementing the `download_artwork` function. Write a Python function `async def download_artwork( session: aiohttp.ClientSession, folder: str, covers: Covers, config: ArtworkConfig, for_playlist: bool, ) -> tuple[str | None, str | None]` to solve the following problem:
Download artwork and update passed Covers object with filepaths. If paths for the selected sizes already exist in `covers`, nothing will be downloaded. If `for_playlist` is set, it will not download hires cover art regardless of the config setting. Embedded artworks are put in a temporary directory under `folder` called "__embed" that can be deleted once a playlist or album is done downloading. Hi-res (saved) artworks are kept in `folder` as "cover.jpg". Args: ---- session (aiohttp.ClientSession): folder (str): covers (Covers): config (ArtworkConfig): for_playlist (bool): Set to disable saved hires covers. Returns: ------- (path to embedded artwork, path to hires artwork)
Here is the function:
async def download_artwork(
    session: aiohttp.ClientSession,
    folder: str,
    covers: Covers,
    config: ArtworkConfig,
    for_playlist: bool,
) -> tuple[str | None, str | None]:
    """Download artwork and update passed Covers object with filepaths.

    If paths for the selected sizes already exist in `covers`, nothing will
    be downloaded.

    If `for_playlist` is set, it will not download hires cover art regardless
    of the config setting.

    Embedded artworks are put in a temporary directory under `folder` called
    "__embed" that can be deleted once a playlist or album is done downloading.

    Hi-res (saved) artworks are kept in `folder` as "cover.jpg".

    Args:
    ----
        session (aiohttp.ClientSession):
        folder (str):
        covers (Covers):
        config (ArtworkConfig):
        for_playlist (bool): Set to disable saved hires covers.

    Returns:
    -------
        (path to embedded artwork, path to hires artwork)
    """
    save_artwork, embed = config.save_artwork, config.embed
    if for_playlist:
        save_artwork = False

    if not (save_artwork or embed) or covers.empty():
        # No need to download anything
        return None, None

    downloadables = []

    # Saved (hi-res) cover: always the largest size available.
    _, l_url, saved_cover_path = covers.largest()
    if saved_cover_path is None and save_artwork:
        saved_cover_path = os.path.join(folder, "cover.jpg")
        assert l_url is not None
        downloadables.append(
            BasicDownloadable(session, l_url, "jpg").download(
                saved_cover_path,
                lambda _: None,
            ),
        )

    # Embedded cover: size chosen via config; stored in a tempdir keyed by
    # the URL hash so identical covers can be shared within a session.
    _, embed_url, embed_cover_path = covers.get_size(config.embed_size)
    if embed_cover_path is None and embed:
        assert embed_url is not None
        embed_dir = os.path.join(folder, "__artwork")
        os.makedirs(embed_dir, exist_ok=True)
        _artwork_tempdirs.add(embed_dir)
        embed_cover_path = os.path.join(embed_dir, f"cover{hash(embed_url)}.jpg")
        downloadables.append(
            BasicDownloadable(session, embed_url, "jpg").download(
                embed_cover_path,
                lambda _: None,
            ),
        )

    if len(downloadables) == 0:
        # Both paths were already present in `covers`.
        return embed_cover_path, saved_cover_path

    await asyncio.gather(*downloadables)

    # Update `covers` to reflect the current download state
    if save_artwork:
        assert saved_cover_path is not None
        covers.set_largest_path(saved_cover_path)
        if config.saved_max_width > 0:
            downscale_image(saved_cover_path, config.saved_max_width)

    if embed:
        assert embed_cover_path is not None
        covers.set_path(config.embed_size, embed_cover_path)
        if config.embed_max_width > 0:
            downscale_image(embed_cover_path, config.embed_max_width)

    return embed_cover_path, saved_cover_path
4,762 | import asyncio
from contextlib import nullcontext
from ..config import DownloadsConfig
# Shared no-op context manager returned when downloads are unbounded.
_unlimited = nullcontext()
# Process-wide (bound, semaphore) pair; created once on first use.
_global_semaphore: None | tuple[int, asyncio.Semaphore] = None
class DownloadsConfig:
# Folder where tracks are downloaded to
folder: str
# Put Qobuz albums in a 'Qobuz' folder, Tidal albums in 'Tidal' etc.
source_subdirectories: bool
# Download (and convert) tracks all at once, instead of sequentially.
# If you are converting the tracks, or have fast internet, this will
# substantially improve processing speed.
concurrency: bool
# The maximum number of tracks to download at once
# If you have very fast internet, you will benefit from a higher value,
# A value that is too high for your bandwidth may cause slowdowns
max_connections: int
requests_per_minute: int
The provided code snippet includes necessary dependencies for implementing the `global_download_semaphore` function. Write a Python function `def global_download_semaphore(c: DownloadsConfig) -> asyncio.Semaphore | nullcontext` to solve the following problem:
A global semaphore that limit the number of total tracks being downloaded at once. If concurrency is disabled in the config, the semaphore is set to 1. Otherwise it's set to `max_connections`. A negative `max_connections` value means there is no maximum and no semaphore is used. Since it is global, only one value of `max_connections` is allowed per session.
Here is the function:
def global_download_semaphore(c: DownloadsConfig) -> asyncio.Semaphore | nullcontext:
    """A global semaphore that limits the number of total tracks being
    downloaded at once.

    If concurrency is disabled in the config, the semaphore is set to 1.
    Otherwise it's set to `max_connections`.

    A negative `max_connections` value means there is no maximum and no
    semaphore is used.

    Since it is global, only one value of `max_connections` is allowed per
    session.

    Raises:
        AssertionError: if called again with a different `max_connections`.
    """
    global _global_semaphore

    if c.concurrency:
        max_connections = c.max_connections if c.max_connections > 0 else None
    else:
        max_connections = 1

    if max_connections is None:
        # Unbounded: hand back the shared no-op context manager.
        return _unlimited

    # max_connections is guaranteed >= 1 at this point, so the previous
    # `if max_connections <= 0: raise` guard was unreachable and is removed.
    if _global_semaphore is None:
        _global_semaphore = (max_connections, asyncio.Semaphore(max_connections))

    assert (
        max_connections == _global_semaphore[0]
    ), f"Already have other global semaphore {_global_semaphore}"

    return _global_semaphore[1]
4,763 | import asyncio
import json
import logging
import os
import shutil
import subprocess
from functools import wraps
from typing import Any
import aiofiles
import aiohttp
import click
from click_help_colors import HelpColorsGroup
from rich.logging import RichHandler
from rich.markdown import Markdown
from rich.prompt import Confirm
from rich.traceback import install
from .. import __version__, db
from ..config import DEFAULT_CONFIG_PATH, Config, set_user_defaults
from ..console import console
from .main import Main
def coro(f):
    """Decorator adapting an async function to a synchronous call site.

    The wrapped coroutine function is executed to completion with
    `asyncio.run`, and its result is returned directly.
    """

    @wraps(f)
    def runner(*args, **kwargs):
        return asyncio.run(f(*args, **kwargs))

    return runner
4,764 | import asyncio
import json
import logging
import os
import shutil
import subprocess
from functools import wraps
from typing import Any
import aiofiles
import aiohttp
import click
from click_help_colors import HelpColorsGroup
from rich.logging import RichHandler
from rich.markdown import Markdown
from rich.prompt import Confirm
from rich.traceback import install
from .. import __version__, db
from ..config import DEFAULT_CONFIG_PATH, Config, set_user_defaults
from ..console import console
from .main import Main
def rip(ctx, config_path, folder, no_db, quality, codec, no_progress, verbose):
    """Streamrip: the all in one music downloader."""
    # The module-level logger is (re)configured here based on CLI flags.
    global logger
    logging.basicConfig(
        level="INFO",
        format="%(message)s",
        datefmt="[%X]",
        handlers=[RichHandler()],
    )
    logger = logging.getLogger("streamrip")
    if verbose:
        # Verbose mode: rich tracebacks with locals, debug-level logs.
        install(
            console=console,
            suppress=[
                click,
            ],
            show_locals=True,
            locals_hide_sunder=False,
        )
        logger.setLevel(logging.DEBUG)
        logger.debug("Showing all debug logs")
    else:
        install(console=console, suppress=[click, asyncio], max_frames=1)
        logger.setLevel(logging.INFO)

    if not os.path.isfile(config_path):
        console.print(
            f"No file found at [bold cyan]{config_path}[/bold cyan], creating default config.",
        )
        set_user_defaults(config_path)

    # pass to subcommands
    ctx.ensure_object(dict)
    ctx.obj["config_path"] = config_path

    try:
        c = Config(config_path)
    except Exception as e:
        # A broken config is not fatal here; subcommands see config=None.
        console.print(
            f"Error loading config from [bold cyan]{config_path}[/bold cyan]: {e}\n"
            "Try running [bold]rip config reset[/bold]",
        )
        ctx.obj["config"] = None
        return

    # set session config values to command line args
    if no_db:
        c.session.database.downloads_enabled = False
    if folder is not None:
        c.session.downloads.folder = folder

    if quality is not None:
        c.session.qobuz.quality = quality
        c.session.tidal.quality = quality
        c.session.deezer.quality = quality
        c.session.soundcloud.quality = quality

    if codec is not None:
        c.session.conversion.enabled = True
        assert codec.upper() in ("ALAC", "FLAC", "OGG", "MP3", "AAC")
        c.session.conversion.codec = codec.upper()

    if no_progress:
        c.session.cli.progress_bars = False

    ctx.obj["config"] = c
async def latest_streamrip_version() -> tuple[str, str | None]:
    """Return (latest version on PyPI, release notes or None).

    Release notes are fetched from the GitHub releases API only when the
    latest published version differs from the running `__version__`;
    otherwise the second element is None.
    """
    async with aiohttp.ClientSession() as s:
        async with s.get("https://pypi.org/pypi/streamrip/json") as resp:
            data = await resp.json()
            version = data["info"]["version"]

        if version == __version__:
            return version, None

        async with s.get(
            "https://api.github.com/repos/nathom/streamrip/releases/latest"
        ) as resp:
            # Renamed from `json`, which shadowed the stdlib `json` module
            # imported at the top of the file.
            release = await resp.json()
            notes = release["body"]
        return version, notes
# Current streamrip release; compared against PyPI to suggest upgrades.
__version__ = "2.0.5"
class Config:
    """Loads and persists the streamrip TOML configuration.

    `self.file` mirrors the on-disk file; `self.session` is a deep copy that
    may be mutated freely for the current run (e.g. by CLI flags) without
    being written back.
    """

    def __init__(self, path: str, /):
        self.path = path
        with open(path) as toml_file:
            self.file: ConfigData = ConfigData.from_toml(toml_file.read())
        # Session-scoped copy: runtime overrides go here, not to disk.
        self.session: ConfigData = copy.deepcopy(self.file)

    def save_file(self):
        # Only write back if the file-backed config was actually modified.
        if not self.file.modified:
            return
        with open(self.path, "w") as toml_file:
            self.file.update_toml()
            toml_file.write(dumps(self.file.toml))

    # NOTE(review): takes `cls` -- presumably decorated with @classmethod at
    # the original definition site; decorator not visible here. Confirm.
    def defaults(cls):
        return cls(BLANK_CONFIG_PATH)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        # Persist any modifications made through `self.file` on exit.
        self.save_file()
# Shared Rich console used for all terminal output.
console = Console()
class Main:
    """Provides all of the functionality called into by the CLI.

    * Logs in to Clients and prompts for credentials
    * Handles output logging
    * Handles downloading Media
    * Handles interactive search

    User input (urls) -> Main --> Download files & Output messages to terminal
    """

    def __init__(self, config: Config):
        # Data pipeline:
        # input URL -> (URL) -> (Pending) -> (Media) -> (Downloadable) -> audio file
        self.pending: list[Pending] = []
        self.media: list[Media] = []
        self.config = config
        # One lazily-authenticated client per supported source.
        self.clients: dict[str, Client] = {
            "qobuz": QobuzClient(config),
            "tidal": TidalClient(config),
            "deezer": DeezerClient(config),
            "soundcloud": SoundcloudClient(config),
        }

        self.database: db.Database

        # Dummy backends are used when a database feature is disabled.
        c = self.config.session.database
        if c.downloads_enabled:
            downloads_db = db.Downloads(c.downloads_path)
        else:
            downloads_db = db.Dummy()
        if c.failed_downloads_enabled:
            failed_downloads_db = db.Failed(c.failed_downloads_path)
        else:
            failed_downloads_db = db.Dummy()
        self.database = db.Database(downloads_db, failed_downloads_db)

    async def add(self, url: str):
        """Add url as a pending item.

        Do not `asyncio.gather` calls to this! Use `add_all` for concurrency.
        """
        parsed = parse_url(url)
        if parsed is None:
            raise Exception(f"Unable to parse url {url}")

        client = await self.get_logged_in_client(parsed.source)
        self.pending.append(
            await parsed.into_pending(client, self.config, self.database),
        )
        logger.debug("Added url=%s", url)

    async def add_by_id(self, source: str, media_type: str, id: str):
        # Queue a single item identified by (source, media_type, id).
        client = await self.get_logged_in_client(source)
        self._add_by_id_client(client, media_type, id)

    async def add_all_by_id(self, info: list[tuple[str, str, str]]):
        # Log in once per distinct source, then queue every item.
        sources = set(s for s, _, _ in info)
        clients = {s: await self.get_logged_in_client(s) for s in sources}
        for source, media_type, id in info:
            self._add_by_id_client(clients[source], media_type, id)

    def _add_by_id_client(self, client: Client, media_type: str, id: str):
        # Map the media_type string onto the matching Pending* wrapper.
        if media_type == "track":
            item = PendingSingle(id, client, self.config, self.database)
        elif media_type == "album":
            item = PendingAlbum(id, client, self.config, self.database)
        elif media_type == "playlist":
            item = PendingPlaylist(id, client, self.config, self.database)
        elif media_type == "label":
            item = PendingLabel(id, client, self.config, self.database)
        elif media_type == "artist":
            item = PendingArtist(id, client, self.config, self.database)
        else:
            raise Exception(media_type)

        self.pending.append(item)

    async def add_all(self, urls: list[str]):
        """Add multiple urls concurrently as pending items."""
        parsed = [parse_url(url) for url in urls]
        url_client_pairs = []
        for i, p in enumerate(parsed):
            if p is None:
                console.print(
                    f"[red]Found invalid url [cyan]{urls[i]}[/cyan], skipping.",
                )
                continue
            url_client_pairs.append((p, await self.get_logged_in_client(p.source)))

        pendings = await asyncio.gather(
            *[
                url.into_pending(client, self.config, self.database)
                for url, client in url_client_pairs
            ],
        )
        self.pending.extend(pendings)

    async def get_logged_in_client(self, source: str):
        """Return a functioning client instance for `source`."""
        client = self.clients.get(source)
        if client is None:
            raise Exception(
                f"No client named {source} available. Only have {self.clients.keys()}",
            )
        if not client.logged_in:
            prompter = get_prompter(client, self.config)
            if not prompter.has_creds():
                # Get credentials from user and log into client
                await prompter.prompt_and_login()
                prompter.save()
            else:
                with console.status(f"[cyan]Logging into {source}", spinner="dots"):
                    # Log into client using credentials from config
                    await client.login()

        assert client.logged_in
        return client

    async def resolve(self):
        """Resolve all currently pending items."""
        with console.status("Resolving URLs...", spinner="dots"):
            coros = [p.resolve() for p in self.pending]
            new_media: list[Media] = [
                m for m in await asyncio.gather(*coros) if m is not None
            ]

        self.media.extend(new_media)
        self.pending.clear()

    async def rip(self):
        """Download all resolved items."""
        await asyncio.gather(*[item.rip() for item in self.media])

    async def search_interactive(self, source: str, media_type: str, query: str):
        # Run a search and let the user pick results from an interactive menu.
        client = await self.get_logged_in_client(source)

        with console.status(f"[bold]Searching {source}", spinner="dots"):
            pages = await client.search(media_type, query, limit=100)

        if len(pages) == 0:
            console.print(f"[red]No search results found for query {query}")
            return

        search_results = SearchResults.from_pages(source, media_type, pages)

        if platform.system() == "Windows":  # simple term menu not supported for windows
            from pick import pick

            choices = pick(
                search_results.results,
                title=(
                    f"{source.capitalize()} {media_type} search.\n"
                    "Press SPACE to select, RETURN to download, CTRL-C to exit."
                ),
                multiselect=True,
                min_selection_count=1,
            )
            assert isinstance(choices, list)

            await self.add_all_by_id(
                [(source, media_type, item.id) for item, _ in choices],
            )
        else:
            from simple_term_menu import TerminalMenu

            menu = TerminalMenu(
                search_results.summaries(),
                preview_command=search_results.preview,
                preview_size=0.5,
                title=(
                    f"Results for {media_type} '{query}' from {source.capitalize()}\n"
                    "SPACE - select, ENTER - download, ESC - exit"
                ),
                cycle_cursor=True,
                clear_screen=True,
                multi_select=True,
            )
            chosen_ind = menu.show()
            if chosen_ind is None:
                console.print("[yellow]No items chosen. Exiting.")
            else:
                choices = search_results.get_choices(chosen_ind)
                await self.add_all_by_id(
                    [(source, item.media_type(), item.id) for item in choices],
                )

    async def search_take_first(self, source: str, media_type: str, query: str):
        # Search and immediately queue the top result.
        client = await self.get_logged_in_client(source)
        with console.status(f"[bold]Searching {source}", spinner="dots"):
            pages = await client.search(media_type, query, limit=1)

        if len(pages) == 0:
            console.print(f"[red]No search results found for query {query}")
            return

        search_results = SearchResults.from_pages(source, media_type, pages)
        assert len(search_results.results) > 0
        first = search_results.results[0]
        await self.add_by_id(source, first.media_type(), first.id)

    async def search_output_file(
        self, source: str, media_type: str, query: str, filepath: str, limit: int
    ):
        # Search and dump results to `filepath` as JSON instead of downloading.
        client = await self.get_logged_in_client(source)

        with console.status(f"[bold]Searching {source}", spinner="dots"):
            pages = await client.search(media_type, query, limit=limit)

        if len(pages) == 0:
            console.print(f"[red]No search results found for query {query}")
            return

        search_results = SearchResults.from_pages(source, media_type, pages)
        file_contents = json.dumps(search_results.as_list(source), indent=4)
        async with aiofiles.open(filepath, "w") as f:
            await f.write(file_contents)

        console.print(
            f"Wrote [purple]{len(search_results.results)}[/purple] results to [cyan]{filepath} as JSON!"
        )

    async def resolve_lastfm(self, playlist_url: str):
        """Resolve a last.fm playlist."""
        c = self.config.session.lastfm
        client = await self.get_logged_in_client(c.source)

        if len(c.fallback_source) > 0:
            fallback_client = await self.get_logged_in_client(c.fallback_source)
        else:
            fallback_client = None

        pending_playlist = PendingLastfmPlaylist(
            playlist_url,
            client,
            fallback_client,
            self.config,
            self.database,
        )
        playlist = await pending_playlist.resolve()

        if playlist is not None:
            self.media.append(playlist)

    async def __aenter__(self):
        return self

    async def __aexit__(self, *_):
        # Ensure all client sessions are closed
        for client in self.clients.values():
            if hasattr(client, "session"):
                await client.session.close()

        # close global progress bar manager
        clear_progress()

        # We remove artwork tempdirs here because multiple singles
        # may be able to share downloaded artwork in the same `rip` session
        # We don't know that a cover will not be used again until end of execution
        remove_artwork_tempdirs()
The provided code snippet includes necessary dependencies for implementing the `url` function. Write a Python function `async def url(ctx, urls)` to solve the following problem:
Download content from URLs.
Here is the function:
async def url(ctx, urls):
    """Download content from URLs."""
    with ctx.obj["config"] as cfg:
        cfg: Config
        updates = cfg.session.misc.check_for_updates
        if updates:
            # Run in background
            version_coro = asyncio.create_task(latest_streamrip_version())
        else:
            version_coro = None

        async with Main(cfg) as main:
            await main.add_all(urls)
            await main.resolve()
            await main.rip()

        if version_coro is not None:
            latest_version, notes = await version_coro
            # `notes` is None only when versions match, so the branch below
            # never passes None to Markdown.
            if latest_version != __version__:
                console.print(
                    f"\n[green]A new version of streamrip [cyan]v{latest_version}[/cyan]"
                    " is available! Run [white][bold]pip3 install streamrip --upgrade[/bold][/white]"
                    " to update.[/green]\n"
                )
                console.print(Markdown(notes))
4,765 | import asyncio
import json
import logging
import os
import shutil
import subprocess
from functools import wraps
from typing import Any
import aiofiles
import aiohttp
import click
from click_help_colors import HelpColorsGroup
from rich.logging import RichHandler
from rich.markdown import Markdown
from rich.prompt import Confirm
from rich.traceback import install
from .. import __version__, db
from ..config import DEFAULT_CONFIG_PATH, Config, set_user_defaults
from ..console import console
from .main import Main
def rip(ctx, config_path, folder, no_db, quality, codec, no_progress, verbose):
    """Streamrip: the all in one music downloader."""
    # The module-level logger is (re)configured here based on CLI flags.
    global logger
    logging.basicConfig(
        level="INFO",
        format="%(message)s",
        datefmt="[%X]",
        handlers=[RichHandler()],
    )
    logger = logging.getLogger("streamrip")
    if verbose:
        # Verbose mode: rich tracebacks with locals, debug-level logs.
        install(
            console=console,
            suppress=[
                click,
            ],
            show_locals=True,
            locals_hide_sunder=False,
        )
        logger.setLevel(logging.DEBUG)
        logger.debug("Showing all debug logs")
    else:
        install(console=console, suppress=[click, asyncio], max_frames=1)
        logger.setLevel(logging.INFO)

    if not os.path.isfile(config_path):
        console.print(
            f"No file found at [bold cyan]{config_path}[/bold cyan], creating default config.",
        )
        set_user_defaults(config_path)

    # pass to subcommands
    ctx.ensure_object(dict)
    ctx.obj["config_path"] = config_path

    try:
        c = Config(config_path)
    except Exception as e:
        # A broken config is not fatal here; subcommands see config=None.
        console.print(
            f"Error loading config from [bold cyan]{config_path}[/bold cyan]: {e}\n"
            "Try running [bold]rip config reset[/bold]",
        )
        ctx.obj["config"] = None
        return

    # set session config values to command line args
    if no_db:
        c.session.database.downloads_enabled = False
    if folder is not None:
        c.session.downloads.folder = folder

    if quality is not None:
        c.session.qobuz.quality = quality
        c.session.tidal.quality = quality
        c.session.deezer.quality = quality
        c.session.soundcloud.quality = quality

    if codec is not None:
        c.session.conversion.enabled = True
        assert codec.upper() in ("ALAC", "FLAC", "OGG", "MP3", "AAC")
        c.session.conversion.codec = codec.upper()

    if no_progress:
        c.session.cli.progress_bars = False

    ctx.obj["config"] = c
# Shared Rich console used for all terminal output.
console = Console()
class Main:
    """Provides all of the functionality called into by the CLI.

    * Logs in to Clients and prompts for credentials
    * Handles output logging
    * Handles downloading Media
    * Handles interactive search

    User input (urls) -> Main --> Download files & Output messages to terminal
    """

    def __init__(self, config: Config):
        # Data pipeline:
        # input URL -> (URL) -> (Pending) -> (Media) -> (Downloadable) -> audio file
        self.pending: list[Pending] = []
        self.media: list[Media] = []
        self.config = config
        # One lazily-authenticated client per supported source.
        self.clients: dict[str, Client] = {
            "qobuz": QobuzClient(config),
            "tidal": TidalClient(config),
            "deezer": DeezerClient(config),
            "soundcloud": SoundcloudClient(config),
        }

        self.database: db.Database

        # Dummy backends are used when a database feature is disabled.
        c = self.config.session.database
        if c.downloads_enabled:
            downloads_db = db.Downloads(c.downloads_path)
        else:
            downloads_db = db.Dummy()
        if c.failed_downloads_enabled:
            failed_downloads_db = db.Failed(c.failed_downloads_path)
        else:
            failed_downloads_db = db.Dummy()
        self.database = db.Database(downloads_db, failed_downloads_db)

    async def add(self, url: str):
        """Add url as a pending item.

        Do not `asyncio.gather` calls to this! Use `add_all` for concurrency.
        """
        parsed = parse_url(url)
        if parsed is None:
            raise Exception(f"Unable to parse url {url}")

        client = await self.get_logged_in_client(parsed.source)
        self.pending.append(
            await parsed.into_pending(client, self.config, self.database),
        )
        logger.debug("Added url=%s", url)

    async def add_by_id(self, source: str, media_type: str, id: str):
        # Queue a single item identified by (source, media_type, id).
        client = await self.get_logged_in_client(source)
        self._add_by_id_client(client, media_type, id)

    async def add_all_by_id(self, info: list[tuple[str, str, str]]):
        # Log in once per distinct source, then queue every item.
        sources = set(s for s, _, _ in info)
        clients = {s: await self.get_logged_in_client(s) for s in sources}
        for source, media_type, id in info:
            self._add_by_id_client(clients[source], media_type, id)

    def _add_by_id_client(self, client: Client, media_type: str, id: str):
        # Map the media_type string onto the matching Pending* wrapper.
        if media_type == "track":
            item = PendingSingle(id, client, self.config, self.database)
        elif media_type == "album":
            item = PendingAlbum(id, client, self.config, self.database)
        elif media_type == "playlist":
            item = PendingPlaylist(id, client, self.config, self.database)
        elif media_type == "label":
            item = PendingLabel(id, client, self.config, self.database)
        elif media_type == "artist":
            item = PendingArtist(id, client, self.config, self.database)
        else:
            raise Exception(media_type)

        self.pending.append(item)

    async def add_all(self, urls: list[str]):
        """Add multiple urls concurrently as pending items."""
        parsed = [parse_url(url) for url in urls]
        url_client_pairs = []
        for i, p in enumerate(parsed):
            if p is None:
                console.print(
                    f"[red]Found invalid url [cyan]{urls[i]}[/cyan], skipping.",
                )
                continue
            url_client_pairs.append((p, await self.get_logged_in_client(p.source)))

        pendings = await asyncio.gather(
            *[
                url.into_pending(client, self.config, self.database)
                for url, client in url_client_pairs
            ],
        )
        self.pending.extend(pendings)

    async def get_logged_in_client(self, source: str):
        """Return a functioning client instance for `source`."""
        client = self.clients.get(source)
        if client is None:
            raise Exception(
                f"No client named {source} available. Only have {self.clients.keys()}",
            )
        if not client.logged_in:
            prompter = get_prompter(client, self.config)
            if not prompter.has_creds():
                # Get credentials from user and log into client
                await prompter.prompt_and_login()
                prompter.save()
            else:
                with console.status(f"[cyan]Logging into {source}", spinner="dots"):
                    # Log into client using credentials from config
                    await client.login()

        assert client.logged_in
        return client

    async def resolve(self):
        """Resolve all currently pending items."""
        with console.status("Resolving URLs...", spinner="dots"):
            coros = [p.resolve() for p in self.pending]
            new_media: list[Media] = [
                m for m in await asyncio.gather(*coros) if m is not None
            ]

        self.media.extend(new_media)
        self.pending.clear()

    async def rip(self):
        """Download all resolved items."""
        await asyncio.gather(*[item.rip() for item in self.media])

    async def search_interactive(self, source: str, media_type: str, query: str):
        # Run a search and let the user pick results from an interactive menu.
        client = await self.get_logged_in_client(source)

        with console.status(f"[bold]Searching {source}", spinner="dots"):
            pages = await client.search(media_type, query, limit=100)

        if len(pages) == 0:
            console.print(f"[red]No search results found for query {query}")
            return

        search_results = SearchResults.from_pages(source, media_type, pages)

        if platform.system() == "Windows":  # simple term menu not supported for windows
            from pick import pick

            choices = pick(
                search_results.results,
                title=(
                    f"{source.capitalize()} {media_type} search.\n"
                    "Press SPACE to select, RETURN to download, CTRL-C to exit."
                ),
                multiselect=True,
                min_selection_count=1,
            )
            assert isinstance(choices, list)

            await self.add_all_by_id(
                [(source, media_type, item.id) for item, _ in choices],
            )
        else:
            from simple_term_menu import TerminalMenu

            menu = TerminalMenu(
                search_results.summaries(),
                preview_command=search_results.preview,
                preview_size=0.5,
                title=(
                    f"Results for {media_type} '{query}' from {source.capitalize()}\n"
                    "SPACE - select, ENTER - download, ESC - exit"
                ),
                cycle_cursor=True,
                clear_screen=True,
                multi_select=True,
            )
            chosen_ind = menu.show()
            if chosen_ind is None:
                console.print("[yellow]No items chosen. Exiting.")
            else:
                choices = search_results.get_choices(chosen_ind)
                await self.add_all_by_id(
                    [(source, item.media_type(), item.id) for item in choices],
                )

    async def search_take_first(self, source: str, media_type: str, query: str):
        # Search and immediately queue the top result.
        client = await self.get_logged_in_client(source)
        with console.status(f"[bold]Searching {source}", spinner="dots"):
            pages = await client.search(media_type, query, limit=1)

        if len(pages) == 0:
            console.print(f"[red]No search results found for query {query}")
            return

        search_results = SearchResults.from_pages(source, media_type, pages)
        assert len(search_results.results) > 0
        first = search_results.results[0]
        await self.add_by_id(source, first.media_type(), first.id)

    async def search_output_file(
        self, source: str, media_type: str, query: str, filepath: str, limit: int
    ):
        # Search and dump results to `filepath` as JSON instead of downloading.
        client = await self.get_logged_in_client(source)

        with console.status(f"[bold]Searching {source}", spinner="dots"):
            pages = await client.search(media_type, query, limit=limit)

        if len(pages) == 0:
            console.print(f"[red]No search results found for query {query}")
            return

        search_results = SearchResults.from_pages(source, media_type, pages)
        file_contents = json.dumps(search_results.as_list(source), indent=4)
        async with aiofiles.open(filepath, "w") as f:
            await f.write(file_contents)

        console.print(
            f"Wrote [purple]{len(search_results.results)}[/purple] results to [cyan]{filepath} as JSON!"
        )

    async def resolve_lastfm(self, playlist_url: str):
        """Resolve a last.fm playlist."""
        c = self.config.session.lastfm
        client = await self.get_logged_in_client(c.source)

        if len(c.fallback_source) > 0:
            fallback_client = await self.get_logged_in_client(c.fallback_source)
        else:
            fallback_client = None

        pending_playlist = PendingLastfmPlaylist(
            playlist_url,
            client,
            fallback_client,
            self.config,
            self.database,
        )
        playlist = await pending_playlist.resolve()

        if playlist is not None:
            self.media.append(playlist)

    async def __aenter__(self):
        return self

    async def __aexit__(self, *_):
        # Ensure all client sessions are closed
        for client in self.clients.values():
            if hasattr(client, "session"):
                await client.session.close()

        # close global progress bar manager
        clear_progress()

        # We remove artwork tempdirs here because multiple singles
        # may be able to share downloaded artwork in the same `rip` session
        # We don't know that a cover will not be used again until end of execution
        remove_artwork_tempdirs()
The provided code snippet includes necessary dependencies for implementing the `file` function. Write a Python function `async def file(ctx, path)` to solve the following problem:
Download content from URLs in a file. Example usage: rip file urls.txt
Here is the function:
async def file(ctx, path):
    """Download content from URLs in a file.

    Example usage:

        rip file urls.txt
    """
    with ctx.obj["config"] as cfg:
        async with Main(cfg) as main:
            async with aiofiles.open(path, "r") as f:
                content = await f.read()
            try:
                items: Any = json.loads(content)
                loaded = True
            except json.JSONDecodeError:
                # Not JSON: treat the file as whitespace-separated URLs.
                items = content.split()
                loaded = False
            if loaded:
                console.print(
                    f"Detected json file. Loading [yellow]{len(items)}[/yellow] items"
                )
                await main.add_all_by_id(
                    [(i["source"], i["media_type"], i["id"]) for i in items]
                )
            else:
                # Deduplicate plain-text URLs before queueing them.
                s = set(items)
                if len(s) < len(items):
                    # FIX: "[orange]" is not a valid Rich style (only
                    # orange1/orange3/... exist) and would raise at print
                    # time; use [yellow], consistent with sibling messages.
                    console.print(
                        f"Found [yellow]{len(items)-len(s)}[/yellow] repeated URLs!"
                    )
                    items = list(s)
                console.print(
                    f"Detected list of urls. Loading [yellow]{len(items)}[/yellow] items"
                )
                await main.add_all(items)

            await main.resolve()
            await main.rip()
4,766 | import asyncio
import json
import logging
import os
import shutil
import subprocess
from functools import wraps
from typing import Any
import aiofiles
import aiohttp
import click
from click_help_colors import HelpColorsGroup
from rich.logging import RichHandler
from rich.markdown import Markdown
from rich.prompt import Confirm
from rich.traceback import install
from .. import __version__, db
from ..config import DEFAULT_CONFIG_PATH, Config, set_user_defaults
from ..console import console
from .main import Main
def config_path(ctx):
    """Display the path of the config file."""
    # The resolved path is stashed on the click context by the root command.
    path = ctx.obj["config_path"]
    console.print(f"Config path: [bold cyan]'{path}'")
console = Console()
The provided code snippet includes necessary dependencies for implementing the `config_open` function. Write a Python function `def config_open(ctx, vim)` to solve the following problem:
Open the config file in a text editor.
Here is the function:
def config_open(ctx, vim):
    """Open the config file in a text editor."""
    path = ctx.obj["config"].path
    console.print(f"Opening file at [bold cyan]{path}")
    if not vim:
        # Default mode: hand the file to the OS launcher.
        click.launch(path)
        return
    # --vim mode: prefer neovim, then vim; otherwise fall back to the launcher.
    editor = next((e for e in ("nvim", "vim") if shutil.which(e) is not None), None)
    if editor is None:
        logger.error("Could not find nvim or vim. Using default launcher.")
        click.launch(path)
    else:
        subprocess.run([editor, path])
4,767 | import asyncio
import json
import logging
import os
import shutil
import subprocess
from functools import wraps
from typing import Any
import aiofiles
import aiohttp
import click
from click_help_colors import HelpColorsGroup
from rich.logging import RichHandler
from rich.markdown import Markdown
from rich.prompt import Confirm
from rich.traceback import install
from .. import __version__, db
from ..config import DEFAULT_CONFIG_PATH, Config, set_user_defaults
from ..console import console
from .main import Main
def config_path(ctx):
"""Display the path of the config file."""
config_path = ctx.obj["config_path"]
console.print(f"Config path: [bold cyan]'{config_path}'")
def set_user_defaults(path: str, /):
    """Update the TOML file at the path with user-specific default values."""
    # Start from the blank template shipped with the package.
    shutil.copy(BLANK_CONFIG_PATH, path)
    with open(path) as config_file:
        toml = parse(config_file.read())
    # Point every filesystem location at this user's standard directories.
    toml["downloads"]["folder"] = DEFAULT_DOWNLOADS_FOLDER  # type: ignore
    toml["database"]["downloads_path"] = DEFAULT_DOWNLOADS_DB_PATH  # type: ignore
    toml["database"]["failed_downloads_path"] = DEFAULT_FAILED_DOWNLOADS_DB_PATH  # type: ignore
    toml["youtube"]["video_downloads_folder"] = DEFAULT_YOUTUBE_VIDEO_DOWNLOADS_FOLDER  # type: ignore
    with open(path, "w") as config_file:
        config_file.write(dumps(toml))
console = Console()
The provided code snippet includes necessary dependencies for implementing the `config_reset` function. Write a Python function `def config_reset(ctx, yes)` to solve the following problem:
Reset the config file.
Here is the function:
def config_reset(ctx, yes):
    """Reset the config file."""
    config_path = ctx.obj["config_path"]
    # --yes skips the interactive confirmation prompt.
    confirmed = yes or Confirm.ask(
        f"Are you sure you want to reset the config file at {config_path}?",
    )
    if not confirmed:
        console.print("[green]Reset aborted")
        return
    set_user_defaults(config_path)
    console.print(f"Reset the config file at [bold cyan]{config_path}!")
4,768 | import asyncio
import json
import logging
import os
import shutil
import subprocess
from functools import wraps
from typing import Any
import aiofiles
import aiohttp
import click
from click_help_colors import HelpColorsGroup
from rich.logging import RichHandler
from rich.markdown import Markdown
from rich.prompt import Confirm
from rich.traceback import install
from .. import __version__, db
from ..config import DEFAULT_CONFIG_PATH, Config, set_user_defaults
from ..console import console
from .main import Main
def database():
"""View and modify the downloads and failed downloads databases."""
class Config:
def __init__(self, path: str, /):
self.path = path
with open(path) as toml_file:
self.file: ConfigData = ConfigData.from_toml(toml_file.read())
self.session: ConfigData = copy.deepcopy(self.file)
def save_file(self):
if not self.file.modified:
return
with open(self.path, "w") as toml_file:
self.file.update_toml()
toml_file.write(dumps(self.file.toml))
def defaults(cls):
return cls(BLANK_CONFIG_PATH)
def __enter__(self):
return self
def __exit__(self, *_):
self.save_file()
console = Console()
The provided code snippet includes necessary dependencies for implementing the `database_browse` function. Write a Python function `def database_browse(ctx, table)` to solve the following problem:
Browse the contents of a table. Available tables: * Downloads * Failed
Here is the function:
def database_browse(ctx, table):
    """Browse the contents of a table.

    Available tables:

        * Downloads
        * Failed
    """
    from rich.table import Table

    cfg: Config = ctx.obj["config"]
    if table.lower() == "downloads":
        downloads = db.Downloads(cfg.session.database.downloads_path)
        t = Table(title="Downloads database")
        t.add_column("Row")
        t.add_column("ID")
        for i, row in enumerate(downloads.all()):
            t.add_row(f"{i:02}", *row)
        console.print(t)
    elif table.lower() == "failed":
        failed = db.Failed(cfg.session.database.failed_downloads_path)
        t = Table(title="Failed downloads database")
        # BUG FIX: each rendered row is (index, source, media_type, id) —
        # four values — but only three columns were declared, misaligning the
        # table. Declare the leading "Row" column like the downloads branch.
        t.add_column("Row")
        t.add_column("Source")
        t.add_column("Media Type")
        t.add_column("ID")
        for i, row in enumerate(failed.all()):
            t.add_row(f"{i:02}", *row)
        console.print(t)
    else:
        console.print(
            f"[red]Invalid database[/red] [bold]{table}[/bold]. [red]Choose[/red] [bold]downloads "
            "[red]or[/red] failed[/bold].",
        )
4,769 | import asyncio
import json
import logging
import os
import shutil
import subprocess
from functools import wraps
from typing import Any
import aiofiles
import aiohttp
import click
from click_help_colors import HelpColorsGroup
from rich.logging import RichHandler
from rich.markdown import Markdown
from rich.prompt import Confirm
from rich.traceback import install
from .. import __version__, db
from ..config import DEFAULT_CONFIG_PATH, Config, set_user_defaults
from ..console import console
from .main import Main
def rip(ctx, config_path, folder, no_db, quality, codec, no_progress, verbose):
"""Streamrip: the all in one music downloader."""
global logger
logging.basicConfig(
level="INFO",
format="%(message)s",
datefmt="[%X]",
handlers=[RichHandler()],
)
logger = logging.getLogger("streamrip")
if verbose:
install(
console=console,
suppress=[
click,
],
show_locals=True,
locals_hide_sunder=False,
)
logger.setLevel(logging.DEBUG)
logger.debug("Showing all debug logs")
else:
install(console=console, suppress=[click, asyncio], max_frames=1)
logger.setLevel(logging.INFO)
if not os.path.isfile(config_path):
console.print(
f"No file found at [bold cyan]{config_path}[/bold cyan], creating default config.",
)
set_user_defaults(config_path)
# pass to subcommands
ctx.ensure_object(dict)
ctx.obj["config_path"] = config_path
try:
c = Config(config_path)
except Exception as e:
console.print(
f"Error loading config from [bold cyan]{config_path}[/bold cyan]: {e}\n"
"Try running [bold]rip config reset[/bold]",
)
ctx.obj["config"] = None
return
# set session config values to command line args
if no_db:
c.session.database.downloads_enabled = False
if folder is not None:
c.session.downloads.folder = folder
if quality is not None:
c.session.qobuz.quality = quality
c.session.tidal.quality = quality
c.session.deezer.quality = quality
c.session.soundcloud.quality = quality
if codec is not None:
c.session.conversion.enabled = True
assert codec.upper() in ("ALAC", "FLAC", "OGG", "MP3", "AAC")
c.session.conversion.codec = codec.upper()
if no_progress:
c.session.cli.progress_bars = False
ctx.obj["config"] = c
console = Console()
class Main:
"""Provides all of the functionality called into by the CLI.
* Logs in to Clients and prompts for credentials
* Handles output logging
* Handles downloading Media
* Handles interactive search
User input (urls) -> Main --> Download files & Output messages to terminal
"""
def __init__(self, config: Config):
# Data pipeline:
# input URL -> (URL) -> (Pending) -> (Media) -> (Downloadable) -> audio file
self.pending: list[Pending] = []
self.media: list[Media] = []
self.config = config
self.clients: dict[str, Client] = {
"qobuz": QobuzClient(config),
"tidal": TidalClient(config),
"deezer": DeezerClient(config),
"soundcloud": SoundcloudClient(config),
}
self.database: db.Database
c = self.config.session.database
if c.downloads_enabled:
downloads_db = db.Downloads(c.downloads_path)
else:
downloads_db = db.Dummy()
if c.failed_downloads_enabled:
failed_downloads_db = db.Failed(c.failed_downloads_path)
else:
failed_downloads_db = db.Dummy()
self.database = db.Database(downloads_db, failed_downloads_db)
async def add(self, url: str):
"""Add url as a pending item.
Do not `asyncio.gather` calls to this! Use `add_all` for concurrency.
"""
parsed = parse_url(url)
if parsed is None:
raise Exception(f"Unable to parse url {url}")
client = await self.get_logged_in_client(parsed.source)
self.pending.append(
await parsed.into_pending(client, self.config, self.database),
)
logger.debug("Added url=%s", url)
async def add_by_id(self, source: str, media_type: str, id: str):
client = await self.get_logged_in_client(source)
self._add_by_id_client(client, media_type, id)
async def add_all_by_id(self, info: list[tuple[str, str, str]]):
sources = set(s for s, _, _ in info)
clients = {s: await self.get_logged_in_client(s) for s in sources}
for source, media_type, id in info:
self._add_by_id_client(clients[source], media_type, id)
def _add_by_id_client(self, client: Client, media_type: str, id: str):
if media_type == "track":
item = PendingSingle(id, client, self.config, self.database)
elif media_type == "album":
item = PendingAlbum(id, client, self.config, self.database)
elif media_type == "playlist":
item = PendingPlaylist(id, client, self.config, self.database)
elif media_type == "label":
item = PendingLabel(id, client, self.config, self.database)
elif media_type == "artist":
item = PendingArtist(id, client, self.config, self.database)
else:
raise Exception(media_type)
self.pending.append(item)
async def add_all(self, urls: list[str]):
"""Add multiple urls concurrently as pending items."""
parsed = [parse_url(url) for url in urls]
url_client_pairs = []
for i, p in enumerate(parsed):
if p is None:
console.print(
f"[red]Found invalid url [cyan]{urls[i]}[/cyan], skipping.",
)
continue
url_client_pairs.append((p, await self.get_logged_in_client(p.source)))
pendings = await asyncio.gather(
*[
url.into_pending(client, self.config, self.database)
for url, client in url_client_pairs
],
)
self.pending.extend(pendings)
async def get_logged_in_client(self, source: str):
"""Return a functioning client instance for `source`."""
client = self.clients.get(source)
if client is None:
raise Exception(
f"No client named {source} available. Only have {self.clients.keys()}",
)
if not client.logged_in:
prompter = get_prompter(client, self.config)
if not prompter.has_creds():
# Get credentials from user and log into client
await prompter.prompt_and_login()
prompter.save()
else:
with console.status(f"[cyan]Logging into {source}", spinner="dots"):
# Log into client using credentials from config
await client.login()
assert client.logged_in
return client
async def resolve(self):
"""Resolve all currently pending items."""
with console.status("Resolving URLs...", spinner="dots"):
coros = [p.resolve() for p in self.pending]
new_media: list[Media] = [
m for m in await asyncio.gather(*coros) if m is not None
]
self.media.extend(new_media)
self.pending.clear()
async def rip(self):
"""Download all resolved items."""
await asyncio.gather(*[item.rip() for item in self.media])
async def search_interactive(self, source: str, media_type: str, query: str):
client = await self.get_logged_in_client(source)
with console.status(f"[bold]Searching {source}", spinner="dots"):
pages = await client.search(media_type, query, limit=100)
if len(pages) == 0:
console.print(f"[red]No search results found for query {query}")
return
search_results = SearchResults.from_pages(source, media_type, pages)
if platform.system() == "Windows": # simple term menu not supported for windows
from pick import pick
choices = pick(
search_results.results,
title=(
f"{source.capitalize()} {media_type} search.\n"
"Press SPACE to select, RETURN to download, CTRL-C to exit."
),
multiselect=True,
min_selection_count=1,
)
assert isinstance(choices, list)
await self.add_all_by_id(
[(source, media_type, item.id) for item, _ in choices],
)
else:
from simple_term_menu import TerminalMenu
menu = TerminalMenu(
search_results.summaries(),
preview_command=search_results.preview,
preview_size=0.5,
title=(
f"Results for {media_type} '{query}' from {source.capitalize()}\n"
"SPACE - select, ENTER - download, ESC - exit"
),
cycle_cursor=True,
clear_screen=True,
multi_select=True,
)
chosen_ind = menu.show()
if chosen_ind is None:
console.print("[yellow]No items chosen. Exiting.")
else:
choices = search_results.get_choices(chosen_ind)
await self.add_all_by_id(
[(source, item.media_type(), item.id) for item in choices],
)
async def search_take_first(self, source: str, media_type: str, query: str):
client = await self.get_logged_in_client(source)
with console.status(f"[bold]Searching {source}", spinner="dots"):
pages = await client.search(media_type, query, limit=1)
if len(pages) == 0:
console.print(f"[red]No search results found for query {query}")
return
search_results = SearchResults.from_pages(source, media_type, pages)
assert len(search_results.results) > 0
first = search_results.results[0]
await self.add_by_id(source, first.media_type(), first.id)
async def search_output_file(
self, source: str, media_type: str, query: str, filepath: str, limit: int
):
client = await self.get_logged_in_client(source)
with console.status(f"[bold]Searching {source}", spinner="dots"):
pages = await client.search(media_type, query, limit=limit)
if len(pages) == 0:
console.print(f"[red]No search results found for query {query}")
return
search_results = SearchResults.from_pages(source, media_type, pages)
file_contents = json.dumps(search_results.as_list(source), indent=4)
async with aiofiles.open(filepath, "w") as f:
await f.write(file_contents)
console.print(
f"Wrote [purple]{len(search_results.results)}[/purple] results to [cyan]{filepath} as JSON!"
)
async def resolve_lastfm(self, playlist_url: str):
"""Resolve a last.fm playlist."""
c = self.config.session.lastfm
client = await self.get_logged_in_client(c.source)
if len(c.fallback_source) > 0:
fallback_client = await self.get_logged_in_client(c.fallback_source)
else:
fallback_client = None
pending_playlist = PendingLastfmPlaylist(
playlist_url,
client,
fallback_client,
self.config,
self.database,
)
playlist = await pending_playlist.resolve()
if playlist is not None:
self.media.append(playlist)
async def __aenter__(self):
return self
async def __aexit__(self, *_):
# Ensure all client sessions are closed
for client in self.clients.values():
if hasattr(client, "session"):
await client.session.close()
# close global progress bar manager
clear_progress()
# We remove artwork tempdirs here because multiple singles
# may be able to share downloaded artwork in the same `rip` session
# We don't know that a cover will not be used again until end of execution
remove_artwork_tempdirs()
The provided code snippet includes necessary dependencies for implementing the `search` function. Write a Python function `async def search(ctx, first, output_file, num_results, source, media_type, query)` to solve the following problem:
Search for content using a specific source. Example: rip search qobuz album 'rumours'
Here is the function:
async def search(ctx, first, output_file, num_results, source, media_type, query):
    """Search for content using a specific source.

    Example:

        rip search qobuz album 'rumours'
    """
    # --first and --output-file are mutually exclusive search modes.
    if first and output_file:
        console.print("Cannot choose --first and --output-file!")
        return
    with ctx.obj["config"] as cfg:
        async with Main(cfg) as main:
            # Select the search mode: take-first, dump-to-file, or interactive.
            if first:
                coro = main.search_take_first(source, media_type, query)
            elif output_file:
                coro = main.search_output_file(
                    source, media_type, query, output_file, num_results
                )
            else:
                coro = main.search_interactive(source, media_type, query)
            await coro
            await main.resolve()
            await main.rip()
4,770 | import asyncio
import json
import logging
import os
import shutil
import subprocess
from functools import wraps
from typing import Any
import aiofiles
import aiohttp
import click
from click_help_colors import HelpColorsGroup
from rich.logging import RichHandler
from rich.markdown import Markdown
from rich.prompt import Confirm
from rich.traceback import install
from .. import __version__, db
from ..config import DEFAULT_CONFIG_PATH, Config, set_user_defaults
from ..console import console
from .main import Main
def rip(ctx, config_path, folder, no_db, quality, codec, no_progress, verbose):
"""Streamrip: the all in one music downloader."""
global logger
logging.basicConfig(
level="INFO",
format="%(message)s",
datefmt="[%X]",
handlers=[RichHandler()],
)
logger = logging.getLogger("streamrip")
if verbose:
install(
console=console,
suppress=[
click,
],
show_locals=True,
locals_hide_sunder=False,
)
logger.setLevel(logging.DEBUG)
logger.debug("Showing all debug logs")
else:
install(console=console, suppress=[click, asyncio], max_frames=1)
logger.setLevel(logging.INFO)
if not os.path.isfile(config_path):
console.print(
f"No file found at [bold cyan]{config_path}[/bold cyan], creating default config.",
)
set_user_defaults(config_path)
# pass to subcommands
ctx.ensure_object(dict)
ctx.obj["config_path"] = config_path
try:
c = Config(config_path)
except Exception as e:
console.print(
f"Error loading config from [bold cyan]{config_path}[/bold cyan]: {e}\n"
"Try running [bold]rip config reset[/bold]",
)
ctx.obj["config"] = None
return
# set session config values to command line args
if no_db:
c.session.database.downloads_enabled = False
if folder is not None:
c.session.downloads.folder = folder
if quality is not None:
c.session.qobuz.quality = quality
c.session.tidal.quality = quality
c.session.deezer.quality = quality
c.session.soundcloud.quality = quality
if codec is not None:
c.session.conversion.enabled = True
assert codec.upper() in ("ALAC", "FLAC", "OGG", "MP3", "AAC")
c.session.conversion.codec = codec.upper()
if no_progress:
c.session.cli.progress_bars = False
ctx.obj["config"] = c
def config():
"""Manage configuration files."""
class Main:
"""Provides all of the functionality called into by the CLI.
* Logs in to Clients and prompts for credentials
* Handles output logging
* Handles downloading Media
* Handles interactive search
User input (urls) -> Main --> Download files & Output messages to terminal
"""
def __init__(self, config: Config):
# Data pipeline:
# input URL -> (URL) -> (Pending) -> (Media) -> (Downloadable) -> audio file
self.pending: list[Pending] = []
self.media: list[Media] = []
self.config = config
self.clients: dict[str, Client] = {
"qobuz": QobuzClient(config),
"tidal": TidalClient(config),
"deezer": DeezerClient(config),
"soundcloud": SoundcloudClient(config),
}
self.database: db.Database
c = self.config.session.database
if c.downloads_enabled:
downloads_db = db.Downloads(c.downloads_path)
else:
downloads_db = db.Dummy()
if c.failed_downloads_enabled:
failed_downloads_db = db.Failed(c.failed_downloads_path)
else:
failed_downloads_db = db.Dummy()
self.database = db.Database(downloads_db, failed_downloads_db)
async def add(self, url: str):
"""Add url as a pending item.
Do not `asyncio.gather` calls to this! Use `add_all` for concurrency.
"""
parsed = parse_url(url)
if parsed is None:
raise Exception(f"Unable to parse url {url}")
client = await self.get_logged_in_client(parsed.source)
self.pending.append(
await parsed.into_pending(client, self.config, self.database),
)
logger.debug("Added url=%s", url)
async def add_by_id(self, source: str, media_type: str, id: str):
client = await self.get_logged_in_client(source)
self._add_by_id_client(client, media_type, id)
async def add_all_by_id(self, info: list[tuple[str, str, str]]):
sources = set(s for s, _, _ in info)
clients = {s: await self.get_logged_in_client(s) for s in sources}
for source, media_type, id in info:
self._add_by_id_client(clients[source], media_type, id)
def _add_by_id_client(self, client: Client, media_type: str, id: str):
if media_type == "track":
item = PendingSingle(id, client, self.config, self.database)
elif media_type == "album":
item = PendingAlbum(id, client, self.config, self.database)
elif media_type == "playlist":
item = PendingPlaylist(id, client, self.config, self.database)
elif media_type == "label":
item = PendingLabel(id, client, self.config, self.database)
elif media_type == "artist":
item = PendingArtist(id, client, self.config, self.database)
else:
raise Exception(media_type)
self.pending.append(item)
async def add_all(self, urls: list[str]):
"""Add multiple urls concurrently as pending items."""
parsed = [parse_url(url) for url in urls]
url_client_pairs = []
for i, p in enumerate(parsed):
if p is None:
console.print(
f"[red]Found invalid url [cyan]{urls[i]}[/cyan], skipping.",
)
continue
url_client_pairs.append((p, await self.get_logged_in_client(p.source)))
pendings = await asyncio.gather(
*[
url.into_pending(client, self.config, self.database)
for url, client in url_client_pairs
],
)
self.pending.extend(pendings)
async def get_logged_in_client(self, source: str):
"""Return a functioning client instance for `source`."""
client = self.clients.get(source)
if client is None:
raise Exception(
f"No client named {source} available. Only have {self.clients.keys()}",
)
if not client.logged_in:
prompter = get_prompter(client, self.config)
if not prompter.has_creds():
# Get credentials from user and log into client
await prompter.prompt_and_login()
prompter.save()
else:
with console.status(f"[cyan]Logging into {source}", spinner="dots"):
# Log into client using credentials from config
await client.login()
assert client.logged_in
return client
async def resolve(self):
"""Resolve all currently pending items."""
with console.status("Resolving URLs...", spinner="dots"):
coros = [p.resolve() for p in self.pending]
new_media: list[Media] = [
m for m in await asyncio.gather(*coros) if m is not None
]
self.media.extend(new_media)
self.pending.clear()
async def rip(self):
"""Download all resolved items."""
await asyncio.gather(*[item.rip() for item in self.media])
async def search_interactive(self, source: str, media_type: str, query: str):
client = await self.get_logged_in_client(source)
with console.status(f"[bold]Searching {source}", spinner="dots"):
pages = await client.search(media_type, query, limit=100)
if len(pages) == 0:
console.print(f"[red]No search results found for query {query}")
return
search_results = SearchResults.from_pages(source, media_type, pages)
if platform.system() == "Windows": # simple term menu not supported for windows
from pick import pick
choices = pick(
search_results.results,
title=(
f"{source.capitalize()} {media_type} search.\n"
"Press SPACE to select, RETURN to download, CTRL-C to exit."
),
multiselect=True,
min_selection_count=1,
)
assert isinstance(choices, list)
await self.add_all_by_id(
[(source, media_type, item.id) for item, _ in choices],
)
else:
from simple_term_menu import TerminalMenu
menu = TerminalMenu(
search_results.summaries(),
preview_command=search_results.preview,
preview_size=0.5,
title=(
f"Results for {media_type} '{query}' from {source.capitalize()}\n"
"SPACE - select, ENTER - download, ESC - exit"
),
cycle_cursor=True,
clear_screen=True,
multi_select=True,
)
chosen_ind = menu.show()
if chosen_ind is None:
console.print("[yellow]No items chosen. Exiting.")
else:
choices = search_results.get_choices(chosen_ind)
await self.add_all_by_id(
[(source, item.media_type(), item.id) for item in choices],
)
async def search_take_first(self, source: str, media_type: str, query: str):
client = await self.get_logged_in_client(source)
with console.status(f"[bold]Searching {source}", spinner="dots"):
pages = await client.search(media_type, query, limit=1)
if len(pages) == 0:
console.print(f"[red]No search results found for query {query}")
return
search_results = SearchResults.from_pages(source, media_type, pages)
assert len(search_results.results) > 0
first = search_results.results[0]
await self.add_by_id(source, first.media_type(), first.id)
async def search_output_file(
self, source: str, media_type: str, query: str, filepath: str, limit: int
):
client = await self.get_logged_in_client(source)
with console.status(f"[bold]Searching {source}", spinner="dots"):
pages = await client.search(media_type, query, limit=limit)
if len(pages) == 0:
console.print(f"[red]No search results found for query {query}")
return
search_results = SearchResults.from_pages(source, media_type, pages)
file_contents = json.dumps(search_results.as_list(source), indent=4)
async with aiofiles.open(filepath, "w") as f:
await f.write(file_contents)
console.print(
f"Wrote [purple]{len(search_results.results)}[/purple] results to [cyan]{filepath} as JSON!"
)
async def resolve_lastfm(self, playlist_url: str):
"""Resolve a last.fm playlist."""
c = self.config.session.lastfm
client = await self.get_logged_in_client(c.source)
if len(c.fallback_source) > 0:
fallback_client = await self.get_logged_in_client(c.fallback_source)
else:
fallback_client = None
pending_playlist = PendingLastfmPlaylist(
playlist_url,
client,
fallback_client,
self.config,
self.database,
)
playlist = await pending_playlist.resolve()
if playlist is not None:
self.media.append(playlist)
async def __aenter__(self):
return self
async def __aexit__(self, *_):
# Ensure all client sessions are closed
for client in self.clients.values():
if hasattr(client, "session"):
await client.session.close()
# close global progress bar manager
clear_progress()
# We remove artwork tempdirs here because multiple singles
# may be able to share downloaded artwork in the same `rip` session
# We don't know that a cover will not be used again until end of execution
remove_artwork_tempdirs()
The provided code snippet includes necessary dependencies for implementing the `lastfm` function. Write a Python function `async def lastfm(ctx, source, fallback_source, url)` to solve the following problem:
Download tracks from a last.fm playlist.
Here is the function:
async def lastfm(ctx, source, fallback_source, url):
    """Download tracks from a last.fm playlist."""
    config = ctx.obj["config"]
    # Command-line options override the values from the config file.
    overrides = {"source": source, "fallback_source": fallback_source}
    for attr, value in overrides.items():
        if value is not None:
            setattr(config.session.lastfm, attr, value)
    with config as cfg:
        async with Main(cfg) as main:
            await main.resolve_lastfm(url)
            await main.rip()
4,771 | import asyncio
import json
import logging
import os
import shutil
import subprocess
from functools import wraps
from typing import Any
import aiofiles
import aiohttp
import click
from click_help_colors import HelpColorsGroup
from rich.logging import RichHandler
from rich.markdown import Markdown
from rich.prompt import Confirm
from rich.traceback import install
from .. import __version__, db
from ..config import DEFAULT_CONFIG_PATH, Config, set_user_defaults
from ..console import console
from .main import Main
def rip(ctx, config_path, folder, no_db, quality, codec, no_progress, verbose):
"""Streamrip: the all in one music downloader."""
global logger
logging.basicConfig(
level="INFO",
format="%(message)s",
datefmt="[%X]",
handlers=[RichHandler()],
)
logger = logging.getLogger("streamrip")
if verbose:
install(
console=console,
suppress=[
click,
],
show_locals=True,
locals_hide_sunder=False,
)
logger.setLevel(logging.DEBUG)
logger.debug("Showing all debug logs")
else:
install(console=console, suppress=[click, asyncio], max_frames=1)
logger.setLevel(logging.INFO)
if not os.path.isfile(config_path):
console.print(
f"No file found at [bold cyan]{config_path}[/bold cyan], creating default config.",
)
set_user_defaults(config_path)
# pass to subcommands
ctx.ensure_object(dict)
ctx.obj["config_path"] = config_path
try:
c = Config(config_path)
except Exception as e:
console.print(
f"Error loading config from [bold cyan]{config_path}[/bold cyan]: {e}\n"
"Try running [bold]rip config reset[/bold]",
)
ctx.obj["config"] = None
return
# set session config values to command line args
if no_db:
c.session.database.downloads_enabled = False
if folder is not None:
c.session.downloads.folder = folder
if quality is not None:
c.session.qobuz.quality = quality
c.session.tidal.quality = quality
c.session.deezer.quality = quality
c.session.soundcloud.quality = quality
if codec is not None:
c.session.conversion.enabled = True
assert codec.upper() in ("ALAC", "FLAC", "OGG", "MP3", "AAC")
c.session.conversion.codec = codec.upper()
if no_progress:
c.session.cli.progress_bars = False
ctx.obj["config"] = c
class Main:
    """Provides all of the functionality called into by the CLI.

    * Logs in to Clients and prompts for credentials
    * Handles output logging
    * Handles downloading Media
    * Handles interactive search

    User input (urls) -> Main --> Download files & Output messages to terminal
    """

    def __init__(self, config: Config):
        # Data pipeline:
        # input URL -> (URL) -> (Pending) -> (Media) -> (Downloadable) -> audio file
        self.pending: list[Pending] = []  # parsed but not yet resolved
        self.media: list[Media] = []  # resolved, ready to download
        self.config = config
        # One client per supported source; constructed eagerly, logged in lazily
        # by get_logged_in_client().
        self.clients: dict[str, Client] = {
            "qobuz": QobuzClient(config),
            "tidal": TidalClient(config),
            "deezer": DeezerClient(config),
            "soundcloud": SoundcloudClient(config),
        }
        self.database: db.Database
        c = self.config.session.database
        # Each DB feature can be disabled independently; db.Dummy stands in so
        # callers never need to branch on the setting.
        if c.downloads_enabled:
            downloads_db = db.Downloads(c.downloads_path)
        else:
            downloads_db = db.Dummy()
        if c.failed_downloads_enabled:
            failed_downloads_db = db.Failed(c.failed_downloads_path)
        else:
            failed_downloads_db = db.Dummy()
        self.database = db.Database(downloads_db, failed_downloads_db)

    async def add(self, url: str):
        """Add url as a pending item.

        Do not `asyncio.gather` calls to this! Use `add_all` for concurrency.
        """
        parsed = parse_url(url)
        if parsed is None:
            raise Exception(f"Unable to parse url {url}")
        client = await self.get_logged_in_client(parsed.source)
        self.pending.append(
            await parsed.into_pending(client, self.config, self.database),
        )
        logger.debug("Added url=%s", url)

    async def add_by_id(self, source: str, media_type: str, id: str):
        """Queue a single item identified by (source, media_type, id)."""
        client = await self.get_logged_in_client(source)
        self._add_by_id_client(client, media_type, id)

    async def add_all_by_id(self, info: list[tuple[str, str, str]]):
        """Queue many (source, media_type, id) triples, logging in to each
        distinct source only once."""
        sources = set(s for s, _, _ in info)
        clients = {s: await self.get_logged_in_client(s) for s in sources}
        for source, media_type, id in info:
            self._add_by_id_client(clients[source], media_type, id)

    def _add_by_id_client(self, client: Client, media_type: str, id: str):
        # Map the media-type string onto the matching Pending subclass.
        if media_type == "track":
            item = PendingSingle(id, client, self.config, self.database)
        elif media_type == "album":
            item = PendingAlbum(id, client, self.config, self.database)
        elif media_type == "playlist":
            item = PendingPlaylist(id, client, self.config, self.database)
        elif media_type == "label":
            item = PendingLabel(id, client, self.config, self.database)
        elif media_type == "artist":
            item = PendingArtist(id, client, self.config, self.database)
        else:
            raise Exception(media_type)
        self.pending.append(item)

    async def add_all(self, urls: list[str]):
        """Add multiple urls concurrently as pending items."""
        parsed = [parse_url(url) for url in urls]
        url_client_pairs = []
        for i, p in enumerate(parsed):
            if p is None:
                # Warn and skip bad urls rather than aborting the whole batch.
                console.print(
                    f"[red]Found invalid url [cyan]{urls[i]}[/cyan], skipping.",
                )
                continue
            url_client_pairs.append((p, await self.get_logged_in_client(p.source)))
        pendings = await asyncio.gather(
            *[
                url.into_pending(client, self.config, self.database)
                for url, client in url_client_pairs
            ],
        )
        self.pending.extend(pendings)

    async def get_logged_in_client(self, source: str):
        """Return a functioning client instance for `source`."""
        client = self.clients.get(source)
        if client is None:
            raise Exception(
                f"No client named {source} available. Only have {self.clients.keys()}",
            )
        if not client.logged_in:
            prompter = get_prompter(client, self.config)
            if not prompter.has_creds():
                # Get credentials from user and log into client
                await prompter.prompt_and_login()
                prompter.save()
            else:
                with console.status(f"[cyan]Logging into {source}", spinner="dots"):
                    # Log into client using credentials from config
                    await client.login()
        assert client.logged_in
        return client

    async def resolve(self):
        """Resolve all currently pending items."""
        with console.status("Resolving URLs...", spinner="dots"):
            coros = [p.resolve() for p in self.pending]
            new_media: list[Media] = [
                m for m in await asyncio.gather(*coros) if m is not None
            ]
        # Resolved items move from the pending queue to the media queue.
        self.media.extend(new_media)
        self.pending.clear()

    async def rip(self):
        """Download all resolved items."""
        await asyncio.gather(*[item.rip() for item in self.media])

    async def search_interactive(self, source: str, media_type: str, query: str):
        """Search `source` and let the user pick items via a terminal menu."""
        client = await self.get_logged_in_client(source)
        with console.status(f"[bold]Searching {source}", spinner="dots"):
            pages = await client.search(media_type, query, limit=100)
        if len(pages) == 0:
            console.print(f"[red]No search results found for query {query}")
            return
        search_results = SearchResults.from_pages(source, media_type, pages)
        if platform.system() == "Windows":  # simple term menu not supported for windows
            from pick import pick

            choices = pick(
                search_results.results,
                title=(
                    f"{source.capitalize()} {media_type} search.\n"
                    "Press SPACE to select, RETURN to download, CTRL-C to exit."
                ),
                multiselect=True,
                min_selection_count=1,
            )
            assert isinstance(choices, list)
            await self.add_all_by_id(
                [(source, media_type, item.id) for item, _ in choices],
            )
        else:
            from simple_term_menu import TerminalMenu

            menu = TerminalMenu(
                search_results.summaries(),
                preview_command=search_results.preview,
                preview_size=0.5,
                title=(
                    f"Results for {media_type} '{query}' from {source.capitalize()}\n"
                    "SPACE - select, ENTER - download, ESC - exit"
                ),
                cycle_cursor=True,
                clear_screen=True,
                multi_select=True,
            )
            chosen_ind = menu.show()
            if chosen_ind is None:
                console.print("[yellow]No items chosen. Exiting.")
            else:
                choices = search_results.get_choices(chosen_ind)
                await self.add_all_by_id(
                    [(source, item.media_type(), item.id) for item in choices],
                )

    async def search_take_first(self, source: str, media_type: str, query: str):
        """Search `source` and queue the top result without interaction."""
        client = await self.get_logged_in_client(source)
        with console.status(f"[bold]Searching {source}", spinner="dots"):
            pages = await client.search(media_type, query, limit=1)
        if len(pages) == 0:
            console.print(f"[red]No search results found for query {query}")
            return
        search_results = SearchResults.from_pages(source, media_type, pages)
        assert len(search_results.results) > 0
        first = search_results.results[0]
        await self.add_by_id(source, first.media_type(), first.id)

    async def search_output_file(
        self, source: str, media_type: str, query: str, filepath: str, limit: int
    ):
        """Search `source` and dump up to `limit` results to `filepath` as JSON."""
        client = await self.get_logged_in_client(source)
        with console.status(f"[bold]Searching {source}", spinner="dots"):
            pages = await client.search(media_type, query, limit=limit)
        if len(pages) == 0:
            console.print(f"[red]No search results found for query {query}")
            return
        search_results = SearchResults.from_pages(source, media_type, pages)
        file_contents = json.dumps(search_results.as_list(source), indent=4)
        async with aiofiles.open(filepath, "w") as f:
            await f.write(file_contents)
        console.print(
            f"Wrote [purple]{len(search_results.results)}[/purple] results to [cyan]{filepath} as JSON!"
        )

    async def resolve_lastfm(self, playlist_url: str):
        """Resolve a last.fm playlist."""
        c = self.config.session.lastfm
        client = await self.get_logged_in_client(c.source)
        # A fallback source is optional; an empty string means "none configured".
        if len(c.fallback_source) > 0:
            fallback_client = await self.get_logged_in_client(c.fallback_source)
        else:
            fallback_client = None
        pending_playlist = PendingLastfmPlaylist(
            playlist_url,
            client,
            fallback_client,
            self.config,
            self.database,
        )
        playlist = await pending_playlist.resolve()
        if playlist is not None:
            self.media.append(playlist)

    async def __aenter__(self):
        return self

    async def __aexit__(self, *_):
        # Ensure all client sessions are closed
        for client in self.clients.values():
            if hasattr(client, "session"):
                await client.session.close()
        # close global progress bar manager
        clear_progress()
        # We remove artwork tempdirs here because multiple singles
        # may be able to share downloaded artwork in the same `rip` session
        # We don't know that a cover will not be used again until end of execution
        remove_artwork_tempdirs()
The provided code snippet includes necessary dependencies for implementing the `id` function. Write a Python function `async def id(ctx, source, media_type, id)` to solve the following problem:
Download an item by ID.
Here is the function:
# NOTE(review): in the original this is a click command (`@rip.command` /
# `@click.argument` / coroutine-runner decorators) -- the decorators appear to
# have been stripped from this copy; confirm upstream. The name `id` shadows
# the builtin, but presumably matches the CLI command name.
async def id(ctx, source, media_type, id):
    """Download an item by ID."""
    # Config context manager persists modified settings on exit; Main closes
    # client sessions on exit.
    with ctx.obj["config"] as cfg:
        async with Main(cfg) as main:
            await main.add_by_id(source, media_type, id)
            await main.resolve()
            await main.rip()
4,772 | from __future__ import annotations
import logging
import re
from abc import ABC, abstractmethod
from ..client import Client, SoundcloudClient
from ..config import Config
from ..db import Database
from ..media import (
Pending,
PendingAlbum,
PendingArtist,
PendingLabel,
PendingPlaylist,
PendingSingle,
)
class URL(ABC):
    """Abstract base for source-specific URL matchers.

    Subclasses recognize a url shape via `from_str` and convert a matched url
    into a Pending media item via `into_pending`.
    """

    # NOTE(review): `from_str` takes `cls` and the stubs raise
    # NotImplementedError -- @classmethod/@abstractmethod decorators appear to
    # have been stripped from this copy; confirm upstream.
    match: re.Match  # regex match that recognized this url
    source: str  # streaming source name, e.g. "qobuz"

    def __init__(self, match: re.Match, source: str):
        self.match = match
        self.source = source

    def from_str(cls, url: str) -> URL | None:
        """Parse `url`; return an instance on match, else None."""
        raise NotImplementedError

    async def into_pending(
        self,
        client: Client,
        config: Config,
        db: Database,
    ) -> Pending:
        """Convert this matched url into a Pending media item."""
        raise NotImplementedError
class GenericURL(URL):
    """Handles the common `<source>/<media_type>/<id>` url shape (URL_REGEX)."""

    # NOTE(review): `from_str` takes `cls` but carries no @classmethod
    # decorator in this copy -- the decorator appears stripped; confirm
    # upstream.
    def from_str(cls, url: str) -> URL | None:
        generic_url = URL_REGEX.match(url)
        if generic_url is None:
            return None
        # URL_REGEX captures (source, media_type, item_id); reject partial
        # matches where any group is missing.
        source, media_type, item_id = generic_url.groups()
        if source is None or media_type is None or item_id is None:
            return None
        return cls(generic_url, source)

    async def into_pending(
        self,
        client: Client,
        config: Config,
        db: Database,
    ) -> Pending:
        """Dispatch on the matched media type to the right Pending subclass."""
        source, media_type, item_id = self.match.groups()
        assert client.source == source
        if media_type == "track":
            return PendingSingle(item_id, client, config, db)
        elif media_type == "album":
            return PendingAlbum(item_id, client, config, db)
        elif media_type == "playlist":
            return PendingPlaylist(item_id, client, config, db)
        elif media_type == "artist":
            return PendingArtist(item_id, client, config, db)
        elif media_type == "label":
            return PendingLabel(item_id, client, config, db)
        # Any other media type captured by the regex is unsupported.
        raise NotImplementedError
class QobuzInterpreterURL(URL):
    """Handles Qobuz "interpreter" (artist page) urls."""

    # Matches the artist id embedded in the page's getSimilarArtist('...') call.
    interpreter_artist_regex = re.compile(r"getSimilarArtist\(\s*'(\w+)'")

    # NOTE(review): `from_str` takes `cls` and `extract_interpreter_url` takes
    # neither self nor cls -- @classmethod/@staticmethod decorators appear to
    # have been stripped from this copy; confirm upstream.
    def from_str(cls, url: str) -> URL | None:
        qobuz_interpreter_url = QOBUZ_INTERPRETER_URL_REGEX.match(url)
        if qobuz_interpreter_url is None:
            return None
        return cls(qobuz_interpreter_url, "qobuz")

    async def into_pending(
        self,
        client: Client,
        config: Config,
        db: Database,
    ) -> Pending:
        """Resolve the artist id (directly or by scraping the page) and wrap it."""
        url = self.match.group(0)
        possible_id = self.match.group(1)
        if possible_id.isdigit():
            # The url already carries a numeric artist id; no scraping needed.
            logger.debug("Found artist ID %s in interpreter url %s", possible_id, url)
            artist_id = possible_id
        else:
            artist_id = await self.extract_interpreter_url(url, client)
        return PendingArtist(artist_id, client, config, db)

    async def extract_interpreter_url(url: str, client: Client) -> str:
        """Extract artist ID from a Qobuz interpreter url.

        :param url: Urls of the form "https://www.qobuz.com/us-en/interpreter/{artist}/download-streaming-albums"
        :type url: str
        :rtype: str
        """
        async with client.session.get(url) as resp:
            match = QobuzInterpreterURL.interpreter_artist_regex.search(
                await resp.text(),
            )
            if match:
                return match.group(1)
        raise Exception(
            "Unable to extract artist id from interpreter url. Use a "
            "url that contains an artist id.",
        )
class DeezerDynamicURL(URL):
    """Handles deezer.page.link short links, which redirect to standard urls."""

    # Standard deezer url found in the body of the redirect target page.
    standard_link_re = re.compile(
        r"https://www\.deezer\.com/[a-z]{2}/(album|artist|playlist|track)/(\d+)"
    )
    dynamic_link_re = re.compile(r"https://deezer\.page\.link/\w+")

    # NOTE(review): `from_str` and `_extract_info_from_dynamic_link` take `cls`
    # without @classmethod decorators in this copy -- the decorators appear
    # stripped; confirm upstream.
    def from_str(cls, url: str) -> URL | None:
        match = cls.dynamic_link_re.match(url)
        if match is None:
            return None
        return cls(match, "deezer")

    async def into_pending(
        self,
        client: Client,
        config: Config,
        db: Database,
    ) -> Pending:
        """Follow the dynamic link, then dispatch on the resolved media type."""
        url = self.match.group(0)  # entire dynamic link
        media_type, item_id = await self._extract_info_from_dynamic_link(url, client)
        if media_type == "track":
            return PendingSingle(item_id, client, config, db)
        elif media_type == "album":
            return PendingAlbum(item_id, client, config, db)
        elif media_type == "playlist":
            return PendingPlaylist(item_id, client, config, db)
        elif media_type == "artist":
            return PendingArtist(item_id, client, config, db)
        elif media_type == "label":
            return PendingLabel(item_id, client, config, db)
        raise NotImplementedError

    async def _extract_info_from_dynamic_link(
        cls, url: str, client: Client
    ) -> tuple[str, str]:
        """Extract the item's type and ID from a dynamic link.

        :param url:
        :type url: str
        :rtype: Tuple[str, str] (media type, item id)
        """
        async with client.session.get(url) as resp:
            match = cls.standard_link_re.search(await resp.text())
            if match:
                return match.group(1), match.group(2)
        raise Exception("Unable to extract Deezer dynamic link.")
class SoundcloudURL(URL):
    """Handles soundcloud.com urls, which are resolved via the Soundcloud API."""

    source = "soundcloud"

    def __init__(self, url: str):
        # Unlike the other URL types, only the raw url string is stored; the
        # Soundcloud API resolves it to a concrete item in into_pending.
        self.url = url

    async def into_pending(
        self,
        client: SoundcloudClient,
        config: Config,
        db: Database,
    ) -> Pending:
        """Ask the Soundcloud API what the url points at, then wrap it."""
        resolved = await client.resolve_url(self.url)
        media_type = resolved["kind"]
        item_id = str(resolved["id"])
        if media_type == "track":
            return PendingSingle(item_id, client, config, db)
        elif media_type == "playlist":
            return PendingPlaylist(item_id, client, config, db)
        else:
            raise NotImplementedError(media_type)

    # NOTE(review): takes `cls` without a @classmethod decorator in this copy;
    # confirm upstream.
    def from_str(cls, url: str):
        soundcloud_url = SOUNDCLOUD_URL_REGEX.match(url)
        if soundcloud_url is None:
            return None
        return cls(soundcloud_url.group(0))
The provided code snippet includes necessary dependencies for implementing the `parse_url` function. Write a Python function `def parse_url(url: str) -> URL | None` to solve the following problem:
Return a URL type given a url string. Args: ---- url (str): Url to parse Returns: A URL type, or None if nothing matched.
Here is the function:
def parse_url(url: str) -> URL | None:
    """Return a URL type given a url string.

    Args:
    ----
        url (str): Url to parse

    Returns: A URL type, or None if nothing matched.
    """
    stripped = url.strip()
    # Try each recognizer in priority order; the first that matches wins.
    # TODO: the rest of the url types
    for url_type in (GenericURL, QobuzInterpreterURL, SoundcloudURL, DeezerDynamicURL):
        parsed = url_type.from_str(stripped)
        if parsed is not None:
            return parsed
    return None
4,773 | import asyncio
import hashlib
import logging
import time
from abc import ABC, abstractmethod
from click import launch
from rich.prompt import Prompt
from ..client import Client, DeezerClient, QobuzClient, SoundcloudClient, TidalClient
from ..config import Config
from ..console import console
from ..exceptions import AuthenticationError, MissingCredentialsError
class CredentialPrompter(ABC):
    """Abstract base for per-source interactive credential prompts.

    A subclass knows how to ask the user for the credentials its client
    needs, log in with them, and persist them to the config file.
    """

    # NOTE(review): the stubs below raise NotImplementedError and were
    # presumably @abstractmethod -- the decorators appear stripped from this
    # copy; confirm upstream.
    client: Client  # the (possibly not-yet-logged-in) client being configured

    def __init__(self, config: Config, client: Client):
        self.config = config
        # Subclasses verify they received the concrete client type they handle.
        self.client = self.type_check_client(client)

    def has_creds(self) -> bool:
        """Return True if credentials for this source are already configured."""
        raise NotImplementedError

    async def prompt_and_login(self):
        """Prompt for credentials in the appropriate way,
        and save them to the configuration.
        """
        raise NotImplementedError

    def save(self):
        """Save current config to file"""
        raise NotImplementedError

    def type_check_client(self, client: Client):
        """Validate/narrow `client` to the concrete type this prompter handles."""
        raise NotImplementedError
# Maps source name -> CredentialPrompter subclass that knows how to collect
# and store credentials for that source (used by get_prompter below).
PROMPTERS = {
    "qobuz": QobuzPrompter,
    "deezer": DeezerPrompter,
    "tidal": TidalPrompter,
    "soundcloud": SoundcloudPrompter,
}
class Config:
    """Loads a TOML config file and tracks a mutable per-session copy.

    `file` mirrors what is on disk and is written back on exit (only if
    marked modified); `session` is a deep copy that may be changed freely at
    runtime without being persisted.
    """

    def __init__(self, path: str, /):
        self.path = path
        with open(path) as toml_file:
            self.file: ConfigData = ConfigData.from_toml(toml_file.read())
        # Independent copy so runtime tweaks don't leak into the saved file.
        self.session: ConfigData = copy.deepcopy(self.file)

    def save_file(self):
        """Write `file` back to disk, but only if it was marked modified."""
        if not self.file.modified:
            return
        with open(self.path, "w") as toml_file:
            self.file.update_toml()
            toml_file.write(dumps(self.file.toml))

    # NOTE(review): takes `cls` without a @classmethod decorator in this copy;
    # confirm upstream.
    def defaults(cls):
        """Return a Config built from the bundled blank/default config file."""
        return cls(BLANK_CONFIG_PATH)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        # Persist any modifications when used as a context manager.
        self.save_file()
The provided code snippet includes necessary dependencies for implementing the `get_prompter` function. Write a Python function `def get_prompter(client: Client, config: Config) -> CredentialPrompter` to solve the following problem:
Return an instance of a prompter.
Here is the function:
def get_prompter(client: Client, config: Config) -> CredentialPrompter:
    """Return an instance of a prompter.

    Looks up the prompter class registered for the client's source and
    constructs it. Raises KeyError for an unknown source.
    """
    prompter_cls = PROMPTERS[client.source]
    return prompter_cls(config, client)
4,774 | import asyncio
import logging
import os
import shutil
from tempfile import gettempdir
from typing import Final, Optional
from .exceptions import ConversionError
# NOTE(review): everything below is a signature-only skeleton -- the method
# bodies and the subclass bodies were elided by whatever extracted this file,
# so this section does not parse as-is. Confirm behavior against the full
# converter module.
class Converter:
    """Base audio converter; concrete codecs subclass it (bodies elided here)."""

    def __init__(
        self,
        filename: str,
        ffmpeg_arg: Optional[str] = None,
        sampling_rate: Optional[int] = None,
        bit_depth: Optional[int] = None,
        copy_art: bool = True,
        remove_source: bool = False,
        show_progress: bool = False,
    ):
    async def convert(self, custom_fn: Optional[str] = None):
    def _gen_command(self):
    def _is_command_valid(self):
class FLAC(Converter):
class LAME(Converter):
    def get_quality_arg(self, rate):
class ALAC(Converter):
class Vorbis(Converter):
    def get_quality_arg(self, rate: int) -> str:
class OPUS(Converter):
    def get_quality_arg(self, _: int) -> str:
class AAC(Converter):
    def get_quality_arg(self, _: int) -> str:
def get(codec: str) -> type[Converter]:
    """Return the Converter subclass for `codec` (case-insensitive).

    Args:
        codec: Output codec/container name, e.g. "flac", "mp3", "m4a".

    Raises:
        KeyError: if `codec` is not a supported output codec.
    """
    converter_classes = {
        "FLAC": FLAC,
        "ALAC": ALAC,
        "MP3": LAME,
        "OPUS": OPUS,
        "OGG": Vorbis,
        "VORBIS": Vorbis,
        "AAC": AAC,
        "M4A": AAC,
    }
    try:
        return converter_classes[codec.upper()]
    except KeyError:
        # A bare KeyError('XYZ') is cryptic at the CLI; keep the exception
        # type (callers may catch KeyError) but add a helpful message.
        raise KeyError(
            f"Unsupported codec {codec!r}. "
            f"Supported: {', '.join(sorted(converter_classes))}"
        ) from None
4,775 | from dataclasses import dataclass
from typing import Callable
from rich.console import Group
from rich.live import Live
from rich.progress import (
BarColumn,
Progress,
TextColumn,
TimeRemainingColumn,
TransferSpeedColumn,
)
from rich.rule import Rule
from rich.text import Text
from .console import console
# NOTE(review): Handle is constructed positionally elsewhere in this module
# (Handle(update, done)), which requires a generated __init__ -- a @dataclass
# decorator appears to have been stripped from this copy; confirm upstream.
class Handle:
    """Pairs a progress-update callback with a completion callback.

    Usable as a context manager: entering yields the updater, exiting calls
    the completion callback.
    """

    update: Callable[[int], None]  # advance the progress display by N units
    done: Callable[[], None]  # finish/remove the progress display

    def __enter__(self):
        return self.update

    def __exit__(self, *_):
        self.done()
_p = ProgressManager()
def get_progress_callback(enabled: bool, total: int, desc: str) -> Handle:
    """Return a progress Handle; a no-op Handle when progress is disabled."""
    global _p
    if enabled:
        return _p.get_callback(total, desc)
    # Progress bars are off: hand back callbacks that silently do nothing.
    return Handle(lambda _: None, lambda: None)
4,776 | from dataclasses import dataclass
from typing import Callable
from rich.console import Group
from rich.live import Live
from rich.progress import (
BarColumn,
Progress,
TextColumn,
TimeRemainingColumn,
TransferSpeedColumn,
)
from rich.rule import Rule
from rich.text import Text
from .console import console
_p = ProgressManager()
def add_title(title: str):
    """Show `title` as a heading on the global progress display."""
    # `global` is only needed to *rebind* a module-level name; _p is merely
    # dereferenced here, so the statement was superfluous and is dropped.
    _p.add_title(title)
4,777 | from dataclasses import dataclass
from typing import Callable
from rich.console import Group
from rich.live import Live
from rich.progress import (
BarColumn,
Progress,
TextColumn,
TimeRemainingColumn,
TransferSpeedColumn,
)
from rich.rule import Rule
from rich.text import Text
from .console import console
_p = ProgressManager()
def remove_title(title: str):
    """Remove the heading `title` from the global progress display."""
    # `global` is only needed to *rebind* a module-level name; _p is merely
    # dereferenced here, so the statement was superfluous and is dropped.
    _p.remove_title(title)
4,778 | from dataclasses import dataclass
from typing import Callable
from rich.console import Group
from rich.live import Live
from rich.progress import (
BarColumn,
Progress,
TextColumn,
TimeRemainingColumn,
TransferSpeedColumn,
)
from rich.rule import Rule
from rich.text import Text
from .console import console
_p = ProgressManager()
def clear_progress():
    """Tear down the global progress display."""
    # `global` is only needed to *rebind* a module-level name; _p is merely
    # dereferenced here, so the statement was superfluous and is dropped.
    _p.cleanup()
4,779 | import asyncio
import base64
import functools
import hashlib
import itertools
import json
import logging
import os
import re
import shutil
import tempfile
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Callable, Optional
import aiofiles
import aiohttp
import m3u8
from Cryptodome.Cipher import AES, Blowfish
from Cryptodome.Util import Counter
from .. import converter
from ..exceptions import NonStreamableError
def generate_temp_path(url: str):
    """Build a unique path in the system temp dir for an in-flight download."""
    # Combine the url hash with the current time so concurrent downloads of
    # different urls (and repeats of the same url) get distinct scratch files.
    filename = f"__streamrip_{hash(url)}_{time.time()}.download"
    return os.path.join(tempfile.gettempdir(), filename)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.