Dataset schema (column name, type, and value statistics as reported by the dataset viewer):

id                 int32     values 0 – 252k
repo               string    lengths 7 – 55
path               string    lengths 4 – 127
func_name          string    lengths 1 – 88
original_string    string    lengths 75 – 19.8k
language           string    1 distinct value
code               string    lengths 75 – 19.8k
code_tokens        list
docstring          string    lengths 3 – 17.3k
docstring_tokens   list
sha                string    lengths 40 – 40
url                string    lengths 87 – 242
9,600
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.interpolate_with
def interpolate_with(self, other_tf, t):
    """Interpolate with another rigid transformation.

    Parameters
    ----------
    other_tf : :obj:`RigidTransform`
        The transform to interpolate with.
    t : float
        The interpolation step in [0,1], where 0 favors this RigidTransform.

    Returns
    -------
    :obj:`RigidTransform`
        The interpolated RigidTransform.

    Raises
    ------
    ValueError
        If t isn't in [0,1].
    """
    if t < 0 or t > 1:
        raise ValueError('Must interpolate between 0 and 1')

    interp_translation = (1.0 - t) * self.translation + t * other_tf.translation
    interp_rotation = transformations.quaternion_slerp(self.quaternion, other_tf.quaternion, t)
    interp_tf = RigidTransform(rotation=interp_rotation, translation=interp_translation,
                               from_frame=self.from_frame, to_frame=self.to_frame)
    return interp_tf
python
[ "def", "interpolate_with", "(", "self", ",", "other_tf", ",", "t", ")", ":", "if", "t", "<", "0", "or", "t", ">", "1", ":", "raise", "ValueError", "(", "'Must interpolate between 0 and 1'", ")", "interp_translation", "=", "(", "1.0", "-", "t", ")", "*", "self", ".", "translation", "+", "t", "*", "other_tf", ".", "translation", "interp_rotation", "=", "transformations", ".", "quaternion_slerp", "(", "self", ".", "quaternion", ",", "other_tf", ".", "quaternion", ",", "t", ")", "interp_tf", "=", "RigidTransform", "(", "rotation", "=", "interp_rotation", ",", "translation", "=", "interp_translation", ",", "from_frame", "=", "self", ".", "from_frame", ",", "to_frame", "=", "self", ".", "to_frame", ")", "return", "interp_tf" ]
Interpolate with another rigid transformation.

Parameters
----------
other_tf : :obj:`RigidTransform`
    The transform to interpolate with.
t : float
    The interpolation step in [0,1], where 0 favors this RigidTransform.

Returns
-------
:obj:`RigidTransform`
    The interpolated RigidTransform.

Raises
------
ValueError
    If t isn't in [0,1].
[ "Interpolate", "with", "another", "rigid", "transformation", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L288-L316
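A minimal usage sketch for interpolate_with, assuming autolab_core is installed and importable; the frame names and poses are illustrative, and z_axis_rotation is the static helper shown later in this file.

import numpy as np
from autolab_core import RigidTransform

T_start = RigidTransform(rotation=np.eye(3), translation=np.zeros(3),
                         from_frame='obj', to_frame='world')
T_end = RigidTransform(rotation=RigidTransform.z_axis_rotation(np.pi / 2),
                       translation=np.array([1.0, 0.0, 0.0]),
                       from_frame='obj', to_frame='world')
# t=0.5 gives the halfway pose: 45 degrees about z, translation [0.5, 0, 0]
T_mid = T_start.interpolate_with(T_end, 0.5)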
9,601
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.linear_trajectory_to
def linear_trajectory_to(self, target_tf, traj_len):
    """Creates a trajectory of poses linearly interpolated from this tf to a
    target tf.

    Parameters
    ----------
    target_tf : :obj:`RigidTransform`
        The RigidTransform to interpolate to.
    traj_len : int
        The number of RigidTransforms in the returned trajectory.

    Returns
    -------
    :obj:`list` of :obj:`RigidTransform`
        A list of interpolated transforms from this transform to the target.
    """
    if traj_len < 0:
        raise ValueError('Traj len must be at least 0')
    delta_t = 1.0 / (traj_len + 1)
    t = 0.0
    traj = []
    while t < 1.0:
        traj.append(self.interpolate_with(target_tf, t))
        t += delta_t
    traj.append(target_tf)
    return traj
python
[ "def", "linear_trajectory_to", "(", "self", ",", "target_tf", ",", "traj_len", ")", ":", "if", "traj_len", "<", "0", ":", "raise", "ValueError", "(", "'Traj len must at least 0'", ")", "delta_t", "=", "1.0", "/", "(", "traj_len", "+", "1", ")", "t", "=", "0.0", "traj", "=", "[", "]", "while", "t", "<", "1.0", ":", "traj", ".", "append", "(", "self", ".", "interpolate_with", "(", "target_tf", ",", "t", ")", ")", "t", "+=", "delta_t", "traj", ".", "append", "(", "target_tf", ")", "return", "traj" ]
Creates a trajectory of poses linearly interpolated from this tf to a target tf.

Parameters
----------
target_tf : :obj:`RigidTransform`
    The RigidTransform to interpolate to.
traj_len : int
    The number of RigidTransforms in the returned trajectory.

Returns
-------
:obj:`list` of :obj:`RigidTransform`
    A list of interpolated transforms from this transform to the target.
[ "Creates", "a", "trajectory", "of", "poses", "linearly", "interpolated", "from", "this", "tf", "to", "a", "target", "tf", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L318-L342
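A usage sketch for linear_trajectory_to under the same assumptions. Note that with delta_t = 1/(traj_len + 1) and the final append, the returned list generally holds about traj_len + 2 poses (both endpoints plus the interior samples), not exactly traj_len as the docstring suggests, and the count can shift by one with float rounding.

import numpy as np
from autolab_core import RigidTransform

T0 = RigidTransform(from_frame='obj', to_frame='world')  # identity pose
T1 = RigidTransform(translation=np.array([0.0, 0.0, 1.0]),
                    from_frame='obj', to_frame='world')
traj = T0.linear_trajectory_to(T1, traj_len=8)
print(len(traj))  # about 10 poses for traj_len=8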
9,602
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.apply
def apply(self, points):
    """Applies the rigid transformation to a set of 3D objects.

    Parameters
    ----------
    points : :obj:`BagOfPoints`
        A set of objects to transform. Could be any subclass of BagOfPoints.

    Returns
    -------
    :obj:`BagOfPoints`
        A transformed set of objects of the same type as the input.

    Raises
    ------
    ValueError
        If the input is not a Bag of 3D points or if the points are not in
        this transform's from_frame.
    """
    if not isinstance(points, BagOfPoints):
        raise ValueError('Rigid transformations can only be applied to bags of points')
    if points.dim != 3:
        raise ValueError('Rigid transformations can only be applied to 3-dimensional points')
    if points.frame != self._from_frame:
        raise ValueError('Cannot transform points in frame %s with rigid transformation from frame %s to frame %s' % (points.frame, self._from_frame, self._to_frame))

    if isinstance(points, BagOfVectors):
        # rotation only
        x = points.data
        x_tf = self.rotation.dot(x)
    else:
        # extract numpy data, homogenize, and transform
        x = points.data
        if len(x.shape) == 1:
            x = x[:, np.newaxis]
        x_homog = np.r_[x, np.ones([1, points.num_points])]
        x_homog_tf = self.matrix.dot(x_homog)
        x_tf = x_homog_tf[0:3, :]

    # output in BagOfPoints format
    if isinstance(points, PointCloud):
        return PointCloud(x_tf, frame=self._to_frame)
    elif isinstance(points, Point):
        return Point(x_tf, frame=self._to_frame)
    elif isinstance(points, Direction):
        return Direction(x_tf, frame=self._to_frame)
    elif isinstance(points, NormalCloud):
        return NormalCloud(x_tf, frame=self._to_frame)
    raise ValueError('Type %s not yet supported' % (type(points)))
python
[ "def", "apply", "(", "self", ",", "points", ")", ":", "if", "not", "isinstance", "(", "points", ",", "BagOfPoints", ")", ":", "raise", "ValueError", "(", "'Rigid transformations can only be applied to bags of points'", ")", "if", "points", ".", "dim", "!=", "3", ":", "raise", "ValueError", "(", "'Rigid transformations can only be applied to 3-dimensional points'", ")", "if", "points", ".", "frame", "!=", "self", ".", "_from_frame", ":", "raise", "ValueError", "(", "'Cannot transform points in frame %s with rigid transformation from frame %s to frame %s'", "%", "(", "points", ".", "frame", ",", "self", ".", "_from_frame", ",", "self", ".", "_to_frame", ")", ")", "if", "isinstance", "(", "points", ",", "BagOfVectors", ")", ":", "# rotation only", "x", "=", "points", ".", "data", "x_tf", "=", "self", ".", "rotation", ".", "dot", "(", "x", ")", "else", ":", "# extract numpy data, homogenize, and transform", "x", "=", "points", ".", "data", "if", "len", "(", "x", ".", "shape", ")", "==", "1", ":", "x", "=", "x", "[", ":", ",", "np", ".", "newaxis", "]", "x_homog", "=", "np", ".", "r_", "[", "x", ",", "np", ".", "ones", "(", "[", "1", ",", "points", ".", "num_points", "]", ")", "]", "x_homog_tf", "=", "self", ".", "matrix", ".", "dot", "(", "x_homog", ")", "x_tf", "=", "x_homog_tf", "[", "0", ":", "3", ",", ":", "]", "# output in BagOfPoints format", "if", "isinstance", "(", "points", ",", "PointCloud", ")", ":", "return", "PointCloud", "(", "x_tf", ",", "frame", "=", "self", ".", "_to_frame", ")", "elif", "isinstance", "(", "points", ",", "Point", ")", ":", "return", "Point", "(", "x_tf", ",", "frame", "=", "self", ".", "_to_frame", ")", "elif", "isinstance", "(", "points", ",", "Direction", ")", ":", "return", "Direction", "(", "x_tf", ",", "frame", "=", "self", ".", "_to_frame", ")", "elif", "isinstance", "(", "points", ",", "NormalCloud", ")", ":", "return", "NormalCloud", "(", "x_tf", ",", "frame", "=", "self", ".", "_to_frame", ")", "raise", "ValueError", "(", "'Type %s not yet supported'", "%", "(", "type", "(", "points", ")", ")", ")" ]
Applies the rigid transformation to a set of 3D objects.

Parameters
----------
points : :obj:`BagOfPoints`
    A set of objects to transform. Could be any subclass of BagOfPoints.

Returns
-------
:obj:`BagOfPoints`
    A transformed set of objects of the same type as the input.

Raises
------
ValueError
    If the input is not a Bag of 3D points or if the points are not in this
    transform's from_frame.
[ "Applies", "the", "rigid", "transformation", "to", "a", "set", "of", "3D", "objects", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L344-L392
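A sketch of apply on a PointCloud, assuming autolab_core exports the PointCloud class that the function's own isinstance checks reference; the data and frame names are illustrative.

import numpy as np
from autolab_core import PointCloud, RigidTransform

points_cam = PointCloud(np.random.rand(3, 100), frame='camera')  # 3xN array
T_camera_world = RigidTransform(translation=np.array([0.0, 0.0, 1.0]),
                                from_frame='camera', to_frame='world')
points_world = T_camera_world.apply(points_cam)
assert points_world.frame == 'world'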
9,603
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.dot
def dot(self, other_tf):
    """Compose this rigid transform with another.

    This transform is on the left-hand side of the composition.

    Parameters
    ----------
    other_tf : :obj:`RigidTransform`
        The other RigidTransform to compose with this one.

    Returns
    -------
    :obj:`RigidTransform`
        A RigidTransform that represents the composition.

    Raises
    ------
    ValueError
        If the to_frame of other_tf is not identical to this transform's
        from_frame.
    """
    if other_tf.to_frame != self.from_frame:
        raise ValueError('To frame of right hand side ({0}) must match from frame of left hand side ({1})'.format(other_tf.to_frame, self.from_frame))

    pose_tf = self.matrix.dot(other_tf.matrix)
    rotation, translation = RigidTransform.rotation_and_translation_from_matrix(pose_tf)
    if isinstance(other_tf, SimilarityTransform):
        return SimilarityTransform(self.rotation, self.translation, scale=1.0,
                                   from_frame=self.from_frame, to_frame=self.to_frame) * other_tf
    return RigidTransform(rotation, translation,
                          from_frame=other_tf.from_frame, to_frame=self.to_frame)
python
[ "def", "dot", "(", "self", ",", "other_tf", ")", ":", "if", "other_tf", ".", "to_frame", "!=", "self", ".", "from_frame", ":", "raise", "ValueError", "(", "'To frame of right hand side ({0}) must match from frame of left hand side ({1})'", ".", "format", "(", "other_tf", ".", "to_frame", ",", "self", ".", "from_frame", ")", ")", "pose_tf", "=", "self", ".", "matrix", ".", "dot", "(", "other_tf", ".", "matrix", ")", "rotation", ",", "translation", "=", "RigidTransform", ".", "rotation_and_translation_from_matrix", "(", "pose_tf", ")", "if", "isinstance", "(", "other_tf", ",", "SimilarityTransform", ")", ":", "return", "SimilarityTransform", "(", "self", ".", "rotation", ",", "self", ".", "translation", ",", "scale", "=", "1.0", ",", "from_frame", "=", "self", ".", "from_frame", ",", "to_frame", "=", "self", ".", "to_frame", ")", "*", "other_tf", "return", "RigidTransform", "(", "rotation", ",", "translation", ",", "from_frame", "=", "other_tf", ".", "from_frame", ",", "to_frame", "=", "self", ".", "to_frame", ")" ]
Compose this rigid transform with another.

This transform is on the left-hand side of the composition.

Parameters
----------
other_tf : :obj:`RigidTransform`
    The other RigidTransform to compose with this one.

Returns
-------
:obj:`RigidTransform`
    A RigidTransform that represents the composition.

Raises
------
ValueError
    If the to_frame of other_tf is not identical to this transform's
    from_frame.
[ "Compose", "this", "rigid", "transform", "with", "another", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L394-L427
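A sketch of the frame bookkeeping that dot enforces; the frame names 'a', 'b', and 'c' are hypothetical.

from autolab_core import RigidTransform

T_b_a = RigidTransform(from_frame='b', to_frame='a')
T_c_b = RigidTransform(from_frame='c', to_frame='b')
T_c_a = T_b_a.dot(T_c_b)  # ok: RHS to_frame 'b' matches LHS from_frame 'b'
assert (T_c_a.from_frame, T_c_a.to_frame) == ('c', 'a')
# T_c_b.dot(T_b_a) would raise ValueError, since 'a' != 'c'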
9,604
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.inverse
def inverse(self):
    """Take the inverse of the rigid transform.

    Returns
    -------
    :obj:`RigidTransform`
        The inverse of this RigidTransform.
    """
    inv_rotation = self.rotation.T
    inv_translation = np.dot(-self.rotation.T, self.translation)
    return RigidTransform(inv_rotation, inv_translation,
                          from_frame=self._to_frame, to_frame=self._from_frame)
python
[ "def", "inverse", "(", "self", ")", ":", "inv_rotation", "=", "self", ".", "rotation", ".", "T", "inv_translation", "=", "np", ".", "dot", "(", "-", "self", ".", "rotation", ".", "T", ",", "self", ".", "translation", ")", "return", "RigidTransform", "(", "inv_rotation", ",", "inv_translation", ",", "from_frame", "=", "self", ".", "_to_frame", ",", "to_frame", "=", "self", ".", "_from_frame", ")" ]
Take the inverse of the rigid transform.

Returns
-------
:obj:`RigidTransform`
    The inverse of this RigidTransform.
[ "Take", "the", "inverse", "of", "the", "rigid", "transform", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L456-L468
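A round-trip sketch for inverse: composing a transform with its inverse should give the identity, with the frames mapped back to where they started.

import numpy as np
from autolab_core import RigidTransform

T = RigidTransform(rotation=RigidTransform.x_axis_rotation(0.3),
                   translation=np.array([0.1, 0.2, 0.3]),
                   from_frame='a', to_frame='b')
T_id = T.inverse().dot(T)  # maps frame 'a' back to frame 'a'
assert np.allclose(T_id.matrix, np.eye(4), atol=1e-10)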
9,605
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.save
def save(self, filename):
    """Save the RigidTransform to a file.

    The file format is:
    from_frame
    to_frame
    translation (space separated)
    rotation_row_0 (space separated)
    rotation_row_1 (space separated)
    rotation_row_2 (space separated)

    Parameters
    ----------
    filename : :obj:`str`
        The file to save the transform to.

    Raises
    ------
    ValueError
        If filename's extension isn't .tf.
    """
    file_root, file_ext = os.path.splitext(filename)
    if file_ext.lower() != TF_EXTENSION:
        raise ValueError('Extension %s not supported for RigidTransform. Must be stored with extension %s' % (file_ext, TF_EXTENSION))

    f = open(filename, 'w')
    f.write('%s\n' % (self._from_frame))
    f.write('%s\n' % (self._to_frame))
    f.write('%f %f %f\n' % (self._translation[0], self._translation[1], self._translation[2]))
    f.write('%f %f %f\n' % (self._rotation[0, 0], self._rotation[0, 1], self._rotation[0, 2]))
    f.write('%f %f %f\n' % (self._rotation[1, 0], self._rotation[1, 1], self._rotation[1, 2]))
    f.write('%f %f %f\n' % (self._rotation[2, 0], self._rotation[2, 1], self._rotation[2, 2]))
    f.close()
python
[ "def", "save", "(", "self", ",", "filename", ")", ":", "file_root", ",", "file_ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "file_ext", ".", "lower", "(", ")", "!=", "TF_EXTENSION", ":", "raise", "ValueError", "(", "'Extension %s not supported for RigidTransform. Must be stored with extension %s'", "%", "(", "file_ext", ",", "TF_EXTENSION", ")", ")", "f", "=", "open", "(", "filename", ",", "'w'", ")", "f", ".", "write", "(", "'%s\\n'", "%", "(", "self", ".", "_from_frame", ")", ")", "f", ".", "write", "(", "'%s\\n'", "%", "(", "self", ".", "_to_frame", ")", ")", "f", ".", "write", "(", "'%f %f %f\\n'", "%", "(", "self", ".", "_translation", "[", "0", "]", ",", "self", ".", "_translation", "[", "1", "]", ",", "self", ".", "_translation", "[", "2", "]", ")", ")", "f", ".", "write", "(", "'%f %f %f\\n'", "%", "(", "self", ".", "_rotation", "[", "0", ",", "0", "]", ",", "self", ".", "_rotation", "[", "0", ",", "1", "]", ",", "self", ".", "_rotation", "[", "0", ",", "2", "]", ")", ")", "f", ".", "write", "(", "'%f %f %f\\n'", "%", "(", "self", ".", "_rotation", "[", "1", ",", "0", "]", ",", "self", ".", "_rotation", "[", "1", ",", "1", "]", ",", "self", ".", "_rotation", "[", "1", ",", "2", "]", ")", ")", "f", ".", "write", "(", "'%f %f %f\\n'", "%", "(", "self", ".", "_rotation", "[", "2", ",", "0", "]", ",", "self", ".", "_rotation", "[", "2", ",", "1", "]", ",", "self", ".", "_rotation", "[", "2", ",", "2", "]", ")", ")", "f", ".", "close", "(", ")" ]
Save the RigidTransform to a file.

The file format is:
from_frame
to_frame
translation (space separated)
rotation_row_0 (space separated)
rotation_row_1 (space separated)
rotation_row_2 (space separated)

Parameters
----------
filename : :obj:`str`
    The file to save the transform to.

Raises
------
ValueError
    If filename's extension isn't .tf.
[ "Save", "the", "RigidTransform", "to", "a", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L470-L502
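A sketch of save, assuming TF_EXTENSION is '.tf' as the docstring implies; the commented file contents below are what the format description gives for an identity transform.

from autolab_core import RigidTransform

T = RigidTransform(from_frame='gripper', to_frame='world')
T.save('T_gripper_world.tf')  # any other extension raises ValueError
with open('T_gripper_world.tf') as f:
    print(f.read())
# gripper
# world
# 0.000000 0.000000 0.000000
# 1.000000 0.000000 0.000000
# 0.000000 1.000000 0.000000
# 0.000000 0.000000 1.000000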
9,606
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.as_frames
def as_frames(self, from_frame, to_frame='world'):
    """Return a shallow copy of this rigid transform with just the frames
    changed.

    Parameters
    ----------
    from_frame : :obj:`str`
        The new from_frame.
    to_frame : :obj:`str`
        The new to_frame.

    Returns
    -------
    :obj:`RigidTransform`
        The RigidTransform with new frames.
    """
    return RigidTransform(self.rotation, self.translation, from_frame, to_frame)
python
[ "def", "as_frames", "(", "self", ",", "from_frame", ",", "to_frame", "=", "'world'", ")", ":", "return", "RigidTransform", "(", "self", ".", "rotation", ",", "self", ".", "translation", ",", "from_frame", ",", "to_frame", ")" ]
Return a shallow copy of this rigid transform with just the frames changed.

Parameters
----------
from_frame : :obj:`str`
    The new from_frame.
to_frame : :obj:`str`
    The new to_frame.

Returns
-------
:obj:`RigidTransform`
    The RigidTransform with new frames.
[ "Return", "a", "shallow", "copy", "of", "this", "rigid", "transform", "with", "just", "the", "frames", "changed", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L504-L521
9,607
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.rotation_from_quaternion
def rotation_from_quaternion(q_wxyz):
    """Convert quaternion array to rotation matrix.

    Parameters
    ----------
    q_wxyz : :obj:`numpy.ndarray` of float
        A quaternion in wxyz order.

    Returns
    -------
    :obj:`numpy.ndarray` of float
        A 3x3 rotation matrix made from the quaternion.
    """
    q_xyzw = np.array([q_wxyz[1], q_wxyz[2], q_wxyz[3], q_wxyz[0]])
    R = transformations.quaternion_matrix(q_xyzw)[:3, :3]
    return R
python
[ "def", "rotation_from_quaternion", "(", "q_wxyz", ")", ":", "q_xyzw", "=", "np", ".", "array", "(", "[", "q_wxyz", "[", "1", "]", ",", "q_wxyz", "[", "2", "]", ",", "q_wxyz", "[", "3", "]", ",", "q_wxyz", "[", "0", "]", "]", ")", "R", "=", "transformations", ".", "quaternion_matrix", "(", "q_xyzw", ")", "[", ":", "3", ",", ":", "3", "]", "return", "R" ]
Convert quaternion array to rotation matrix.

Parameters
----------
q_wxyz : :obj:`numpy.ndarray` of float
    A quaternion in wxyz order.

Returns
-------
:obj:`numpy.ndarray` of float
    A 3x3 rotation matrix made from the quaternion.
[ "Convert", "quaternion", "array", "to", "rotation", "matrix", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L700-L715
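A sketch of rotation_from_quaternion; note the scalar-first (wxyz) input order, which the helper reorders internally for the bundled transformations module. The identity quaternion is used so the expected output is unambiguous regardless of convention.

import numpy as np
from autolab_core import RigidTransform

q_wxyz = np.array([1.0, 0.0, 0.0, 0.0])  # identity quaternion, scalar first
R = RigidTransform.rotation_from_quaternion(q_wxyz)
assert np.allclose(R, np.eye(3))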
9,608
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.quaternion_from_axis_angle
def quaternion_from_axis_angle(v):
    """Convert axis-angle representation to a quaternion vector.

    Parameters
    ----------
    v : :obj:`numpy.ndarray` of float
        An axis-angle representation.

    Returns
    -------
    :obj:`numpy.ndarray` of float
        A quaternion vector from the axis-angle vector.
    """
    theta = np.linalg.norm(v)
    if theta > 0:
        v = v / np.linalg.norm(v)
    ax, ay, az = v
    qx = ax * np.sin(0.5 * theta)
    qy = ay * np.sin(0.5 * theta)
    qz = az * np.sin(0.5 * theta)
    qw = np.cos(0.5 * theta)
    q = np.array([qw, qx, qy, qz])
    return q
python
[ "def", "quaternion_from_axis_angle", "(", "v", ")", ":", "theta", "=", "np", ".", "linalg", ".", "norm", "(", "v", ")", "if", "theta", ">", "0", ":", "v", "=", "v", "/", "np", ".", "linalg", ".", "norm", "(", "v", ")", "ax", ",", "ay", ",", "az", "=", "v", "qx", "=", "ax", "*", "np", ".", "sin", "(", "0.5", "*", "theta", ")", "qy", "=", "ay", "*", "np", ".", "sin", "(", "0.5", "*", "theta", ")", "qz", "=", "az", "*", "np", ".", "sin", "(", "0.5", "*", "theta", ")", "qw", "=", "np", ".", "cos", "(", "0.5", "*", "theta", ")", "q", "=", "np", ".", "array", "(", "[", "qw", ",", "qx", ",", "qy", ",", "qz", "]", ")", "return", "q" ]
Convert axis-angle representation to a quaternion vector.

Parameters
----------
v : :obj:`numpy.ndarray` of float
    An axis-angle representation.

Returns
-------
:obj:`numpy.ndarray` of float
    A quaternion vector from the axis-angle vector.
[ "Convert", "axis", "-", "angle", "representation", "to", "a", "quaternion", "vector", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L719-L741
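A sketch of quaternion_from_axis_angle: the input encodes the axis in its direction and the angle in its norm, so 0.5 * [1, 0, 0] means 0.5 rad about x, and the expected wxyz output follows directly from the half-angle formulas in the code above.

import numpy as np
from autolab_core import RigidTransform

v = 0.5 * np.array([1.0, 0.0, 0.0])  # 0.5 rad about the x axis
q = RigidTransform.quaternion_from_axis_angle(v)
assert np.allclose(q, [np.cos(0.25), np.sin(0.25), 0.0, 0.0])  # wxyz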
9,609
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.transform_from_dual_quaternion
def transform_from_dual_quaternion(dq, from_frame='unassigned', to_frame='world'):
    """Create a RigidTransform from a DualQuaternion.

    Parameters
    ----------
    dq : :obj:`DualQuaternion`
        The DualQuaternion to transform.
    from_frame : :obj:`str`
        A name for the frame of reference on which this transform operates.
    to_frame : :obj:`str`
        A name for the frame of reference to which this transform moves objects.

    Returns
    -------
    :obj:`RigidTransform`
        The RigidTransform made from the DualQuaternion.
    """
    quaternion = dq.qr
    translation = 2 * dq.qd[1:]
    return RigidTransform(rotation=quaternion, translation=translation,
                          from_frame=from_frame, to_frame=to_frame)
python
[ "def", "transform_from_dual_quaternion", "(", "dq", ",", "from_frame", "=", "'unassigned'", ",", "to_frame", "=", "'world'", ")", ":", "quaternion", "=", "dq", ".", "qr", "translation", "=", "2", "*", "dq", ".", "qd", "[", "1", ":", "]", "return", "RigidTransform", "(", "rotation", "=", "quaternion", ",", "translation", "=", "translation", ",", "from_frame", "=", "from_frame", ",", "to_frame", "=", "to_frame", ")" ]
Create a RigidTransform from a DualQuaternion.

Parameters
----------
dq : :obj:`DualQuaternion`
    The DualQuaternion to transform.
from_frame : :obj:`str`
    A name for the frame of reference on which this transform operates.
to_frame : :obj:`str`
    A name for the frame of reference to which this transform moves objects.

Returns
-------
:obj:`RigidTransform`
    The RigidTransform made from the DualQuaternion.
[ "Create", "a", "RigidTransform", "from", "a", "DualQuaternion", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L760-L783
9,610
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.rotation_and_translation_from_matrix
def rotation_and_translation_from_matrix(matrix):
    """Helper to convert 4x4 matrix to rotation matrix and translation vector.

    Parameters
    ----------
    matrix : :obj:`numpy.ndarray` of float
        4x4 rigid transformation matrix to be converted.

    Returns
    -------
    :obj:`tuple` of :obj:`numpy.ndarray` of float
        A 3x3 rotation matrix and a 3-entry translation vector.

    Raises
    ------
    ValueError
        If the incoming matrix isn't a 4x4 ndarray.
    """
    if not isinstance(matrix, np.ndarray) or \
            matrix.shape[0] != 4 or matrix.shape[1] != 4:
        raise ValueError('Matrix must be specified as a 4x4 ndarray')
    rotation = matrix[:3, :3]
    translation = matrix[:3, 3]
    return rotation, translation
python
[ "def", "rotation_and_translation_from_matrix", "(", "matrix", ")", ":", "if", "not", "isinstance", "(", "matrix", ",", "np", ".", "ndarray", ")", "or", "matrix", ".", "shape", "[", "0", "]", "!=", "4", "or", "matrix", ".", "shape", "[", "1", "]", "!=", "4", ":", "raise", "ValueError", "(", "'Matrix must be specified as a 4x4 ndarray'", ")", "rotation", "=", "matrix", "[", ":", "3", ",", ":", "3", "]", "translation", "=", "matrix", "[", ":", "3", ",", "3", "]", "return", "rotation", ",", "translation" ]
Helper to convert 4x4 matrix to rotation matrix and translation vector.

Parameters
----------
matrix : :obj:`numpy.ndarray` of float
    4x4 rigid transformation matrix to be converted.

Returns
-------
:obj:`tuple` of :obj:`numpy.ndarray` of float
    A 3x3 rotation matrix and a 3-entry translation vector.

Raises
------
ValueError
    If the incoming matrix isn't a 4x4 ndarray.
[ "Helper", "to", "convert", "4x4", "matrix", "to", "rotation", "matrix", "and", "translation", "vector", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L786-L809
9,611
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.rotation_from_axis_and_origin
def rotation_from_axis_and_origin(axis, origin, angle, to_frame='world'):
    """Returns a rotation matrix around some arbitrary axis, about the point
    origin, using Rodrigues' formula.

    Parameters
    ----------
    axis : :obj:`numpy.ndarray` of float
        3x1 vector representing which axis we should be rotating about
    origin : :obj:`numpy.ndarray` of float
        3x1 vector representing where the rotation should be centered around
    angle : float
        how much to rotate (in radians)
    to_frame : :obj:`str`
        A name for the frame of reference to which this transform moves objects.
    """
    axis_hat = np.array([[0, -axis[2], axis[1]],
                         [axis[2], 0, -axis[0]],
                         [-axis[1], axis[0], 0]])
    # Rodrigues' formula
    R = RigidTransform(
        np.eye(3) + np.sin(angle) * axis_hat + (1 - np.cos(angle)) * axis_hat.dot(axis_hat),
        from_frame=to_frame,
        to_frame=to_frame
    )
    return RigidTransform(translation=origin, from_frame=to_frame, to_frame=to_frame) \
        .dot(R) \
        .dot(RigidTransform(translation=-origin, from_frame=to_frame, to_frame=to_frame))
python
[ "def", "rotation_from_axis_and_origin", "(", "axis", ",", "origin", ",", "angle", ",", "to_frame", "=", "'world'", ")", ":", "axis_hat", "=", "np", ".", "array", "(", "[", "[", "0", ",", "-", "axis", "[", "2", "]", ",", "axis", "[", "1", "]", "]", ",", "[", "axis", "[", "2", "]", ",", "0", ",", "-", "axis", "[", "0", "]", "]", ",", "[", "-", "axis", "[", "1", "]", ",", "axis", "[", "0", "]", ",", "0", "]", "]", ")", "# Rodrigues Formula", "R", "=", "RigidTransform", "(", "np", ".", "eye", "(", "3", ")", "+", "np", ".", "sin", "(", "angle", ")", "*", "axis_hat", "+", "(", "1", "-", "np", ".", "cos", "(", "angle", ")", ")", "*", "axis_hat", ".", "dot", "(", "axis_hat", ")", ",", "from_frame", "=", "to_frame", ",", "to_frame", "=", "to_frame", ")", "return", "RigidTransform", "(", "translation", "=", "origin", ",", "from_frame", "=", "to_frame", ",", "to_frame", "=", "to_frame", ")", ".", "dot", "(", "R", ")", ".", "dot", "(", "RigidTransform", "(", "translation", "=", "-", "origin", ",", "from_frame", "=", "to_frame", ",", "to_frame", "=", "to_frame", ")", ")" ]
Returns a rotation matrix around some arbitrary axis, about the point origin,
using Rodrigues' formula.

Parameters
----------
axis : :obj:`numpy.ndarray` of float
    3x1 vector representing which axis we should be rotating about
origin : :obj:`numpy.ndarray` of float
    3x1 vector representing where the rotation should be centered around
angle : float
    how much to rotate (in radians)
to_frame : :obj:`str`
    A name for the frame of reference to which this transform moves objects.
[ "Returns", "a", "rotation", "matrix", "around", "some", "arbitrary", "axis", "about", "the", "point", "origin", "using", "Rodrigues", "Formula" ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L812-L840
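A sketch of rotation_from_axis_and_origin, assuming autolab_core exports the Point class that apply dispatches on: rotating (2, 0, 0) by pi/2 about a vertical axis through (1, 0, 0) should land at roughly (1, 1, 0).

import numpy as np
from autolab_core import Point, RigidTransform

T = RigidTransform.rotation_from_axis_and_origin(
    axis=np.array([0.0, 0.0, 1.0]),
    origin=np.array([1.0, 0.0, 0.0]),
    angle=np.pi / 2)
p = Point(np.array([2.0, 0.0, 0.0]), frame='world')
p_rot = T.apply(p)
assert np.allclose(p_rot.data.ravel(), [1.0, 1.0, 0.0], atol=1e-10)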
9,612
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.x_axis_rotation
def x_axis_rotation(theta):
    """Generates a 3x3 rotation matrix for a rotation of angle theta about
    the x axis.

    Parameters
    ----------
    theta : float
        amount to rotate, in radians

    Returns
    -------
    :obj:`numpy.ndarray` of float
        The 3x3 rotation matrix for the given angle.
    """
    R = np.array([[1, 0, 0],
                  [0, np.cos(theta), -np.sin(theta)],
                  [0, np.sin(theta), np.cos(theta)]])
    return R
python
[ "def", "x_axis_rotation", "(", "theta", ")", ":", "R", "=", "np", ".", "array", "(", "[", "[", "1", ",", "0", ",", "0", ",", "]", ",", "[", "0", ",", "np", ".", "cos", "(", "theta", ")", ",", "-", "np", ".", "sin", "(", "theta", ")", "]", ",", "[", "0", ",", "np", ".", "sin", "(", "theta", ")", ",", "np", ".", "cos", "(", "theta", ")", "]", "]", ")", "return", "R" ]
Generates a 3x3 rotation matrix for a rotation of angle theta about the x axis.

Parameters
----------
theta : float
    amount to rotate, in radians

Returns
-------
:obj:`numpy.ndarray` of float
    The 3x3 rotation matrix for the given angle.
[ "Generates", "a", "3x3", "rotation", "matrix", "for", "a", "rotation", "of", "angle", "theta", "about", "the", "x", "axis", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L843-L860
9,613
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.y_axis_rotation
def y_axis_rotation(theta):
    """Generates a 3x3 rotation matrix for a rotation of angle theta about
    the y axis.

    Parameters
    ----------
    theta : float
        amount to rotate, in radians

    Returns
    -------
    :obj:`numpy.ndarray` of float
        The 3x3 rotation matrix for the given angle.
    """
    R = np.array([[np.cos(theta), 0, np.sin(theta)],
                  [0, 1, 0],
                  [-np.sin(theta), 0, np.cos(theta)]])
    return R
python
[ "def", "y_axis_rotation", "(", "theta", ")", ":", "R", "=", "np", ".", "array", "(", "[", "[", "np", ".", "cos", "(", "theta", ")", ",", "0", ",", "np", ".", "sin", "(", "theta", ")", "]", ",", "[", "0", ",", "1", ",", "0", "]", ",", "[", "-", "np", ".", "sin", "(", "theta", ")", ",", "0", ",", "np", ".", "cos", "(", "theta", ")", "]", "]", ")", "return", "R" ]
Generates a 3x3 rotation matrix for a rotation of angle theta about the y axis.

Parameters
----------
theta : float
    amount to rotate, in radians

Returns
-------
:obj:`numpy.ndarray` of float
    The 3x3 rotation matrix for the given angle.
[ "Generates", "a", "3x3", "rotation", "matrix", "for", "a", "rotation", "of", "angle", "theta", "about", "the", "y", "axis", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L863-L880
9,614
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.z_axis_rotation
def z_axis_rotation(theta):
    """Generates a 3x3 rotation matrix for a rotation of angle theta about
    the z axis.

    Parameters
    ----------
    theta : float
        amount to rotate, in radians

    Returns
    -------
    :obj:`numpy.ndarray` of float
        The 3x3 rotation matrix for the given angle.
    """
    R = np.array([[np.cos(theta), -np.sin(theta), 0],
                  [np.sin(theta), np.cos(theta), 0],
                  [0, 0, 1]])
    return R
python
[ "def", "z_axis_rotation", "(", "theta", ")", ":", "R", "=", "np", ".", "array", "(", "[", "[", "np", ".", "cos", "(", "theta", ")", ",", "-", "np", ".", "sin", "(", "theta", ")", ",", "0", "]", ",", "[", "np", ".", "sin", "(", "theta", ")", ",", "np", ".", "cos", "(", "theta", ")", ",", "0", "]", ",", "[", "0", ",", "0", ",", "1", "]", "]", ")", "return", "R" ]
Generates a 3x3 rotation matrix for a rotation of angle theta about the z axis.

Parameters
----------
theta : float
    amount to rotate, in radians

Returns
-------
:obj:`numpy.ndarray` of float
    The 3x3 rotation matrix for the given angle.
[ "Generates", "a", "3x3", "rotation", "matrix", "for", "a", "rotation", "of", "angle", "theta", "about", "the", "z", "axis", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L883-L900
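A quick check covering the three axis-rotation helpers above: each should be orthonormal with determinant +1, and a quarter turn about z should send x to y.

import numpy as np
from autolab_core import RigidTransform

theta = np.pi / 2
Rz = RigidTransform.z_axis_rotation(theta)
assert np.allclose(Rz.dot([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0], atol=1e-10)
for R in (RigidTransform.x_axis_rotation(theta),
          RigidTransform.y_axis_rotation(theta),
          Rz):
    assert np.allclose(R.T.dot(R), np.eye(3), atol=1e-10)  # orthonormal
    assert np.isclose(np.linalg.det(R), 1.0)               # proper rotation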
9,615
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.random_rotation
def random_rotation():
    """Generates a random 3x3 rotation matrix with SVD.

    Returns
    -------
    :obj:`numpy.ndarray` of float
        A random 3x3 rotation matrix.
    """
    rand_seed = np.random.rand(3, 3)
    U, S, V = np.linalg.svd(rand_seed)
    return U
python
[ "def", "random_rotation", "(", ")", ":", "rand_seed", "=", "np", ".", "random", ".", "rand", "(", "3", ",", "3", ")", "U", ",", "S", ",", "V", "=", "np", ".", "linalg", ".", "svd", "(", "rand_seed", ")", "return", "U" ]
Generates a random 3x3 rotation matrix with SVD.

Returns
-------
:obj:`numpy.ndarray` of float
    A random 3x3 rotation matrix.
[ "Generates", "a", "random", "3x3", "rotation", "matrix", "with", "SVD", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L903-L913
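A sketch flagging a caveat with random_rotation: the U factor of an SVD is always orthogonal, but its determinant may be -1, i.e. a reflection rather than a rotation. The column flip below is a common post-hoc fix and is not part of the library code above.

import numpy as np
from autolab_core import RigidTransform

R = RigidTransform.random_rotation()
assert np.allclose(R.T.dot(R), np.eye(3), atol=1e-10)  # always orthogonal
if np.linalg.det(R) < 0:  # hypothetical fix: flip one column if reflected
    R[:, 2] *= -1.0
assert np.isclose(np.linalg.det(R), 1.0)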
9,616
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.rotation_from_axes
def rotation_from_axes(x_axis, y_axis, z_axis):
    """Convert specification of axes in target frame to a rotation matrix
    from source to target frame.

    Parameters
    ----------
    x_axis : :obj:`numpy.ndarray` of float
        A normalized 3-vector for the target frame's x-axis.
    y_axis : :obj:`numpy.ndarray` of float
        A normalized 3-vector for the target frame's y-axis.
    z_axis : :obj:`numpy.ndarray` of float
        A normalized 3-vector for the target frame's z-axis.

    Returns
    -------
    :obj:`numpy.ndarray` of float
        A 3x3 rotation matrix that transforms from a source frame to the
        given target frame.
    """
    return np.hstack((x_axis[:, np.newaxis],
                      y_axis[:, np.newaxis],
                      z_axis[:, np.newaxis]))
python
[ "def", "rotation_from_axes", "(", "x_axis", ",", "y_axis", ",", "z_axis", ")", ":", "return", "np", ".", "hstack", "(", "(", "x_axis", "[", ":", ",", "np", ".", "newaxis", "]", ",", "y_axis", "[", ":", ",", "np", ".", "newaxis", "]", ",", "z_axis", "[", ":", ",", "np", ".", "newaxis", "]", ")", ")" ]
Convert specification of axes in target frame to a rotation matrix from
source to target frame.

Parameters
----------
x_axis : :obj:`numpy.ndarray` of float
    A normalized 3-vector for the target frame's x-axis.
y_axis : :obj:`numpy.ndarray` of float
    A normalized 3-vector for the target frame's y-axis.
z_axis : :obj:`numpy.ndarray` of float
    A normalized 3-vector for the target frame's z-axis.

Returns
-------
:obj:`numpy.ndarray` of float
    A 3x3 rotation matrix that transforms from a source frame to the given
    target frame.
[ "Convert", "specification", "of", "axis", "in", "target", "frame", "to", "a", "rotation", "matrix", "from", "source", "to", "target", "frame", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L927-L948
9,617
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.interpolate
def interpolate(T0, T1, t):
    """Return an interpolation of two RigidTransforms.

    Parameters
    ----------
    T0 : :obj:`RigidTransform`
        The first RigidTransform to interpolate.
    T1 : :obj:`RigidTransform`
        The second RigidTransform to interpolate.
    t : float
        The interpolation step in [0,1]. 0 favors T0, 1 favors T1.

    Returns
    -------
    :obj:`RigidTransform`
        The interpolated RigidTransform.

    Raises
    ------
    ValueError
        If the to_frame of the two RigidTransforms are not identical.
    """
    if T0.to_frame != T1.to_frame:
        raise ValueError('Cannot interpolate between 2 transforms with different to frames! Got T1 {0} and T2 {1}'.format(T0.to_frame, T1.to_frame))

    dq0 = T0.dual_quaternion
    dq1 = T1.dual_quaternion
    dqt = DualQuaternion.interpolate(dq0, dq1, t)
    from_frame = "{0}_{1}_{2}".format(T0.from_frame, T1.from_frame, t)
    return RigidTransform.transform_from_dual_quaternion(dqt, from_frame, T0.to_frame)
python
[ "def", "interpolate", "(", "T0", ",", "T1", ",", "t", ")", ":", "if", "T0", ".", "to_frame", "!=", "T1", ".", "to_frame", ":", "raise", "ValueError", "(", "'Cannot interpolate between 2 transforms with different to frames! Got T1 {0} and T2 {1}'", ".", "format", "(", "T0", ".", "to_frame", ",", "T1", ".", "to_frame", ")", ")", "dq0", "=", "T0", ".", "dual_quaternion", "dq1", "=", "T1", ".", "dual_quaternion", "dqt", "=", "DualQuaternion", ".", "interpolate", "(", "dq0", ",", "dq1", ",", "t", ")", "from_frame", "=", "\"{0}_{1}_{2}\"", ".", "format", "(", "T0", ".", "from_frame", ",", "T1", ".", "from_frame", ",", "t", ")", "return", "RigidTransform", ".", "transform_from_dual_quaternion", "(", "dqt", ",", "from_frame", ",", "T0", ".", "to_frame", ")" ]
Return an interpolation of two RigidTransforms.

Parameters
----------
T0 : :obj:`RigidTransform`
    The first RigidTransform to interpolate.
T1 : :obj:`RigidTransform`
    The second RigidTransform to interpolate.
t : float
    The interpolation step in [0,1]. 0 favors T0, 1 favors T1.

Returns
-------
:obj:`RigidTransform`
    The interpolated RigidTransform.

Raises
------
ValueError
    If the to_frame of the two RigidTransforms are not identical.
[ "Return", "an", "interpolation", "of", "two", "RigidTransforms", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L973-L1004
9,618
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
RigidTransform.load
def load(filename):
    """Load a RigidTransform from a file.

    The file format is:
    from_frame
    to_frame
    translation (space separated)
    rotation_row_0 (space separated)
    rotation_row_1 (space separated)
    rotation_row_2 (space separated)

    Parameters
    ----------
    filename : :obj:`str`
        The file to load the transform from.

    Returns
    -------
    :obj:`RigidTransform`
        The RigidTransform read from the file.

    Raises
    ------
    ValueError
        If filename's extension isn't .tf.
    """
    file_root, file_ext = os.path.splitext(filename)
    if file_ext.lower() != TF_EXTENSION:
        raise ValueError('Extension %s not supported for RigidTransform. Can only load extension %s' % (file_ext, TF_EXTENSION))

    f = open(filename, 'r')
    lines = list(f)
    from_frame = lines[0][:-1]
    to_frame = lines[1][:-1]

    t = np.zeros(3)
    t_tokens = lines[2][:-1].split()
    t[0] = float(t_tokens[0])
    t[1] = float(t_tokens[1])
    t[2] = float(t_tokens[2])

    R = np.zeros([3, 3])
    r_tokens = lines[3][:-1].split()
    R[0, 0] = float(r_tokens[0])
    R[0, 1] = float(r_tokens[1])
    R[0, 2] = float(r_tokens[2])

    r_tokens = lines[4][:-1].split()
    R[1, 0] = float(r_tokens[0])
    R[1, 1] = float(r_tokens[1])
    R[1, 2] = float(r_tokens[2])

    r_tokens = lines[5][:-1].split()
    R[2, 0] = float(r_tokens[0])
    R[2, 1] = float(r_tokens[1])
    R[2, 2] = float(r_tokens[2])
    f.close()
    return RigidTransform(rotation=R, translation=t,
                          from_frame=from_frame, to_frame=to_frame)
python
[ "def", "load", "(", "filename", ")", ":", "file_root", ",", "file_ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "file_ext", ".", "lower", "(", ")", "!=", "TF_EXTENSION", ":", "raise", "ValueError", "(", "'Extension %s not supported for RigidTransform. Can only load extension %s'", "%", "(", "file_ext", ",", "TF_EXTENSION", ")", ")", "f", "=", "open", "(", "filename", ",", "'r'", ")", "lines", "=", "list", "(", "f", ")", "from_frame", "=", "lines", "[", "0", "]", "[", ":", "-", "1", "]", "to_frame", "=", "lines", "[", "1", "]", "[", ":", "-", "1", "]", "t", "=", "np", ".", "zeros", "(", "3", ")", "t_tokens", "=", "lines", "[", "2", "]", "[", ":", "-", "1", "]", ".", "split", "(", ")", "t", "[", "0", "]", "=", "float", "(", "t_tokens", "[", "0", "]", ")", "t", "[", "1", "]", "=", "float", "(", "t_tokens", "[", "1", "]", ")", "t", "[", "2", "]", "=", "float", "(", "t_tokens", "[", "2", "]", ")", "R", "=", "np", ".", "zeros", "(", "[", "3", ",", "3", "]", ")", "r_tokens", "=", "lines", "[", "3", "]", "[", ":", "-", "1", "]", ".", "split", "(", ")", "R", "[", "0", ",", "0", "]", "=", "float", "(", "r_tokens", "[", "0", "]", ")", "R", "[", "0", ",", "1", "]", "=", "float", "(", "r_tokens", "[", "1", "]", ")", "R", "[", "0", ",", "2", "]", "=", "float", "(", "r_tokens", "[", "2", "]", ")", "r_tokens", "=", "lines", "[", "4", "]", "[", ":", "-", "1", "]", ".", "split", "(", ")", "R", "[", "1", ",", "0", "]", "=", "float", "(", "r_tokens", "[", "0", "]", ")", "R", "[", "1", ",", "1", "]", "=", "float", "(", "r_tokens", "[", "1", "]", ")", "R", "[", "1", ",", "2", "]", "=", "float", "(", "r_tokens", "[", "2", "]", ")", "r_tokens", "=", "lines", "[", "5", "]", "[", ":", "-", "1", "]", ".", "split", "(", ")", "R", "[", "2", ",", "0", "]", "=", "float", "(", "r_tokens", "[", "0", "]", ")", "R", "[", "2", ",", "1", "]", "=", "float", "(", "r_tokens", "[", "1", "]", ")", "R", "[", "2", ",", "2", "]", "=", "float", "(", "r_tokens", "[", "2", "]", ")", "f", ".", "close", "(", ")", "return", "RigidTransform", "(", "rotation", "=", "R", ",", "translation", "=", "t", ",", "from_frame", "=", "from_frame", ",", "to_frame", "=", "to_frame", ")" ]
Load a RigidTransform from a file.

The file format is:
from_frame
to_frame
translation (space separated)
rotation_row_0 (space separated)
rotation_row_1 (space separated)
rotation_row_2 (space separated)

Parameters
----------
filename : :obj:`str`
    The file to load the transform from.

Returns
-------
:obj:`RigidTransform`
    The RigidTransform read from the file.

Raises
------
ValueError
    If filename's extension isn't .tf.
[ "Load", "a", "RigidTransform", "from", "a", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L1007-L1066
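A save/load round-trip sketch under the same '.tf' extension assumption; save writes with '%f' (six decimal places), so the recovered matrix matches only approximately.

import numpy as np
from autolab_core import RigidTransform

T = RigidTransform(rotation=RigidTransform.x_axis_rotation(0.3),
                   translation=np.array([0.1, 0.2, 0.3]),
                   from_frame='camera', to_frame='world')
T.save('T_camera_world.tf')
T2 = RigidTransform.load('T_camera_world.tf')
assert np.allclose(T.matrix, T2.matrix, atol=1e-5)
assert (T2.from_frame, T2.to_frame) == ('camera', 'world')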
9,619
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
SimilarityTransform.dot
def dot(self, other_tf):
    """Compose this similarity transform with another.

    This transform is on the left-hand side of the composition.

    Parameters
    ----------
    other_tf : :obj:`SimilarityTransform`
        The other SimilarityTransform to compose with this one.

    Returns
    -------
    :obj:`SimilarityTransform`
        A SimilarityTransform that represents the composition.

    Raises
    ------
    ValueError
        If the to_frame of other_tf is not identical to this transform's
        from_frame.
    """
    if other_tf.to_frame != self.from_frame:
        raise ValueError('To frame of right hand side ({0}) must match from frame of left hand side ({1})'.format(other_tf.to_frame, self.from_frame))
    if not isinstance(other_tf, RigidTransform):
        raise ValueError('Can only compose with other RigidTransform classes')

    other_scale = 1.0
    if isinstance(other_tf, SimilarityTransform):
        other_scale = other_tf.scale

    rotation = self.rotation.dot(other_tf.rotation)
    translation = self.translation + self.scale * self.rotation.dot(other_tf.translation)
    scale = self.scale * other_scale
    return SimilarityTransform(rotation, translation, scale,
                               from_frame=other_tf.from_frame, to_frame=self.to_frame)
python
[ "def", "dot", "(", "self", ",", "other_tf", ")", ":", "if", "other_tf", ".", "to_frame", "!=", "self", ".", "from_frame", ":", "raise", "ValueError", "(", "'To frame of right hand side ({0}) must match from frame of left hand side ({1})'", ".", "format", "(", "other_tf", ".", "to_frame", ",", "self", ".", "from_frame", ")", ")", "if", "not", "isinstance", "(", "other_tf", ",", "RigidTransform", ")", ":", "raise", "ValueError", "(", "'Can only compose with other RigidTransform classes'", ")", "other_scale", "=", "1.0", "if", "isinstance", "(", "other_tf", ",", "SimilarityTransform", ")", ":", "other_scale", "=", "other_tf", ".", "scale", "rotation", "=", "self", ".", "rotation", ".", "dot", "(", "other_tf", ".", "rotation", ")", "translation", "=", "self", ".", "translation", "+", "self", ".", "scale", "*", "self", ".", "rotation", ".", "dot", "(", "other_tf", ".", "translation", ")", "scale", "=", "self", ".", "scale", "*", "other_scale", "return", "SimilarityTransform", "(", "rotation", ",", "translation", ",", "scale", ",", "from_frame", "=", "other_tf", ".", "from_frame", ",", "to_frame", "=", "self", ".", "to_frame", ")" ]
Compose this similarity transform with another. This transform is on the left-hand side of the composition. Parameters ---------- other_tf : :obj:`SimilarityTransform` The other SimilarityTransform to compose with this one. Returns ------- :obj:`SimilarityTransform` A SimilarityTransform that represents the composition. Raises ------ ValueError If the to_frame of other_tf is not identical to this transform's from_frame.
[ "Compose", "this", "similarity", "transform", "with", "another", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L1187-L1222
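A minimal composition sketch, assuming the constructor signature SimilarityTransform(rotation, translation, scale, from_frame, to_frame) seen in the return statement above; the frame names are illustrative.

    import numpy as np
    from autolab_core import SimilarityTransform

    T_ab = SimilarityTransform(np.eye(3), np.array([1., 0., 0.]), 2.0,
                               from_frame='a', to_frame='b')
    T_bc = SimilarityTransform(np.eye(3), np.array([0., 1., 0.]), 3.0,
                               from_frame='b', to_frame='c')
    # the to_frame of the right-hand side ('b') matches the from_frame of the left
    T_ac = T_bc.dot(T_ab)
    print(T_ac.scale)         # 6.0 = 3.0 * 2.0
    print(T_ac.translation)   # [3. 1. 0.] = t_bc + s_bc * R_bc.dot(t_ab)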
9,620
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
SimilarityTransform.inverse
def inverse(self): """Take the inverse of the similarity transform. Returns ------- :obj:`SimilarityTransform` The inverse of this SimilarityTransform. """ inv_rot = np.linalg.inv(self.rotation) inv_scale = 1.0 / self.scale inv_trans = -inv_scale * inv_rot.dot(self.translation) return SimilarityTransform(inv_rot, inv_trans, inv_scale, from_frame=self._to_frame, to_frame=self._from_frame)
python
def inverse(self): """Take the inverse of the similarity transform. Returns ------- :obj:`SimilarityTransform` The inverse of this SimilarityTransform. """ inv_rot = np.linalg.inv(self.rotation) inv_scale = 1.0 / self.scale inv_trans = -inv_scale * inv_rot.dot(self.translation) return SimilarityTransform(inv_rot, inv_trans, inv_scale, from_frame=self._to_frame, to_frame=self._from_frame)
[ "def", "inverse", "(", "self", ")", ":", "inv_rot", "=", "np", ".", "linalg", ".", "inv", "(", "self", ".", "rotation", ")", "inv_scale", "=", "1.0", "/", "self", ".", "scale", "inv_trans", "=", "-", "inv_scale", "*", "inv_rot", ".", "dot", "(", "self", ".", "translation", ")", "return", "SimilarityTransform", "(", "inv_rot", ",", "inv_trans", ",", "inv_scale", ",", "from_frame", "=", "self", ".", "_to_frame", ",", "to_frame", "=", "self", ".", "_from_frame", ")" ]
Take the inverse of the similarity transform. Returns ------- :obj:`SimilarityTransform` The inverse of this SimilarityTransform.
[ "Take", "the", "inverse", "of", "the", "similarity", "transform", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L1224-L1237
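Composing a transform with its inverse should recover the identity; a quick check under the same assumed constructor as above:

    import numpy as np
    from autolab_core import SimilarityTransform

    T = SimilarityTransform(np.eye(3), np.array([1., 2., 3.]), 2.0,
                            from_frame='a', to_frame='b')
    T_id = T.inverse().dot(T)                 # a -> b -> a
    print(np.allclose(T_id.translation, 0.))  # True
    print(np.isclose(T_id.scale, 1.))         # True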
9,621
BerkeleyAutomation/autolab_core
autolab_core/points.py
BagOfPoints.save
def save(self, filename): """Saves the collection to a file. Parameters ---------- filename : :obj:`str` The file to save the collection to. Raises ------ ValueError If the file extension is not .npy or .npz. """ file_root, file_ext = os.path.splitext(filename) if file_ext == '.npy': np.save(filename, self._data) elif file_ext == '.npz': np.savez_compressed(filename, self._data) else: raise ValueError('Extension %s not supported for point saves.' %(file_ext))
python
def save(self, filename): """Saves the collection to a file. Parameters ---------- filename : :obj:`str` The file to save the collection to. Raises ------ ValueError If the file extension is not .npy or .npz. """ file_root, file_ext = os.path.splitext(filename) if file_ext == '.npy': np.save(filename, self._data) elif file_ext == '.npz': np.savez_compressed(filename, self._data) else: raise ValueError('Extension %s not supported for point saves.' %(file_ext))
[ "def", "save", "(", "self", ",", "filename", ")", ":", "file_root", ",", "file_ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "file_ext", "==", "'.npy'", ":", "np", ".", "save", "(", "filename", ",", "self", ".", "_data", ")", "elif", "file_ext", "==", "'.npz'", ":", "np", ".", "savez_compressed", "(", "filename", ",", "self", ".", "_data", ")", "else", ":", "raise", "ValueError", "(", "'Extension %s not supported for point saves.'", "%", "(", "file_ext", ")", ")" ]
Saves the collection to a file. Parameters ---------- filename : :obj:`str` The file to save the collection to. Raises ------ ValueError If the file extension is not .npy or .npz.
[ "Saves", "the", "collection", "to", "a", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L112-L131
9,622
BerkeleyAutomation/autolab_core
autolab_core/points.py
BagOfPoints.load_data
def load_data(filename): """Loads data from a file. Parameters ---------- filename : :obj:`str` The file to load the collection from. Returns ------- :obj:`numpy.ndarray` of float The data read from the file. Raises ------ ValueError If the file extension is not .npy or .npz. """ file_root, file_ext = os.path.splitext(filename) data = None if file_ext == '.npy': data = np.load(filename) elif file_ext == '.npz': data = np.load(filename)['arr_0'] else: raise ValueError('Extension %s not supported for point reads' %(file_ext)) return data
python
def load_data(filename): """Loads data from a file. Parameters ---------- filename : :obj:`str` The file to load the collection from. Returns ------- :obj:`numpy.ndarray` of float The data read from the file. Raises ------ ValueError If the file extension is not .npy or .npz. """ file_root, file_ext = os.path.splitext(filename) data = None if file_ext == '.npy': data = np.load(filename) elif file_ext == '.npz': data = np.load(filename)['arr_0'] else: raise ValueError('Extension %s not supported for point reads' %(file_ext)) return data
[ "def", "load_data", "(", "filename", ")", ":", "file_root", ",", "file_ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "data", "=", "None", "if", "file_ext", "==", "'.npy'", ":", "data", "=", "np", ".", "load", "(", "filename", ")", "elif", "file_ext", "==", "'.npz'", ":", "data", "=", "np", ".", "load", "(", "filename", ")", "[", "'arr_0'", "]", "else", ":", "raise", "ValueError", "(", "'Extension %s not supported for point reads'", "%", "(", "file_ext", ")", ")", "return", "data" ]
Loads data from a file. Parameters ---------- filename : :obj:`str` The file to load the collection from. Returns ------- :obj:`numpy.ndarray` of float The data read from the file. Raises ------ ValueError If the file extension is not .npy or .npz.
[ "Loads", "data", "from", "a", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L133-L159
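save() and load_data() are inverses of each other; the round trip below mirrors their .npz branches exactly (np.savez_compressed stores the array under 'arr_0', which load_data() reads back).

    import numpy as np

    data = np.random.rand(3, 100)
    np.savez_compressed('cloud.npz', data)     # what save() does for .npz
    restored = np.load('cloud.npz')['arr_0']   # what load_data() does for .npz
    assert np.allclose(data, restored)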
9,623
BerkeleyAutomation/autolab_core
autolab_core/points.py
Point.open
def open(filename, frame='unspecified'): """Create a Point from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created point. Returns ------- :obj:`Point` A point created from the data in the file. """ data = BagOfPoints.load_data(filename) return Point(data, frame)
python
def open(filename, frame='unspecified'): """Create a Point from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created point. Returns ------- :obj:`Point` A point created from the data in the file. """ data = BagOfPoints.load_data(filename) return Point(data, frame)
[ "def", "open", "(", "filename", ",", "frame", "=", "'unspecified'", ")", ":", "data", "=", "BagOfPoints", ".", "load_data", "(", "filename", ")", "return", "Point", "(", "data", ",", "frame", ")" ]
Create a Point from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created point. Returns ------- :obj:`Point` A point created from the data in the file.
[ "Create", "a", "Point", "from", "data", "saved", "in", "a", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L371-L388
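A save/open round trip for a single point; the constructor signature Point(data, frame) is taken from the return statement above, and save() is the BagOfPoints method shown earlier.

    import numpy as np
    from autolab_core import Point

    p = Point(np.array([0.1, 0.2, 0.3]), frame='world')
    p.save('p.npy')
    q = Point.open('p.npy', frame='world')
    print(np.allclose(p.data, q.data))   # True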
9,624
BerkeleyAutomation/autolab_core
autolab_core/points.py
Direction._check_valid_data
def _check_valid_data(self, data): """Checks that the incoming data is an Nx1 ndarray. Parameters ---------- data : :obj:`numpy.ndarray` The data to verify. Raises ------ ValueError If the data is not of the correct shape or if the vector is not normalized. """ if len(data.shape) == 2 and data.shape[1] != 1: raise ValueError('Can only initialize Direction from a single Nx1 array') if np.abs(np.linalg.norm(data) - 1.0) > 1e-4: raise ValueError('Direction data must have norm=1.0')
python
def _check_valid_data(self, data): """Checks that the incoming data is an Nx1 ndarray. Parameters ---------- data : :obj:`numpy.ndarray` The data to verify. Raises ------ ValueError If the data is not of the correct shape or if the vector is not normalized. """ if len(data.shape) == 2 and data.shape[1] != 1: raise ValueError('Can only initialize Direction from a single Nx1 array') if np.abs(np.linalg.norm(data) - 1.0) > 1e-4: raise ValueError('Direction data must have norm=1.0')
[ "def", "_check_valid_data", "(", "self", ",", "data", ")", ":", "if", "len", "(", "data", ".", "shape", ")", "==", "2", "and", "data", ".", "shape", "[", "1", "]", "!=", "1", ":", "raise", "ValueError", "(", "'Can only initialize Direction from a single Nx1 array'", ")", "if", "np", ".", "abs", "(", "np", ".", "linalg", ".", "norm", "(", "data", ")", "-", "1.0", ")", ">", "1e-4", ":", "raise", "ValueError", "(", "'Direction data must have norm=1.0'", ")" ]
Checks that the incoming data is an Nx1 ndarray. Parameters ---------- data : :obj:`numpy.ndarray` The data to verify. Raises ------ ValueError If the data is not of the correct shape or if the vector is not normalized.
[ "Checks", "that", "the", "incoming", "data", "is", "an", "Nx1", "ndarray", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L405-L422
9,625
BerkeleyAutomation/autolab_core
autolab_core/points.py
Direction.orthogonal_basis
def orthogonal_basis(self): """Return an orthogonal basis to this direction. Note ---- Only implemented in 3D. Returns ------- :obj:`tuple` of :obj:`Direction` The pair of normalized Direction vectors that form a basis of this direction's orthogonal complement. Raises ------ NotImplementedError If the vector is not 3D """ if self.dim == 3: x_arr = np.array([-self.data[1], self.data[0], 0]) if np.linalg.norm(x_arr) == 0: x_arr = np.array([self.data[2], 0, 0]) x_arr = x_arr / np.linalg.norm(x_arr) y_arr = np.cross(self.data, x_arr) return Direction(x_arr, frame=self.frame), Direction(y_arr, frame=self.frame) raise NotImplementedError('Orthogonal basis only supported for 3 dimensions')
python
def orthogonal_basis(self): """Return an orthogonal basis to this direction. Note ---- Only implemented in 3D. Returns ------- :obj:`tuple` of :obj:`Direction` The pair of normalized Direction vectors that form a basis of this direction's orthogonal complement. Raises ------ NotImplementedError If the vector is not 3D """ if self.dim == 3: x_arr = np.array([-self.data[1], self.data[0], 0]) if np.linalg.norm(x_arr) == 0: x_arr = np.array([self.data[2], 0, 0]) x_arr = x_arr / np.linalg.norm(x_arr) y_arr = np.cross(self.data, x_arr) return Direction(x_arr, frame=self.frame), Direction(y_arr, frame=self.frame) raise NotImplementedError('Orthogonal basis only supported for 3 dimensions')
[ "def", "orthogonal_basis", "(", "self", ")", ":", "if", "self", ".", "dim", "==", "3", ":", "x_arr", "=", "np", ".", "array", "(", "[", "-", "self", ".", "data", "[", "1", "]", ",", "self", ".", "data", "[", "0", "]", ",", "0", "]", ")", "if", "np", ".", "linalg", ".", "norm", "(", "x_arr", ")", "==", "0", ":", "x_arr", "=", "np", ".", "array", "(", "[", "self", ".", "data", "[", "2", "]", ",", "0", ",", "0", "]", ")", "x_arr", "=", "x_arr", "/", "np", ".", "linalg", ".", "norm", "(", "x_arr", ")", "y_arr", "=", "np", ".", "cross", "(", "self", ".", "data", ",", "x_arr", ")", "return", "Direction", "(", "x_arr", ",", "frame", "=", "self", ".", "frame", ")", ",", "Direction", "(", "y_arr", ",", "frame", "=", "self", ".", "frame", ")", "raise", "NotImplementedError", "(", "'Orthogonal basis only supported for 3 dimensions'", ")" ]
Return an orthogonal basis to this direction. Note ---- Only implemented in 3D. Returns ------- :obj:`tuple` of :obj:`Direction` The pair of normalized Direction vectors that form a basis of this direction's orthogonal complement. Raises ------ NotImplementedError If the vector is not 3D
[ "Return", "an", "orthogonal", "basis", "to", "this", "direction", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L424-L449
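A quick sketch of the 3D branch: for the +z direction the fallback in the code yields +x, the cross product yields +y, and both returned directions are orthogonal to the input.

    import numpy as np
    from autolab_core import Direction

    d = Direction(np.array([0., 0., 1.]), frame='world')
    x, y = d.orthogonal_basis()
    print(x.data, y.data)                          # [1. 0. 0.] [0. 1. 0.]
    print(x.data.dot(d.data), y.data.dot(d.data))  # 0.0 0.0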
9,626
BerkeleyAutomation/autolab_core
autolab_core/points.py
Direction.open
def open(filename, frame='unspecified'): """Create a Direction from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created Direction. Returns ------- :obj:`Direction` A Direction created from the data in the file. """ data = BagOfPoints.load_data(filename) return Direction(data, frame)
python
def open(filename, frame='unspecified'): """Create a Direction from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created Direction. Returns ------- :obj:`Direction` A Direction created from the data in the file. """ data = BagOfPoints.load_data(filename) return Direction(data, frame)
[ "def", "open", "(", "filename", ",", "frame", "=", "'unspecified'", ")", ":", "data", "=", "BagOfPoints", ".", "load_data", "(", "filename", ")", "return", "Direction", "(", "data", ",", "frame", ")" ]
Create a Direction from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created Direction. Returns ------- :obj:`Direction` A Direction created from the data in the file.
[ "Create", "a", "Direction", "from", "data", "saved", "in", "a", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L452-L469
9,627
BerkeleyAutomation/autolab_core
autolab_core/points.py
Plane3D.split_points
def split_points(self, point_cloud): """Split a point cloud into two along this plane. Parameters ---------- point_cloud : :obj:`PointCloud` The PointCloud to divide in two. Returns ------- :obj:`tuple` of :obj:`PointCloud` Two new PointCloud objects. The first contains points above the plane, and the second contains points below the plane. Raises ------ ValueError If the input is not a PointCloud. """ if not isinstance(point_cloud, PointCloud): raise ValueError('Can only split point clouds') # compute the signed distance of each point from the plane diff = point_cloud.data - np.tile(self._x0.data.reshape(3, 1), [1, point_cloud.num_points]) signed_dists = self._n.data.dot(diff) # keep only points with positive depth, split by the sign of the distance above_plane = (point_cloud.z_coords > 0) & (signed_dists > 0) below_plane = (point_cloud.z_coords > 0) & (signed_dists <= 0) # split data above_data = point_cloud.data[:, above_plane] below_data = point_cloud.data[:, below_plane] return PointCloud(above_data, point_cloud.frame), PointCloud(below_data, point_cloud.frame)
python
def split_points(self, point_cloud): """Split a point cloud into two along this plane. Parameters ---------- point_cloud : :obj:`PointCloud` The PointCloud to divide in two. Returns ------- :obj:`tuple` of :obj:`PointCloud` Two new PointCloud objects. The first contains points above the plane, and the second contains points below the plane. Raises ------ ValueError If the input is not a PointCloud. """ if not isinstance(point_cloud, PointCloud): raise ValueError('Can only split point clouds') # compute the signed distance of each point from the plane diff = point_cloud.data - np.tile(self._x0.data.reshape(3, 1), [1, point_cloud.num_points]) signed_dists = self._n.data.dot(diff) # keep only points with positive depth, split by the sign of the distance above_plane = (point_cloud.z_coords > 0) & (signed_dists > 0) below_plane = (point_cloud.z_coords > 0) & (signed_dists <= 0) # split data above_data = point_cloud.data[:, above_plane] below_data = point_cloud.data[:, below_plane] return PointCloud(above_data, point_cloud.frame), PointCloud(below_data, point_cloud.frame)
[ "def", "split_points", "(", "self", ",", "point_cloud", ")", ":", "if", "not", "isinstance", "(", "point_cloud", ",", "PointCloud", ")", ":", "raise", "ValueError", "(", "'Can only split point clouds'", ")", "# compute the signed distance of each point from the plane", "diff", "=", "point_cloud", ".", "data", "-", "np", ".", "tile", "(", "self", ".", "_x0", ".", "data", ".", "reshape", "(", "3", ",", "1", ")", ",", "[", "1", ",", "point_cloud", ".", "num_points", "]", ")", "signed_dists", "=", "self", ".", "_n", ".", "data", ".", "dot", "(", "diff", ")", "# keep only points with positive depth, split by the sign of the distance", "above_plane", "=", "(", "point_cloud", ".", "z_coords", ">", "0", ")", "&", "(", "signed_dists", ">", "0", ")", "below_plane", "=", "(", "point_cloud", ".", "z_coords", ">", "0", ")", "&", "(", "signed_dists", "<=", "0", ")", "# split data", "above_data", "=", "point_cloud", ".", "data", "[", ":", ",", "above_plane", "]", "below_data", "=", "point_cloud", ".", "data", "[", ":", ",", "below_plane", "]", "return", "PointCloud", "(", "above_data", ",", "point_cloud", ".", "frame", ")", ",", "PointCloud", "(", "below_data", ",", "point_cloud", ".", "frame", ")" ]
Split a point cloud into two along this plane. Parameters ---------- point_cloud : :obj:`PointCloud` The PointCloud to divide in two. Returns ------- :obj:`tuple` of :obj:`PointCloud` Two new PointCloud objects. The first contains points above the plane, and the second contains points below the plane. Raises ------ ValueError If the input is not a PointCloud.
[ "Split", "a", "point", "cloud", "into", "two", "along", "this", "plane", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L498-L528
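A usage sketch of the corrected split, assuming Plane3D is constructed from a normal Direction and a Point on the plane (consistent with the _n and _x0 attributes used above). The plane z = 1 separates the two points below, both of which have positive depth.

    import numpy as np
    from autolab_core import Direction, Plane3D, Point, PointCloud

    n = Direction(np.array([0., 0., 1.]), frame='world')
    x0 = Point(np.array([0., 0., 1.]), frame='world')
    plane = Plane3D(n, x0)   # assumed constructor
    cloud = PointCloud(np.array([[0., 0.],
                                 [0., 0.],
                                 [2., 0.5]]), frame='world')
    above, below = plane.split_points(cloud)
    print(above.num_points, below.num_points)   # 1 1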
9,628
BerkeleyAutomation/autolab_core
autolab_core/points.py
PointCloud.mean
def mean(self): """Returns the average point in the cloud. Returns ------- :obj:`Point` The mean point in the PointCloud. """ mean_point_data = np.mean(self._data, axis=1) return Point(mean_point_data, self._frame)
python
def mean(self): """Returns the average point in the cloud. Returns ------- :obj:`Point` The mean point in the PointCloud. """ mean_point_data = np.mean(self._data, axis=1) return Point(mean_point_data, self._frame)
[ "def", "mean", "(", "self", ")", ":", "mean_point_data", "=", "np", ".", "mean", "(", "self", ".", "_data", ",", "axis", "=", "1", ")", "return", "Point", "(", "mean_point_data", ",", "self", ".", "_frame", ")" ]
Returns the average point in the cloud. Returns ------- :obj:`Point` The mean point in the PointCloud.
[ "Returns", "the", "average", "point", "in", "the", "cloud", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L587-L596
9,629
BerkeleyAutomation/autolab_core
autolab_core/points.py
PointCloud.subsample
def subsample(self, rate, random=False): """Returns a subsampled version of the PointCloud. Parameters ---------- rate : int Only every rate-th element of the PointCloud is returned. random : bool If True, the retained points are chosen at random rather than by stride. Returns ------- :obj:`PointCloud` A subsampled point cloud with N / rate total samples. :obj:`numpy.ndarray` of int The indices of the sampled points in the original cloud. Raises ------ ValueError If rate is not a positive integer. """ if type(rate) != int or rate < 1: raise ValueError('Can only subsample with strictly positive integer rate') indices = np.arange(self.num_points) if random: np.random.shuffle(indices) subsample_inds = indices[::rate] subsampled_data = self._data[:,subsample_inds] return PointCloud(subsampled_data, self._frame), subsample_inds
python
def subsample(self, rate, random=False): """Returns a subsampled version of the PointCloud. Parameters ---------- rate : int Only every rate-th element of the PointCloud is returned. random : bool If True, the retained points are chosen at random rather than by stride. Returns ------- :obj:`PointCloud` A subsampled point cloud with N / rate total samples. :obj:`numpy.ndarray` of int The indices of the sampled points in the original cloud. Raises ------ ValueError If rate is not a positive integer. """ if type(rate) != int or rate < 1: raise ValueError('Can only subsample with strictly positive integer rate') indices = np.arange(self.num_points) if random: np.random.shuffle(indices) subsample_inds = indices[::rate] subsampled_data = self._data[:,subsample_inds] return PointCloud(subsampled_data, self._frame), subsample_inds
[ "def", "subsample", "(", "self", ",", "rate", ",", "random", "=", "False", ")", ":", "if", "type", "(", "rate", ")", "!=", "int", "or", "rate", "<", "1", ":", "raise", "ValueError", "(", "'Can only subsample with strictly positive integer rate'", ")", "indices", "=", "np", ".", "arange", "(", "self", ".", "num_points", ")", "if", "random", ":", "np", ".", "random", ".", "shuffle", "(", "indices", ")", "subsample_inds", "=", "indices", "[", ":", ":", "rate", "]", "subsampled_data", "=", "self", ".", "_data", "[", ":", ",", "subsample_inds", "]", "return", "PointCloud", "(", "subsampled_data", ",", "self", ".", "_frame", ")", ",", "subsample_inds" ]
Returns a subsampled version of the PointCloud. Parameters ---------- rate : int Only every rate-th element of the PointCloud is returned. random : bool If True, the retained points are chosen at random rather than by stride. Returns ------- :obj:`PointCloud` A subsampled point cloud with N / rate total samples. :obj:`numpy.ndarray` of int The indices of the sampled points in the original cloud. Raises ------ ValueError If rate is not a positive integer.
[ "Returns", "a", "subsampled", "version", "of", "the", "PointCloud", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L598-L623
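A stride-4 subsample keeps a quarter of the points, and the returned indices locate the kept points in the original cloud.

    import numpy as np
    from autolab_core import PointCloud

    cloud = PointCloud(np.random.rand(3, 100), frame='world')
    small, inds = cloud.subsample(4)
    print(small.num_points)   # 25
    print(inds[:3])           # [0 4 8] when random=False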
9,630
BerkeleyAutomation/autolab_core
autolab_core/points.py
PointCloud.box_mask
def box_mask(self, box): """Return a PointCloud containing only points within the given Box. Parameters ---------- box : :obj:`Box` A box whose boundaries are used to filter points. Returns ------- :obj:`PointCloud` A filtered PointCloud whose points are all in the given box. :obj:`numpy.ndarray` Array of indices of the segmented points in the original cloud Raises ------ ValueError If the input is not a box in the same frame as the PointCloud. """ if not isinstance(box, Box): raise ValueError('Must provide Box object') if box.frame != self.frame: raise ValueError('Box must be in same frame as PointCloud') all_points = self.data.T cond1 = np.all(box.min_pt <= all_points, axis=1) cond2 = np.all(all_points <= box.max_pt, axis=1) valid_point_indices = np.where(np.logical_and(cond1, cond2))[0] valid_points = all_points[valid_point_indices] return PointCloud(valid_points.T, self.frame), valid_point_indices
python
def box_mask(self, box): """Return a PointCloud containing only points within the given Box. Parameters ---------- box : :obj:`Box` A box whose boundaries are used to filter points. Returns ------- :obj:`PointCloud` A filtered PointCloud whose points are all in the given box. :obj:`numpy.ndarray` Array of indices of the segmented points in the original cloud Raises ------ ValueError If the input is not a box in the same frame as the PointCloud. """ if not isinstance(box, Box): raise ValueError('Must provide Box object') if box.frame != self.frame: raise ValueError('Box must be in same frame as PointCloud') all_points = self.data.T cond1 = np.all(box.min_pt <= all_points, axis=1) cond2 = np.all(all_points <= box.max_pt, axis=1) valid_point_indices = np.where(np.logical_and(cond1, cond2))[0] valid_points = all_points[valid_point_indices] return PointCloud(valid_points.T, self.frame), valid_point_indices
[ "def", "box_mask", "(", "self", ",", "box", ")", ":", "if", "not", "isinstance", "(", "box", ",", "Box", ")", ":", "raise", "ValueError", "(", "'Must provide Box object'", ")", "if", "box", ".", "frame", "!=", "self", ".", "frame", ":", "raise", "ValueError", "(", "'Box must be in same frame as PointCloud'", ")", "all_points", "=", "self", ".", "data", ".", "T", "cond1", "=", "np", ".", "all", "(", "box", ".", "min_pt", "<=", "all_points", ",", "axis", "=", "1", ")", "cond2", "=", "np", ".", "all", "(", "all_points", "<=", "box", ".", "max_pt", ",", "axis", "=", "1", ")", "valid_point_indices", "=", "np", ".", "where", "(", "np", ".", "logical_and", "(", "cond1", ",", "cond2", ")", ")", "[", "0", "]", "valid_points", "=", "all_points", "[", "valid_point_indices", "]", "return", "PointCloud", "(", "valid_points", ".", "T", ",", "self", ".", "frame", ")", ",", "valid_point_indices" ]
Return a PointCloud containing only points within the given Box. Parameters ---------- box : :obj:`Box` A box whose boundaries are used to filter points. Returns ------- :obj:`PointCloud` A filtered PointCloud whose points are all in the given box. :obj:`numpy.ndarray` Array of indices of the segmented points in the original cloud Raises ------ ValueError If the input is not a box in the same frame as the PointCloud.
[ "Return", "a", "PointCloud", "containing", "only", "points", "within", "the", "given", "Box", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L625-L655
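A filtering sketch; the Box constructor is not shown in this file, so Box(min_pt, max_pt, frame) is an assumption consistent with the min_pt, max_pt, and frame attributes used above.

    import numpy as np
    from autolab_core import Box, PointCloud

    box = Box(np.zeros(3), np.ones(3), frame='world')   # assumed signature
    cloud = PointCloud(np.random.rand(3, 200) * 2.0 - 0.5, frame='world')
    inside, inds = cloud.box_mask(box)
    print(inside.num_points, 'of', cloud.num_points, 'points lie in the unit box')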
9,631
BerkeleyAutomation/autolab_core
autolab_core/points.py
PointCloud.best_fit_plane
def best_fit_plane(self): """Fits a plane to the point cloud using least squares. Returns ------- :obj:`tuple` of :obj:`Direction`, :obj:`Point` The unit normal of the fitted plane and a point in the plane (the mean of the cloud). """ X = np.c_[self.x_coords, self.y_coords, np.ones(self.num_points)] y = self.z_coords A = X.T.dot(X) b = X.T.dot(y) w = np.linalg.inv(A).dot(b) n = np.array([w[0], w[1], -1]) n = n / np.linalg.norm(n) n = Direction(n, self._frame) x0 = self.mean() return n, x0
python
def best_fit_plane(self): """Fits a plane to the point cloud using least squares. Returns ------- :obj:`tuple` of :obj:`Direction`, :obj:`Point` The unit normal of the fitted plane and a point in the plane (the mean of the cloud). """ X = np.c_[self.x_coords, self.y_coords, np.ones(self.num_points)] y = self.z_coords A = X.T.dot(X) b = X.T.dot(y) w = np.linalg.inv(A).dot(b) n = np.array([w[0], w[1], -1]) n = n / np.linalg.norm(n) n = Direction(n, self._frame) x0 = self.mean() return n, x0
[ "def", "best_fit_plane", "(", "self", ")", ":", "X", "=", "np", ".", "c_", "[", "self", ".", "x_coords", ",", "self", ".", "y_coords", ",", "np", ".", "ones", "(", "self", ".", "num_points", ")", "]", "y", "=", "self", ".", "z_coords", "A", "=", "X", ".", "T", ".", "dot", "(", "X", ")", "b", "=", "X", ".", "T", ".", "dot", "(", "y", ")", "w", "=", "np", ".", "linalg", ".", "inv", "(", "A", ")", ".", "dot", "(", "b", ")", "n", "=", "np", ".", "array", "(", "[", "w", "[", "0", "]", ",", "w", "[", "1", "]", ",", "-", "1", "]", ")", "n", "=", "n", "/", "np", ".", "linalg", ".", "norm", "(", "n", ")", "n", "=", "Direction", "(", "n", ",", "self", ".", "_frame", ")", "x0", "=", "self", ".", "mean", "(", ")", "return", "n", ",", "x0" ]
Fits a plane to the point cloud using least squares. Returns ------- :obj:`tuple` of :obj:`Direction`, :obj:`Point` The unit normal of the fitted plane and a point in the plane (the mean of the cloud).
[ "Fits", "a", "plane", "to", "the", "point", "cloud", "using", "least", "squares", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L657-L674
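The method solves the normal equations A w = b for the model z = w0*x + w1*y + w2 and converts w into a unit normal. A sanity check on points drawn from an exact plane:

    import numpy as np
    from autolab_core import PointCloud

    pts = np.random.rand(3, 500)
    pts[2, :] = 0.3 * pts[0, :] - 0.2 * pts[1, :] + 1.0   # z = 0.3x - 0.2y + 1
    cloud = PointCloud(pts, frame='world')
    n, x0 = cloud.best_fit_plane()
    print(n.data)   # proportional to [0.3, -0.2, -1.0], scaled to unit length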
9,632
BerkeleyAutomation/autolab_core
autolab_core/points.py
PointCloud.remove_zero_points
def remove_zero_points(self): """Removes points with a zero in the z-axis. Note ---- This returns nothing and updates the PointCloud in-place. """ points_of_interest = np.where(self.z_coords != 0.0)[0] self._data = self.data[:, points_of_interest]
python
def remove_zero_points(self): """Removes points with a zero in the z-axis. Note ---- This returns nothing and updates the PointCloud in-place. """ points_of_interest = np.where(self.z_coords != 0.0)[0] self._data = self.data[:, points_of_interest]
[ "def", "remove_zero_points", "(", "self", ")", ":", "points_of_interest", "=", "np", ".", "where", "(", "self", ".", "z_coords", "!=", "0.0", ")", "[", "0", "]", "self", ".", "_data", "=", "self", ".", "data", "[", ":", ",", "points_of_interest", "]" ]
Removes points with a zero in the z-axis. Note ---- This returns nothing and updates the PointCloud in-place.
[ "Removes", "points", "with", "a", "zero", "in", "the", "z", "-", "axis", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L687-L695
9,633
BerkeleyAutomation/autolab_core
autolab_core/points.py
PointCloud.remove_infinite_points
def remove_infinite_points(self): """Removes infinite points. Note ---- This returns nothing and updates the PointCloud in-place. """ points_of_interest = np.where(np.all(np.isfinite(self.data), axis=0))[0] self._data = self.data[:, points_of_interest]
python
def remove_infinite_points(self): """Removes infinite points. Note ---- This returns nothing and updates the PointCloud in-place. """ points_of_interest = np.where(np.all(np.isfinite(self.data), axis=0))[0] self._data = self.data[:, points_of_interest]
[ "def", "remove_infinite_points", "(", "self", ")", ":", "points_of_interest", "=", "np", ".", "where", "(", "np", ".", "all", "(", "np", ".", "isfinite", "(", "self", ".", "data", ")", ",", "axis", "=", "0", ")", ")", "[", "0", "]", "self", ".", "_data", "=", "self", ".", "data", "[", ":", ",", "points_of_interest", "]" ]
Removes infinite points. Note ---- This returns nothing and updates the PointCloud in-place.
[ "Removes", "infinite", "points", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L697-L705
9,634
BerkeleyAutomation/autolab_core
autolab_core/points.py
PointCloud.open
def open(filename, frame='unspecified'): """Create a PointCloud from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created PointCloud. Returns ------- :obj:`PointCloud` A PointCloud created from the data in the file. """ data = BagOfPoints.load_data(filename) return PointCloud(data, frame)
python
def open(filename, frame='unspecified'): """Create a PointCloud from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created PointCloud. Returns ------- :obj:`PointCloud` A PointCloud created from the data in the file. """ data = BagOfPoints.load_data(filename) return PointCloud(data, frame)
[ "def", "open", "(", "filename", ",", "frame", "=", "'unspecified'", ")", ":", "data", "=", "BagOfPoints", ".", "load_data", "(", "filename", ")", "return", "PointCloud", "(", "data", ",", "frame", ")" ]
Create a PointCloud from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created PointCloud. Returns ------- :obj:`PointCloud` A PointCloud created from the data in the file.
[ "Create", "a", "PointCloud", "from", "data", "saved", "in", "a", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L817-L834
9,635
BerkeleyAutomation/autolab_core
autolab_core/points.py
NormalCloud.subsample
def subsample(self, rate): """Returns a subsampled version of the NormalCloud. Parameters ---------- rate : int Only every rate-th element of the NormalCloud is returned. Returns ------- :obj:`NormalCloud` A subsampled normal cloud with N / rate total samples. Raises ------ ValueError If rate is not a positive integer. """ if type(rate) != int or rate < 1: raise ValueError('Can only subsample with strictly positive integer rate') subsample_inds = np.arange(self.num_points)[::rate] subsampled_data = self._data[:,subsample_inds] return NormalCloud(subsampled_data, self._frame)
python
def subsample(self, rate): """Returns a subsampled version of the NormalCloud. Parameters ---------- rate : int Only every rate-th element of the NormalCloud is returned. Returns ------- :obj:`NormalCloud` A subsampled normal cloud with N / rate total samples. Raises ------ ValueError If rate is not a positive integer. """ if type(rate) != int or rate < 1: raise ValueError('Can only subsample with strictly positive integer rate') subsample_inds = np.arange(self.num_points)[::rate] subsampled_data = self._data[:,subsample_inds] return NormalCloud(subsampled_data, self._frame)
[ "def", "subsample", "(", "self", ",", "rate", ")", ":", "if", "type", "(", "rate", ")", "!=", "int", "or", "rate", "<", "1", ":", "raise", "ValueError", "(", "'Can only subsample with strictly positive integer rate'", ")", "subsample_inds", "=", "np", ".", "arange", "(", "self", ".", "num_points", ")", "[", ":", ":", "rate", "]", "subsampled_data", "=", "self", ".", "_data", "[", ":", ",", "subsample_inds", "]", "return", "NormalCloud", "(", "subsampled_data", ",", "self", ".", "_frame", ")" ]
Returns a subsampled version of the NormalCloud. Parameters ---------- rate : int Only every rate-th element of the NormalCloud is returned. Returns ------- :obj:`NormalCloud` A subsampled normal cloud with N / rate total samples. Raises ------ ValueError If rate is not a positive integer.
[ "Returns", "a", "subsampled", "version", "of", "the", "NormalCloud", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L897-L919
9,636
BerkeleyAutomation/autolab_core
autolab_core/points.py
NormalCloud.remove_zero_normals
def remove_zero_normals(self): """Removes normal vectors with a zero magnitude. Note ---- This returns nothing and updates the NormalCloud in-place. """ points_of_interest = np.where(np.linalg.norm(self._data, axis=0) != 0.0)[0] self._data = self._data[:, points_of_interest]
python
def remove_zero_normals(self): """Removes normal vectors with a zero magnitude. Note ---- This returns nothing and updates the NormalCloud in-place. """ points_of_interest = np.where(np.linalg.norm(self._data, axis=0) != 0.0)[0] self._data = self._data[:, points_of_interest]
[ "def", "remove_zero_normals", "(", "self", ")", ":", "points_of_interest", "=", "np", ".", "where", "(", "np", ".", "linalg", ".", "norm", "(", "self", ".", "_data", ",", "axis", "=", "0", ")", "!=", "0.0", ")", "[", "0", "]", "self", ".", "_data", "=", "self", ".", "_data", "[", ":", ",", "points_of_interest", "]" ]
Removes normal vectors with a zero magnitude. Note ---- This returns nothing and updates the NormalCloud in-place.
[ "Removes", "normal", "vectors", "with", "a", "zero", "magnitude", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L921-L929
9,637
BerkeleyAutomation/autolab_core
autolab_core/points.py
NormalCloud.remove_nan_normals
def remove_nan_normals(self): """Removes normal vectors with nan magnitude. Note ---- This returns nothing and updates the NormalCloud in-place. """ points_of_interest = np.where(np.isfinite(np.linalg.norm(self._data, axis=0)))[0] self._data = self._data[:, points_of_interest]
python
def remove_nan_normals(self): """Removes normal vectors with nan magnitude. Note ---- This returns nothing and updates the NormalCloud in-place. """ points_of_interest = np.where(np.isfinite(np.linalg.norm(self._data, axis=0)))[0] self._data = self._data[:, points_of_interest]
[ "def", "remove_nan_normals", "(", "self", ")", ":", "points_of_interest", "=", "np", ".", "where", "(", "np", ".", "isfinite", "(", "np", ".", "linalg", ".", "norm", "(", "self", ".", "_data", ",", "axis", "=", "0", ")", ")", ")", "[", "0", "]", "self", ".", "_data", "=", "self", ".", "_data", "[", ":", ",", "points_of_interest", "]" ]
Removes normal vectors with nan magnitude. Note ---- This returns nothing and updates the NormalCloud in-place.
[ "Removes", "normal", "vectors", "with", "nan", "magnitude", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L931-L939
9,638
BerkeleyAutomation/autolab_core
autolab_core/points.py
NormalCloud.open
def open(filename, frame='unspecified'): """Create a NormalCloud from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created NormalCloud. Returns ------- :obj:`NormalCloud` A NormalCloud created from the data in the file. """ data = BagOfPoints.load_data(filename) return NormalCloud(data, frame)
python
def open(filename, frame='unspecified'): """Create a NormalCloud from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created NormalCloud. Returns ------- :obj:`NormalCloud` A NormalCloud created from the data in the file. """ data = BagOfPoints.load_data(filename) return NormalCloud(data, frame)
[ "def", "open", "(", "filename", ",", "frame", "=", "'unspecified'", ")", ":", "data", "=", "BagOfPoints", ".", "load_data", "(", "filename", ")", "return", "NormalCloud", "(", "data", ",", "frame", ")" ]
Create a NormalCloud from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created NormalCloud. Returns ------- :obj:`NormalCloud` A NormalCloud created from the data in the file.
[ "Create", "a", "NormalCloud", "from", "data", "saved", "in", "a", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L942-L959
9,639
BerkeleyAutomation/autolab_core
autolab_core/points.py
ImageCoords.open
def open(filename, frame='unspecified'): """Create an ImageCoords from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created ImageCoords. Returns ------- :obj:`ImageCoords` An ImageCoords created from the data in the file. """ data = BagOfPoints.load_data(filename) return ImageCoords(data, frame)
python
def open(filename, frame='unspecified'): """Create an ImageCoords from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created ImageCoords. Returns ------- :obj:`ImageCoords` An ImageCoords created from the data in the file. """ data = BagOfPoints.load_data(filename) return ImageCoords(data, frame)
[ "def", "open", "(", "filename", ",", "frame", "=", "'unspecified'", ")", ":", "data", "=", "BagOfPoints", ".", "load_data", "(", "filename", ")", "return", "ImageCoords", "(", "data", ",", "frame", ")" ]
Create an ImageCoords from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created ImageCoords. Returns ------- :obj:`ImageCoords` An ImageCoords created from the data in the file.
[ "Create", "an", "ImageCoords", "from", "data", "saved", "in", "a", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L1015-L1032
9,640
BerkeleyAutomation/autolab_core
autolab_core/points.py
RgbCloud.open
def open(filename, frame='unspecified'): """Create a RgbCloud from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created RgbCloud. Returns ------- :obj:`RgbCloud` A RgbCloud created from the data in the file. """ data = BagOfPoints.load_data(filename) return RgbCloud(data, frame)
python
def open(filename, frame='unspecified'): """Create a RgbCloud from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created RgbCloud. Returns ------- :obj:`RgbCloud` A RgbCloud created from the data in the file. """ data = BagOfPoints.load_data(filename) return RgbCloud(data, frame)
[ "def", "open", "(", "filename", ",", "frame", "=", "'unspecified'", ")", ":", "data", "=", "BagOfPoints", ".", "load_data", "(", "filename", ")", "return", "RgbCloud", "(", "data", ",", "frame", ")" ]
Create a RgbCloud from data saved in a file. Parameters ---------- filename : :obj:`str` The file to load data from. frame : :obj:`str` The frame to apply to the created RgbCloud. Returns ------- :obj:`RgbCloud` A RgbCloud created from the data in the file.
[ "Create", "a", "RgbCloud", "from", "data", "saved", "in", "a", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L1093-L1110
9,641
BerkeleyAutomation/autolab_core
autolab_core/points.py
PointNormalCloud.remove_zero_points
def remove_zero_points(self): """Remove all elements where the point or normal norms are zero. Note ---- This returns nothing and updates the PointNormalCloud in-place. """ points_of_interest = np.where((np.linalg.norm(self.point_cloud.data, axis=0) != 0.0) & (np.linalg.norm(self.normal_cloud.data, axis=0) != 0.0) & (np.isfinite(self.normal_cloud.data[0,:])))[0] self.point_cloud._data = self.point_cloud.data[:, points_of_interest] self.normal_cloud._data = self.normal_cloud.data[:, points_of_interest]
python
def remove_zero_points(self): """Remove all elements where the point or normal norms are zero. Note ---- This returns nothing and updates the PointNormalCloud in-place. """ points_of_interest = np.where((np.linalg.norm(self.point_cloud.data, axis=0) != 0.0) & (np.linalg.norm(self.normal_cloud.data, axis=0) != 0.0) & (np.isfinite(self.normal_cloud.data[0,:])))[0] self.point_cloud._data = self.point_cloud.data[:, points_of_interest] self.normal_cloud._data = self.normal_cloud.data[:, points_of_interest]
[ "def", "remove_zero_points", "(", "self", ")", ":", "points_of_interest", "=", "np", ".", "where", "(", "(", "np", ".", "linalg", ".", "norm", "(", "self", ".", "point_cloud", ".", "data", ",", "axis", "=", "0", ")", "!=", "0.0", ")", "&", "(", "np", ".", "linalg", ".", "norm", "(", "self", ".", "normal_cloud", ".", "data", ",", "axis", "=", "0", ")", "!=", "0.0", ")", "&", "(", "np", ".", "isfinite", "(", "self", ".", "normal_cloud", ".", "data", "[", "0", ",", ":", "]", ")", ")", ")", "[", "0", "]", "self", ".", "point_cloud", ".", "_data", "=", "self", ".", "point_cloud", ".", "data", "[", ":", ",", "points_of_interest", "]", "self", ".", "normal_cloud", ".", "_data", "=", "self", ".", "normal_cloud", ".", "data", "[", ":", ",", "points_of_interest", "]" ]
Remove all elements where the point or normal norms are zero. Note ---- This returns nothing and updates the PointNormalCloud in-place.
[ "Remove", "all", "elements", "where", "the", "point", "or", "normal", "norms", "are", "zero", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L1201-L1212
9,642
BerkeleyAutomation/autolab_core
autolab_core/experiment_logger.py
ExperimentLogger.gen_experiment_ref
def gen_experiment_ref(experiment_tag, n=10): """ Generate a random string for naming. Parameters ---------- experiment_tag : :obj:`str` tag to prefix name with n : int number of random chars to use Returns ------- :obj:`str` string experiment ref """ experiment_id = gen_experiment_id(n=n) return '{0}_{1}'.format(experiment_tag, experiment_id)
python
def gen_experiment_ref(experiment_tag, n=10): """ Generate a random string for naming. Parameters ---------- experiment_tag : :obj:`str` tag to prefix name with n : int number of random chars to use Returns ------- :obj:`str` string experiment ref """ experiment_id = gen_experiment_id(n=n) return '{0}_{1}'.format(experiment_tag, experiment_id)
[ "def", "gen_experiment_ref", "(", "experiment_tag", ",", "n", "=", "10", ")", ":", "experiment_id", "=", "gen_experiment_id", "(", "n", "=", "n", ")", "return", "'{0}_{1}'", ".", "format", "(", "experiment_tag", ",", "experiment_id", ")" ]
Generate a random string for naming. Parameters ---------- experiment_tag : :obj:`str` tag to prefix name with n : int number of random chars to use Returns ------- :obj:`str` string experiment ref
[ "Generate", "a", "random", "string", "for", "naming", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/experiment_logger.py#L82-L98
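A usage sketch; gen_experiment_id is assumed (from its name and the docstring) to produce n random characters, so the suffix below is only illustrative, and the method is assumed to be exposed as a staticmethod.

    from autolab_core import ExperimentLogger

    ref = ExperimentLogger.gen_experiment_ref('grasp_trial', n=6)
    print(ref)   # e.g. 'grasp_trial_x7k2qa'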
9,643
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.add
def add(self, datapoint): """ Adds the datapoint to the tensor if room is available. """ if not self.is_full: self.set_datapoint(self.cur_index, datapoint) self.cur_index += 1
python
def add(self, datapoint): """ Adds the datapoint to the tensor if room is available. """ if not self.is_full: self.set_datapoint(self.cur_index, datapoint) self.cur_index += 1
[ "def", "add", "(", "self", ",", "datapoint", ")", ":", "if", "not", "self", ".", "is_full", ":", "self", ".", "set_datapoint", "(", "self", ".", "cur_index", ",", "datapoint", ")", "self", ".", "cur_index", "+=", "1" ]
Adds the datapoint to the tensor if room is available.
[ "Adds", "the", "datapoint", "to", "the", "tensor", "if", "room", "is", "available", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L121-L125
9,644
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.add_batch
def add_batch(self, datapoints): """ Adds a batch of datapoints to the tensor if room is available. """ num_datapoints_to_add = datapoints.shape[0] end_index = self.cur_index + num_datapoints_to_add if end_index <= self.num_datapoints: self.data[self.cur_index:end_index,...] = datapoints self.cur_index = end_index
python
def add_batch(self, datapoints): """ Adds a batch of datapoints to the tensor if room is available. """ num_datapoints_to_add = datapoints.shape[0] end_index = self.cur_index + num_datapoints_to_add if end_index <= self.num_datapoints: self.data[self.cur_index:end_index,...] = datapoints self.cur_index = end_index
[ "def", "add_batch", "(", "self", ",", "datapoints", ")", ":", "num_datapoints_to_add", "=", "datapoints", ".", "shape", "[", "0", "]", "end_index", "=", "self", ".", "cur_index", "+", "num_datapoints_to_add", "if", "end_index", "<=", "self", ".", "num_datapoints", ":", "self", ".", "data", "[", "self", ".", "cur_index", ":", "end_index", ",", "...", "]", "=", "datapoints", "self", ".", "cur_index", "=", "end_index" ]
Adds a batch of datapoints to the tensor if room is available.
[ "Adds", "a", "batch", "of", "datapoints", "to", "the", "tensor", "if", "room", "is", "available", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L127-L133
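A sketch of filling a tensor, assuming a constructor Tensor(shape, dtype) whose first shape entry is the datapoint capacity; this is consistent with the call Tensor(data.shape, data.dtype, data=data) in load() further down.

    import numpy as np
    from autolab_core.tensor_dataset import Tensor

    t = Tensor((100, 32, 32), np.float32)   # room for 100 datapoints of 32x32
    t.add(np.zeros((32, 32)))               # one datapoint at a time
    t.add_batch(np.ones((10, 32, 32)))      # or a batch at once
    print(t.cur_index)                      # 11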
9,645
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.datapoint
def datapoint(self, ind): """ Returns the datapoint at the given index. """ if self.height is None: return self.data[ind] return self.data[ind, ...].copy()
python
def datapoint(self, ind): """ Returns the datapoint at the given index. """ if self.height is None: return self.data[ind] return self.data[ind, ...].copy()
[ "def", "datapoint", "(", "self", ",", "ind", ")", ":", "if", "self", ".", "height", "is", "None", ":", "return", "self", ".", "data", "[", "ind", "]", "return", "self", ".", "data", "[", "ind", ",", "...", "]", ".", "copy", "(", ")" ]
Returns the datapoint at the given index.
[ "Returns", "the", "datapoint", "at", "the", "given", "index", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L141-L145
9,646
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.set_datapoint
def set_datapoint(self, ind, datapoint): """ Sets the value of the datapoint at the given index. """ if ind >= self.num_datapoints: raise ValueError('Index %d out of bounds! Tensor has %d datapoints' %(ind, self.num_datapoints)) self.data[ind, ...] = np.array(datapoint).astype(self.dtype)
python
def set_datapoint(self, ind, datapoint): """ Sets the value of the datapoint at the given index. """ if ind >= self.num_datapoints: raise ValueError('Index %d out of bounds! Tensor has %d datapoints' %(ind, self.num_datapoints)) self.data[ind, ...] = np.array(datapoint).astype(self.dtype)
[ "def", "set_datapoint", "(", "self", ",", "ind", ",", "datapoint", ")", ":", "if", "ind", ">=", "self", ".", "num_datapoints", ":", "raise", "ValueError", "(", "'Index %d out of bounds! Tensor has %d datapoints'", "%", "(", "ind", ",", "self", ".", "num_datapoints", ")", ")", "self", ".", "data", "[", "ind", ",", "...", "]", "=", "np", ".", "array", "(", "datapoint", ")", ".", "astype", "(", "self", ".", "dtype", ")" ]
Sets the value of the datapoint at the given index.
[ "Sets", "the", "value", "of", "the", "datapoint", "at", "the", "given", "index", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L147-L151
9,647
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.data_slice
def data_slice(self, slice_ind): """ Returns a slice of datapoints """ if self.height is None: return self.data[slice_ind] return self.data[slice_ind, ...]
python
def data_slice(self, slice_ind): """ Returns a slice of datapoints """ if self.height is None: return self.data[slice_ind] return self.data[slice_ind, ...]
[ "def", "data_slice", "(", "self", ",", "slice_ind", ")", ":", "if", "self", ".", "height", "is", "None", ":", "return", "self", ".", "data", "[", "slice_ind", "]", "return", "self", ".", "data", "[", "slice_ind", ",", "...", "]" ]
Returns a slice of datapoints
[ "Returns", "a", "slice", "of", "datapoints" ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L153-L157
9,648
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.save
def save(self, filename, compressed=True): """ Save a tensor to disk. """ # check for data if not self.has_data: return False # read ext and save accordingly _, file_ext = os.path.splitext(filename) if compressed: if file_ext != COMPRESSED_TENSOR_EXT: raise ValueError('Can only save compressed tensor with %s extension' %(COMPRESSED_TENSOR_EXT)) np.savez_compressed(filename, self.data[:self.cur_index,...]) else: if file_ext != TENSOR_EXT: raise ValueError('Can only save tensor with .npy extension') np.save(filename, self.data[:self.cur_index,...]) return True
python
def save(self, filename, compressed=True): """ Save a tensor to disk. """ # check for data if not self.has_data: return False # read ext and save accordingly _, file_ext = os.path.splitext(filename) if compressed: if file_ext != COMPRESSED_TENSOR_EXT: raise ValueError('Can only save compressed tensor with %s extension' %(COMPRESSED_TENSOR_EXT)) np.savez_compressed(filename, self.data[:self.cur_index,...]) else: if file_ext != TENSOR_EXT: raise ValueError('Can only save tensor with .npy extension') np.save(filename, self.data[:self.cur_index,...]) return True
[ "def", "save", "(", "self", ",", "filename", ",", "compressed", "=", "True", ")", ":", "# check for data", "if", "not", "self", ".", "has_data", ":", "return", "False", "# read ext and save accordingly", "_", ",", "file_ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "compressed", ":", "if", "file_ext", "!=", "COMPRESSED_TENSOR_EXT", ":", "raise", "ValueError", "(", "'Can only save compressed tensor with %s extension'", "%", "(", "COMPRESSED_TENSOR_EXT", ")", ")", "np", ".", "savez_compressed", "(", "filename", ",", "self", ".", "data", "[", ":", "self", ".", "cur_index", ",", "...", "]", ")", "else", ":", "if", "file_ext", "!=", "TENSOR_EXT", ":", "raise", "ValueError", "(", "'Can only save tensor with .npy extension'", ")", "np", ".", "save", "(", "filename", ",", "self", ".", "data", "[", ":", "self", ".", "cur_index", ",", "...", "]", ")", "return", "True" ]
Save a tensor to disk.
[ "Save", "a", "tensor", "to", "disk", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L159-L176
9,649
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
Tensor.load
def load(filename, compressed=True, prealloc=None): """ Loads a tensor from disk. """ # switch load based on file ext _, file_ext = os.path.splitext(filename) if compressed: if file_ext != COMPRESSED_TENSOR_EXT: raise ValueError('Can only load compressed tensor with %s extension' %(COMPRESSED_TENSOR_EXT)) data = np.load(filename)['arr_0'] else: if file_ext != TENSOR_EXT: raise ValueError('Can only load tensor with .npy extension') data = np.load(filename) # fill prealloc tensor if prealloc is not None: prealloc.reset() prealloc.add_batch(data) return prealloc # init new tensor tensor = Tensor(data.shape, data.dtype, data=data) return tensor
python
def load(filename, compressed=True, prealloc=None): """ Loads a tensor from disk. """ # switch load based on file ext _, file_ext = os.path.splitext(filename) if compressed: if file_ext != COMPRESSED_TENSOR_EXT: raise ValueError('Can only load compressed tensor with %s extension' %(COMPRESSED_TENSOR_EXT)) data = np.load(filename)['arr_0'] else: if file_ext != TENSOR_EXT: raise ValueError('Can only load tensor with .npy extension') data = np.load(filename) # fill prealloc tensor if prealloc is not None: prealloc.reset() prealloc.add_batch(data) return prealloc # init new tensor tensor = Tensor(data.shape, data.dtype, data=data) return tensor
[ "def", "load", "(", "filename", ",", "compressed", "=", "True", ",", "prealloc", "=", "None", ")", ":", "# switch load based on file ext", "_", ",", "file_ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "compressed", ":", "if", "file_ext", "!=", "COMPRESSED_TENSOR_EXT", ":", "raise", "ValueError", "(", "'Can only load compressed tensor with %s extension'", "%", "(", "COMPRESSED_TENSOR_EXT", ")", ")", "data", "=", "np", ".", "load", "(", "filename", ")", "[", "'arr_0'", "]", "else", ":", "if", "file_ext", "!=", "TENSOR_EXT", ":", "raise", "ValueError", "(", "'Can only load tensor with .npy extension'", ")", "data", "=", "np", ".", "load", "(", "filename", ")", "# fill prealloc tensor", "if", "prealloc", "is", "not", "None", ":", "prealloc", ".", "reset", "(", ")", "prealloc", ".", "add_batch", "(", "data", ")", "return", "prealloc", "# init new tensor", "tensor", "=", "Tensor", "(", "data", ".", "shape", ",", "data", ".", "dtype", ",", "data", "=", "data", ")", "return", "tensor" ]
Loads a tensor from disk.
[ "Loads", "a", "tensor", "from", "disk", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L179-L200
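A round-trip sketch under the same assumptions; the prealloc path mirrors how TensorDataset.tensor() below reuses one buffer per field instead of reallocating on every file load.

import numpy as np
from autolab_core.tensor_dataset import Tensor

# fresh load: allocates a new Tensor sized to the stored data
tensor = Tensor.load('poses_00000.npz', compressed=True)

# preallocated load: reset() then add_batch() into an existing buffer
buf = Tensor([100, 3], np.float32)
loaded = Tensor.load('poses_00000.npz', compressed=True, prealloc=buf)
assert loaded is buf   # the same preallocated object is returned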
9,650
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.datapoint_indices_for_tensor
def datapoint_indices_for_tensor(self, tensor_index): """ Returns the indices for all datapoints in the given tensor. """ if tensor_index >= self._num_tensors: raise ValueError('Tensor index %d is greater than the number of tensors (%d)' %(tensor_index, self._num_tensors)) return self._file_num_to_indices[tensor_index]
python
def datapoint_indices_for_tensor(self, tensor_index): """ Returns the indices for all datapoints in the given tensor. """ if tensor_index >= self._num_tensors: raise ValueError('Tensor index %d is greater than the number of tensors (%d)' %(tensor_index, self._num_tensors)) return self._file_num_to_indices[tensor_index]
[ "def", "datapoint_indices_for_tensor", "(", "self", ",", "tensor_index", ")", ":", "if", "tensor_index", ">=", "self", ".", "_num_tensors", ":", "raise", "ValueError", "(", "'Tensor index %d is greater than the number of tensors (%d)'", "%", "(", "tensor_index", ",", "self", ".", "_num_tensors", ")", ")", "return", "self", ".", "_file_num_to_indices", "[", "tensor_index", "]" ]
Returns the indices for all datapoints in the given tensor.
[ "Returns", "the", "indices", "for", "all", "datapoints", "in", "the", "given", "tensor", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L415-L419
9,651
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.tensor_index
def tensor_index(self, datapoint_index): """ Returns the index of the tensor containing the referenced datapoint. """ if datapoint_index >= self._num_datapoints: raise ValueError('Datapoint index %d is greater than the number of datapoints (%d)' %(datapoint_index, self._num_datapoints)) return self._index_to_file_num[datapoint_index]
python
def tensor_index(self, datapoint_index): """ Returns the index of the tensor containing the referenced datapoint. """ if datapoint_index >= self._num_datapoints: raise ValueError('Datapoint index %d is greater than the number of datapoints (%d)' %(datapoint_index, self._num_datapoints)) return self._index_to_file_num[datapoint_index]
[ "def", "tensor_index", "(", "self", ",", "datapoint_index", ")", ":", "if", "datapoint_index", ">=", "self", ".", "_num_datapoints", ":", "raise", "ValueError", "(", "'Datapoint index %d is greater than the number of datapoints (%d)'", "%", "(", "datapoint_index", ",", "self", ".", "_num_datapoints", ")", ")", "return", "self", ".", "_index_to_file_num", "[", "datapoint_index", "]" ]
Returns the index of the tensor containing the referenced datapoint.
[ "Returns", "the", "index", "of", "the", "tensor", "containing", "the", "referenced", "datapoint", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L421-L425
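The two lookups above are inverse views of one layout: datapoints are packed into fixed-size tensor files. A standalone arithmetic sketch (the datapoints_per_file value is hypothetical):

import numpy as np

datapoints_per_file = 100
ind = 250

file_num = ind // datapoints_per_file   # -> 2, as in tensor_index()
indices = file_num * datapoints_per_file + np.arange(datapoints_per_file)
# -> [200, 201, ..., 299], as in datapoint_indices_for_tensor(2)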
9,652
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.generate_tensor_filename
def generate_tensor_filename(self, field_name, file_num, compressed=True): """ Generate a filename for a tensor. """ file_ext = TENSOR_EXT if compressed: file_ext = COMPRESSED_TENSOR_EXT filename = os.path.join(self.filename, 'tensors', '%s_%05d%s' %(field_name, file_num, file_ext)) return filename
python
def generate_tensor_filename(self, field_name, file_num, compressed=True): """ Generate a filename for a tensor. """ file_ext = TENSOR_EXT if compressed: file_ext = COMPRESSED_TENSOR_EXT filename = os.path.join(self.filename, 'tensors', '%s_%05d%s' %(field_name, file_num, file_ext)) return filename
[ "def", "generate_tensor_filename", "(", "self", ",", "field_name", ",", "file_num", ",", "compressed", "=", "True", ")", ":", "file_ext", "=", "TENSOR_EXT", "if", "compressed", ":", "file_ext", "=", "COMPRESSED_TENSOR_EXT", "filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "filename", ",", "'tensors'", ",", "'%s_%05d%s'", "%", "(", "field_name", ",", "file_num", ",", "file_ext", ")", ")", "return", "filename" ]
Generate a filename for a tensor.
[ "Generate", "a", "filename", "for", "a", "tensor", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L427-L433
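The '%s_%05d%s' pattern zero-pads the file number to five digits so filenames sort lexicographically in file-number order. A quick illustration (dataset path and field name hypothetical):

import os

filename = os.path.join('my_dataset', 'tensors',
                        '%s_%05d%s' % ('depth_ims', 12, '.npz'))
print(filename)   # my_dataset/tensors/depth_ims_00012.npz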
9,653
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset._allocate_tensors
def _allocate_tensors(self): """ Allocates the tensors in the dataset. """ # init tensors dict self._tensors = {} # allocate tensor for each data field for field_name, field_spec in self._config['fields'].items(): # parse attributes field_dtype = np.dtype(field_spec['dtype']) # parse shape field_shape = [self._datapoints_per_file] if 'height' in field_spec.keys(): field_shape.append(field_spec['height']) if 'width' in field_spec.keys(): field_shape.append(field_spec['width']) if 'channels' in field_spec.keys(): field_shape.append(field_spec['channels']) # create tensor self._tensors[field_name] = Tensor(field_shape, field_dtype)
python
def _allocate_tensors(self): """ Allocates the tensors in the dataset. """ # init tensors dict self._tensors = {} # allocate tensor for each data field for field_name, field_spec in self._config['fields'].items(): # parse attributes field_dtype = np.dtype(field_spec['dtype']) # parse shape field_shape = [self._datapoints_per_file] if 'height' in field_spec.keys(): field_shape.append(field_spec['height']) if 'width' in field_spec.keys(): field_shape.append(field_spec['width']) if 'channels' in field_spec.keys(): field_shape.append(field_spec['channels']) # create tensor self._tensors[field_name] = Tensor(field_shape, field_dtype)
[ "def", "_allocate_tensors", "(", "self", ")", ":", "# init tensors dict", "self", ".", "_tensors", "=", "{", "}", "# allocate tensor for each data field", "for", "field_name", ",", "field_spec", "in", "self", ".", "_config", "[", "'fields'", "]", ".", "items", "(", ")", ":", "# parse attributes", "field_dtype", "=", "np", ".", "dtype", "(", "field_spec", "[", "'dtype'", "]", ")", "# parse shape", "field_shape", "=", "[", "self", ".", "_datapoints_per_file", "]", "if", "'height'", "in", "field_spec", ".", "keys", "(", ")", ":", "field_shape", ".", "append", "(", "field_spec", "[", "'height'", "]", ")", "if", "'width'", "in", "field_spec", ".", "keys", "(", ")", ":", "field_shape", ".", "append", "(", "field_spec", "[", "'width'", "]", ")", "if", "'channels'", "in", "field_spec", ".", "keys", "(", ")", ":", "field_shape", ".", "append", "(", "field_spec", "[", "'channels'", "]", ")", "# create tensor", "self", ".", "_tensors", "[", "field_name", "]", "=", "Tensor", "(", "field_shape", ",", "field_dtype", ")" ]
Allocates the tensors in the dataset.
[ "Allocates", "the", "tensors", "in", "the", "dataset", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L459-L479
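Given the parsing above, a field spec needs a 'dtype' and may add 'height', 'width', and 'channels'; the leading dimension is always datapoints_per_file. A hypothetical config consistent with that logic (the 'datapoints_per_file' key name is assumed from the attribute name):

config = {
    'datapoints_per_file': 100,
    'fields': {
        # image field: allocated shape becomes [100, 48, 48, 1]
        'depth_ims': {'dtype': 'float32',
                      'height': 48, 'width': 48, 'channels': 1},
        # scalar field: allocated shape becomes [100]
        'grasp_metrics': {'dtype': 'float32'},
    },
}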
9,654
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.add
def add(self, datapoint): """ Adds a datapoint to the file. """ # check access level if self._access_mode == READ_ONLY_ACCESS: raise ValueError('Cannot add datapoints with read-only access') # read tensor datapoint ind tensor_ind = self._num_datapoints // self._datapoints_per_file # check datapoint fields for field_name in datapoint.keys(): if field_name not in self.field_names: raise ValueError('Field %s not specified in dataset' %(field_name)) # store data in tensor cur_num_tensors = self._num_tensors new_num_tensors = cur_num_tensors for field_name in self.field_names: if tensor_ind < cur_num_tensors: # load tensor if it was previously allocated self._tensors[field_name] = self.tensor(field_name, tensor_ind) else: # clear tensor if this is a new tensor self._tensors[field_name].reset() self._tensor_cache_file_num[field_name] = tensor_ind new_num_tensors = cur_num_tensors + 1 self._has_unsaved_data = True self._tensors[field_name].add(datapoint[field_name]) cur_size = self._tensors[field_name].size # update num tensors if new_num_tensors > cur_num_tensors: self._num_tensors = new_num_tensors # update file indices self._index_to_file_num[self._num_datapoints] = tensor_ind self._file_num_to_indices[tensor_ind] = tensor_ind * self._datapoints_per_file + np.arange(cur_size) # save if tensors are full field_name = self.field_names[0] if self._tensors[field_name].is_full: # save next tensors to file logging.info('Dataset %s: Writing tensor %d to disk' %(self.filename, tensor_ind)) self.write() # increment num datapoints self._num_datapoints += 1
python
def add(self, datapoint): """ Adds a datapoint to the file. """ # check access level if self._access_mode == READ_ONLY_ACCESS: raise ValueError('Cannot add datapoints with read-only access') # read tensor datapoint ind tensor_ind = self._num_datapoints // self._datapoints_per_file # check datapoint fields for field_name in datapoint.keys(): if field_name not in self.field_names: raise ValueError('Field %s not specified in dataset' %(field_name)) # store data in tensor cur_num_tensors = self._num_tensors new_num_tensors = cur_num_tensors for field_name in self.field_names: if tensor_ind < cur_num_tensors: # load tensor if it was previously allocated self._tensors[field_name] = self.tensor(field_name, tensor_ind) else: # clear tensor if this is a new tensor self._tensors[field_name].reset() self._tensor_cache_file_num[field_name] = tensor_ind new_num_tensors = cur_num_tensors + 1 self._has_unsaved_data = True self._tensors[field_name].add(datapoint[field_name]) cur_size = self._tensors[field_name].size # update num tensors if new_num_tensors > cur_num_tensors: self._num_tensors = new_num_tensors # update file indices self._index_to_file_num[self._num_datapoints] = tensor_ind self._file_num_to_indices[tensor_ind] = tensor_ind * self._datapoints_per_file + np.arange(cur_size) # save if tensors are full field_name = self.field_names[0] if self._tensors[field_name].is_full: # save next tensors to file logging.info('Dataset %s: Writing tensor %d to disk' %(self.filename, tensor_ind)) self.write() # increment num datapoints self._num_datapoints += 1
[ "def", "add", "(", "self", ",", "datapoint", ")", ":", "# check access level", "if", "self", ".", "_access_mode", "==", "READ_ONLY_ACCESS", ":", "raise", "ValueError", "(", "'Cannot add datapoints with read-only access'", ")", "# read tensor datapoint ind", "tensor_ind", "=", "self", ".", "_num_datapoints", "//", "self", ".", "_datapoints_per_file", "# check datapoint fields", "for", "field_name", "in", "datapoint", ".", "keys", "(", ")", ":", "if", "field_name", "not", "in", "self", ".", "field_names", ":", "raise", "ValueError", "(", "'Field %s not specified in dataset'", "%", "(", "field_name", ")", ")", "# store data in tensor", "cur_num_tensors", "=", "self", ".", "_num_tensors", "new_num_tensors", "=", "cur_num_tensors", "for", "field_name", "in", "self", ".", "field_names", ":", "if", "tensor_ind", "<", "cur_num_tensors", ":", "# load tensor if it was previously allocated", "self", ".", "_tensors", "[", "field_name", "]", "=", "self", ".", "tensor", "(", "field_name", ",", "tensor_ind", ")", "else", ":", "# clear tensor if this is a new tensor", "self", ".", "_tensors", "[", "field_name", "]", ".", "reset", "(", ")", "self", ".", "_tensor_cache_file_num", "[", "field_name", "]", "=", "tensor_ind", "new_num_tensors", "=", "cur_num_tensors", "+", "1", "self", ".", "_has_unsaved_data", "=", "True", "self", ".", "_tensors", "[", "field_name", "]", ".", "add", "(", "datapoint", "[", "field_name", "]", ")", "cur_size", "=", "self", ".", "_tensors", "[", "field_name", "]", ".", "size", "# update num tensors", "if", "new_num_tensors", ">", "cur_num_tensors", ":", "self", ".", "_num_tensors", "=", "new_num_tensors", "# update file indices", "self", ".", "_index_to_file_num", "[", "self", ".", "_num_datapoints", "]", "=", "tensor_ind", "self", ".", "_file_num_to_indices", "[", "tensor_ind", "]", "=", "tensor_ind", "*", "self", ".", "_datapoints_per_file", "+", "np", ".", "arange", "(", "cur_size", ")", "# save if tensors are full", "field_name", "=", "self", ".", "field_names", "[", "0", "]", "if", "self", ".", "_tensors", "[", "field_name", "]", ".", "is_full", ":", "# save next tensors to file", "logging", ".", "info", "(", "'Dataset %s: Writing tensor %d to disk'", "%", "(", "self", ".", "filename", ",", "tensor_ind", ")", ")", "self", ".", "write", "(", ")", "# increment num datapoints", "self", ".", "_num_datapoints", "+=", "1" ]
Adds a datapoint to the file.
[ "Adds", "a", "datapoint", "to", "the", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L481-L527
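A write-side sketch using the hypothetical config above, assuming the TensorDataset constructor defaults to write access and that flush() (called by datapoint() below) persists a partially filled tensor; the dataset path is hypothetical.

import numpy as np
from autolab_core.tensor_dataset import TensorDataset

dataset = TensorDataset('./my_dataset', config)
for _ in range(250):
    dataset.add({'depth_ims': np.zeros((48, 48, 1), dtype=np.float32),
                 'grasp_metrics': np.float32(0.0)})
# tensors 0 and 1 were auto-written when they filled; flush the partial one
dataset.flush()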
9,655
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.datapoint
def datapoint(self, ind, field_names=None): """ Loads a tensor datapoint for a given global index. Parameters ---------- ind : int global index in the tensor field_names : :obj:`list` of str field names to load Returns ------- :obj:`TensorDatapoint` the desired tensor datapoint """ # flush if necessary if self._has_unsaved_data: self.flush() # check valid input if ind >= self._num_datapoints: raise ValueError('Index %d larger than the number of datapoints in the dataset (%d)' %(ind, self._num_datapoints)) # load the field names if field_names is None: field_names = self.field_names # return the datapoint datapoint = TensorDatapoint(field_names) file_num = self._index_to_file_num[ind] for field_name in field_names: tensor = self.tensor(field_name, file_num) tensor_index = ind % self._datapoints_per_file datapoint[field_name] = tensor.datapoint(tensor_index) return datapoint
python
def datapoint(self, ind, field_names=None): """ Loads a tensor datapoint for a given global index. Parameters ---------- ind : int global index in the tensor field_names : :obj:`list` of str field names to load Returns ------- :obj:`TensorDatapoint` the desired tensor datapoint """ # flush if necessary if self._has_unsaved_data: self.flush() # check valid input if ind >= self._num_datapoints: raise ValueError('Index %d larger than the number of datapoints in the dataset (%d)' %(ind, self._num_datapoints)) # load the field names if field_names is None: field_names = self.field_names # return the datapoint datapoint = TensorDatapoint(field_names) file_num = self._index_to_file_num[ind] for field_name in field_names: tensor = self.tensor(field_name, file_num) tensor_index = ind % self._datapoints_per_file datapoint[field_name] = tensor.datapoint(tensor_index) return datapoint
[ "def", "datapoint", "(", "self", ",", "ind", ",", "field_names", "=", "None", ")", ":", "# flush if necessary", "if", "self", ".", "_has_unsaved_data", ":", "self", ".", "flush", "(", ")", "# check valid input", "if", "ind", ">=", "self", ".", "_num_datapoints", ":", "raise", "ValueError", "(", "'Index %d larger than the number of datapoints in the dataset (%d)'", "%", "(", "ind", ",", "self", ".", "_num_datapoints", ")", ")", "# load the field names", "if", "field_names", "is", "None", ":", "field_names", "=", "self", ".", "field_names", "# return the datapoint", "datapoint", "=", "TensorDatapoint", "(", "field_names", ")", "file_num", "=", "self", ".", "_index_to_file_num", "[", "ind", "]", "for", "field_name", "in", "field_names", ":", "tensor", "=", "self", ".", "tensor", "(", "field_name", ",", "file_num", ")", "tensor_index", "=", "ind", "%", "self", ".", "_datapoints_per_file", "datapoint", "[", "field_name", "]", "=", "tensor", ".", "datapoint", "(", "tensor_index", ")", "return", "datapoint" ]
Loads a tensor datapoint for a given global index. Parameters ---------- ind : int global index in the tensor field_names : :obj:`list` of str field names to load Returns ------- :obj:`TensorDatapoint` the desired tensor datapoint
[ "Loads", "a", "tensor", "datapoint", "for", "a", "given", "global", "index", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L533-L567
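Reading back by global index; passing field_names avoids loading tensors you don't need (continuing the hypothetical dataset above):

dp = dataset.datapoint(137)                          # all fields
depth_only = dataset.datapoint(137, ['depth_ims'])   # just one field
print(dp['grasp_metrics'], depth_only['depth_ims'].shape)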
9,656
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.tensor
def tensor(self, field_name, tensor_ind): """ Returns the tensor for a given field and tensor index. Parameters ---------- field_name : str the name of the field to load tensor_ind : int the index of the tensor Returns ------- :obj:`Tensor` the desired tensor """ if tensor_ind == self._tensor_cache_file_num[field_name]: return self._tensors[field_name] filename = self.generate_tensor_filename(field_name, tensor_ind, compressed=True) Tensor.load(filename, compressed=True, prealloc=self._tensors[field_name]) self._tensor_cache_file_num[field_name] = tensor_ind return self._tensors[field_name]
python
def tensor(self, field_name, tensor_ind): """ Returns the tensor for a given field and tensor index. Parameters ---------- field_name : str the name of the field to load tensor_ind : int the index of the tensor Returns ------- :obj:`Tensor` the desired tensor """ if tensor_ind == self._tensor_cache_file_num[field_name]: return self._tensors[field_name] filename = self.generate_tensor_filename(field_name, tensor_ind, compressed=True) Tensor.load(filename, compressed=True, prealloc=self._tensors[field_name]) self._tensor_cache_file_num[field_name] = tensor_ind return self._tensors[field_name]
[ "def", "tensor", "(", "self", ",", "field_name", ",", "tensor_ind", ")", ":", "if", "tensor_ind", "==", "self", ".", "_tensor_cache_file_num", "[", "field_name", "]", ":", "return", "self", ".", "_tensors", "[", "field_name", "]", "filename", "=", "self", ".", "generate_tensor_filename", "(", "field_name", ",", "tensor_ind", ",", "compressed", "=", "True", ")", "Tensor", ".", "load", "(", "filename", ",", "compressed", "=", "True", ",", "prealloc", "=", "self", ".", "_tensors", "[", "field_name", "]", ")", "self", ".", "_tensor_cache_file_num", "[", "field_name", "]", "=", "tensor_ind", "return", "self", ".", "_tensors", "[", "field_name", "]" ]
Returns the tensor for a given field and tensor index. Parameters ---------- field_name : str the name of the field to load tensor_ind : int the index of the tensor Returns ------- :obj:`Tensor` the desired tensor
[ "Returns", "the", "tensor", "for", "a", "given", "field", "and", "tensor", "index", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L569-L590
9,657
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.delete_last
def delete_last(self, num_to_delete=1): """ Deletes the last N datapoints from the dataset. Parameters ---------- num_to_delete : int the number of datapoints to remove from the end of the dataset """ # check access level if self._access_mode == READ_ONLY_ACCESS: raise ValueError('Cannot delete datapoints with read-only access') # check num to delete if num_to_delete > self._num_datapoints: raise ValueError('Cannot remove more than the number of datapoints in the dataset') # compute indices last_datapoint_ind = self._num_datapoints - 1 last_tensor_ind = last_datapoint_ind // self._datapoints_per_file new_last_datapoint_ind = self._num_datapoints - 1 - num_to_delete new_num_datapoints = new_last_datapoint_ind + 1 new_last_datapoint_ind = max(new_last_datapoint_ind, 0) new_last_tensor_ind = new_last_datapoint_ind // self._datapoints_per_file # delete all but the last tensor delete_tensor_ind = range(new_last_tensor_ind+1, last_tensor_ind+1) for tensor_ind in delete_tensor_ind: for field_name in self.field_names: filename = self.generate_tensor_filename(field_name, tensor_ind) os.remove(filename) # update last tensor dataset_empty = False target_tensor_size = new_num_datapoints % self._datapoints_per_file if target_tensor_size == 0: if new_num_datapoints > 0: target_tensor_size = self._datapoints_per_file else: dataset_empty = True for field_name in self.field_names: new_last_tensor = self.tensor(field_name, new_last_tensor_ind) while new_last_tensor.size > target_tensor_size: new_last_tensor.delete_last() filename = self.generate_tensor_filename(field_name, new_last_tensor_ind) new_last_tensor.save(filename, compressed=True) if not new_last_tensor.has_data: os.remove(filename) new_last_tensor.reset() # update num datapoints if self._num_datapoints - 1 - num_to_delete >= 0: self._num_datapoints = new_num_datapoints else: self._num_datapoints = 0 # handle deleted tensor self._num_tensors = new_last_tensor_ind + 1 if dataset_empty: self._num_tensors = 0
python
def delete_last(self, num_to_delete=1): """ Deletes the last N datapoints from the dataset. Parameters ---------- num_to_delete : int the number of datapoints to remove from the end of the dataset """ # check access level if self._access_mode == READ_ONLY_ACCESS: raise ValueError('Cannot delete datapoints with read-only access') # check num to delete if num_to_delete > self._num_datapoints: raise ValueError('Cannot remove more than the number of datapoints in the dataset') # compute indices last_datapoint_ind = self._num_datapoints - 1 last_tensor_ind = last_datapoint_ind // self._datapoints_per_file new_last_datapoint_ind = self._num_datapoints - 1 - num_to_delete new_num_datapoints = new_last_datapoint_ind + 1 new_last_datapoint_ind = max(new_last_datapoint_ind, 0) new_last_tensor_ind = new_last_datapoint_ind // self._datapoints_per_file # delete all but the last tensor delete_tensor_ind = range(new_last_tensor_ind+1, last_tensor_ind+1) for tensor_ind in delete_tensor_ind: for field_name in self.field_names: filename = self.generate_tensor_filename(field_name, tensor_ind) os.remove(filename) # update last tensor dataset_empty = False target_tensor_size = new_num_datapoints % self._datapoints_per_file if target_tensor_size == 0: if new_num_datapoints > 0: target_tensor_size = self._datapoints_per_file else: dataset_empty = True for field_name in self.field_names: new_last_tensor = self.tensor(field_name, new_last_tensor_ind) while new_last_tensor.size > target_tensor_size: new_last_tensor.delete_last() filename = self.generate_tensor_filename(field_name, new_last_tensor_ind) new_last_tensor.save(filename, compressed=True) if not new_last_tensor.has_data: os.remove(filename) new_last_tensor.reset() # update num datapoints if self._num_datapoints - 1 - num_to_delete >= 0: self._num_datapoints = new_num_datapoints else: self._num_datapoints = 0 # handle deleted tensor self._num_tensors = new_last_tensor_ind + 1 if dataset_empty: self._num_tensors = 0
[ "def", "delete_last", "(", "self", ",", "num_to_delete", "=", "1", ")", ":", "# check access level", "if", "self", ".", "_access_mode", "==", "READ_ONLY_ACCESS", ":", "raise", "ValueError", "(", "'Cannot delete datapoints with read-only access'", ")", "# check num to delete", "if", "num_to_delete", ">", "self", ".", "_num_datapoints", ":", "raise", "ValueError", "(", "'Cannot remove more than the number of datapoints in the dataset'", ")", "# compute indices", "last_datapoint_ind", "=", "self", ".", "_num_datapoints", "-", "1", "last_tensor_ind", "=", "last_datapoint_ind", "//", "self", ".", "_datapoints_per_file", "new_last_datapoint_ind", "=", "self", ".", "_num_datapoints", "-", "1", "-", "num_to_delete", "new_num_datapoints", "=", "new_last_datapoint_ind", "+", "1", "new_last_datapoint_ind", "=", "max", "(", "new_last_datapoint_ind", ",", "0", ")", "new_last_tensor_ind", "=", "new_last_datapoint_ind", "//", "self", ".", "_datapoints_per_file", "# delete all but the last tensor", "delete_tensor_ind", "=", "range", "(", "new_last_tensor_ind", "+", "1", ",", "last_tensor_ind", "+", "1", ")", "for", "tensor_ind", "in", "delete_tensor_ind", ":", "for", "field_name", "in", "self", ".", "field_names", ":", "filename", "=", "self", ".", "generate_tensor_filename", "(", "field_name", ",", "tensor_ind", ")", "os", ".", "remove", "(", "filename", ")", "# update last tensor", "dataset_empty", "=", "False", "target_tensor_size", "=", "new_num_datapoints", "%", "self", ".", "_datapoints_per_file", "if", "target_tensor_size", "==", "0", ":", "if", "new_num_datapoints", ">", "0", ":", "target_tensor_size", "=", "self", ".", "_datapoints_per_file", "else", ":", "dataset_empty", "=", "True", "for", "field_name", "in", "self", ".", "field_names", ":", "new_last_tensor", "=", "self", ".", "tensor", "(", "field_name", ",", "new_last_tensor_ind", ")", "while", "new_last_tensor", ".", "size", ">", "target_tensor_size", ":", "new_last_tensor", ".", "delete_last", "(", ")", "filename", "=", "self", ".", "generate_tensor_filename", "(", "field_name", ",", "new_last_tensor_ind", ")", "new_last_tensor", ".", "save", "(", "filename", ",", "compressed", "=", "True", ")", "if", "not", "new_last_tensor", ".", "has_data", ":", "os", ".", "remove", "(", "filename", ")", "new_last_tensor", ".", "reset", "(", ")", "# update num datapoints ", "if", "self", ".", "_num_datapoints", "-", "1", "-", "num_to_delete", ">=", "0", ":", "self", ".", "_num_datapoints", "=", "new_num_datapoints", "else", ":", "self", ".", "_num_datapoints", "=", "0", "# handle deleted tensor", "self", ".", "_num_tensors", "=", "new_last_tensor_ind", "+", "1", "if", "dataset_empty", ":", "self", ".", "_num_tensors", "=", "0" ]
Deletes the last N datapoints from the dataset. Parameters ---------- num_to_delete : int the number of datapoints to remove from the end of the dataset
[ "Deletes", "the", "last", "N", "datapoints", "from", "the", "dataset", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L617-L677
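A truncation sketch, continuing the hypothetical dataset above: trailing tensor files are deleted outright, and the new last tensor is shrunk and rewritten (or removed if it empties).

# drop the 10 most recent datapoints
dataset.delete_last(num_to_delete=10)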
9,658
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.write
def write(self): """ Writes all tensors to the next file number. """ # write the next file for all fields for field_name in self.field_names: filename = self.generate_tensor_filename(field_name, self._num_tensors-1) self._tensors[field_name].save(filename, compressed=True) # write the current metadata to file json.dump(self._metadata, open(self.metadata_filename, 'w'), indent=JSON_INDENT, sort_keys=True) # update self._has_unsaved_data = False
python
def write(self): """ Writes all tensors to the next file number. """ # write the next file for all fields for field_name in self.field_names: filename = self.generate_tensor_filename(field_name, self._num_tensors-1) self._tensors[field_name].save(filename, compressed=True) # write the current metadata to file json.dump(self._metadata, open(self.metadata_filename, 'w'), indent=JSON_INDENT, sort_keys=True) # update self._has_unsaved_data = False
[ "def", "write", "(", "self", ")", ":", "# write the next file for all fields", "for", "field_name", "in", "self", ".", "field_names", ":", "filename", "=", "self", ".", "generate_tensor_filename", "(", "field_name", ",", "self", ".", "_num_tensors", "-", "1", ")", "self", ".", "_tensors", "[", "field_name", "]", ".", "save", "(", "filename", ",", "compressed", "=", "True", ")", "# write the current metadata to file", "json", ".", "dump", "(", "self", ".", "_metadata", ",", "open", "(", "self", ".", "metadata_filename", ",", "'w'", ")", ",", "indent", "=", "JSON_INDENT", ",", "sort_keys", "=", "True", ")", "# update", "self", ".", "_has_unsaved_data", "=", "False" ]
Writes all tensors to the next file number.
[ "Writes", "all", "tensors", "to", "the", "next", "file", "number", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L696-L709
9,659
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.open
def open(dataset_dir, access_mode=READ_ONLY_ACCESS): """ Opens a tensor dataset. """ # check access mode if access_mode == WRITE_ACCESS: raise ValueError('Cannot open a dataset with write-only access') # read config try: # json load config_filename = os.path.join(dataset_dir, 'config.json') config = json.load(open(config_filename, 'r')) except (IOError, ValueError): # YAML load config_filename = os.path.join(dataset_dir, 'config.yaml') config = YamlConfig(config_filename) # open dataset dataset = TensorDataset(dataset_dir, config, access_mode=access_mode) return dataset
python
def open(dataset_dir, access_mode=READ_ONLY_ACCESS): """ Opens a tensor dataset. """ # check access mode if access_mode == WRITE_ACCESS: raise ValueError('Cannot open a dataset with write-only access') # read config try: # json load config_filename = os.path.join(dataset_dir, 'config.json') config = json.load(open(config_filename, 'r')) except (IOError, ValueError): # YAML load config_filename = os.path.join(dataset_dir, 'config.yaml') config = YamlConfig(config_filename) # open dataset dataset = TensorDataset(dataset_dir, config, access_mode=access_mode) return dataset
[ "def", "open", "(", "dataset_dir", ",", "access_mode", "=", "READ_ONLY_ACCESS", ")", ":", "# check access mode", "if", "access_mode", "==", "WRITE_ACCESS", ":", "raise", "ValueError", "(", "'Cannot open a dataset with write-only access'", ")", "# read config", "try", ":", "# json load", "config_filename", "=", "os", ".", "path", ".", "join", "(", "dataset_dir", ",", "'config.json'", ")", "config", "=", "json", ".", "load", "(", "open", "(", "config_filename", ",", "'r'", ")", ")", "except", ":", "# YAML load", "config_filename", "=", "os", ".", "path", ".", "join", "(", "dataset_dir", ",", "'config.yaml'", ")", "config", "=", "YamlConfig", "(", "config_filename", ")", "# open dataset", "dataset", "=", "TensorDataset", "(", "dataset_dir", ",", "config", ",", "access_mode", "=", "access_mode", ")", "return", "dataset" ]
Opens a tensor dataset.
[ "Opens", "a", "tensor", "dataset", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L716-L734
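A read-side sketch; open() defaults to read-only access and falls back from config.json to config.yaml (dataset path hypothetical):

from autolab_core.tensor_dataset import TensorDataset

dataset = TensorDataset.open('./my_dataset')   # read-only by default
dp = dataset.datapoint(0)
# dataset.add(...) would raise ValueError under read-only access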
9,660
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.split
def split(self, split_name): """ Return the training and validation indices for the requested split. Parameters ---------- split_name : str name of the split Returns ------- :obj:`numpy.ndarray` array of training indices in the global dataset :obj:`numpy.ndarray` array of validation indices in the global dataset dict metadata about the split """ if not self.has_split(split_name): raise ValueError('Split %s does not exist!' %(split_name)) metadata_filename = self.split_metadata_filename(split_name) train_filename = self.train_indices_filename(split_name) val_filename = self.val_indices_filename(split_name) metadata = json.load(open(metadata_filename, 'r')) train_indices = np.load(train_filename)['arr_0'] val_indices = np.load(val_filename)['arr_0'] return train_indices, val_indices, metadata
python
def split(self, split_name): """ Return the training and validation indices for the requested split. Parameters ---------- split_name : str name of the split Returns ------- :obj:`numpy.ndarray` array of training indices in the global dataset :obj:`numpy.ndarray` array of validation indices in the global dataset dict metadata about the split """ if not self.has_split(split_name): raise ValueError('Split %s does not exist!' %(split_name)) metadata_filename = self.split_metadata_filename(split_name) train_filename = self.train_indices_filename(split_name) val_filename = self.val_indices_filename(split_name) metadata = json.load(open(metadata_filename, 'r')) train_indices = np.load(train_filename)['arr_0'] val_indices = np.load(val_filename)['arr_0'] return train_indices, val_indices, metadata
[ "def", "split", "(", "self", ",", "split_name", ")", ":", "if", "not", "self", ".", "has_split", "(", "split_name", ")", ":", "raise", "ValueError", "(", "'Split %s does not exist!'", "%", "(", "split_name", ")", ")", "metadata_filename", "=", "self", ".", "split_metadata_filename", "(", "split_name", ")", "train_filename", "=", "self", ".", "train_indices_filename", "(", "split_name", ")", "val_filename", "=", "self", ".", "val_indices_filename", "(", "split_name", ")", "metadata", "=", "json", ".", "load", "(", "open", "(", "metadata_filename", ",", "'r'", ")", ")", "train_indices", "=", "np", ".", "load", "(", "train_filename", ")", "[", "'arr_0'", "]", "val_indices", "=", "np", ".", "load", "(", "val_filename", ")", "[", "'arr_0'", "]", "return", "train_indices", ",", "val_indices", ",", "metadata" ]
Return the training and validation indices for the requested split. Parameters ---------- split_name : str name of the split Returns ------- :obj:`numpy.ndarray` array of training indices in the global dataset :obj:`numpy.ndarray` array of validation indices in the global dataset dict metadata about the split
[ "Return", "the", "training", "and", "validation", "indices", "for", "the", "requested", "split", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L736-L762
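Loading a previously saved split (the split name is hypothetical); the returned index arrays are global datapoint indices, so they can be fed straight to datapoint():

train_indices, val_indices, metadata = dataset.split('image_wise')
first_train_dp = dataset.datapoint(int(train_indices[0]))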
9,661
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
TensorDataset.delete_split
def delete_split(self, split_name): """ Delete a split of the dataset. Parameters ---------- split_name : str name of the split to delete """ if self.has_split(split_name): shutil.rmtree(os.path.join(self.split_dir, split_name))
python
def delete_split(self, split_name): """ Delete a split of the dataset. Parameters ---------- split_name : str name of the split to delete """ if self.has_split(split_name): shutil.rmtree(os.path.join(self.split_dir, split_name))
[ "def", "delete_split", "(", "self", ",", "split_name", ")", ":", "if", "self", ".", "has_split", "(", "split_name", ")", ":", "shutil", ".", "rmtree", "(", "os", ".", "path", ".", "join", "(", "self", ".", "split_dir", ",", "split_name", ")", ")" ]
Delete a split of the dataset. Parameters ---------- split_name : str name of the split to delete
[ "Delete", "a", "split", "of", "the", "dataset", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L878-L887
9,662
BerkeleyAutomation/autolab_core
autolab_core/yaml_config.py
YamlConfig._load_config
def _load_config(self, filename): """Loads a yaml configuration file from the given filename. Parameters ---------- filename : :obj:`str` The filename of the .yaml file that contains the configuration. """ # Read entire file for metadata fh = open(filename, 'r') self.file_contents = fh.read() # Replace !include directives with content config_dir = os.path.split(filename)[0] include_re = re.compile(r'^(.*)!include\s+(.*)$', re.MULTILINE) def recursive_load(matchobj, path): first_spacing = matchobj.group(1) other_spacing = first_spacing.replace('-', ' ') fname = os.path.join(path, matchobj.group(2)) new_path, _ = os.path.split(fname) new_path = os.path.realpath(new_path) text = '' with open(fname) as f: text = f.read() text = first_spacing + text text = text.replace('\n', '\n{}'.format(other_spacing), text.count('\n') - 1) return re.sub(include_re, lambda m : recursive_load(m, new_path), text) self.file_contents = re.sub(include_re, lambda m : recursive_load(m, config_dir), self.file_contents) # Read in dictionary self.config = self.__ordered_load(self.file_contents) # Convert functions of other params to true expressions for k in self.config.keys(): self.config[k] = YamlConfig.__convert_key(self.config[k]) fh.close() # Load core configuration return self.config
python
def _load_config(self, filename): """Loads a yaml configuration file from the given filename. Parameters ---------- filename : :obj:`str` The filename of the .yaml file that contains the configuration. """ # Read entire file for metadata fh = open(filename, 'r') self.file_contents = fh.read() # Replace !include directives with content config_dir = os.path.split(filename)[0] include_re = re.compile(r'^(.*)!include\s+(.*)$', re.MULTILINE) def recursive_load(matchobj, path): first_spacing = matchobj.group(1) other_spacing = first_spacing.replace('-', ' ') fname = os.path.join(path, matchobj.group(2)) new_path, _ = os.path.split(fname) new_path = os.path.realpath(new_path) text = '' with open(fname) as f: text = f.read() text = first_spacing + text text = text.replace('\n', '\n{}'.format(other_spacing), text.count('\n') - 1) return re.sub(include_re, lambda m : recursive_load(m, new_path), text) self.file_contents = re.sub(include_re, lambda m : recursive_load(m, config_dir), self.file_contents) # Read in dictionary self.config = self.__ordered_load(self.file_contents) # Convert functions of other params to true expressions for k in self.config.keys(): self.config[k] = YamlConfig.__convert_key(self.config[k]) fh.close() # Load core configuration return self.config
[ "def", "_load_config", "(", "self", ",", "filename", ")", ":", "# Read entire file for metadata", "fh", "=", "open", "(", "filename", ",", "'r'", ")", "self", ".", "file_contents", "=", "fh", ".", "read", "(", ")", "# Replace !include directives with content", "config_dir", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "[", "0", "]", "include_re", "=", "re", ".", "compile", "(", "'^(.*)!include\\s+(.*)$'", ",", "re", ".", "MULTILINE", ")", "def", "recursive_load", "(", "matchobj", ",", "path", ")", ":", "first_spacing", "=", "matchobj", ".", "group", "(", "1", ")", "other_spacing", "=", "first_spacing", ".", "replace", "(", "'-'", ",", "' '", ")", "fname", "=", "os", ".", "path", ".", "join", "(", "path", ",", "matchobj", ".", "group", "(", "2", ")", ")", "new_path", ",", "_", "=", "os", ".", "path", ".", "split", "(", "fname", ")", "new_path", "=", "os", ".", "path", ".", "realpath", "(", "new_path", ")", "text", "=", "''", "with", "open", "(", "fname", ")", "as", "f", ":", "text", "=", "f", ".", "read", "(", ")", "text", "=", "first_spacing", "+", "text", "text", "=", "text", ".", "replace", "(", "'\\n'", ",", "'\\n{}'", ".", "format", "(", "other_spacing", ")", ",", "text", ".", "count", "(", "'\\n'", ")", "-", "1", ")", "return", "re", ".", "sub", "(", "include_re", ",", "lambda", "m", ":", "recursive_load", "(", "m", ",", "new_path", ")", ",", "text", ")", "# def include_repl(matchobj):", "# first_spacing = matchobj.group(1)", "# other_spacing = first_spacing.replace('-', ' ')", "# fname = os.path.join(config_dir, matchobj.group(2))", "# text = ''", "# with open(fname) as f:", "# text = f.read()", "# text = first_spacing + text", "# text = text.replace('\\n', '\\n{}'.format(other_spacing), text.count('\\n') - 1)", "# return text", "self", ".", "file_contents", "=", "re", ".", "sub", "(", "include_re", ",", "lambda", "m", ":", "recursive_load", "(", "m", ",", "config_dir", ")", ",", "self", ".", "file_contents", ")", "# Read in dictionary", "self", ".", "config", "=", "self", ".", "__ordered_load", "(", "self", ".", "file_contents", ")", "# Convert functions of other params to true expressions", "for", "k", "in", "self", ".", "config", ".", "keys", "(", ")", ":", "self", ".", "config", "[", "k", "]", "=", "YamlConfig", ".", "__convert_key", "(", "self", ".", "config", "[", "k", "]", ")", "fh", ".", "close", "(", ")", "# Load core configuration", "return", "self", ".", "config" ]
Loads a yaml configuration file from the given filename. Parameters ---------- filename : :obj:`str` The filename of the .yaml file that contains the configuration.
[ "Loads", "a", "yaml", "configuration", "file", "from", "the", "given", "filename", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/yaml_config.py#L75-L126
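The preprocessing splices included files into the raw text before YAML parsing; because '-' in the leading text is replaced by a space on continuation lines, list items are the natural place for !include. A hypothetical pair of files, with the substitution that the regex above produces:

# sensors.yaml (hypothetical)
sensors:
- !include camera.yaml

# camera.yaml (hypothetical)
frame: camera
focal_length: 525.0

# sensors.yaml is then parsed as if it read:
sensors:
- frame: camera
  focal_length: 525.0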
9,663
BerkeleyAutomation/autolab_core
autolab_core/yaml_config.py
YamlConfig.__convert_key
def __convert_key(expression): """Converts keys in YAML that reference other keys. """ if type(expression) is str and len(expression) > 2 and expression[1] == '!': expression = eval(expression[2:-1]) return expression
python
def __convert_key(expression): """Converts keys in YAML that reference other keys. """ if type(expression) is str and len(expression) > 2 and expression[1] == '!': expression = eval(expression[2:-1]) return expression
[ "def", "__convert_key", "(", "expression", ")", ":", "if", "type", "(", "expression", ")", "is", "str", "and", "len", "(", "expression", ")", ">", "2", "and", "expression", "[", "1", "]", "==", "'!'", ":", "expression", "=", "eval", "(", "expression", "[", "2", ":", "-", "1", "]", ")", "return", "expression" ]
Converts keys in YAML that reference other keys.
[ "Converts", "keys", "in", "YAML", "that", "reference", "other", "keys", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/yaml_config.py#L129-L134
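The marker convention isn't documented in this record: a string value whose second character is '!' is treated as a Python expression spanning characters 2..-2 and passed to eval. A standalone mirror of that slicing logic (the '(!...)' wrapper shown here is an assumption, not a documented format):

def convert_key(expression):
    # second char '!' flags an expression; chars 2..-2 are eval'd
    if type(expression) is str and len(expression) > 2 and expression[1] == '!':
        expression = eval(expression[2:-1])
    return expression

print(convert_key('(!2 * 3.14)'))    # -> 6.28
print(convert_key('plain string'))   # returned unchanged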
9,664
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
ClassificationResult.make_summary_table
def make_summary_table(train_result, val_result, plot=True, save_dir=None, prepend="", save=False): """ Makes a matplotlib table object with relevant data. Thanks to Lucas Manuelli for the contribution. Parameters ---------- train_result: ClassificationResult result on train split val_result: ClassificationResult result on validation split save_dir: str path pointing to where to save results Returns ------- dict dict with stored values, can be saved to a yaml file :obj:`matplotlib.pyplot.Figure` a figure containing the table """ table_key_list = ['error_rate', 'recall_at_99_precision', 'average_precision', 'precision', 'recall'] num_fields = len(table_key_list) import matplotlib.pyplot as plt ax = plt.subplot(111, frame_on=False) ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) data = np.zeros([num_fields, 2]) data_dict = dict() names = ['train', 'validation'] for name, result in zip(names, [train_result, val_result]): data_dict[name] = {} data_dict[name]['error_rate'] = result.error_rate data_dict[name]['average_precision'] = result.ap_score * 100 data_dict[name]['precision'] = result.precision * 100 data_dict[name]['recall'] = result.recall * 100 precision_array, recall_array, _ = result.precision_recall_curve() recall_at_99_precision = recall_array[np.argmax(precision_array > 0.99)] * 100 # to put it in percentage terms data_dict[name]['recall_at_99_precision'] = recall_at_99_precision for i, key in enumerate(table_key_list): data_dict[name][key] = float("{0:.2f}".format(data_dict[name][key])) j = names.index(name) data[i, j] = data_dict[name][key] table = plt.table(cellText=data, rowLabels=table_key_list, colLabels=names) fig = plt.gcf() fig.subplots_adjust(bottom=0.15) if plot: plt.show() # save the results if save_dir is not None and save: fig_filename = os.path.join(save_dir, prepend + 'summary.png') yaml_filename = os.path.join(save_dir, prepend + 'summary.yaml') yaml.dump(data_dict, open(yaml_filename, 'w'), default_flow_style=False) fig.savefig(fig_filename, bbox_inches="tight") return data_dict, fig
python
def make_summary_table(train_result, val_result, plot=True, save_dir=None, prepend="", save=False): """ Makes a matplotlib table object with relevant data. Thanks to Lucas Manuelli for the contribution. Parameters ---------- train_result: ClassificationResult result on train split val_result: ClassificationResult result on validation split save_dir: str path pointing to where to save results Returns ------- dict dict with stored values, can be saved to a yaml file :obj:`matplotlib.pyplot.Figure` a figure containing the table """ table_key_list = ['error_rate', 'recall_at_99_precision', 'average_precision', 'precision', 'recall'] num_fields = len(table_key_list) import matplotlib.pyplot as plt ax = plt.subplot(111, frame_on=False) ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) data = np.zeros([num_fields, 2]) data_dict = dict() names = ['train', 'validation'] for name, result in zip(names, [train_result, val_result]): data_dict[name] = {} data_dict[name]['error_rate'] = result.error_rate data_dict[name]['average_precision'] = result.ap_score * 100 data_dict[name]['precision'] = result.precision * 100 data_dict[name]['recall'] = result.recall * 100 precision_array, recall_array, _ = result.precision_recall_curve() recall_at_99_precision = recall_array[np.argmax(precision_array > 0.99)] * 100 # to put it in percentage terms data_dict[name]['recall_at_99_precision'] = recall_at_99_precision for i, key in enumerate(table_key_list): data_dict[name][key] = float("{0:.2f}".format(data_dict[name][key])) j = names.index(name) data[i, j] = data_dict[name][key] table = plt.table(cellText=data, rowLabels=table_key_list, colLabels=names) fig = plt.gcf() fig.subplots_adjust(bottom=0.15) if plot: plt.show() # save the results if save_dir is not None and save: fig_filename = os.path.join(save_dir, prepend + 'summary.png') yaml_filename = os.path.join(save_dir, prepend + 'summary.yaml') yaml.dump(data_dict, open(yaml_filename, 'w'), default_flow_style=False) fig.savefig(fig_filename, bbox_inches="tight") return data_dict, fig
[ "def", "make_summary_table", "(", "train_result", ",", "val_result", ",", "plot", "=", "True", ",", "save_dir", "=", "None", ",", "prepend", "=", "\"\"", ",", "save", "=", "False", ")", ":", "table_key_list", "=", "[", "'error_rate'", ",", "'recall_at_99_precision'", ",", "'average_precision'", ",", "'precision'", ",", "'recall'", "]", "num_fields", "=", "len", "(", "table_key_list", ")", "import", "matplotlib", ".", "pyplot", "as", "plt", "ax", "=", "plt", ".", "subplot", "(", "111", ",", "frame_on", "=", "False", ")", "ax", ".", "xaxis", ".", "set_visible", "(", "False", ")", "ax", ".", "yaxis", ".", "set_visible", "(", "False", ")", "data", "=", "np", ".", "zeros", "(", "[", "num_fields", ",", "2", "]", ")", "data_dict", "=", "dict", "(", ")", "names", "=", "[", "'train'", ",", "'validation'", "]", "for", "name", ",", "result", "in", "zip", "(", "names", ",", "[", "train_result", ",", "val_result", "]", ")", ":", "data_dict", "[", "name", "]", "=", "{", "}", "data_dict", "[", "name", "]", "[", "'error_rate'", "]", "=", "result", ".", "error_rate", "data_dict", "[", "name", "]", "[", "'average_precision'", "]", "=", "result", ".", "ap_score", "*", "100", "data_dict", "[", "name", "]", "[", "'precision'", "]", "=", "result", ".", "precision", "*", "100", "data_dict", "[", "name", "]", "[", "'recall'", "]", "=", "result", ".", "recall", "*", "100", "precision_array", ",", "recall_array", ",", "_", "=", "result", ".", "precision_recall_curve", "(", ")", "recall_at_99_precision", "=", "recall_array", "[", "np", ".", "argmax", "(", "precision_array", ">", "0.99", ")", "]", "*", "100", "# to put it in percentage terms", "data_dict", "[", "name", "]", "[", "'recall_at_99_precision'", "]", "=", "recall_at_99_precision", "for", "i", ",", "key", "in", "enumerate", "(", "table_key_list", ")", ":", "data_dict", "[", "name", "]", "[", "key", "]", "=", "float", "(", "\"{0:.2f}\"", ".", "format", "(", "data_dict", "[", "name", "]", "[", "key", "]", ")", ")", "j", "=", "names", ".", "index", "(", "name", ")", "data", "[", "i", ",", "j", "]", "=", "data_dict", "[", "name", "]", "[", "key", "]", "table", "=", "plt", ".", "table", "(", "cellText", "=", "data", ",", "rowLabels", "=", "table_key_list", ",", "colLabels", "=", "names", ")", "fig", "=", "plt", ".", "gcf", "(", ")", "fig", ".", "subplots_adjust", "(", "bottom", "=", "0.15", ")", "if", "plot", ":", "plt", ".", "show", "(", ")", "# save the results", "if", "save_dir", "is", "not", "None", "and", "save", ":", "fig_filename", "=", "os", ".", "path", ".", "join", "(", "save_dir", ",", "prepend", "+", "'summary.png'", ")", "yaml_filename", "=", "os", ".", "path", ".", "join", "(", "save_dir", ",", "prepend", "+", "'summary.yaml'", ")", "yaml", ".", "dump", "(", "data_dict", ",", "open", "(", "yaml_filename", ",", "'w'", ")", ",", "default_flow_style", "=", "False", ")", "fig", ".", "savefig", "(", "fig_filename", ",", "bbox_inches", "=", "\"tight\"", ")", "return", "data_dict", ",", "fig" ]
Makes a matplotlib table object with relevant data. Thanks to Lucas Manuelli for the contribution. Parameters ---------- train_result: ClassificationResult result on train split val_result: ClassificationResult result on validation split save_dir: str path pointing to where to save results Returns ------- dict dict with stored values, can be saved to a yaml file :obj:`matplotlib.pyplot.Figure` a figure containing the table
[ "Makes", "a", "matplotlib", "table", "object", "with", "relevant", "data", ".", "Thanks", "to", "Lucas", "Manuelli", "for", "the", "contribution", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L236-L304
9,665
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
BinaryClassificationResult.app_score
def app_score(self): """ Computes the area under the app curve. """ # compute curve precisions, pct_pred_pos, taus = self.precision_pct_pred_pos_curve(interval=False) # compute area app = 0 total = 0 for k in range(len(precisions)-1): # read cur data cur_prec = precisions[k] cur_pp = pct_pred_pos[k] cur_tau = taus[k] # read next data next_prec = precisions[k+1] next_pp = pct_pred_pos[k+1] next_tau = taus[k+1] # approximate with rectangles mid_prec = (cur_prec + next_prec) / 2.0 width_pp = np.abs(next_pp - cur_pp) app += mid_prec * width_pp total += width_pp return app
python
def app_score(self): """ Computes the area under the app curve. """ # compute curve precisions, pct_pred_pos, taus = self.precision_pct_pred_pos_curve(interval=False) # compute area app = 0 total = 0 for k in range(len(precisions)-1): # read cur data cur_prec = precisions[k] cur_pp = pct_pred_pos[k] cur_tau = taus[k] # read next data next_prec = precisions[k+1] next_pp = pct_pred_pos[k+1] next_tau = taus[k+1] # approximate with rectangles mid_prec = (cur_prec + next_prec) / 2.0 width_pp = np.abs(next_pp - cur_pp) app += mid_prec * width_pp total += width_pp return app
[ "def", "app_score", "(", "self", ")", ":", "# compute curve", "precisions", ",", "pct_pred_pos", ",", "taus", "=", "self", ".", "precision_pct_pred_pos_curve", "(", "interval", "=", "False", ")", "# compute area", "app", "=", "0", "total", "=", "0", "for", "k", "in", "range", "(", "len", "(", "precisions", ")", "-", "1", ")", ":", "# read cur data", "cur_prec", "=", "precisions", "[", "k", "]", "cur_pp", "=", "pct_pred_pos", "[", "k", "]", "cur_tau", "=", "taus", "[", "k", "]", "# read next data", "next_prec", "=", "precisions", "[", "k", "+", "1", "]", "next_pp", "=", "pct_pred_pos", "[", "k", "+", "1", "]", "next_tau", "=", "taus", "[", "k", "+", "1", "]", "# approximate with rectangles", "mid_prec", "=", "(", "cur_prec", "+", "next_prec", ")", "/", "2.0", "width_pp", "=", "np", ".", "abs", "(", "next_pp", "-", "cur_pp", ")", "app", "+=", "mid_prec", "*", "width_pp", "total", "+=", "width_pp", "return", "app" ]
Computes the area under the app curve.
[ "Computes", "the", "area", "under", "the", "app", "curve", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L467-L492
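The loop above is a midpoint-rectangle (equivalently, trapezoidal) approximation of the area under the precision vs. percent-predicted-positive curve. A standalone numeric check on a toy curve:

import numpy as np

precisions   = np.array([1.00, 0.95, 0.90, 0.80])
pct_pred_pos = np.array([0.00, 0.25, 0.50, 1.00])

app = 0.0
for k in range(len(precisions) - 1):
    mid_prec = (precisions[k] + precisions[k + 1]) / 2.0   # rectangle height
    width = abs(pct_pred_pos[k + 1] - pct_pred_pos[k])     # rectangle width
    app += mid_prec * width
print(app)   # 0.9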
9,666
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
BinaryClassificationResult.accuracy_curve
def accuracy_curve(self, delta_tau=0.01): """ Computes the relationship between probability threshold and classification accuracy. """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values scores = [] taus = [] tau = 0 for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau scores.append(self.accuracy) taus.append(tau) # update threshold tau = sorted_probs[k] # add last datapoint tau = 1.0 self.threshold = tau scores.append(self.accuracy) taus.append(tau) self.threshold = orig_thresh return scores, taus
python
def accuracy_curve(self, delta_tau=0.01): """ Computes the relationship between probability threshold and classification accuracy. """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values scores = [] taus = [] tau = 0 for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau scores.append(self.accuracy) taus.append(tau) # update threshold tau = sorted_probs[k] # add last datapoint tau = 1.0 self.threshold = tau scores.append(self.accuracy) taus.append(tau) self.threshold = orig_thresh return scores, taus
[ "def", "accuracy_curve", "(", "self", ",", "delta_tau", "=", "0.01", ")", ":", "# compute thresholds based on the sorted probabilities", "orig_thresh", "=", "self", ".", "threshold", "sorted_labels", ",", "sorted_probs", "=", "self", ".", "sorted_values", "scores", "=", "[", "]", "taus", "=", "[", "]", "tau", "=", "0", "for", "k", "in", "range", "(", "len", "(", "sorted_labels", ")", ")", ":", "# compute new accuracy", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "accuracy", ")", "taus", ".", "append", "(", "tau", ")", "# update threshold", "tau", "=", "sorted_probs", "[", "k", "]", "# add last datapoint", "tau", "=", "1.0", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "accuracy", ")", "taus", ".", "append", "(", "tau", ")", "self", ".", "threshold", "=", "orig_thresh", "return", "scores", ",", "taus" ]
Computes the relationship between probability threshold and classification accuracy.
[ "Computes", "the", "relationship", "between", "probability", "threshold", "and", "classification", "accuracy", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L515-L541
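accuracy_curve and the f1_curve and phi_coef_curve records below all share this sweep: the threshold visits 0, every sorted predicted probability except the last, then 1.0, temporarily mutating self.threshold to recompute the metric (note that the delta_tau argument is unused by these methods). A standalone sketch of the same sweep for accuracy, assuming a datapoint is called positive when its probability is at least tau:

import numpy as np

labels = np.array([0, 0, 1, 1])
probs  = np.array([0.10, 0.40, 0.35, 0.80])

sorted_probs = np.sort(probs)
taus = np.concatenate([[0.0], sorted_probs[:-1], [1.0]])
for tau in taus:
    pred = (probs >= tau).astype(int)
    print('tau=%.2f accuracy=%.2f' % (tau, np.mean(pred == labels)))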
9,667
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
BinaryClassificationResult.f1_curve
def f1_curve(self, delta_tau=0.01): """ Computes the relationship between probability threshold and classification F1 score. """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values scores = [] taus = [] tau = 0 for k in range(len(sorted_labels)): # compute new F1 score self.threshold = tau scores.append(self.f1_score) taus.append(tau) # update threshold tau = sorted_probs[k] # add last datapoint tau = 1.0 self.threshold = tau scores.append(self.f1_score) taus.append(tau) self.threshold = orig_thresh return scores, taus
python
def f1_curve(self, delta_tau=0.01): """ Computes the relationship between probability threshold and classification F1 score. """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values scores = [] taus = [] tau = 0 for k in range(len(sorted_labels)): # compute new F1 score self.threshold = tau scores.append(self.f1_score) taus.append(tau) # update threshold tau = sorted_probs[k] # add last datapoint tau = 1.0 self.threshold = tau scores.append(self.f1_score) taus.append(tau) self.threshold = orig_thresh return scores, taus
[ "def", "f1_curve", "(", "self", ",", "delta_tau", "=", "0.01", ")", ":", "# compute thresholds based on the sorted probabilities", "orig_thresh", "=", "self", ".", "threshold", "sorted_labels", ",", "sorted_probs", "=", "self", ".", "sorted_values", "scores", "=", "[", "]", "taus", "=", "[", "]", "tau", "=", "0", "for", "k", "in", "range", "(", "len", "(", "sorted_labels", ")", ")", ":", "# compute new accuracy", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "f1_score", ")", "taus", ".", "append", "(", "tau", ")", "# update threshold", "tau", "=", "sorted_probs", "[", "k", "]", "# add last datapoint", "tau", "=", "1.0", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "f1_score", ")", "taus", ".", "append", "(", "tau", ")", "self", ".", "threshold", "=", "orig_thresh", "return", "scores", ",", "taus" ]
Computes the relationship between probability threshold and classification F1 score.
[ "Computes", "the", "relationship", "between", "probability", "threshold", "and", "classification", "F1", "score", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L599-L625
9,668
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
BinaryClassificationResult.phi_coef_curve
def phi_coef_curve(self, delta_tau=0.01): """ Computes the relationship between probability threshold and classification phi coefficient. """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values scores = [] taus = [] tau = 0 for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau scores.append(self.phi_coef) taus.append(tau) # update threshold tau = sorted_probs[k] # add last datapoint tau = 1.0 self.threshold = tau scores.append(self.phi_coef) taus.append(tau) self.threshold = orig_thresh return scores, taus
python
def phi_coef_curve(self, delta_tau=0.01): """ Computes the relationship between probability threshold and classification phi coefficient. """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values scores = [] taus = [] tau = 0 for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau scores.append(self.phi_coef) taus.append(tau) # update threshold tau = sorted_probs[k] # add last datapoint tau = 1.0 self.threshold = tau scores.append(self.phi_coef) taus.append(tau) self.threshold = orig_thresh return scores, taus
[ "def", "phi_coef_curve", "(", "self", ",", "delta_tau", "=", "0.01", ")", ":", "# compute thresholds based on the sorted probabilities", "orig_thresh", "=", "self", ".", "threshold", "sorted_labels", ",", "sorted_probs", "=", "self", ".", "sorted_values", "scores", "=", "[", "]", "taus", "=", "[", "]", "tau", "=", "0", "for", "k", "in", "range", "(", "len", "(", "sorted_labels", ")", ")", ":", "# compute new accuracy", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "phi_coef", ")", "taus", ".", "append", "(", "tau", ")", "# update threshold", "tau", "=", "sorted_probs", "[", "k", "]", "# add last datapoint", "tau", "=", "1.0", "self", ".", "threshold", "=", "tau", "scores", ".", "append", "(", "self", ".", "phi_coef", ")", "taus", ".", "append", "(", "tau", ")", "self", ".", "threshold", "=", "orig_thresh", "return", "scores", ",", "taus" ]
Computes the relationship between probability threshold and classification phi coefficient.
[ "Computes", "the", "relationship", "between", "probability", "threshold", "and", "classification", "phi", "coefficient", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L627-L653
9,669
BerkeleyAutomation/autolab_core
autolab_core/learning_analysis.py
BinaryClassificationResult.precision_pct_pred_pos_curve
def precision_pct_pred_pos_curve(self, interval=False, delta_tau=0.001): """ Computes the relationship between precision and the percent of positively classified datapoints . """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values precisions = [] pct_pred_pos = [] taus = [] tau = 0 if not interval: for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau precisions.append(self.precision) pct_pred_pos.append(self.pct_pred_pos) taus.append(tau) # update threshold tau = sorted_probs[k] else: while tau < 1.0: # compute new accuracy self.threshold = tau precisions.append(self.precision) pct_pred_pos.append(self.pct_pred_pos) taus.append(tau) # update threshold tau += delta_tau # add last datapoint tau = 1.0 self.threshold = tau precisions.append(self.precision) pct_pred_pos.append(self.pct_pred_pos) taus.append(tau) precisions.append(1.0) pct_pred_pos.append(0.0) taus.append(1.0 + 1e-12) self.threshold = orig_thresh return precisions, pct_pred_pos, taus
python
def precision_pct_pred_pos_curve(self, interval=False, delta_tau=0.001): """ Computes the relationship between precision and the percent of positively classified datapoints . """ # compute thresholds based on the sorted probabilities orig_thresh = self.threshold sorted_labels, sorted_probs = self.sorted_values precisions = [] pct_pred_pos = [] taus = [] tau = 0 if not interval: for k in range(len(sorted_labels)): # compute new accuracy self.threshold = tau precisions.append(self.precision) pct_pred_pos.append(self.pct_pred_pos) taus.append(tau) # update threshold tau = sorted_probs[k] else: while tau < 1.0: # compute new accuracy self.threshold = tau precisions.append(self.precision) pct_pred_pos.append(self.pct_pred_pos) taus.append(tau) # update threshold tau += delta_tau # add last datapoint tau = 1.0 self.threshold = tau precisions.append(self.precision) pct_pred_pos.append(self.pct_pred_pos) taus.append(tau) precisions.append(1.0) pct_pred_pos.append(0.0) taus.append(1.0 + 1e-12) self.threshold = orig_thresh return precisions, pct_pred_pos, taus
[ "def", "precision_pct_pred_pos_curve", "(", "self", ",", "interval", "=", "False", ",", "delta_tau", "=", "0.001", ")", ":", "# compute thresholds based on the sorted probabilities", "orig_thresh", "=", "self", ".", "threshold", "sorted_labels", ",", "sorted_probs", "=", "self", ".", "sorted_values", "precisions", "=", "[", "]", "pct_pred_pos", "=", "[", "]", "taus", "=", "[", "]", "tau", "=", "0", "if", "not", "interval", ":", "for", "k", "in", "range", "(", "len", "(", "sorted_labels", ")", ")", ":", "# compute new accuracy", "self", ".", "threshold", "=", "tau", "precisions", ".", "append", "(", "self", ".", "precision", ")", "pct_pred_pos", ".", "append", "(", "self", ".", "pct_pred_pos", ")", "taus", ".", "append", "(", "tau", ")", "# update threshold", "tau", "=", "sorted_probs", "[", "k", "]", "else", ":", "while", "tau", "<", "1.0", ":", "# compute new accuracy", "self", ".", "threshold", "=", "tau", "precisions", ".", "append", "(", "self", ".", "precision", ")", "pct_pred_pos", ".", "append", "(", "self", ".", "pct_pred_pos", ")", "taus", ".", "append", "(", "tau", ")", "# update threshold", "tau", "+=", "delta_tau", "# add last datapoint", "tau", "=", "1.0", "self", ".", "threshold", "=", "tau", "precisions", ".", "append", "(", "self", ".", "precision", ")", "pct_pred_pos", ".", "append", "(", "self", ".", "pct_pred_pos", ")", "taus", ".", "append", "(", "tau", ")", "precisions", ".", "append", "(", "1.0", ")", "pct_pred_pos", ".", "append", "(", "0.0", ")", "taus", ".", "append", "(", "1.0", "+", "1e-12", ")", "self", ".", "threshold", "=", "orig_thresh", "return", "precisions", ",", "pct_pred_pos", ",", "taus" ]
Computes the relationship between precision and the percent of positively classified datapoints.
[ "Computes", "the", "relationship", "between", "precision", "and", "the", "percent", "of", "positively", "classified", "datapoints", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/learning_analysis.py#L655-L700
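This sweep also appends a synthetic closing point (precision 1.0 at zero coverage, with tau nudged just past 1) so a precision-coverage plot closes at the top-left. A standalone sketch of the interval=True branch, assuming a probs >= tau decision rule and a precision-of-1.0 convention when nothing is predicted positive:

import numpy as np

def precision_coverage_sketch(labels, probs, delta_tau=0.001):
    precisions, pct_pred_pos, taus = [], [], []
    for tau in np.arange(0.0, 1.0 + delta_tau, delta_tau):
        pred = probs >= tau                    # assumed decision rule
        n_pos = int(pred.sum())
        # assumed convention: precision is 1.0 when no point is predicted positive
        prec = float(labels[pred].mean()) if n_pos > 0 else 1.0
        precisions.append(prec)
        pct_pred_pos.append(n_pos / len(probs))
        taus.append(float(tau))
    return precisions, pct_pred_pos, taus

labels = np.array([0, 0, 1, 1])
probs = np.array([0.1, 0.4, 0.35, 0.8])
p, c, t = precision_coverage_sketch(labels, probs)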
9,670
BerkeleyAutomation/autolab_core
autolab_core/utils.py
gen_experiment_id
def gen_experiment_id(n=10): """Generate a random string with n characters. Parameters ---------- n : int The length of the string to be generated. Returns ------- :obj:`str` A string with only alphabetic characters. """ chrs = 'abcdefghijklmnopqrstuvwxyz' inds = np.random.randint(0,len(chrs), size=n) return ''.join([chrs[i] for i in inds])
python
def gen_experiment_id(n=10): """Generate a random string with n characters. Parameters ---------- n : int The length of the string to be generated. Returns ------- :obj:`str` A string with only alphabetic characters. """ chrs = 'abcdefghijklmnopqrstuvwxyz' inds = np.random.randint(0,len(chrs), size=n) return ''.join([chrs[i] for i in inds])
[ "def", "gen_experiment_id", "(", "n", "=", "10", ")", ":", "chrs", "=", "'abcdefghijklmnopqrstuvwxyz'", "inds", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "len", "(", "chrs", ")", ",", "size", "=", "n", ")", "return", "''", ".", "join", "(", "[", "chrs", "[", "i", "]", "for", "i", "in", "inds", "]", ")" ]
Generate a random string with n characters. Parameters ---------- n : int The length of the string to be generated. Returns ------- :obj:`str` A string with only alphabetic characters.
[ "Generate", "a", "random", "string", "with", "n", "characters", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L11-L26
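Usage is a one-liner; the IDs are lowercase-alphabetic and drawn from NumPy's global RNG, so they are reproducible under a seed but not suitable as security tokens:

import numpy as np
from autolab_core.utils import gen_experiment_id

np.random.seed(0)                # arbitrary seed, for reproducibility only
exp_id = gen_experiment_id(n=6)
print(exp_id)                    # a 6-character lowercase string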
9,671
BerkeleyAutomation/autolab_core
autolab_core/utils.py
histogram
def histogram(values, num_bins, bounds, normalized=True, plot=False, color='b'): """Generate a histogram plot. Parameters ---------- values : :obj:`numpy.ndarray` An array of values to put in the histogram. num_bins : int The number of equal-width bins in the histogram. bounds : :obj:`tuple` of float Two floats - a min and a max - that define the lower and upper ranges of the histogram, respectively. normalized : bool If True, the bins will show the fraction of elements they contain rather than raw counts. plot : bool If True, this function uses pyplot to plot the histogram. color : :obj:`str` The color identifier for the plotted bins. Returns ------- :obj:`tuple` of :obj:`numpy.ndarray` The values of the histogram and the bin edges as ndarrays. """ hist, bins = np.histogram(values, bins=num_bins, range=bounds) width = (bins[1] - bins[0]) if normalized: if np.sum(hist) > 0: hist = hist.astype(np.float32) / np.sum(hist) if plot: import matplotlib.pyplot as plt plt.bar(bins[:-1], hist, width=width, color=color) return hist, bins
python
def histogram(values, num_bins, bounds, normalized=True, plot=False, color='b'): """Generate a histogram plot. Parameters ---------- values : :obj:`numpy.ndarray` An array of values to put in the histogram. num_bins : int The number of equal-width bins in the histogram. bounds : :obj:`tuple` of float Two floats - a min and a max - that define the lower and upper ranges of the histogram, respectively. normalized : bool If True, the bins will show the fraction of elements they contain rather than raw counts. plot : bool If True, this function uses pyplot to plot the histogram. color : :obj:`str` The color identifier for the plotted bins. Returns ------- :obj:`tuple` of :obj:`numpy.ndarray` The values of the histogram and the bin edges as ndarrays. """ hist, bins = np.histogram(values, bins=num_bins, range=bounds) width = (bins[1] - bins[0]) if normalized: if np.sum(hist) > 0: hist = hist.astype(np.float32) / np.sum(hist) if plot: import matplotlib.pyplot as plt plt.bar(bins[:-1], hist, width=width, color=color) return hist, bins
[ "def", "histogram", "(", "values", ",", "num_bins", ",", "bounds", ",", "normalized", "=", "True", ",", "plot", "=", "False", ",", "color", "=", "'b'", ")", ":", "hist", ",", "bins", "=", "np", ".", "histogram", "(", "values", ",", "bins", "=", "num_bins", ",", "range", "=", "bounds", ")", "width", "=", "(", "bins", "[", "1", "]", "-", "bins", "[", "0", "]", ")", "if", "normalized", ":", "if", "np", ".", "sum", "(", "hist", ")", ">", "0", ":", "hist", "=", "hist", ".", "astype", "(", "np", ".", "float32", ")", "/", "np", ".", "sum", "(", "hist", ")", "if", "plot", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "plt", ".", "bar", "(", "bins", "[", ":", "-", "1", "]", ",", "hist", ",", "width", "=", "width", ",", "color", "=", "color", ")", "return", "hist", ",", "bins" ]
Generate a histogram plot. Parameters ---------- values : :obj:`numpy.ndarray` An array of values to put in the histogram. num_bins : int The number of equal-width bins in the histogram. bounds : :obj:`tuple` of float Two floats - a min and a max - that define the lower and upper ranges of the histogram, respectively. normalized : bool If True, the bins will show the fraction of elements they contain rather than raw counts. plot : bool If True, this function uses pyplot to plot the histogram. color : :obj:`str` The color identifier for the plotted bins. Returns ------- :obj:`tuple` of :obj:`numpy.ndarray` The values of the histogram and the bin edges as ndarrays.
[ "Generate", "a", "histogram", "plot", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L64-L102
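A short usage sketch; when normalized=True (the default) and at least one value falls inside bounds, the returned bin heights are fractions that sum to one:

import numpy as np
from autolab_core.utils import histogram

values = np.random.normal(size=1000)
hist, bins = histogram(values, num_bins=20, bounds=(-4.0, 4.0))
assert np.isclose(hist.sum(), 1.0)   # fractions of in-range values
assert len(bins) == len(hist) + 1    # numpy returns one more edge than bin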
9,672
BerkeleyAutomation/autolab_core
autolab_core/utils.py
skew
def skew(xi): """Return the skew-symmetric matrix that can be used to calculate cross-products with vector xi. Multiplying this matrix by a vector `v` gives the same result as `xi x v`. Parameters ---------- xi : :obj:`numpy.ndarray` of float A 3-entry vector. Returns ------- :obj:`numpy.ndarray` of float The 3x3 skew-symmetric cross product matrix for the vector. """ S = np.array([[0, -xi[2], xi[1]], [xi[2], 0, -xi[0]], [-xi[1], xi[0], 0]]) return S
python
def skew(xi): """Return the skew-symmetric matrix that can be used to calculate cross-products with vector xi. Multiplying this matrix by a vector `v` gives the same result as `xi x v`. Parameters ---------- xi : :obj:`numpy.ndarray` of float A 3-entry vector. Returns ------- :obj:`numpy.ndarray` of float The 3x3 skew-symmetric cross product matrix for the vector. """ S = np.array([[0, -xi[2], xi[1]], [xi[2], 0, -xi[0]], [-xi[1], xi[0], 0]]) return S
[ "def", "skew", "(", "xi", ")", ":", "S", "=", "np", ".", "array", "(", "[", "[", "0", ",", "-", "xi", "[", "2", "]", ",", "xi", "[", "1", "]", "]", ",", "[", "xi", "[", "2", "]", ",", "0", ",", "-", "xi", "[", "0", "]", "]", ",", "[", "-", "xi", "[", "1", "]", ",", "xi", "[", "0", "]", ",", "0", "]", "]", ")", "return", "S" ]
Return the skew-symmetric matrix that can be used to calculate cross-products with vector xi. Multiplying this matrix by a vector `v` gives the same result as `xi x v`. Parameters ---------- xi : :obj:`numpy.ndarray` of float A 3-entry vector. Returns ------- :obj:`numpy.ndarray` of float The 3x3 skew-symmetric cross product matrix for the vector.
[ "Return", "the", "skew", "-", "symmetric", "matrix", "that", "can", "be", "used", "to", "calculate", "cross", "-", "products", "with", "vector", "xi", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L104-L124
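A quick sanity check that skew(a) reproduces the cross product as a matrix multiply:

import numpy as np
from autolab_core.utils import skew

a = np.array([1.0, 2.0, 3.0])
b = np.array([-2.0, 0.5, 4.0])
# multiplying the skew matrix by b is the same as the cross product a x b
assert np.allclose(skew(a).dot(b), np.cross(a, b))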
9,673
BerkeleyAutomation/autolab_core
autolab_core/utils.py
deskew
def deskew(S): """Converts a skew-symmetric cross-product matrix to its corresponding vector. Only works for 3x3 matrices. Parameters ---------- S : :obj:`numpy.ndarray` of float A 3x3 skew-symmetric matrix. Returns ------- :obj:`numpy.ndarray` of float A 3-entry vector that corresponds to the given cross product matrix. """ x = np.zeros(3) x[0] = S[2,1] x[1] = S[0,2] x[2] = S[1,0] return x
python
def deskew(S): """Converts a skew-symmetric cross-product matrix to its corresponding vector. Only works for 3x3 matrices. Parameters ---------- S : :obj:`numpy.ndarray` of float A 3x3 skew-symmetric matrix. Returns ------- :obj:`numpy.ndarray` of float A 3-entry vector that corresponds to the given cross product matrix. """ x = np.zeros(3) x[0] = S[2,1] x[1] = S[0,2] x[2] = S[1,0] return x
[ "def", "deskew", "(", "S", ")", ":", "x", "=", "np", ".", "zeros", "(", "3", ")", "x", "[", "0", "]", "=", "S", "[", "2", ",", "1", "]", "x", "[", "1", "]", "=", "S", "[", "0", ",", "2", "]", "x", "[", "2", "]", "=", "S", "[", "1", ",", "0", "]", "return", "x" ]
Converts a skew-symmetric cross-product matrix to its corresponding vector. Only works for 3x3 matrices. Parameters ---------- S : :obj:`numpy.ndarray` of float A 3x3 skew-symmetric matrix. Returns ------- :obj:`numpy.ndarray` of float A 3-entry vector that corresponds to the given cross product matrix.
[ "Converts", "a", "skew", "-", "symmetric", "cross", "-", "product", "matrix", "to", "its", "corresponding", "vector", ".", "Only", "works", "for", "3x3", "matrices", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L126-L144
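deskew inverts skew, so the round trip recovers the original vector:

import numpy as np
from autolab_core.utils import skew, deskew

v = np.array([0.3, -1.2, 2.0])
assert np.allclose(deskew(skew(v)), v)   # round trip is exact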
9,674
BerkeleyAutomation/autolab_core
autolab_core/utils.py
reverse_dictionary
def reverse_dictionary(d): """ Reverses the key value pairs for a given dictionary. Parameters ---------- d : :obj:`dict` dictionary to reverse Returns ------- :obj:`dict` dictionary with keys and values swapped """ rev_d = {} [rev_d.update({v:k}) for k, v in d.items()] return rev_d
python
def reverse_dictionary(d): """ Reverses the key value pairs for a given dictionary. Parameters ---------- d : :obj:`dict` dictionary to reverse Returns ------- :obj:`dict` dictionary with keys and values swapped """ rev_d = {} [rev_d.update({v:k}) for k, v in d.items()] return rev_d
[ "def", "reverse_dictionary", "(", "d", ")", ":", "rev_d", "=", "{", "}", "[", "rev_d", ".", "update", "(", "{", "v", ":", "k", "}", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", "]", "return", "rev_d" ]
Reverses the key value pairs for a given dictionary. Parameters ---------- d : :obj:`dict` dictionary to reverse Returns ------- :obj:`dict` dictionary with keys and values swapped
[ "Reverses", "the", "key", "value", "pairs", "for", "a", "given", "dictionary", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L146-L161
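Values must be hashable, and duplicate values collapse because later keys overwrite earlier ones:

from autolab_core.utils import reverse_dictionary

print(reverse_dictionary({'a': 1, 'b': 2}))   # {1: 'a', 2: 'b'}
# duplicate values keep only one key (the last inserted, in CPython 3.7+)
print(reverse_dictionary({'a': 1, 'b': 1}))   # {1: 'b'}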
9,675
BerkeleyAutomation/autolab_core
autolab_core/utils.py
filenames
def filenames(directory, tag='', sorted=False, recursive=False): """ Reads in all filenames from a directory that contain a specified substring. Parameters ---------- directory : :obj:`str` the directory to read from tag : :obj:`str` optional tag to match in the filenames sorted : bool whether or not to sort the filenames recursive : bool whether or not to search for the files recursively Returns ------- :obj:`list` of :obj:`str` filenames to read from """ if recursive: f = [os.path.join(directory, f) for directory, _, filename in os.walk(directory) for f in filename if f.find(tag) > -1] else: f = [os.path.join(directory, f) for f in os.listdir(directory) if f.find(tag) > -1] if sorted: f.sort() return f
python
def filenames(directory, tag='', sorted=False, recursive=False): """ Reads in all filenames from a directory that contain a specified substring. Parameters ---------- directory : :obj:`str` the directory to read from tag : :obj:`str` optional tag to match in the filenames sorted : bool whether or not to sort the filenames recursive : bool whether or not to search for the files recursively Returns ------- :obj:`list` of :obj:`str` filenames to read from """ if recursive: f = [os.path.join(directory, f) for directory, _, filename in os.walk(directory) for f in filename if f.find(tag) > -1] else: f = [os.path.join(directory, f) for f in os.listdir(directory) if f.find(tag) > -1] if sorted: f.sort() return f
[ "def", "filenames", "(", "directory", ",", "tag", "=", "''", ",", "sorted", "=", "False", ",", "recursive", "=", "False", ")", ":", "if", "recursive", ":", "f", "=", "[", "os", ".", "path", ".", "join", "(", "directory", ",", "f", ")", "for", "directory", ",", "_", ",", "filename", "in", "os", ".", "walk", "(", "directory", ")", "for", "f", "in", "filename", "if", "f", ".", "find", "(", "tag", ")", ">", "-", "1", "]", "else", ":", "f", "=", "[", "os", ".", "path", ".", "join", "(", "directory", ",", "f", ")", "for", "f", "in", "os", ".", "listdir", "(", "directory", ")", "if", "f", ".", "find", "(", "tag", ")", ">", "-", "1", "]", "if", "sorted", ":", "f", ".", "sort", "(", ")", "return", "f" ]
Reads in all filenames from a directory that contain a specified substring. Parameters ---------- directory : :obj:`str` the directory to read from tag : :obj:`str` optional tag to match in the filenames sorted : bool whether or not to sort the filenames recursive : bool whether or not to search for the files recursively Returns ------- :obj:`list` of :obj:`str` filenames to read from
[ "Reads", "in", "all", "filenames", "from", "a", "directory", "that", "contain", "a", "specified", "substring", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L178-L203
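Two details from the implementation: tag is a plain substring match via str.find, not a glob, and the default tag='' matches every file. A sketch with a placeholder directory:

from autolab_core.utils import filenames

# '/tmp/data' is a placeholder; any existing directory works
pngs = filenames('/tmp/data', tag='.png', sorted=True)         # top level only
all_pngs = filenames('/tmp/data', tag='.png', recursive=True)  # walk subdirectories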
9,676
BerkeleyAutomation/autolab_core
autolab_core/utils.py
sph2cart
def sph2cart(r, az, elev): """ Convert spherical to cartesian coordinates. Parameters ---------- r : float radius az : float azimuth (angle about z axis) elev : float polar angle (measured from the z axis) Returns ------- float x-coordinate float y-coordinate float z-coordinate """ x = r * np.cos(az) * np.sin(elev) y = r * np.sin(az) * np.sin(elev) z = r * np.cos(elev) return x, y, z
python
def sph2cart(r, az, elev): """ Convert spherical to cartesian coordinates. Parameters ---------- r : float radius az : float azimuth (angle about z axis) elev : float polar angle (measured from the z axis) Returns ------- float x-coordinate float y-coordinate float z-coordinate """ x = r * np.cos(az) * np.sin(elev) y = r * np.sin(az) * np.sin(elev) z = r * np.cos(elev) return x, y, z
[ "def", "sph2cart", "(", "r", ",", "az", ",", "elev", ")", ":", "x", "=", "r", "*", "np", ".", "cos", "(", "az", ")", "*", "np", ".", "sin", "(", "elev", ")", "y", "=", "r", "*", "np", ".", "sin", "(", "az", ")", "*", "np", ".", "sin", "(", "elev", ")", "z", "=", "r", "*", "np", ".", "cos", "(", "elev", ")", "return", "x", ",", "y", ",", "z" ]
Convert spherical to cartesian coordinates. Parameters ---------- r : float radius az : float azimuth (angle about z axis) elev : float polar angle (measured from the z axis) Returns ------- float x-coordinate float y-coordinate float z-coordinate
[ "Convert", "spherical", "to", "cartesian", "coordinates", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L205-L229
9,677
BerkeleyAutomation/autolab_core
autolab_core/utils.py
cart2sph
def cart2sph(x, y, z): """ Convert cartesian to spherical coordinates. Parameters ---------- x : float x-coordinate y : float y-coordinate z : float z-coordinate Returns ------- float radius float azimuth float polar angle (measured from the z axis) """ r = np.sqrt(x**2 + y**2 + z**2) if x > 0 and y > 0: az = np.arctan(y / x) elif x > 0 and y < 0: az = 2*np.pi - np.arctan(-y / x) elif x < 0 and y > 0: az = np.pi - np.arctan(-y / x) elif x < 0 and y < 0: az = np.pi + np.arctan(y / x) elif x == 0 and y > 0: az = np.pi / 2 elif x == 0 and y < 0: az = 3 * np.pi / 2 elif y == 0 and x > 0: az = 0 elif y == 0 and x < 0: az = np.pi elev = np.arccos(z / r) return r, az, elev
python
def cart2sph(x, y, z): """ Convert cartesian to spherical coordinates. Parameters ---------- x : float x-coordinate y : float y-coordinate z : float z-coordinate Returns ------- float radius float azimuth float polar angle (measured from the z axis) """ r = np.sqrt(x**2 + y**2 + z**2) if x > 0 and y > 0: az = np.arctan(y / x) elif x > 0 and y < 0: az = 2*np.pi - np.arctan(-y / x) elif x < 0 and y > 0: az = np.pi - np.arctan(-y / x) elif x < 0 and y < 0: az = np.pi + np.arctan(y / x) elif x == 0 and y > 0: az = np.pi / 2 elif x == 0 and y < 0: az = 3 * np.pi / 2 elif y == 0 and x > 0: az = 0 elif y == 0 and x < 0: az = np.pi elev = np.arccos(z / r) return r, az, elev
[ "def", "cart2sph", "(", "x", ",", "y", ",", "z", ")", ":", "r", "=", "np", ".", "sqrt", "(", "x", "**", "2", "+", "y", "**", "2", "+", "z", "**", "2", ")", "if", "x", ">", "0", "and", "y", ">", "0", ":", "az", "=", "np", ".", "arctan", "(", "y", "/", "x", ")", "elif", "x", ">", "0", "and", "y", "<", "0", ":", "az", "=", "2", "*", "np", ".", "pi", "-", "np", ".", "arctan", "(", "-", "y", "/", "x", ")", "elif", "x", "<", "0", "and", "y", ">", "0", ":", "az", "=", "np", ".", "pi", "-", "np", ".", "arctan", "(", "-", "y", "/", "x", ")", "elif", "x", "<", "0", "and", "y", "<", "0", ":", "az", "=", "np", ".", "pi", "+", "np", ".", "arctan", "(", "y", "/", "x", ")", "elif", "x", "==", "0", "and", "y", ">", "0", ":", "az", "=", "np", ".", "pi", "/", "2", "elif", "x", "==", "0", "and", "y", "<", "0", ":", "az", "=", "3", "*", "np", ".", "pi", "/", "2", "elif", "y", "==", "0", "and", "x", ">", "0", ":", "az", "=", "0", "elif", "y", "==", "0", "and", "x", "<", "0", ":", "az", "=", "np", ".", "pi", "elev", "=", "np", ".", "arccos", "(", "z", "/", "r", ")", "return", "r", ",", "az", ",", "elev" ]
Convert cartesian to spherical coordinates. Parameters ---------- x : float x-coordinate y : float y-coordinate z : float z-coordinate Returns ------- float radius float azimuth float polar angle (measured from the z axis)
[ "Convert", "cartesian", "to", "spherical", "coordinates", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L231-L270
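Both functions treat elev as the polar angle measured from the +z axis (z = r * np.cos(elev)), and cart2sph leaves az unassigned when x == 0 and y == 0, so points on the z axis raise UnboundLocalError. A round-trip check away from that degenerate axis:

import numpy as np
from autolab_core.utils import sph2cart, cart2sph

r, az, elev = 2.0, 0.7, 1.1          # elev is the polar angle from +z
x, y, z = sph2cart(r, az, elev)
assert np.allclose(cart2sph(x, y, z), (r, az, elev))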
9,678
BerkeleyAutomation/autolab_core
autolab_core/utils.py
keyboard_input
def keyboard_input(message, yesno=False): """ Get keyboard input from a human, optionally reasking for valid yes or no input. Parameters ---------- message : :obj:`str` the message to display to the user yesno : :obj:`bool` whether or not to enforce yes or no inputs Returns ------- :obj:`str` string input by the human """ # add space for readability message += ' ' # add yes or no to message if yesno: message += '[y/n] ' # ask human human_input = input(message) if yesno: while human_input.lower() != 'n' and human_input.lower() != 'y': logging.info('Did not understand input. Please answer \'y\' or \'n\'') human_input = input(message) return human_input
python
def keyboard_input(message, yesno=False): """ Get keyboard input from a human, optionally reasking for valid yes or no input. Parameters ---------- message : :obj:`str` the message to display to the user yesno : :obj:`bool` whether or not to enforce yes or no inputs Returns ------- :obj:`str` string input by the human """ # add space for readability message += ' ' # add yes or no to message if yesno: message += '[y/n] ' # ask human human_input = input(message) if yesno: while human_input.lower() != 'n' and human_input.lower() != 'y': logging.info('Did not understand input. Please answer \'y\' or \'n\'') human_input = input(message) return human_input
[ "def", "keyboard_input", "(", "message", ",", "yesno", "=", "False", ")", ":", "# add space for readability", "message", "+=", "' '", "# add yes or no to message", "if", "yesno", ":", "message", "+=", "'[y/n] '", "# ask human", "human_input", "=", "input", "(", "message", ")", "if", "yesno", ":", "while", "human_input", ".", "lower", "(", ")", "!=", "'n'", "and", "human_input", ".", "lower", "(", ")", "!=", "'y'", ":", "logging", ".", "info", "(", "'Did not understand input. Please answer \\'y\\' or \\'n\\''", ")", "human_input", "=", "input", "(", "message", ")", "return", "human_input" ]
Get keyboard input from a human, optionally reasking for valid yes or no input. Parameters ---------- message : :obj:`str` the message to display to the user yesno : :obj:`bool` whether or not to enforce yes or no inputs Returns ------- :obj:`str` string input by the human
[ "Get", "keyboard", "input", "from", "a", "human", "optionally", "reasking", "for", "valid", "yes", "or", "no", "input", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/utils.py#L272-L301
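A typical call; with yesno=True the function loops until it receives a case-insensitive 'y' or 'n':

from autolab_core.utils import keyboard_input

answer = keyboard_input('Overwrite existing calibration?', yesno=True)
if answer.lower() == 'y':
    print('overwriting')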
9,679
BerkeleyAutomation/autolab_core
autolab_core/dual_quaternion.py
DualQuaternion.interpolate
def interpolate(dq0, dq1, t): """Return the interpolation of two DualQuaternions. This uses the Dual Quaternion Linear Blending Method as described by Matthew Smith's 'Applications of Dual Quaternions in Three Dimensional Transformation and Interpolation' https://www.cosc.canterbury.ac.nz/research/reports/HonsReps/2013/hons_1305.pdf Parameters ---------- dq0 : :obj:`DualQuaternion` The first DualQuaternion. dq1 : :obj:`DualQuaternion` The second DualQuaternion. t : float The interpolation step in [0,1]. When t=0, this returns dq0, and when t=1, this returns dq1. Returns ------- :obj:`DualQuaternion` The interpolated DualQuaternion. Raises ------ ValueError If t isn't in [0,1]. """ if not 0 <= t <= 1: raise ValueError("Interpolation step must be between 0 and 1! Got {0}".format(t)) dqt = dq0 * (1-t) + dq1 * t return dqt.normalized
python
def interpolate(dq0, dq1, t): """Return the interpolation of two DualQuaternions. This uses the Dual Quaternion Linear Blending Method as described by Matthew Smith's 'Applications of Dual Quaternions in Three Dimensional Transformation and Interpolation' https://www.cosc.canterbury.ac.nz/research/reports/HonsReps/2013/hons_1305.pdf Parameters ---------- dq0 : :obj:`DualQuaternion` The first DualQuaternion. dq1 : :obj:`DualQuaternion` The second DualQuaternion. t : float The interpolation step in [0,1]. When t=0, this returns dq0, and when t=1, this returns dq1. Returns ------- :obj:`DualQuaternion` The interpolated DualQuaternion. Raises ------ ValueError If t isn't in [0,1]. """ if not 0 <= t <= 1: raise ValueError("Interpolation step must be between 0 and 1! Got {0}".format(t)) dqt = dq0 * (1-t) + dq1 * t return dqt.normalized
[ "def", "interpolate", "(", "dq0", ",", "dq1", ",", "t", ")", ":", "if", "not", "0", "<=", "t", "<=", "1", ":", "raise", "ValueError", "(", "\"Interpolation step must be between 0 and 1! Got {0}\"", ".", "format", "(", "t", ")", ")", "dqt", "=", "dq0", "*", "(", "1", "-", "t", ")", "+", "dq1", "*", "t", "return", "dqt", ".", "normalized" ]
Return the interpolation of two DualQuaternions. This uses the Dual Quaternion Linear Blending Method as described by Matthew Smith's 'Applications of Dual Quaternions in Three Dimensional Transformation and Interpolation' https://www.cosc.canterbury.ac.nz/research/reports/HonsReps/2013/hons_1305.pdf Parameters ---------- dq0 : :obj:`DualQuaternion` The first DualQuaternion. dq1 : :obj:`DualQuaternion` The second DualQuaternion. t : float The interpolation step in [0,1]. When t=0, this returns dq0, and when t=1, this returns dq1. Returns ------- :obj:`DualQuaternion` The interpolated DualQuaternion. Raises ------ ValueError If t isn't in [0,1].
[ "Return", "the", "interpolation", "of", "two", "DualQuaternions", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/dual_quaternion.py#L129-L162
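The DualQuaternion constructor is not shown in this file, so the following is a plain-NumPy sketch of the same linear-blend idea with a simplified normalization that only rescales by the real part's magnitude (a full dual-quaternion normalization would also correct the dual part):

import numpy as np

def dlb(q0, q1, t):
    # q0, q1: (2, 4) arrays holding the (real, dual) quaternion parts, w-x-y-z order
    qt = (1.0 - t) * q0 + t * q1
    return qt / np.linalg.norm(qt[0])   # rescale so the real part is a unit quaternion

identity = np.array([[1.0, 0.0, 0.0, 0.0],   # no rotation
                     [0.0, 0.0, 0.0, 0.0]])  # no translation
rot_z = np.array([[0.0, 0.0, 0.0, 1.0],      # 180-degree rotation about z
                  [0.0, 0.0, 0.0, 0.0]])
print(dlb(identity, rot_z, 0.25))            # a quarter of the way along the blend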
9,680
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel._save
def _save(self): """Save the model to a .csv file """ # if not first time saving, copy .csv to a backup if os.path.isfile(self._full_filename): shutil.copyfile(self._full_filename, self._full_backup_filename) # write to csv with open(self._full_filename, 'w') as file: writer = csv.DictWriter(file, fieldnames=self._headers) writer.writeheader() for row in self._table: writer.writerow(row)
python
def _save(self): """Save the model to a .csv file """ # if not first time saving, copy .csv to a backup if os.path.isfile(self._full_filename): shutil.copyfile(self._full_filename, self._full_backup_filename) # write to csv with open(self._full_filename, 'w') as file: writer = csv.DictWriter(file, fieldnames=self._headers) writer.writeheader() for row in self._table: writer.writerow(row)
[ "def", "_save", "(", "self", ")", ":", "# if not first time saving, copy .csv to a backup", "if", "os", ".", "path", ".", "isfile", "(", "self", ".", "_full_filename", ")", ":", "shutil", ".", "copyfile", "(", "self", ".", "_full_filename", ",", "self", ".", "_full_backup_filename", ")", "# write to csv", "with", "open", "(", "self", ".", "_full_filename", ",", "'w'", ")", "as", "file", ":", "writer", "=", "csv", ".", "DictWriter", "(", "file", ",", "fieldnames", "=", "self", ".", "_headers", ")", "writer", ".", "writeheader", "(", ")", "for", "row", "in", "self", ".", "_table", ":", "writer", ".", "writerow", "(", "row", ")" ]
Save the model to a .csv file
[ "Save", "the", "model", "to", "a", ".", "csv", "file" ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L103-L115
9,681
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.insert
def insert(self, data): """Insert a row into the .csv file. Parameters ---------- data : :obj:`dict` A dictionary mapping keys (header strings) to values. Returns ------- int The UID for the new row. Raises ------ Exception If the value for a given header is not of the appropriate type. """ row = {key:self._default_entry for key in self._headers} row['_uid'] = self._get_new_uid() for key, val in data.items(): if key in ('_uid', '_default'): logging.warn("Cannot manually set columns _uid or _default of a row! Given data: {0}".format(data)) continue if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]): raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val))) row[key] = val self._table.append(row) self._save() return row['_uid']
python
def insert(self, data): """Insert a row into the .csv file. Parameters ---------- data : :obj:`dict` A dictionary mapping keys (header strings) to values. Returns ------- int The UID for the new row. Raises ------ Exception If the value for a given header is not of the appropriate type. """ row = {key:self._default_entry for key in self._headers} row['_uid'] = self._get_new_uid() for key, val in data.items(): if key in ('_uid', '_default'): logging.warn("Cannot manually set columns _uid or _default of a row! Given data: {0}".format(data)) continue if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]): raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val))) row[key] = val self._table.append(row) self._save() return row['_uid']
[ "def", "insert", "(", "self", ",", "data", ")", ":", "row", "=", "{", "key", ":", "self", ".", "_default_entry", "for", "key", "in", "self", ".", "_headers", "}", "row", "[", "'_uid'", "]", "=", "self", ".", "_get_new_uid", "(", ")", "for", "key", ",", "val", "in", "data", ".", "items", "(", ")", ":", "if", "key", "in", "(", "'_uid'", ",", "'_default'", ")", ":", "logging", ".", "warn", "(", "\"Cannot manually set columns _uid or _default of a row! Given data: {0}\"", ".", "format", "(", "data", ")", ")", "continue", "if", "not", "isinstance", "(", "val", ",", "CSVModel", ".", "_KNOWN_TYPES_MAP", "[", "self", ".", "_headers_types", "[", "key", "]", "]", ")", ":", "raise", "Exception", "(", "'Data type mismatch for column {0}. Expected: {1}, got: {2}'", ".", "format", "(", "key", ",", "CSVModel", ".", "_KNOWN_TYPES_MAP", "[", "self", ".", "_headers_types", "[", "key", "]", "]", ",", "type", "(", "val", ")", ")", ")", "row", "[", "key", "]", "=", "val", "self", ".", "_table", ".", "append", "(", "row", ")", "self", ".", "_save", "(", ")", "return", "row", "[", "'_uid'", "]" ]
Insert a row into the .csv file. Parameters ---------- data : :obj:`dict` A dictionary mapping keys (header strings) to values. Returns ------- int The UID for the new row. Raises ------ Exception If the value for a given header is not of the appropriate type.
[ "Insert", "a", "row", "into", "the", ".", "csv", "file", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L117-L149
9,682
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.update_by_uid
def update_by_uid(self, uid, data): """Update a row with the given data. Parameters ---------- uid : int The UID of the row to update. data : :obj:`dict` A dictionary mapping keys (header strings) to values. Raises ------ Exception If the value for a given header is not of the appropriate type. """ row = self._table[uid+1] for key, val in data.items(): if key == '_uid' or key == '_default': continue if key not in self._headers: logging.warn("Unknown column name: {0}".format(key)) continue if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]): raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val))) row[key] = val self._save()
python
def update_by_uid(self, uid, data): """Update a row with the given data. Parameters ---------- uid : int The UID of the row to update. data : :obj:`dict` A dictionary mapping keys (header strings) to values. Raises ------ Exception If the value for a given header is not of the appropriate type. """ row = self._table[uid+1] for key, val in data.items(): if key == '_uid' or key == '_default': continue if key not in self._headers: logging.warn("Unknown column name: {0}".format(key)) continue if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]): raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val))) row[key] = val self._save()
[ "def", "update_by_uid", "(", "self", ",", "uid", ",", "data", ")", ":", "row", "=", "self", ".", "_table", "[", "uid", "+", "1", "]", "for", "key", ",", "val", "in", "data", ".", "items", "(", ")", ":", "if", "key", "==", "'_uid'", "or", "key", "==", "'_default'", ":", "continue", "if", "key", "not", "in", "self", ".", "_headers", ":", "logging", ".", "warn", "(", "\"Unknown column name: {0}\"", ".", "format", "(", "key", ")", ")", "continue", "if", "not", "isinstance", "(", "val", ",", "CSVModel", ".", "_KNOWN_TYPES_MAP", "[", "self", ".", "_headers_types", "[", "key", "]", "]", ")", ":", "raise", "Exception", "(", "'Data type mismatch for column {0}. Expected: {1}, got: {2}'", ".", "format", "(", "key", ",", "CSVModel", ".", "_KNOWN_TYPES_MAP", "[", "self", ".", "_headers_types", "[", "key", "]", "]", ",", "type", "(", "val", ")", ")", ")", "row", "[", "key", "]", "=", "val", "self", ".", "_save", "(", ")" ]
Update a row with the given data. Parameters ---------- uid : int The UID of the row to update. data : :obj:`dict` A dictionary mapping keys (header strings) to values. Raises ------ Exception If the value for a given header is not of the appropriate type.
[ "Update", "a", "row", "with", "the", "given", "data", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L151-L178
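A usage sketch for the two mutating methods; the constructor call and the 'str'/'float' type-name strings are assumptions inferred from the load and get_or_create records later in this file:

from autolab_core.csv_model import CSVModel

# assumed schema format: a list of (header, type-name) pairs
model = CSVModel('/tmp/experiments.csv', [('name', 'str'), ('score', 'float')])

uid = model.insert({'name': 'trial_0', 'score': 0.75})  # returns the new row's uid
model.update_by_uid(uid, {'score': 0.8})                # values are type-checked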
9,683
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.get_col
def get_col(self, col_name, filter = lambda _ : True): """Return all values in the column corresponding to col_name that satisfies filter, which is a function that takes in a value of the column's type and returns True or False Parameters ---------- col_name : str Name of desired column filter : function, optional A function that takes in a value of the column's type and returns True or False Defaults to a function that always returns True Returns ------- list A list of values in the desired columns by order of their storage in the model Raises ------ ValueError If the desired column name is not found in the model """ if col_name not in self._headers: raise ValueError("{} not found! Model has headers: {}".format(col_name, self._headers)) col = [] for i in range(self.num_rows): row = self._table[i + 1] val = row[col_name] if filter(val): col.append(val) return col
python
def get_col(self, col_name, filter = lambda _ : True): """Return all values in the column corresponding to col_name that satisfies filter, which is a function that takes in a value of the column's type and returns True or False Parameters ---------- col_name : str Name of desired column filter : function, optional A function that takes in a value of the column's type and returns True or False Defaults to a function that always returns True Returns ------- list A list of values in the desired columns by order of their storage in the model Raises ------ ValueError If the desired column name is not found in the model """ if col_name not in self._headers: raise ValueError("{} not found! Model has headers: {}".format(col_name, self._headers)) col = [] for i in range(self.num_rows): row = self._table[i + 1] val = row[col_name] if filter(val): col.append(val) return col
[ "def", "get_col", "(", "self", ",", "col_name", ",", "filter", "=", "lambda", "_", ":", "True", ")", ":", "if", "col_name", "not", "in", "self", ".", "_headers", ":", "raise", "ValueError", "(", "\"{} not found! Model has headers: {}\"", ".", "format", "(", "col_name", ",", "self", ".", "_headers", ")", ")", "col", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "num_rows", ")", ":", "row", "=", "self", ".", "_table", "[", "i", "+", "1", "]", "val", "=", "row", "[", "col_name", "]", "if", "filter", "(", "val", ")", ":", "col", ".", "append", "(", "val", ")", "return", "col" ]
Return all values in the column corresponding to col_name that satisfy filter, which is a function that takes in a value of the column's type and returns True or False Parameters ---------- col_name : str Name of desired column filter : function, optional A function that takes in a value of the column's type and returns True or False Defaults to a function that always returns True Returns ------- list A list of values in the desired column, in order of their storage in the model Raises ------ ValueError If the desired column name is not found in the model
[ "Return", "all", "values", "in", "the", "column", "corresponding", "to", "col_name", "that", "satisfies", "filter", "which", "is", "a", "function", "that", "takes", "in", "a", "value", "of", "the", "column", "s", "type", "and", "returns", "True", "or", "False" ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L212-L243
9,684
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.get_by_cols
def get_by_cols(self, cols, direction=1): """Return the first or last row that satisfies the given col value constraints, or None if no row contains the given value. Parameters ---------- cols: :obj:'dict' Dictionary of col values for a specific row. direction: int, optional Either 1 or -1. 1 means find the first row, -1 means find the last row. Returns ------- :obj:`dict` A dictionary mapping keys (header strings) to values, which represents a row of the table. This row contains the given value in the specified column. """ if direction == 1: iterator = range(self.num_rows) elif direction == -1: iterator = range(self.num_rows-1, -1, -1) else: raise ValueError("Direction can only be 1 (first) or -1 (last). Got: {0}".format(direction)) for i in iterator: row = self._table[i+1] all_sat = True for key, val in cols.items(): if row[key] != val: all_sat = False break if all_sat: return row.copy() return None
python
def get_by_cols(self, cols, direction=1): """Return the first or last row that satisfies the given col value constraints, or None if no row contains the given value. Parameters ---------- cols: :obj:'dict' Dictionary of col values for a specific row. direction: int, optional Either 1 or -1. 1 means find the first row, -1 means find the last row. Returns ------- :obj:`dict` A dictionary mapping keys (header strings) to values, which represents a row of the table. This row contains the given value in the specified column. """ if direction == 1: iterator = range(self.num_rows) elif direction == -1: iterator = range(self.num_rows-1, -1, -1) else: raise ValueError("Direction can only be 1 (first) or -1 (last). Got: {0}".format(direction)) for i in iterator: row = self._table[i+1] all_sat = True for key, val in cols.items(): if row[key] != val: all_sat = False break if all_sat: return row.copy() return None
[ "def", "get_by_cols", "(", "self", ",", "cols", ",", "direction", "=", "1", ")", ":", "if", "direction", "==", "1", ":", "iterator", "=", "range", "(", "self", ".", "num_rows", ")", "elif", "direction", "==", "-", "1", ":", "iterator", "=", "range", "(", "self", ".", "num_rows", "-", "1", ",", "-", "1", ",", "-", "1", ")", "else", ":", "raise", "ValueError", "(", "\"Direction can only be 1 (first) or -1 (last). Got: {0}\"", ".", "format", "(", "direction", ")", ")", "for", "i", "in", "iterator", ":", "row", "=", "self", ".", "_table", "[", "i", "+", "1", "]", "all_sat", "=", "True", "for", "key", ",", "val", "in", "cols", ".", "items", "(", ")", ":", "if", "row", "[", "key", "]", "!=", "val", ":", "all_sat", "=", "False", "break", "if", "all_sat", ":", "return", "row", ".", "copy", "(", ")", "return", "None" ]
Return the first or last row that satisfies the given col value constraints, or None if no row contains the given value. Parameters ---------- cols: :obj:`dict` Dictionary of col values for a specific row. direction: int, optional Either 1 or -1. 1 means find the first row, -1 means find the last row. Returns ------- :obj:`dict` A dictionary mapping keys (header strings) to values, which represents a row of the table. This row contains the given value in the specified column.
[ "Return", "the", "first", "or", "last", "row", "that", "satisfies", "the", "given", "col", "value", "constraints", "or", "None", "if", "no", "row", "contains", "the", "given", "value", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L245-L282
9,685
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.get_rows_by_cols
def get_rows_by_cols(self, matching_dict): """Return all rows where the cols match the elements given in the matching_dict Parameters ---------- matching_dict: :obj:'dict' Desired dictionary of col values. Returns ------- :obj:`list` A list of rows that satisfy the matching_dict """ result = [] for i in range(self.num_rows): row = self._table[i+1] matching = True for key, val in matching_dict.items(): if row[key] != val: matching = False break if matching: result.append(row) return result
python
def get_rows_by_cols(self, matching_dict): """Return all rows where the cols match the elements given in the matching_dict Parameters ---------- matching_dict: :obj:'dict' Desired dictionary of col values. Returns ------- :obj:`list` A list of rows that satisfy the matching_dict """ result = [] for i in range(self.num_rows): row = self._table[i+1] matching = True for key, val in matching_dict.items(): if row[key] != val: matching = False break if matching: result.append(row) return result
[ "def", "get_rows_by_cols", "(", "self", ",", "matching_dict", ")", ":", "result", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "num_rows", ")", ":", "row", "=", "self", ".", "_table", "[", "i", "+", "1", "]", "matching", "=", "True", "for", "key", ",", "val", "in", "matching_dict", ".", "items", "(", ")", ":", "if", "row", "[", "key", "]", "!=", "val", ":", "matching", "=", "False", "break", "if", "matching", ":", "result", ".", "append", "(", "row", ")", "return", "result" ]
Return all rows where the cols match the elements given in the matching_dict Parameters ---------- matching_dict: :obj:`dict` Desired dictionary of col values. Returns ------- :obj:`list` A list of rows that satisfy the matching_dict
[ "Return", "all", "rows", "where", "the", "cols", "match", "the", "elements", "given", "in", "the", "matching_dict" ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L326-L351
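A query sketch under the same assumed constructor and type names as above:

from autolab_core.csv_model import CSVModel

model = CSVModel('/tmp/experiments2.csv', [('name', 'str'), ('score', 'float')])
model.insert({'name': 'trial_0', 'score': 0.75})
model.insert({'name': 'trial_1', 'score': 0.9})

print(model.get_by_cols({'name': 'trial_1'}, direction=-1))  # last match, or None
print(model.get_rows_by_cols({'name': 'trial_0'}))           # every exact match
print(model.get_col('score', filter=lambda s: s > 0.8))      # [0.9]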
9,686
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.next
def next(self): """ Returns the next row in the CSV, for iteration """ if self._cur_row >= len(self._table): raise StopIteration data = self._table[self._cur_row].copy() self._cur_row += 1 return data
python
def next(self): """ Returns the next row in the CSV, for iteration """ if self._cur_row >= len(self._table): raise StopIteration data = self._table[self._cur_row].copy() self._cur_row += 1 return data
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "_cur_row", ">=", "len", "(", "self", ".", "_table", ")", ":", "raise", "StopIteration", "data", "=", "self", ".", "_table", "[", "self", ".", "_cur_row", "]", ".", "copy", "(", ")", "self", ".", "_cur_row", "+=", "1", "return", "data" ]
Returns the next row in the CSV, for iteration
[ "Returns", "the", "next", "row", "in", "the", "CSV", "for", "iteration" ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L358-L364
9,687
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.load
def load(full_filename): """Load a .csv file into a CSVModel. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file. Raises ------ Excetpion If the CSV file does not exist or is malformed. """ with open(full_filename, 'r') as file: reader = csv.DictReader(file) headers = reader.fieldnames if '_uid' not in headers or '_default' not in headers: raise Exception("Malformed CSVModel file!") all_rows = [row for row in reader] types = all_rows[0] table = [types] default_entry = table[0]['_default'] for i in range(1, len(all_rows)): raw_row = all_rows[i] row = {} for column_name in headers: if raw_row[column_name] != default_entry and column_name != '': if types[column_name] == 'bool': row[column_name] = CSVModel._str_to_bool(raw_row[column_name]) else: try: row[column_name] = CSVModel._KNOWN_TYPES_MAP[types[column_name]](raw_row[column_name]) except: logging.error('{}, {}, {}'.format(column_name, types[column_name], raw_row[column_name])) row[column_name] = CSVModel._KNOWN_TYPES_MAP[types[column_name]](bool(raw_row[column_name])) else: row[column_name] = default_entry table.append(row) if len(table) == 1: next_valid_uid = 0 else: next_valid_uid = int(table[-1]['_uid']) + 1 headers_init = headers[1:-1] types_init = [types[column_name] for column_name in headers_init] headers_types_list = zip(headers_init, types_init) csv_model = CSVModel(full_filename, headers_types_list, default_entry=default_entry) csv_model._uid = next_valid_uid csv_model._table = table csv_model._save() return csv_model
python
def load(full_filename): """Load a .csv file into a CSVModel. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file. Raises ------ Excetpion If the CSV file does not exist or is malformed. """ with open(full_filename, 'r') as file: reader = csv.DictReader(file) headers = reader.fieldnames if '_uid' not in headers or '_default' not in headers: raise Exception("Malformed CSVModel file!") all_rows = [row for row in reader] types = all_rows[0] table = [types] default_entry = table[0]['_default'] for i in range(1, len(all_rows)): raw_row = all_rows[i] row = {} for column_name in headers: if raw_row[column_name] != default_entry and column_name != '': if types[column_name] == 'bool': row[column_name] = CSVModel._str_to_bool(raw_row[column_name]) else: try: row[column_name] = CSVModel._KNOWN_TYPES_MAP[types[column_name]](raw_row[column_name]) except: logging.error('{}, {}, {}'.format(column_name, types[column_name], raw_row[column_name])) row[column_name] = CSVModel._KNOWN_TYPES_MAP[types[column_name]](bool(raw_row[column_name])) else: row[column_name] = default_entry table.append(row) if len(table) == 1: next_valid_uid = 0 else: next_valid_uid = int(table[-1]['_uid']) + 1 headers_init = headers[1:-1] types_init = [types[column_name] for column_name in headers_init] headers_types_list = zip(headers_init, types_init) csv_model = CSVModel(full_filename, headers_types_list, default_entry=default_entry) csv_model._uid = next_valid_uid csv_model._table = table csv_model._save() return csv_model
[ "def", "load", "(", "full_filename", ")", ":", "with", "open", "(", "full_filename", ",", "'r'", ")", "as", "file", ":", "reader", "=", "csv", ".", "DictReader", "(", "file", ")", "headers", "=", "reader", ".", "fieldnames", "if", "'_uid'", "not", "in", "headers", "or", "'_default'", "not", "in", "headers", ":", "raise", "Exception", "(", "\"Malformed CSVModel file!\"", ")", "all_rows", "=", "[", "row", "for", "row", "in", "reader", "]", "types", "=", "all_rows", "[", "0", "]", "table", "=", "[", "types", "]", "default_entry", "=", "table", "[", "0", "]", "[", "'_default'", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "all_rows", ")", ")", ":", "raw_row", "=", "all_rows", "[", "i", "]", "row", "=", "{", "}", "for", "column_name", "in", "headers", ":", "if", "raw_row", "[", "column_name", "]", "!=", "default_entry", "and", "column_name", "!=", "''", ":", "if", "types", "[", "column_name", "]", "==", "'bool'", ":", "row", "[", "column_name", "]", "=", "CSVModel", ".", "_str_to_bool", "(", "raw_row", "[", "column_name", "]", ")", "else", ":", "try", ":", "row", "[", "column_name", "]", "=", "CSVModel", ".", "_KNOWN_TYPES_MAP", "[", "types", "[", "column_name", "]", "]", "(", "raw_row", "[", "column_name", "]", ")", "except", ":", "logging", ".", "error", "(", "'{}, {}, {}'", ".", "format", "(", "column_name", ",", "types", "[", "column_name", "]", ",", "raw_row", "[", "column_name", "]", ")", ")", "row", "[", "column_name", "]", "=", "CSVModel", ".", "_KNOWN_TYPES_MAP", "[", "types", "[", "column_name", "]", "]", "(", "bool", "(", "raw_row", "[", "column_name", "]", ")", ")", "else", ":", "row", "[", "column_name", "]", "=", "default_entry", "table", ".", "append", "(", "row", ")", "if", "len", "(", "table", ")", "==", "1", ":", "next_valid_uid", "=", "0", "else", ":", "next_valid_uid", "=", "int", "(", "table", "[", "-", "1", "]", "[", "'_uid'", "]", ")", "+", "1", "headers_init", "=", "headers", "[", "1", ":", "-", "1", "]", "types_init", "=", "[", "types", "[", "column_name", "]", "for", "column_name", "in", "headers_init", "]", "headers_types_list", "=", "zip", "(", "headers_init", ",", "types_init", ")", "csv_model", "=", "CSVModel", "(", "full_filename", ",", "headers_types_list", ",", "default_entry", "=", "default_entry", ")", "csv_model", ".", "_uid", "=", "next_valid_uid", "csv_model", ".", "_table", "=", "table", "csv_model", ".", "_save", "(", ")", "return", "csv_model" ]
Load a .csv file into a CSVModel. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file. Raises ------ Exception If the CSV file does not exist or is malformed.
[ "Load", "a", ".", "csv", "file", "into", "a", "CSVModel", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L379-L438
9,688
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
CSVModel.get_or_create
def get_or_create(full_filename, headers_types=None, default_entry=''): """Load a .csv file into a CSVModel if the file exists, or create a new CSVModel with the given filename if the file does not exist. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. headers_types : :obj:`list` of :obj:`tuple` of :obj:`str`, :obj:`str` A list of tuples, where the first element in each tuple is the string header for a column and the second element is that column's data type as a string. default_entry : :obj:`str` The default entry for cells in the CSV. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file, or a new CSVModel tied to the filename if the file doesn't currently exist. """ # convert dictionaries to list if isinstance(headers_types, dict): headers_types_list = [(k,v) for k,v in headers_types.items()] headers_types = headers_types_list if os.path.isfile(full_filename): return CSVModel.load(full_filename) else: return CSVModel(full_filename, headers_types, default_entry=default_entry)
python
def get_or_create(full_filename, headers_types=None, default_entry=''): """Load a .csv file into a CSVModel if the file exists, or create a new CSVModel with the given filename if the file does not exist. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. headers_types : :obj:`list` of :obj:`tuple` of :obj:`str`, :obj:`str` A list of tuples, where the first element in each tuple is the string header for a column and the second element is that column's data type as a string. default_entry : :obj:`str` The default entry for cells in the CSV. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file, or a new CSVModel tied to the filename if the file doesn't currently exist. """ # convert dictionaries to list if isinstance(headers_types, dict): headers_types_list = [(k,v) for k,v in headers_types.items()] headers_types = headers_types_list if os.path.isfile(full_filename): return CSVModel.load(full_filename) else: return CSVModel(full_filename, headers_types, default_entry=default_entry)
[ "def", "get_or_create", "(", "full_filename", ",", "headers_types", "=", "None", ",", "default_entry", "=", "''", ")", ":", "# convert dictionaries to list", "if", "isinstance", "(", "headers_types", ",", "dict", ")", ":", "headers_types_list", "=", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "headers_types", ".", "items", "(", ")", "]", "headers_types", "=", "headers_types_list", "if", "os", ".", "path", ".", "isfile", "(", "full_filename", ")", ":", "return", "CSVModel", ".", "load", "(", "full_filename", ")", "else", ":", "return", "CSVModel", "(", "full_filename", ",", "headers_types", ",", "default_entry", "=", "default_entry", ")" ]
Load a .csv file into a CSVModel if the file exists, or create a new CSVModel with the given filename if the file does not exist. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. headers_types : :obj:`list` of :obj:`tuple` of :obj:`str`, :obj:`str` A list of tuples, where the first element in each tuple is the string header for a column and the second element is that column's data type as a string. default_entry : :obj:`str` The default entry for cells in the CSV. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file, or a new CSVModel tied to the filename if the file doesn't currently exist.
[ "Load", "a", ".", "csv", "file", "into", "a", "CSVModel", "if", "the", "file", "exists", "or", "create", "a", "new", "CSVModel", "with", "the", "given", "filename", "if", "the", "file", "does", "not", "exist", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L441-L472
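get_or_create is the safer entry point when the file may not exist yet. A sketch with a hypothetical schema, assuming 'float' and 'str' are among the type strings the model recognizes:

from autolab_core.csv_model import CSVModel

# Hypothetical two-column schema; a {header: type} dict works as well,
# since the function converts dicts to a list of tuples internally.
model = CSVModel.get_or_create('trials.csv',
                               headers_types=[('score', 'float'),
                                              ('label', 'str')])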
9,689
BerkeleyAutomation/autolab_core
autolab_core/transformations.py
projection_matrix
def projection_matrix(point, normal, direction=None, perspective=None, pseudo=False): """Return matrix to project onto plane defined by point and normal. Using either a perspective point, a projection direction, or neither. If pseudo is True, perspective projections will preserve relative depth such that Perspective = dot(Orthogonal, PseudoPerspective). >>> P = projection_matrix((0, 0, 0), (1, 0, 0)) >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) True >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> P1 = projection_matrix(point, normal, direction=direct) >>> P2 = projection_matrix(point, normal, perspective=persp) >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> is_same_transform(P2, numpy.dot(P0, P3)) True >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0)) >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0 >>> v0[3] = 1.0 >>> v1 = numpy.dot(P, v0) >>> numpy.allclose(v1[1], v0[1]) True >>> numpy.allclose(v1[0], 3.0-v1[1]) True """ M = numpy.identity(4) point = numpy.array(point[:3], dtype=numpy.float64, copy=False) normal = unit_vector(normal[:3]) if perspective is not None: # perspective projection perspective = numpy.array(perspective[:3], dtype=numpy.float64, copy=False) M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal) M[:3, :3] -= numpy.outer(perspective, normal) if pseudo: # preserve relative depth M[:3, :3] -= numpy.outer(normal, normal) M[:3, 3] = numpy.dot(point, normal) * (perspective+normal) else: M[:3, 3] = numpy.dot(point, normal) * perspective M[3, :3] = -normal M[3, 3] = numpy.dot(perspective, normal) elif direction is not None: # parallel projection direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) scale = numpy.dot(direction, normal) M[:3, :3] -= numpy.outer(direction, normal) / scale M[:3, 3] = direction * (numpy.dot(point, normal) / scale) else: # orthogonal projection M[:3, :3] -= numpy.outer(normal, normal) M[:3, 3] = numpy.dot(point, normal) * normal return M
python
def projection_matrix(point, normal, direction=None, perspective=None, pseudo=False): """Return matrix to project onto plane defined by point and normal. Using either a perspective point, a projection direction, or neither. If pseudo is True, perspective projections will preserve relative depth such that Perspective = dot(Orthogonal, PseudoPerspective). >>> P = projection_matrix((0, 0, 0), (1, 0, 0)) >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) True >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> P1 = projection_matrix(point, normal, direction=direct) >>> P2 = projection_matrix(point, normal, perspective=persp) >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> is_same_transform(P2, numpy.dot(P0, P3)) True >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0)) >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0 >>> v0[3] = 1.0 >>> v1 = numpy.dot(P, v0) >>> numpy.allclose(v1[1], v0[1]) True >>> numpy.allclose(v1[0], 3.0-v1[1]) True """ M = numpy.identity(4) point = numpy.array(point[:3], dtype=numpy.float64, copy=False) normal = unit_vector(normal[:3]) if perspective is not None: # perspective projection perspective = numpy.array(perspective[:3], dtype=numpy.float64, copy=False) M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal) M[:3, :3] -= numpy.outer(perspective, normal) if pseudo: # preserve relative depth M[:3, :3] -= numpy.outer(normal, normal) M[:3, 3] = numpy.dot(point, normal) * (perspective+normal) else: M[:3, 3] = numpy.dot(point, normal) * perspective M[3, :3] = -normal M[3, 3] = numpy.dot(perspective, normal) elif direction is not None: # parallel projection direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) scale = numpy.dot(direction, normal) M[:3, :3] -= numpy.outer(direction, normal) / scale M[:3, 3] = direction * (numpy.dot(point, normal) / scale) else: # orthogonal projection M[:3, :3] -= numpy.outer(normal, normal) M[:3, 3] = numpy.dot(point, normal) * normal return M
[ "def", "projection_matrix", "(", "point", ",", "normal", ",", "direction", "=", "None", ",", "perspective", "=", "None", ",", "pseudo", "=", "False", ")", ":", "M", "=", "numpy", ".", "identity", "(", "4", ")", "point", "=", "numpy", ".", "array", "(", "point", "[", ":", "3", "]", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "False", ")", "normal", "=", "unit_vector", "(", "normal", "[", ":", "3", "]", ")", "if", "perspective", "is", "not", "None", ":", "# perspective projection", "perspective", "=", "numpy", ".", "array", "(", "perspective", "[", ":", "3", "]", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "False", ")", "M", "[", "0", ",", "0", "]", "=", "M", "[", "1", ",", "1", "]", "=", "M", "[", "2", ",", "2", "]", "=", "numpy", ".", "dot", "(", "perspective", "-", "point", ",", "normal", ")", "M", "[", ":", "3", ",", ":", "3", "]", "-=", "numpy", ".", "outer", "(", "perspective", ",", "normal", ")", "if", "pseudo", ":", "# preserve relative depth", "M", "[", ":", "3", ",", ":", "3", "]", "-=", "numpy", ".", "outer", "(", "normal", ",", "normal", ")", "M", "[", ":", "3", ",", "3", "]", "=", "numpy", ".", "dot", "(", "point", ",", "normal", ")", "*", "(", "perspective", "+", "normal", ")", "else", ":", "M", "[", ":", "3", ",", "3", "]", "=", "numpy", ".", "dot", "(", "point", ",", "normal", ")", "*", "perspective", "M", "[", "3", ",", ":", "3", "]", "=", "-", "normal", "M", "[", "3", ",", "3", "]", "=", "numpy", ".", "dot", "(", "perspective", ",", "normal", ")", "elif", "direction", "is", "not", "None", ":", "# parallel projection", "direction", "=", "numpy", ".", "array", "(", "direction", "[", ":", "3", "]", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "False", ")", "scale", "=", "numpy", ".", "dot", "(", "direction", ",", "normal", ")", "M", "[", ":", "3", ",", ":", "3", "]", "-=", "numpy", ".", "outer", "(", "direction", ",", "normal", ")", "/", "scale", "M", "[", ":", "3", ",", "3", "]", "=", "direction", "*", "(", "numpy", ".", "dot", "(", "point", ",", "normal", ")", "/", "scale", ")", "else", ":", "# orthogonal projection", "M", "[", ":", "3", ",", ":", "3", "]", "-=", "numpy", ".", "outer", "(", "normal", ",", "normal", ")", "M", "[", ":", "3", ",", "3", "]", "=", "numpy", ".", "dot", "(", "point", ",", "normal", ")", "*", "normal", "return", "M" ]
Return matrix to project onto plane defined by point and normal. Using either a perspective point, a projection direction, or neither. If pseudo is True, perspective projections will preserve relative depth such that Perspective = dot(Orthogonal, PseudoPerspective). >>> P = projection_matrix((0, 0, 0), (1, 0, 0)) >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) True >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> P1 = projection_matrix(point, normal, direction=direct) >>> P2 = projection_matrix(point, normal, perspective=persp) >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> is_same_transform(P2, numpy.dot(P0, P3)) True >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0)) >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0 >>> v0[3] = 1.0 >>> v1 = numpy.dot(P, v0) >>> numpy.allclose(v1[1], v0[1]) True >>> numpy.allclose(v1[0], 3.0-v1[1]) True
[ "Return", "matrix", "to", "project", "onto", "plane", "defined", "by", "point", "and", "normal", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/transformations.py#L437-L496
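A quick check of the orthogonal branch (no direction, no perspective): projecting onto the plane x = 0 zeroes the x-component of a homogeneous point.

import numpy
from autolab_core import transformations

# Orthogonal projection onto the plane through the origin with normal (1, 0, 0).
P = transformations.projection_matrix((0, 0, 0), (1, 0, 0))
v = numpy.array([2.0, 3.0, 4.0, 1.0])
print(numpy.dot(P, v))  # [0. 3. 4. 1.] -- the x-component is squashed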
9,690
BerkeleyAutomation/autolab_core
autolab_core/transformations.py
projection_from_matrix
def projection_from_matrix(matrix, pseudo=False): """Return projection plane and perspective point from projection matrix. Return values are same as arguments for projection_matrix function: point, normal, direction, perspective, and pseudo. >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, direct) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) >>> result = projection_from_matrix(P0, pseudo=False) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> result = projection_from_matrix(P0, pseudo=True) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True """ M = numpy.array(matrix, dtype=numpy.float64, copy=False) M33 = M[:3, :3] l, V = numpy.linalg.eig(M) i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] if not pseudo and len(i): # point: any eigenvector corresponding to eigenvalue 1 point = numpy.real(V[:, i[-1]]).squeeze() point /= point[3] # direction: unit eigenvector corresponding to eigenvalue 0 l, V = numpy.linalg.eig(M33) i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] if not len(i): raise ValueError("no eigenvector corresponding to eigenvalue 0") direction = numpy.real(V[:, i[0]]).squeeze() direction /= vector_norm(direction) # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 l, V = numpy.linalg.eig(M33.T) i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] if len(i): # parallel projection normal = numpy.real(V[:, i[0]]).squeeze() normal /= vector_norm(normal) return point, normal, direction, None, False else: # orthogonal projection, where normal equals direction vector return point, direction, None, None, False else: # perspective projection i = numpy.where(abs(numpy.real(l)) > 1e-8)[0] if not len(i): raise ValueError( "no eigenvector not corresponding to eigenvalue 0") point = numpy.real(V[:, i[-1]]).squeeze() point /= point[3] normal = - M[3, :3] perspective = M[:3, 3] / numpy.dot(point[:3], normal) if pseudo: perspective -= normal return point, normal, None, perspective, pseudo
python
def projection_from_matrix(matrix, pseudo=False): """Return projection plane and perspective point from projection matrix. Return values are same as arguments for projection_matrix function: point, normal, direction, perspective, and pseudo. >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, direct) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) >>> result = projection_from_matrix(P0, pseudo=False) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> result = projection_from_matrix(P0, pseudo=True) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True """ M = numpy.array(matrix, dtype=numpy.float64, copy=False) M33 = M[:3, :3] l, V = numpy.linalg.eig(M) i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0] if not pseudo and len(i): # point: any eigenvector corresponding to eigenvalue 1 point = numpy.real(V[:, i[-1]]).squeeze() point /= point[3] # direction: unit eigenvector corresponding to eigenvalue 0 l, V = numpy.linalg.eig(M33) i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] if not len(i): raise ValueError("no eigenvector corresponding to eigenvalue 0") direction = numpy.real(V[:, i[0]]).squeeze() direction /= vector_norm(direction) # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 l, V = numpy.linalg.eig(M33.T) i = numpy.where(abs(numpy.real(l)) < 1e-8)[0] if len(i): # parallel projection normal = numpy.real(V[:, i[0]]).squeeze() normal /= vector_norm(normal) return point, normal, direction, None, False else: # orthogonal projection, where normal equals direction vector return point, direction, None, None, False else: # perspective projection i = numpy.where(abs(numpy.real(l)) > 1e-8)[0] if not len(i): raise ValueError( "no eigenvector not corresponding to eigenvalue 0") point = numpy.real(V[:, i[-1]]).squeeze() point /= point[3] normal = - M[3, :3] perspective = M[:3, 3] / numpy.dot(point[:3], normal) if pseudo: perspective -= normal return point, normal, None, perspective, pseudo
[ "def", "projection_from_matrix", "(", "matrix", ",", "pseudo", "=", "False", ")", ":", "M", "=", "numpy", ".", "array", "(", "matrix", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "False", ")", "M33", "=", "M", "[", ":", "3", ",", ":", "3", "]", "l", ",", "V", "=", "numpy", ".", "linalg", ".", "eig", "(", "M", ")", "i", "=", "numpy", ".", "where", "(", "abs", "(", "numpy", ".", "real", "(", "l", ")", "-", "1.0", ")", "<", "1e-8", ")", "[", "0", "]", "if", "not", "pseudo", "and", "len", "(", "i", ")", ":", "# point: any eigenvector corresponding to eigenvalue 1", "point", "=", "numpy", ".", "real", "(", "V", "[", ":", ",", "i", "[", "-", "1", "]", "]", ")", ".", "squeeze", "(", ")", "point", "/=", "point", "[", "3", "]", "# direction: unit eigenvector corresponding to eigenvalue 0", "l", ",", "V", "=", "numpy", ".", "linalg", ".", "eig", "(", "M33", ")", "i", "=", "numpy", ".", "where", "(", "abs", "(", "numpy", ".", "real", "(", "l", ")", ")", "<", "1e-8", ")", "[", "0", "]", "if", "not", "len", "(", "i", ")", ":", "raise", "ValueError", "(", "\"no eigenvector corresponding to eigenvalue 0\"", ")", "direction", "=", "numpy", ".", "real", "(", "V", "[", ":", ",", "i", "[", "0", "]", "]", ")", ".", "squeeze", "(", ")", "direction", "/=", "vector_norm", "(", "direction", ")", "# normal: unit eigenvector of M33.T corresponding to eigenvalue 0", "l", ",", "V", "=", "numpy", ".", "linalg", ".", "eig", "(", "M33", ".", "T", ")", "i", "=", "numpy", ".", "where", "(", "abs", "(", "numpy", ".", "real", "(", "l", ")", ")", "<", "1e-8", ")", "[", "0", "]", "if", "len", "(", "i", ")", ":", "# parallel projection", "normal", "=", "numpy", ".", "real", "(", "V", "[", ":", ",", "i", "[", "0", "]", "]", ")", ".", "squeeze", "(", ")", "normal", "/=", "vector_norm", "(", "normal", ")", "return", "point", ",", "normal", ",", "direction", ",", "None", ",", "False", "else", ":", "# orthogonal projection, where normal equals direction vector", "return", "point", ",", "direction", ",", "None", ",", "None", ",", "False", "else", ":", "# perspective projection", "i", "=", "numpy", ".", "where", "(", "abs", "(", "numpy", ".", "real", "(", "l", ")", ")", ">", "1e-8", ")", "[", "0", "]", "if", "not", "len", "(", "i", ")", ":", "raise", "ValueError", "(", "\"no eigenvector not corresponding to eigenvalue 0\"", ")", "point", "=", "numpy", ".", "real", "(", "V", "[", ":", ",", "i", "[", "-", "1", "]", "]", ")", ".", "squeeze", "(", ")", "point", "/=", "point", "[", "3", "]", "normal", "=", "-", "M", "[", "3", ",", ":", "3", "]", "perspective", "=", "M", "[", ":", "3", ",", "3", "]", "/", "numpy", ".", "dot", "(", "point", "[", ":", "3", "]", ",", "normal", ")", "if", "pseudo", ":", "perspective", "-=", "normal", "return", "point", ",", "normal", ",", "None", ",", "perspective", ",", "pseudo" ]
Return projection plane and perspective point from projection matrix. Return values are same as arguments for projection_matrix function: point, normal, direction, perspective, and pseudo. >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, direct) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) >>> result = projection_from_matrix(P0, pseudo=False) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> result = projection_from_matrix(P0, pseudo=True) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True
[ "Return", "projection", "plane", "and", "perspective", "point", "from", "projection", "matrix", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/transformations.py#L499-L569
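The return tuple is shaped so it can be splatted straight back into projection_matrix; a round-trip sketch for the parallel-projection case, mirroring the doctests:

import numpy
from autolab_core import transformations

point = numpy.random.random(3) - 0.5
normal = numpy.random.random(3) - 0.5
direct = numpy.random.random(3) - 0.5
P0 = transformations.projection_matrix(point, normal, direction=direct)
result = transformations.projection_from_matrix(P0)  # (point, normal, direction, perspective, pseudo)
P1 = transformations.projection_matrix(*result)
print(transformations.is_same_transform(P0, P1))  # True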
9,691
BerkeleyAutomation/autolab_core
autolab_core/transformations.py
unit_vector
def unit_vector(data, axis=None, out=None): """Return ndarray normalized by length, i.e. Euclidean norm, along axis. >>> v0 = numpy.random.random(3) >>> v1 = unit_vector(v0) >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) True >>> v0 = numpy.random.rand(5, 4, 3) >>> v1 = unit_vector(v0, axis=-1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) >>> numpy.allclose(v1, v2) True >>> v1 = unit_vector(v0, axis=1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) >>> numpy.allclose(v1, v2) True >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64) >>> unit_vector(v0, axis=1, out=v1) >>> numpy.allclose(v1, v2) True >>> list(unit_vector([])) [] >>> list(unit_vector([1.0])) [1.0] """ if out is None: data = numpy.array(data, dtype=numpy.float64, copy=True) if data.ndim == 1: data /= math.sqrt(numpy.dot(data, data)) return data else: if out is not data: out[:] = numpy.array(data, copy=False) data = out length = numpy.atleast_1d(numpy.sum(data*data, axis)) numpy.sqrt(length, length) if axis is not None: length = numpy.expand_dims(length, axis) data /= length if out is None: return data
python
def unit_vector(data, axis=None, out=None): """Return ndarray normalized by length, i.e. Euclidean norm, along axis. >>> v0 = numpy.random.random(3) >>> v1 = unit_vector(v0) >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) True >>> v0 = numpy.random.rand(5, 4, 3) >>> v1 = unit_vector(v0, axis=-1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) >>> numpy.allclose(v1, v2) True >>> v1 = unit_vector(v0, axis=1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) >>> numpy.allclose(v1, v2) True >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64) >>> unit_vector(v0, axis=1, out=v1) >>> numpy.allclose(v1, v2) True >>> list(unit_vector([])) [] >>> list(unit_vector([1.0])) [1.0] """ if out is None: data = numpy.array(data, dtype=numpy.float64, copy=True) if data.ndim == 1: data /= math.sqrt(numpy.dot(data, data)) return data else: if out is not data: out[:] = numpy.array(data, copy=False) data = out length = numpy.atleast_1d(numpy.sum(data*data, axis)) numpy.sqrt(length, length) if axis is not None: length = numpy.expand_dims(length, axis) data /= length if out is None: return data
[ "def", "unit_vector", "(", "data", ",", "axis", "=", "None", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "data", "=", "numpy", ".", "array", "(", "data", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "True", ")", "if", "data", ".", "ndim", "==", "1", ":", "data", "/=", "math", ".", "sqrt", "(", "numpy", ".", "dot", "(", "data", ",", "data", ")", ")", "return", "data", "else", ":", "if", "out", "is", "not", "data", ":", "out", "[", ":", "]", "=", "numpy", ".", "array", "(", "data", ",", "copy", "=", "False", ")", "data", "=", "out", "length", "=", "numpy", ".", "atleast_1d", "(", "numpy", ".", "sum", "(", "data", "*", "data", ",", "axis", ")", ")", "numpy", ".", "sqrt", "(", "length", ",", "length", ")", "if", "axis", "is", "not", "None", ":", "length", "=", "numpy", ".", "expand_dims", "(", "length", ",", "axis", ")", "data", "/=", "length", "if", "out", "is", "None", ":", "return", "data" ]
Return ndarray normalized by length, i.e. Euclidean norm, along axis. >>> v0 = numpy.random.random(3) >>> v1 = unit_vector(v0) >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) True >>> v0 = numpy.random.rand(5, 4, 3) >>> v1 = unit_vector(v0, axis=-1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) >>> numpy.allclose(v1, v2) True >>> v1 = unit_vector(v0, axis=1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) >>> numpy.allclose(v1, v2) True >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64) >>> unit_vector(v0, axis=1, out=v1) >>> numpy.allclose(v1, v2) True >>> list(unit_vector([])) [] >>> list(unit_vector([1.0])) [1.0]
[ "Return", "ndarray", "normalized", "by", "length", "i", ".", "e", ".", "Euclidean", "norm", "along", "axis", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/transformations.py#L1574-L1615
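For a single vector this is just division by the Euclidean norm; the axis/out machinery exists for batched input and in-place use:

import numpy
from autolab_core import transformations

print(transformations.unit_vector([3.0, 0.0, 4.0]))  # [0.6 0.  0.8]

# Batched, in place: normalize each row; the function returns None when out is given.
batch = numpy.random.rand(10, 3)
transformations.unit_vector(batch, axis=1, out=batch)
print(numpy.allclose(numpy.linalg.norm(batch, axis=1), 1.0))  # True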
9,692
BerkeleyAutomation/autolab_core
autolab_core/json_serialization.py
json_numpy_obj_hook
def json_numpy_obj_hook(dct): """Decodes a previously encoded numpy ndarray with proper shape and dtype. Parameters ---------- dct : :obj:`dict` The encoded dictionary. Returns ------- :obj:`numpy.ndarray` The ndarray that `dct` was encoding. """ if isinstance(dct, dict) and '__ndarray__' in dct: data = np.asarray(dct['__ndarray__'], dtype=dct['dtype']) return data.reshape(dct['shape']) return dct
python
def json_numpy_obj_hook(dct): """Decodes a previously encoded numpy ndarray with proper shape and dtype. Parameters ---------- dct : :obj:`dict` The encoded dictionary. Returns ------- :obj:`numpy.ndarray` The ndarray that `dct` was encoding. """ if isinstance(dct, dict) and '__ndarray__' in dct: data = np.asarray(dct['__ndarray__'], dtype=dct['dtype']) return data.reshape(dct['shape']) return dct
[ "def", "json_numpy_obj_hook", "(", "dct", ")", ":", "if", "isinstance", "(", "dct", ",", "dict", ")", "and", "'__ndarray__'", "in", "dct", ":", "data", "=", "np", ".", "asarray", "(", "dct", "[", "'__ndarray__'", "]", ",", "dtype", "=", "dct", "[", "'dtype'", "]", ")", "return", "data", ".", "reshape", "(", "dct", "[", "'shape'", "]", ")", "return", "dct" ]
Decodes a previously encoded numpy ndarray with proper shape and dtype. Parameters ---------- dct : :obj:`dict` The encoded dictionary. Returns ------- :obj:`numpy.ndarray` The ndarray that `dct` was encoding.
[ "Decodes", "a", "previously", "encoded", "numpy", "ndarray", "with", "proper", "shape", "and", "dtype", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/json_serialization.py#L45-L61
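The hook only fires on dicts carrying the __ndarray__ marker and passes everything else through unchanged, which is what lets it serve as a json object_hook:

from autolab_core.json_serialization import json_numpy_obj_hook

encoded = {'__ndarray__': [[1, 2], [3, 4]], 'dtype': 'int64', 'shape': (2, 2)}
arr = json_numpy_obj_hook(encoded)
print(arr.dtype, arr.shape)           # int64 (2, 2)
print(json_numpy_obj_hook({'a': 1}))  # {'a': 1} -- non-array dicts pass through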
9,693
BerkeleyAutomation/autolab_core
autolab_core/json_serialization.py
dump
def dump(*args, **kwargs): """Dump a numpy.ndarray to a file stream. This works exactly like the usual `json.dump()` function, but it uses our custom serializer. """ kwargs.update(dict(cls=NumpyEncoder, sort_keys=True, indent=4, separators=(',', ': '))) return _json.dump(*args, **kwargs)
python
def dump(*args, **kwargs): """Dump a numpy.ndarray to a file stream. This works exactly like the usual `json.dump()` function, but it uses our custom serializer. """ kwargs.update(dict(cls=NumpyEncoder, sort_keys=True, indent=4, separators=(',', ': '))) return _json.dump(*args, **kwargs)
[ "def", "dump", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "dict", "(", "cls", "=", "NumpyEncoder", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")", "return", "_json", ".", "dump", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Dump a numpy.ndarray to a file stream. This works exactly like the usual `json.dump()` function, but it uses our custom serializer.
[ "Dump", "a", "numpy", ".", "ndarray", "to", "a", "file", "stream", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/json_serialization.py#L63-L73
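Because the serializer settings are injected with kwargs.update(), any caller-supplied cls, indent, or separators values are overridden. A sketch with a hypothetical output file:

import numpy as np
from autolab_core import json_serialization

# 'pose.json' is a placeholder path.
with open('pose.json', 'w') as f:
    json_serialization.dump({'pose': np.eye(3)}, f)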
9,694
BerkeleyAutomation/autolab_core
autolab_core/json_serialization.py
load
def load(*args, **kwargs): """Load a numpy.ndarray from a file stream. This works exactly like the usual `json.load()` function, but it uses our custom deserializer. """ kwargs.update(dict(object_hook=json_numpy_obj_hook)) return _json.load(*args, **kwargs)
python
def load(*args, **kwargs): """Load a numpy.ndarray from a file stream. This works exactly like the usual `json.load()` function, but it uses our custom deserializer. """ kwargs.update(dict(object_hook=json_numpy_obj_hook)) return _json.load(*args, **kwargs)
[ "def", "load", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "dict", "(", "object_hook", "=", "json_numpy_obj_hook", ")", ")", "return", "_json", ".", "load", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Load a numpy.ndarray from a file stream. This works exactly like the usual `json.load()` function, but it uses our custom deserializer.
[ "Load", "a", "numpy", ".", "ndarray", "from", "a", "file", "stream", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/json_serialization.py#L75-L82
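Reading the same hypothetical file back restores the ndarray via json_numpy_obj_hook:

from autolab_core import json_serialization

with open('pose.json', 'r') as f:
    data = json_serialization.load(f)
print(data['pose'].shape)  # (3, 3)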
9,695
BerkeleyAutomation/autolab_core
autolab_core/json_serialization.py
NumpyEncoder.default
def default(self, obj): """Converts an ndarray into a dictionary for efficient serialization. The dict has three keys: - dtype : The datatype of the array as a string. - shape : The shape of the array as a tuple. - __ndarray__ : The data of the array as a list. Parameters ---------- obj : :obj:`numpy.ndarray` The ndarray to encode. Returns ------- :obj:`dict` The dictionary serialization of obj. Raises ------ TypeError If obj isn't an ndarray. """ if isinstance(obj, np.ndarray): return dict(__ndarray__=obj.tolist(), dtype=str(obj.dtype), shape=obj.shape) # Let the base class default method raise the TypeError return _json.JSONEncoder.default(self, obj)
python
def default(self, obj): """Converts an ndarray into a dictionary for efficient serialization. The dict has three keys: - dtype : The datatype of the array as a string. - shape : The shape of the array as a tuple. - __ndarray__ : The data of the array as a list. Parameters ---------- obj : :obj:`numpy.ndarray` The ndarray to encode. Returns ------- :obj:`dict` The dictionary serialization of obj. Raises ------ TypeError If obj isn't an ndarray. """ if isinstance(obj, np.ndarray): return dict(__ndarray__=obj.tolist(), dtype=str(obj.dtype), shape=obj.shape) # Let the base class default method raise the TypeError return _json.JSONEncoder.default(self, obj)
[ "def", "default", "(", "self", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "np", ".", "ndarray", ")", ":", "return", "dict", "(", "__ndarray__", "=", "obj", ".", "tolist", "(", ")", ",", "dtype", "=", "str", "(", "obj", ".", "dtype", ")", ",", "shape", "=", "obj", ".", "shape", ")", "# Let the base class default method raise the TypeError", "return", "_json", ".", "JSONEncoder", ".", "default", "(", "self", ",", "obj", ")" ]
Converts an ndarray into a dictionary for efficient serialization. The dict has three keys: - dtype : The datatype of the array as a string. - shape : The shape of the array as a tuple. - __ndarray__ : The data of the array as a list. Parameters ---------- obj : :obj:`numpy.ndarray` The ndarray to encode. Returns ------- :obj:`dict` The dictionary serialization of obj. Raises ------ TypeError If obj isn't an ndarray.
[ "Converts", "an", "ndarray", "into", "a", "dictionary", "for", "efficient", "serialization", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/json_serialization.py#L15-L43
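With the base-class call corrected above (JSONEncoder.default rather than the JSONEncoder constructor), the encoder plugs into the standard json module directly. The exact dtype string is platform-dependent:

import json
import numpy as np
from autolab_core.json_serialization import NumpyEncoder

s = json.dumps({'v': np.arange(3)}, cls=NumpyEncoder)
print(s)  # e.g. {"v": {"__ndarray__": [0, 1, 2], "dtype": "int64", "shape": [3]}}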
9,696
BerkeleyAutomation/autolab_core
autolab_core/random_variables.py
RandomVariable._preallocate_samples
def _preallocate_samples(self): """Preallocate samples for faster adaptive sampling. """ self.prealloc_samples_ = [] for i in range(self.num_prealloc_samples_): self.prealloc_samples_.append(self.sample())
python
def _preallocate_samples(self): """Preallocate samples for faster adaptive sampling. """ self.prealloc_samples_ = [] for i in range(self.num_prealloc_samples_): self.prealloc_samples_.append(self.sample())
[ "def", "_preallocate_samples", "(", "self", ")", ":", "self", ".", "prealloc_samples_", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "num_prealloc_samples_", ")", ":", "self", ".", "prealloc_samples_", ".", "append", "(", "self", ".", "sample", "(", ")", ")" ]
Preallocate samples for faster adaptive sampling.
[ "Preallocate", "samples", "for", "faster", "adaptive", "sampling", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/random_variables.py#L30-L35
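A hypothetical minimal subclass, assuming the base constructor accepts a num_prealloc_samples count (the num_prealloc_samples_ attribute above suggests it does); sample() is the only hook a concrete random variable must supply:

import numpy as np
from autolab_core.random_variables import RandomVariable

class CoinFlipRV(RandomVariable):
    # Illustrative subclass only; not part of the library.
    def sample(self, size=1):
        return np.random.randint(2, size=size)

rv = CoinFlipRV(num_prealloc_samples=100)  # eagerly fills prealloc_samples_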
9,697
BerkeleyAutomation/autolab_core
autolab_core/random_variables.py
RandomVariable.rvs
def rvs(self, size=1, iteration=1): """Sample the random variable, using the preallocated samples if possible. Parameters ---------- size : int The number of samples to generate. iteration : int The location in the preallocated sample array to start sampling from. Returns ------- :obj:`numpy.ndarray` of float or int The samples of the random variable. If `size == 1`, then the returned value will not be wrapped in an array. """ if self.num_prealloc_samples_ > 0: samples = [] for i in range(size): samples.append(self.prealloc_samples_[(iteration + i) % self.num_prealloc_samples_]) if size == 1: return samples[0] return samples # generate a new sample return self.sample(size=size)
python
def rvs(self, size=1, iteration=1): """Sample the random variable, using the preallocated samples if possible. Parameters ---------- size : int The number of samples to generate. iteration : int The location in the preallocated sample array to start sampling from. Returns ------- :obj:`numpy.ndarray` of float or int The samples of the random variable. If `size == 1`, then the returned value will not be wrapped in an array. """ if self.num_prealloc_samples_ > 0: samples = [] for i in range(size): samples.append(self.prealloc_samples_[(iteration + i) % self.num_prealloc_samples_]) if size == 1: return samples[0] return samples # generate a new sample return self.sample(size=size)
[ "def", "rvs", "(", "self", ",", "size", "=", "1", ",", "iteration", "=", "1", ")", ":", "if", "self", ".", "num_prealloc_samples_", ">", "0", ":", "samples", "=", "[", "]", "for", "i", "in", "range", "(", "size", ")", ":", "samples", ".", "append", "(", "self", ".", "prealloc_samples_", "[", "(", "iteration", "+", "i", ")", "%", "self", ".", "num_prealloc_samples_", "]", ")", "if", "size", "==", "1", ":", "return", "samples", "[", "0", "]", "return", "samples", "# generate a new sample", "return", "self", ".", "sample", "(", "size", "=", "size", ")" ]
Sample the random variable, using the preallocated samples if possible. Parameters ---------- size : int The number of samples to generate. iteration : int The location in the preallocated sample array to start sampling from. Returns ------- :obj:`numpy.ndarray` of float or int The samples of the random variable. If `size == 1`, then the returned value will not be wrapped in an array.
[ "Sample", "the", "random", "variable", "using", "the", "preallocated", "samples", "if", "possible", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/random_variables.py#L54-L81
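Note that the preallocated path returns a plain Python list (or a single bare sample when size == 1) rather than the ndarray the docstring advertises. Usage with the hypothetical CoinFlipRV sketched two records above:

flips = rv.rvs(size=10, iteration=0)  # ten entries cycled from the preallocated pool
one = rv.rvs()                        # size == 1: a bare sample, not a list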
9,698
BerkeleyAutomation/autolab_core
autolab_core/random_variables.py
IsotropicGaussianRigidTransformRandomVariable.sample
def sample(self, size=1): """ Sample rigid transform random variables. Parameters ---------- size : int number of samples to take Returns ------- :obj:`list` of :obj:`RigidTransform` sampled rigid transformations """ samples = [] for i in range(size): # sample random pose xi = self._r_xi_rv.rvs(size=1) S_xi = skew(xi) R_sample = scipy.linalg.expm(S_xi) t_sample = self._t_rv.rvs(size=1) samples.append(RigidTransform(rotation=R_sample, translation=t_sample, from_frame=self._from_frame, to_frame=self._to_frame)) # not a list if only 1 sample if size == 1 and len(samples) > 0: return samples[0] return samples
python
def sample(self, size=1): """ Sample rigid transform random variables. Parameters ---------- size : int number of samples to take Returns ------- :obj:`list` of :obj:`RigidTransform` sampled rigid transformations """ samples = [] for i in range(size): # sample random pose xi = self._r_xi_rv.rvs(size=1) S_xi = skew(xi) R_sample = scipy.linalg.expm(S_xi) t_sample = self._t_rv.rvs(size=1) samples.append(RigidTransform(rotation=R_sample, translation=t_sample, from_frame=self._from_frame, to_frame=self._to_frame)) # not a list if only 1 sample if size == 1 and len(samples) > 0: return samples[0] return samples
[ "def", "sample", "(", "self", ",", "size", "=", "1", ")", ":", "samples", "=", "[", "]", "for", "i", "in", "range", "(", "size", ")", ":", "# sample random pose", "xi", "=", "self", ".", "_r_xi_rv", ".", "rvs", "(", "size", "=", "1", ")", "S_xi", "=", "skew", "(", "xi", ")", "R_sample", "=", "scipy", ".", "linalg", ".", "expm", "(", "S_xi", ")", "t_sample", "=", "self", ".", "_t_rv", ".", "rvs", "(", "size", "=", "1", ")", "samples", ".", "append", "(", "RigidTransform", "(", "rotation", "=", "R_sample", ",", "translation", "=", "t_sample", ",", "from_frame", "=", "self", ".", "_from_frame", ",", "to_frame", "=", "self", ".", "_to_frame", ")", ")", "# not a list if only 1 sample", "if", "size", "==", "1", "and", "len", "(", "samples", ")", ">", "0", ":", "return", "samples", "[", "0", "]", "return", "samples" ]
Sample rigid transform random variables. Parameters ---------- size : int number of samples to take Returns ------- :obj:`list` of :obj:`RigidTransform` sampled rigid transformations
[ "Sample", "rigid", "transform", "random", "variables", "." ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/random_variables.py#L221-L249
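The rotation draw is the standard exponential map: a small axis-angle vector xi becomes a rotation matrix via expm of its skew-symmetric form. A self-contained sketch (skew is re-implemented here for illustration; the library provides its own):

import numpy as np
import scipy.linalg

def skew(xi):
    # Skew-symmetric (cross-product) matrix of a 3-vector.
    x, y, z = xi
    return np.array([[0.0,  -z,   y],
                     [  z, 0.0,  -x],
                     [ -y,   x, 0.0]])

xi = 0.1 * np.random.randn(3)       # small random rotation vector
R = scipy.linalg.expm(skew(xi))     # exponential map onto SO(3)
print(np.allclose(R.dot(R.T), np.eye(3)))  # True: R is a valid rotation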
9,699
BerkeleyAutomation/autolab_core
autolab_core/data_stream_recorder.py
DataStreamRecorder._flush
def _flush(self): """ Returns a list of all current data """ if self._recording: raise Exception("Cannot flush data queue while recording!") if self._saving_cache: logging.warning("Flush when using cache means unsaved data will be lost and not returned!") self._cmds_q.put(("reset_data_segment",)) else: data = self._extract_q(0) return data
python
def _flush(self): """ Returns a list of all current data """ if self._recording: raise Exception("Cannot flush data queue while recording!") if self._saving_cache: logging.warning("Flush when using cache means unsaved data will be lost and not returned!") self._cmds_q.put(("reset_data_segment",)) else: data = self._extract_q(0) return data
[ "def", "_flush", "(", "self", ")", ":", "if", "self", ".", "_recording", ":", "raise", "Exception", "(", "\"Cannot flush data queue while recording!\"", ")", "if", "self", ".", "_saving_cache", ":", "logging", ".", "warning", "(", "\"Flush when using cache means unsaved data will be lost and not returned!\"", ")", "self", ".", "_cmds_q", ".", "put", "(", "(", "\"reset_data_segment\"", ",", ")", ")", "else", ":", "data", "=", "self", ".", "_extract_q", "(", "0", ")", "return", "data" ]
Returns a list of all current data
[ "Returns", "a", "list", "of", "all", "current", "data" ]
8f3813f6401972868cc5e3981ba1b4382d4418d5
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/data_stream_recorder.py#L193-L202