idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
53,600
def rotation_from_quaternion(q_wxyz):
    """Convert a quaternion array to a 3x3 rotation matrix.

    Parameters
    ----------
    q_wxyz : array-like
        Quaternion in (w, x, y, z) order.

    Returns
    -------
    numpy.ndarray
        3x3 rotation matrix.
    """
    # The transformations library expects (x, y, z, w) ordering.
    q_xyzw = np.array([q_wxyz[1], q_wxyz[2], q_wxyz[3], q_wxyz[0]])
    return transformations.quaternion_matrix(q_xyzw)[:3, :3]
Convert quaternion array to rotation matrix .
53,601
def quaternion_from_axis_angle(v):
    """Convert an axis-angle vector to a quaternion.

    Parameters
    ----------
    v : array-like
        Axis-angle vector; its norm is the rotation angle, its direction
        the rotation axis. The zero vector maps to the identity quaternion.

    Returns
    -------
    numpy.ndarray
        Quaternion in (w, x, y, z) order.
    """
    theta = np.linalg.norm(v)
    if theta > 0:
        v = v / np.linalg.norm(v)
    ax, ay, az = v
    half_sin = np.sin(0.5 * theta)
    return np.array([np.cos(0.5 * theta),
                     ax * half_sin,
                     ay * half_sin,
                     az * half_sin])
Convert axis - angle representation to a quaternion vector .
53,602
def transform_from_dual_quaternion(dq, from_frame='unassigned', to_frame='world'):
    """Create a RigidTransform from a DualQuaternion.

    The rotation is the real part ``dq.qr``; the translation is twice the
    vector part of the dual component ``dq.qd``.
    """
    return RigidTransform(rotation=dq.qr,
                          translation=2 * dq.qd[1:],
                          from_frame=from_frame,
                          to_frame=to_frame)
Create a RigidTransform from a DualQuaternion .
53,603
def rotation_and_translation_from_matrix(matrix):
    """Split a 4x4 homogeneous transform into rotation and translation.

    Parameters
    ----------
    matrix : numpy.ndarray
        4x4 homogeneous transformation matrix.

    Returns
    -------
    tuple
        (3x3 rotation matrix, length-3 translation vector).

    Raises
    ------
    ValueError
        If matrix is not a 4x4 ndarray.
    """
    if not isinstance(matrix, np.ndarray) or matrix.shape[0] != 4 or matrix.shape[1] != 4:
        raise ValueError('Matrix must be specified as a 4x4 ndarray')
    return matrix[:3, :3], matrix[:3, 3]
Helper to convert 4x4 matrix to rotation matrix and translation vector .
53,604
def rotation_from_axis_and_origin(axis, origin, angle, to_frame='world'):
    """Rotation about an arbitrary axis through `origin`, via Rodrigues' formula.

    Parameters
    ----------
    axis : array-like
        Unit rotation axis.
    origin : array-like
        Point the rotation axis passes through.
    angle : float
        Rotation angle in radians.
    to_frame : str
        Frame name used for both sides of the returned transform.
    """
    # Skew-symmetric cross-product matrix of the axis.
    axis_hat = np.array([[0, -axis[2], axis[1]],
                         [axis[2], 0, -axis[0]],
                         [-axis[1], axis[0], 0]])
    # Rodrigues: R = I + sin(t) * K + (1 - cos(t)) * K^2
    rotation = RigidTransform(
        np.eye(3) + np.sin(angle) * axis_hat + (1 - np.cos(angle)) * axis_hat.dot(axis_hat),
        from_frame=to_frame,
        to_frame=to_frame)
    # Shift origin to the world origin, rotate, then shift back.
    shift_to = RigidTransform(translation=origin, from_frame=to_frame, to_frame=to_frame)
    shift_back = RigidTransform(translation=-origin, from_frame=to_frame, to_frame=to_frame)
    return shift_to.dot(rotation).dot(shift_back)
Returns a rotation matrix around an arbitrary axis through the point origin, using Rodrigues' formula.
53,605
def x_axis_rotation(theta):
    """Return the 3x3 rotation matrix for a rotation of `theta` radians about the x axis."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[1, 0, 0],
                     [0, c, -s],
                     [0, s, c]])
Generates a 3x3 rotation matrix for a rotation of angle theta about the x axis .
53,606
def y_axis_rotation(theta):
    """Return the 3x3 rotation matrix for a rotation of `theta` radians about the y axis."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, 0, s],
                     [0, 1, 0],
                     [-s, 0, c]])
Generates a 3x3 rotation matrix for a rotation of angle theta about the y axis .
53,607
def z_axis_rotation(theta):
    """Return the 3x3 rotation matrix for a rotation of `theta` radians about the z axis."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0],
                     [s, c, 0],
                     [0, 0, 1]])
Generates a 3x3 rotation matrix for a rotation of angle theta about the z axis .
53,608
def random_rotation():
    """Generate a random 3x3 proper rotation matrix via SVD.

    The left singular vectors of a random matrix are orthonormal but may
    form a reflection (determinant -1); one column is flipped in that case
    so the result is always a proper rotation.

    Returns
    -------
    numpy.ndarray
        3x3 orthonormal matrix with determinant +1.
    """
    rand_seed = np.random.rand(3, 3)
    U, S, V = np.linalg.svd(rand_seed)
    # Fix: ensure det(U) = +1 so the matrix is a rotation, not a reflection.
    if np.linalg.det(U) < 0:
        U[:, 2] = -U[:, 2]
    return U
Generates a random 3x3 rotation matrix with SVD .
53,609
def rotation_from_axes(x_axis, y_axis, z_axis):
    """Stack three target-frame axis vectors as the columns of a rotation matrix.

    Parameters
    ----------
    x_axis, y_axis, z_axis : array-like
        1-D axis vectors expressed in the target frame.

    Returns
    -------
    numpy.ndarray
        Matrix whose columns are the given axes, mapping source to target frame.
    """
    return np.column_stack((x_axis, y_axis, z_axis))
Convert specification of axis in target frame to a rotation matrix from source to target frame .
53,610
def interpolate(T0, T1, t):
    """Return an interpolation of two RigidTransforms at parameter t.

    Both transforms must share a to_frame; the interpolation is done on
    their dual-quaternion representations.

    Raises
    ------
    ValueError
        If the two transforms have different to frames.
    """
    if T0.to_frame != T1.to_frame:
        raise ValueError('Cannot interpolate between 2 transforms with different to frames! Got T1 {0} and T2 {1}'.format(T0.to_frame, T1.to_frame))
    dqt = DualQuaternion.interpolate(T0.dual_quaternion, T1.dual_quaternion, t)
    # Synthesized frame name records both endpoints and the parameter.
    blended_from_frame = "{0}_{1}_{2}".format(T0.from_frame, T1.from_frame, t)
    return RigidTransform.transform_from_dual_quaternion(dqt, blended_from_frame, T0.to_frame)
Return an interpolation of two RigidTransforms .
53,611
def load(filename):
    """Load a RigidTransform from a text file.

    File layout: line 0 from_frame, line 1 to_frame, line 2 the translation
    (3 floats), lines 3-5 the rotation matrix rows (3 floats each).

    Raises
    ------
    ValueError
        If the file extension is not TF_EXTENSION.
    """
    file_root, file_ext = os.path.splitext(filename)
    if file_ext.lower() != TF_EXTENSION:
        raise ValueError('Extension %s not supported for RigidTransform. Can only load extension %s' % (file_ext, TF_EXTENSION))
    # Fix: 'with' guarantees the handle is closed even if parsing fails.
    with open(filename, 'r') as f:
        lines = list(f)
    from_frame = lines[0][:-1]
    to_frame = lines[1][:-1]
    t = np.zeros(3)
    t_tokens = lines[2][:-1].split()
    t[0] = float(t_tokens[0])
    t[1] = float(t_tokens[1])
    t[2] = float(t_tokens[2])
    R = np.zeros([3, 3])
    # One matrix row per line.
    for i in range(3):
        r_tokens = lines[3 + i][:-1].split()
        R[i, 0] = float(r_tokens[0])
        R[i, 1] = float(r_tokens[1])
        R[i, 2] = float(r_tokens[2])
    return RigidTransform(rotation=R, translation=t,
                          from_frame=from_frame, to_frame=to_frame)
Load a RigidTransform from a file .
53,612
def dot(self, other_tf):
    """Compose this similarity transform with another transform.

    Parameters
    ----------
    other_tf : RigidTransform or SimilarityTransform
        Transform applied first (right-hand side of the composition).

    Returns
    -------
    SimilarityTransform
        The composed transform, other_tf.from_frame -> self.to_frame.

    Raises
    ------
    ValueError
        If other_tf is not a RigidTransform or its to_frame does not match
        this transform's from_frame.
    """
    # Fix: validate the type before touching other_tf's attributes so a
    # non-transform argument raises ValueError, not AttributeError.
    if not isinstance(other_tf, RigidTransform):
        raise ValueError('Can only compose with other RigidTransform classes')
    if other_tf.to_frame != self.from_frame:
        raise ValueError('To frame of right hand side ({0}) must match from frame of left hand side ({1})'.format(other_tf.to_frame, self.from_frame))
    # Plain rigid transforms compose with unit scale.
    other_scale = 1.0
    if isinstance(other_tf, SimilarityTransform):
        other_scale = other_tf.scale
    rotation = self.rotation.dot(other_tf.rotation)
    translation = self.translation + self.scale * self.rotation.dot(other_tf.translation)
    scale = self.scale * other_scale
    return SimilarityTransform(rotation, translation, scale,
                               from_frame=other_tf.from_frame,
                               to_frame=self.to_frame)
Compose this similarity transform with another.
53,613
def inverse(self):
    """Return the inverse of this similarity transform.

    Frames are swapped so the inverse maps to_frame back to from_frame.
    """
    inv_rotation = np.linalg.inv(self.rotation)
    inv_scale = 1.0 / self.scale
    inv_translation = -inv_scale * inv_rotation.dot(self.translation)
    return SimilarityTransform(inv_rotation, inv_translation, inv_scale,
                               from_frame=self._to_frame,
                               to_frame=self._from_frame)
Take the inverse of the similarity transform .
53,614
def save(self, filename):
    """Save the collection's data to a .npy or .npz file.

    Raises
    ------
    ValueError
        If the extension is neither .npy nor .npz.
    """
    _, file_ext = os.path.splitext(filename)
    if file_ext == '.npy':
        np.save(filename, self._data)
    elif file_ext == '.npz':
        np.savez_compressed(filename, self._data)
    else:
        raise ValueError('Extension %s not supported for point saves.' % (file_ext))
Saves the collection to a file .
53,615
def load_data(filename):
    """Load a data array from a .npy or .npz file.

    Raises
    ------
    ValueError
        If the extension is neither .npy nor .npz.
    """
    _, file_ext = os.path.splitext(filename)
    if file_ext == '.npy':
        return np.load(filename)
    if file_ext == '.npz':
        # Compressed archives store the array under the default key.
        return np.load(filename)['arr_0']
    raise ValueError('Extension %s not supported for point reads' % (file_ext))
Loads data from a file .
53,616
def open(filename, frame='unspecified'):
    """Create a Point from data saved in a file."""
    return Point(BagOfPoints.load_data(filename), frame)
Create a Point from data saved in a file .
53,617
def _check_valid_data(self, data):
    """Validate that data is a single Nx1 (or 1-D) array with unit norm.

    Raises
    ------
    ValueError
        If data is 2-D with more than one column, or its norm deviates
        from 1.0 by more than 1e-4.
    """
    if len(data.shape) == 2 and data.shape[1] != 1:
        raise ValueError('Can only initialize Direction from a single Nx1 array')
    # Directions must be unit vectors (small tolerance for float error).
    if np.abs(np.linalg.norm(data) - 1.0) > 1e-4:
        raise ValueError('Direction data must have norm=1.0')
Checks that the incoming data is a Nx1 ndarray .
53,618
def orthogonal_basis(self):
    """Return two Directions orthogonal to this one (3-D only).

    Raises
    ------
    NotImplementedError
        If the direction is not 3-dimensional.
    """
    if self.dim == 3:
        # Candidate perpendicular in the xy-plane.
        u = np.array([-self.data[1], self.data[0], 0])
        if np.linalg.norm(u) == 0:
            # Direction lies along z; fall back to an x-aligned vector.
            u = np.array([self.data[2], 0, 0])
        u = u / np.linalg.norm(u)
        v = np.cross(self.data, u)
        return Direction(u, frame=self.frame), Direction(v, frame=self.frame)
    raise NotImplementedError('Orthogonal basis only supported for 3 dimensions')
Return an orthogonal basis to this direction .
53,619
def open(filename, frame='unspecified'):
    """Create a Direction from data saved in a file."""
    return Direction(BagOfPoints.load_data(filename), frame)
Create a Direction from data saved in a file .
53,620
def split_points(self, point_cloud):
    """Split a point cloud into the parts above and below this plane.

    Both output clouds are additionally restricted to points with
    positive z coordinate.

    Parameters
    ----------
    point_cloud : PointCloud
        Cloud to split.

    Returns
    -------
    tuple of PointCloud
        (points above the plane, points on or below the plane).

    Raises
    ------
    ValueError
        If point_cloud is not a PointCloud.
    """
    if not isinstance(point_cloud, PointCloud):
        raise ValueError('Can only split point clouds')
    # NOTE(review): the original omitted parentheses — `&` binds tighter
    # than `>` in Python, so `z_coords > 0 & mask` reduced to `z_coords > 0`
    # and the plane test was dropped; the subtraction was also applied
    # outside the projection onto the normal. Both are fixed here.
    centered = point_cloud._data - np.tile(self._x0.data, [1, point_cloud.num_points])
    signed_dist = centered.T.dot(self._n)
    positive_z = point_cloud.z_coords > 0
    above_plane = positive_z & (signed_dist > 0)
    below_plane = positive_z & (signed_dist <= 0)
    above_data = point_cloud.data[:, above_plane]
    below_data = point_cloud.data[:, below_plane]
    return PointCloud(above_data, point_cloud.frame), PointCloud(below_data, point_cloud.frame)
Split a point cloud into two along this plane .
53,621
def mean(self):
    """Return the centroid of the cloud as a Point in the same frame."""
    return Point(np.mean(self._data, axis=1), self._frame)
Returns the average point in the cloud .
53,622
def subsample(self, rate, random=False):
    """Return a subsampled version of the PointCloud.

    Parameters
    ----------
    rate : int
        Keep every rate-th point (strictly positive integer).
    random : bool
        If True, shuffle the point order before subsampling.

    Returns
    -------
    tuple
        (subsampled PointCloud, indices of the kept points).

    Raises
    ------
    ValueError
        If rate is not a strictly positive integer.
    """
    # Fix: the original used 'and', which only rejected values that were
    # BOTH non-int and < 1; 'or' enforces the stated contract.
    if type(rate) != int or rate < 1:
        raise ValueError('Can only subsample with strictly positive integer rate')
    indices = np.arange(self.num_points)
    if random:
        np.random.shuffle(indices)
    subsample_inds = indices[::rate]
    subsampled_data = self._data[:, subsample_inds]
    return PointCloud(subsampled_data, self._frame), subsample_inds
Returns a subsampled version of the PointCloud .
53,623
def box_mask(self, box):
    """Return the points lying inside the given Box, plus their indices.

    Raises
    ------
    ValueError
        If box is not a Box or is in a different frame.
    """
    if not isinstance(box, Box):
        raise ValueError('Must provide Box object')
    if box.frame != self.frame:
        raise ValueError('Box must be in same frame as PointCloud')
    points = self.data.T
    inside_min = np.all(box.min_pt <= points, axis=1)
    inside_max = np.all(points <= box.max_pt, axis=1)
    valid_point_indices = np.where(np.logical_and(inside_min, inside_max))[0]
    valid_points = points[valid_point_indices]
    return PointCloud(valid_points.T, self.frame), valid_point_indices
Return a PointCloud containing only points within the given Box .
53,624
def best_fit_plane(self):
    """Fit a plane z = w0*x + w1*y + w2 to the cloud via least squares.

    Returns
    -------
    tuple
        (unit plane normal as a Direction, cloud centroid as a point on
        the plane).
    """
    X = np.c_[self.x_coords, self.y_coords, np.ones(self.num_points)]
    y = self.z_coords
    # Normal equations: w = (X^T X)^-1 X^T y
    w = np.linalg.inv(X.T.dot(X)).dot(X.T.dot(y))
    normal = np.array([w[0], w[1], -1])
    normal = normal / np.linalg.norm(normal)
    return Direction(normal, self._frame), self.mean()
Fits a plane to the point cloud using least squares .
53,625
def remove_zero_points(self):
    """Drop all points whose z coordinate is exactly zero (in place)."""
    keep = np.where(self.z_coords != 0.0)[0]
    self._data = self.data[:, keep]
Removes points with a zero in the z - axis .
53,626
def remove_infinite_points(self):
    """Drop all points with any non-finite coordinate (in place)."""
    keep = np.where(np.all(np.isfinite(self.data), axis=0))[0]
    self._data = self.data[:, keep]
Removes infinite points .
53,627
def open(filename, frame='unspecified'):
    """Create a PointCloud from data saved in a file."""
    return PointCloud(BagOfPoints.load_data(filename), frame)
Create a PointCloud from data saved in a file .
53,628
def subsample(self, rate):
    """Return a subsampled version of the NormalCloud.

    Parameters
    ----------
    rate : int
        Keep every rate-th normal (strictly positive integer).

    Raises
    ------
    ValueError
        If rate is not a strictly positive integer.
    """
    # Fix: the original used 'and', which only rejected values that were
    # BOTH non-int and < 1; 'or' enforces the stated contract.
    if type(rate) != int or rate < 1:
        raise ValueError('Can only subsample with strictly positive integer rate')
    subsample_inds = np.arange(self.num_points)[::rate]
    subsampled_data = self._data[:, subsample_inds]
    return NormalCloud(subsampled_data, self._frame)
Returns a subsampled version of the NormalCloud .
53,629
def remove_zero_normals(self):
    """Drop normal vectors with zero magnitude (in place)."""
    keep = np.where(np.linalg.norm(self._data, axis=0) != 0.0)[0]
    self._data = self._data[:, keep]
Removes normal vectors with a zero magnitude .
53,630
def remove_nan_normals(self):
    """Drop normal vectors whose magnitude is not finite (in place)."""
    keep = np.where(np.isfinite(np.linalg.norm(self._data, axis=0)))[0]
    self._data = self._data[:, keep]
Removes normal vectors with nan magnitude .
53,631
def open(filename, frame='unspecified'):
    """Create a NormalCloud from data saved in a file."""
    return NormalCloud(BagOfPoints.load_data(filename), frame)
Create a NormalCloud from data saved in a file .
53,632
def open(filename, frame='unspecified'):
    """Create an ImageCoords from data saved in a file."""
    return ImageCoords(BagOfPoints.load_data(filename), frame)
Create an ImageCoords from data saved in a file .
53,633
def open(filename, frame='unspecified'):
    """Create a RgbCloud from data saved in a file."""
    return RgbCloud(BagOfPoints.load_data(filename), frame)
Create a RgbCloud from data saved in a file .
53,634
def remove_zero_points(self):
    """Drop elements with a zero point, a zero normal, or a non-finite normal.

    Filters both the point cloud and the normal cloud in place, keeping
    them index-aligned.
    """
    valid = np.where((np.linalg.norm(self.point_cloud.data, axis=0) != 0.0)
                     & (np.linalg.norm(self.normal_cloud.data, axis=0) != 0.0)
                     & (np.isfinite(self.normal_cloud.data[0, :])))[0]
    self.point_cloud._data = self.point_cloud.data[:, valid]
    self.normal_cloud._data = self.normal_cloud.data[:, valid]
Remove all elements where the norms and points are zero .
53,635
def gen_experiment_ref(experiment_tag, n=10):
    """Generate a random experiment name: the tag plus an n-character random id."""
    return '{0}_{1}'.format(experiment_tag, gen_experiment_id(n=n))
Generate a random string for naming .
53,636
def add(self, datapoint):
    """Append a datapoint at the cursor if the tensor has room; no-op when full."""
    if self.is_full:
        return
    self.set_datapoint(self.cur_index, datapoint)
    self.cur_index += 1
Adds the datapoint to the tensor if room is available .
53,637
def add_batch(self, datapoints):
    """Append a batch of datapoints if they all fit; no-op otherwise.

    Parameters
    ----------
    datapoints : numpy.ndarray
        Batch with datapoints along the first axis.
    """
    batch_size = datapoints.shape[0]
    end_index = self.cur_index + batch_size
    if end_index > self.num_datapoints:
        # Not enough room; drop the batch silently (matches original behavior).
        return
    self.data[self.cur_index:end_index, ...] = datapoints
    self.cur_index = end_index
Adds a batch of datapoints to the tensor if room is available .
53,638
def datapoint(self, ind):
    """Return the datapoint at the given index.

    Flat tensors (height is None) return the stored element directly;
    shaped tensors return a copy of the slice.
    """
    if self.height is None:
        return self.data[ind]
    return self.data[ind, ...].copy()
Returns the datapoint at the given index .
53,639
def set_datapoint(self, ind, datapoint):
    """Store a datapoint at the given index, cast to the tensor's dtype.

    Raises
    ------
    ValueError
        If ind is out of bounds.
    """
    if ind >= self.num_datapoints:
        raise ValueError('Index %d out of bounds! Tensor has %d datapoints' % (ind, self.num_datapoints))
    self.data[ind, ...] = np.array(datapoint).astype(self.dtype)
Sets the value of the datapoint at the given index .
53,640
def data_slice(self, slice_ind):
    """Return the datapoints selected by slice_ind (no copy is made)."""
    if self.height is None:
        return self.data[slice_ind]
    return self.data[slice_ind, ...]
Returns a slice of datapoints
53,641
def save(self, filename, compressed=True):
    """Save the filled portion of the tensor to disk.

    Returns
    -------
    bool
        False when the tensor holds no data, True on success.

    Raises
    ------
    ValueError
        If the extension does not match the requested format.
    """
    if not self.has_data:
        return False
    _, file_ext = os.path.splitext(filename)
    # Only the rows written so far are persisted.
    filled = self.data[:self.cur_index, ...]
    if compressed:
        if file_ext != COMPRESSED_TENSOR_EXT:
            raise ValueError('Can only save compressed tensor with %s extension' % (COMPRESSED_TENSOR_EXT))
        np.savez_compressed(filename, filled)
    else:
        if file_ext != TENSOR_EXT:
            raise ValueError('Can only save tensor with .npy extension')
        np.save(filename, filled)
    return True
Save a tensor to disk .
53,642
def load(filename, compressed=True, prealloc=None):
    """Load a tensor from disk.

    Parameters
    ----------
    filename : str
        Path with the extension matching `compressed`.
    compressed : bool
        Whether the file is a compressed .npz archive.
    prealloc : Tensor, optional
        If given, the data is loaded into this tensor (reset first) and it
        is returned; otherwise a new Tensor is constructed.
    """
    _, file_ext = os.path.splitext(filename)
    if compressed:
        if file_ext != COMPRESSED_TENSOR_EXT:
            raise ValueError('Can only load compressed tensor with %s extension' % (COMPRESSED_TENSOR_EXT))
        data = np.load(filename)['arr_0']
    else:
        if file_ext != TENSOR_EXT:
            raise ValueError('Can only load tensor with .npy extension')
        data = np.load(filename)
    if prealloc is not None:
        prealloc.reset()
        prealloc.add_batch(data)
        return prealloc
    return Tensor(data.shape, data.dtype, data=data)
Loads a tensor from disk .
53,643
def datapoint_indices_for_tensor(self, tensor_index):
    """Return the global datapoint indices stored in the given tensor file.

    Raises
    ------
    ValueError
        If tensor_index is out of range.
    """
    if tensor_index >= self._num_tensors:
        raise ValueError('Tensor index %d is greater than the number of tensors (%d)' % (tensor_index, self._num_tensors))
    return self._file_num_to_indices[tensor_index]
Returns the indices for all datapoints in the given tensor .
53,644
def tensor_index(self, datapoint_index):
    """Return the index of the tensor file holding the referenced datapoint.

    Raises
    ------
    ValueError
        If datapoint_index is out of range.
    """
    if datapoint_index >= self._num_datapoints:
        raise ValueError('Datapoint index %d is greater than the number of datapoints (%d)' % (datapoint_index, self._num_datapoints))
    return self._index_to_file_num[datapoint_index]
Returns the index of the tensor containing the referenced datapoint .
53,645
def generate_tensor_filename(self, field_name, file_num, compressed=True):
    """Build the on-disk path for a tensor of the given field and file number."""
    file_ext = COMPRESSED_TENSOR_EXT if compressed else TENSOR_EXT
    return os.path.join(self.filename, 'tensors',
                        '%s_%05d%s' % (field_name, file_num, file_ext))
Generate a filename for a tensor .
53,646
def _allocate_tensors(self):
    """Allocate one in-memory Tensor per configured field."""
    self._tensors = {}
    for field_name, field_spec in self._config['fields'].items():
        field_dtype = np.dtype(field_spec['dtype'])
        # Leading dim is datapoints per file; trailing dims are whichever
        # of height/width/channels the field spec declares, in that order.
        field_shape = [self._datapoints_per_file]
        for dim_key in ('height', 'width', 'channels'):
            if dim_key in field_spec.keys():
                field_shape.append(field_spec[dim_key])
        self._tensors[field_name] = Tensor(field_shape, field_dtype)
Allocates the tensors in the dataset .
53,647
def add(self, datapoint):
    """Add a single datapoint to the dataset, paging tensors to disk as needed.

    Parameters
    ----------
    datapoint : dict
        Maps field names to values; every key must be a configured field.

    Raises
    ------
    ValueError
        If the dataset is read-only or the datapoint has an unknown field.
    """
    if self._access_mode == READ_ONLY_ACCESS:
        raise ValueError('Cannot add datapoints with read-only access')
    # Index of the tensor file this datapoint belongs to.
    tensor_ind = self._num_datapoints // self._datapoints_per_file
    for field_name in datapoint.keys():
        if field_name not in self.field_names:
            raise ValueError('Field %s not specified in dataset' % (field_name))
    cur_num_tensors = self._num_tensors
    new_num_tensors = cur_num_tensors
    for field_name in self.field_names:
        if tensor_ind < cur_num_tensors:
            # Tensor already exists; make sure it is the one in the cache.
            self._tensors[field_name] = self.tensor(field_name, tensor_ind)
        else:
            # Starting a brand-new tensor file.
            self._tensors[field_name].reset()
            self._tensor_cache_file_num[field_name] = tensor_ind
            new_num_tensors = cur_num_tensors + 1
            self._has_unsaved_data = True
        self._tensors[field_name].add(datapoint[field_name])
        cur_size = self._tensors[field_name].size
    if new_num_tensors > cur_num_tensors:
        self._num_tensors = new_num_tensors
    # Update the index maps for the new datapoint.
    self._index_to_file_num[self._num_datapoints] = tensor_ind
    self._file_num_to_indices[tensor_ind] = tensor_ind * self._datapoints_per_file + np.arange(cur_size)
    # Flush to disk once the current tensor fills up.
    field_name = self.field_names[0]
    if self._tensors[field_name].is_full:
        logging.info('Dataset %s: Writing tensor %d to disk' % (self.filename, tensor_ind))
        self.write()
    self._num_datapoints += 1
Adds a datapoint to the file .
53,648
def datapoint(self, ind, field_names=None):
    """Load the tensor datapoint at a global index.

    Parameters
    ----------
    ind : int
        Global datapoint index.
    field_names : list of str, optional
        Restrict loading to these fields; defaults to all fields.

    Raises
    ------
    ValueError
        If ind exceeds the number of datapoints.
    """
    # Flush pending writes so reads observe the latest data.
    if self._has_unsaved_data:
        self.flush()
    if ind >= self._num_datapoints:
        raise ValueError('Index %d larger than the number of datapoints in the dataset (%d)' % (ind, self._num_datapoints))
    if field_names is None:
        field_names = self.field_names
    datapoint = TensorDatapoint(field_names)
    file_num = self._index_to_file_num[ind]
    for field_name in field_names:
        tensor = self.tensor(field_name, file_num)
        tensor_index = ind % self._datapoints_per_file
        datapoint[field_name] = tensor.datapoint(tensor_index)
    return datapoint
Loads a tensor datapoint for a given global index .
53,649
def tensor(self, field_name, tensor_ind):
    """Return the tensor for a field and tensor index, loading from disk on cache miss."""
    if tensor_ind == self._tensor_cache_file_num[field_name]:
        # Cache hit: the requested tensor is already resident.
        return self._tensors[field_name]
    filename = self.generate_tensor_filename(field_name, tensor_ind, compressed=True)
    Tensor.load(filename, compressed=True, prealloc=self._tensors[field_name])
    self._tensor_cache_file_num[field_name] = tensor_ind
    return self._tensors[field_name]
Returns the tensor for a given field and tensor index .
53,650
def delete_last(self, num_to_delete=1):
    """Delete the last N datapoints, removing emptied tensor files from disk.

    Parameters
    ----------
    num_to_delete : int
        Number of trailing datapoints to remove.

    Raises
    ------
    ValueError
        If the dataset is read-only or num_to_delete exceeds the dataset size.
    """
    if self._access_mode == READ_ONLY_ACCESS:
        raise ValueError('Cannot delete datapoints with read-only access')
    if num_to_delete > self._num_datapoints:
        raise ValueError('Cannot remove more than the number of datapoints in the dataset')
    last_datapoint_ind = self._num_datapoints - 1
    last_tensor_ind = last_datapoint_ind // self._datapoints_per_file
    new_last_datapoint_ind = self._num_datapoints - 1 - num_to_delete
    new_num_datapoints = new_last_datapoint_ind + 1
    new_last_datapoint_ind = max(new_last_datapoint_ind, 0)
    new_last_tensor_ind = new_last_datapoint_ind // self._datapoints_per_file
    # Remove tensor files that fall entirely past the new end.
    for tensor_ind in range(new_last_tensor_ind + 1, last_tensor_ind + 1):
        for field_name in self.field_names:
            filename = self.generate_tensor_filename(field_name, tensor_ind)
            os.remove(filename)
    # Work out how many datapoints the new last tensor should keep.
    dataset_empty = False
    target_tensor_size = new_num_datapoints % self._datapoints_per_file
    if target_tensor_size == 0:
        if new_num_datapoints > 0:
            target_tensor_size = self._datapoints_per_file
        else:
            dataset_empty = True
    # Trim the (possibly partial) new last tensor and rewrite it.
    for field_name in self.field_names:
        new_last_tensor = self.tensor(field_name, new_last_tensor_ind)
        while new_last_tensor.size > target_tensor_size:
            new_last_tensor.delete_last()
        filename = self.generate_tensor_filename(field_name, new_last_tensor_ind)
        new_last_tensor.save(filename, compressed=True)
        if not new_last_tensor.has_data:
            os.remove(filename)
            new_last_tensor.reset()
    # Update the bookkeeping counters.
    if self._num_datapoints - 1 - num_to_delete >= 0:
        self._num_datapoints = new_num_datapoints
    else:
        self._num_datapoints = 0
    self._num_tensors = new_last_tensor_ind + 1
    if dataset_empty:
        self._num_tensors = 0
Deletes the last N datapoints from the dataset .
53,651
def write(self):
    """Write all cached tensors for the current last file number, plus metadata, to disk."""
    for field_name in self.field_names:
        filename = self.generate_tensor_filename(field_name, self._num_tensors - 1)
        self._tensors[field_name].save(filename, compressed=True)
    # Fix: the original passed open(...) directly to json.dump, leaking the
    # file handle; 'with' closes it deterministically.
    with open(self.metadata_filename, 'w') as metadata_file:
        json.dump(self._metadata, metadata_file, indent=JSON_INDENT, sort_keys=True)
    self._has_unsaved_data = False
Writes all tensors to the next file number .
53,652
def open(dataset_dir, access_mode=READ_ONLY_ACCESS):
    """Open a tensor dataset rooted at dataset_dir.

    Prefers config.json; falls back to config.yaml when the JSON config is
    missing or unreadable.

    Raises
    ------
    ValueError
        If write-only access is requested.
    """
    if access_mode == WRITE_ACCESS:
        raise ValueError('Cannot open a dataset with write-only access')
    try:
        config_filename = os.path.join(dataset_dir, 'config.json')
        config = json.load(open(config_filename, 'r'))
    except Exception:
        # Fix: narrowed from a bare 'except:' so KeyboardInterrupt and
        # SystemExit are no longer swallowed; the YAML fallback remains
        # deliberately best-effort.
        config_filename = os.path.join(dataset_dir, 'config.yaml')
        config = YamlConfig(config_filename)
    dataset = TensorDataset(dataset_dir, config, access_mode=access_mode)
    return dataset
Opens a tensor dataset .
53,653
def split(self, split_name):
    """Return (train_indices, val_indices, metadata) for a named split.

    Raises
    ------
    ValueError
        If the split does not exist.
    """
    if not self.has_split(split_name):
        raise ValueError('Split %s does not exist!' % (split_name))
    metadata_filename = self.split_metadata_filename(split_name)
    train_filename = self.train_indices_filename(split_name)
    val_filename = self.val_indices_filename(split_name)
    metadata = json.load(open(metadata_filename, 'r'))
    train_indices = np.load(train_filename)['arr_0']
    val_indices = np.load(val_filename)['arr_0']
    return train_indices, val_indices, metadata
Return the training and validation indices for the requested split .
53,654
def delete_split(self, split_name):
    """Delete a split's directory from disk; no-op if the split does not exist."""
    if self.has_split(split_name):
        shutil.rmtree(os.path.join(self.split_dir, split_name))
Delete a split of the dataset .
53,655
def _load_config(self, filename):
    """Load a YAML configuration file, recursively inlining ``!include`` directives.

    Included files are resolved relative to the including file, and their
    contents are re-indented to match the indentation of the ``!include``
    line. The parsed config is stored on self.config and returned.
    """
    # Fix: 'with' closes the handle even on error (the original leaked it on
    # exception and only closed at the end).
    with open(filename, 'r') as fh:
        self.file_contents = fh.read()
    config_dir = os.path.split(filename)[0]
    # Fix: raw string — '\s' in a non-raw literal is an invalid escape.
    include_re = re.compile(r'^(.*)!include\s+(.*)$', re.MULTILINE)

    def recursive_load(matchobj, path):
        # Preserve the indentation of the !include line; '-' is replaced by a
        # space so YAML list items stay aligned on continuation lines.
        first_spacing = matchobj.group(1)
        other_spacing = first_spacing.replace('-', ' ')
        fname = os.path.join(path, matchobj.group(2))
        new_path, _ = os.path.split(fname)
        new_path = os.path.realpath(new_path)
        text = ''
        with open(fname) as f:
            text = f.read()
        text = first_spacing + text
        text = text.replace('\n', '\n{}'.format(other_spacing), text.count('\n') - 1)
        # Recurse so nested includes resolve relative to their own file.
        return re.sub(include_re, lambda m: recursive_load(m, new_path), text)

    self.file_contents = re.sub(include_re, lambda m: recursive_load(m, config_dir), self.file_contents)
    self.config = self.__ordered_load(self.file_contents)
    for k in self.config.keys():
        self.config[k] = YamlConfig.__convert_key(self.config[k])
    return self.config
Loads a yaml configuration file from the given filename .
53,656
def __convert_key(expression):
    """Evaluate YAML values of the form ``x!<expr>`` that reference other keys.

    SECURITY NOTE(review): this calls eval() on config-file content — do not
    load YAML configs from untrusted sources.
    """
    is_reference = (type(expression) is str
                    and len(expression) > 2
                    and expression[1] == '!')
    if is_reference:
        expression = eval(expression[2:-1])
    return expression
Converts keys in YAML that reference other keys .
53,657
def make_summary_table(train_result, val_result, plot=True, save_dir=None, prepend="", save=False):
    """Build a matplotlib table of train/validation classification metrics.

    Thanks to Lucas Manuelli for the contribution.

    Parameters
    ----------
    train_result, val_result
        Result objects exposing error_rate, ap_score, precision, recall and
        precision_recall_curve().
    plot : bool
        Show the figure interactively.
    save_dir : str, optional
        Directory for the saved figure/yaml (used only when save is True).
    prepend : str
        Filename prefix for saved outputs.
    save : bool
        Whether to write summary.png and summary.yaml.

    Returns
    -------
    tuple
        (metrics dict keyed by 'train'/'validation', matplotlib figure).
    """
    table_key_list = ['error_rate', 'recall_at_99_precision', 'average_precision', 'precision', 'recall']
    num_fields = len(table_key_list)
    import matplotlib.pyplot as plt
    # Hide the axes; only the table is shown.
    ax = plt.subplot(111, frame_on=False)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    data = np.zeros([num_fields, 2])
    data_dict = dict()
    names = ['train', 'validation']
    for name, result in zip(names, [train_result, val_result]):
        data_dict[name] = {}
        data_dict[name]['error_rate'] = result.error_rate
        data_dict[name]['average_precision'] = result.ap_score * 100
        data_dict[name]['precision'] = result.precision * 100
        data_dict[name]['recall'] = result.recall * 100
        precision_array, recall_array, _ = result.precision_recall_curve()
        # Recall at the first threshold whose precision exceeds 99%.
        recall_at_99_precision = recall_array[np.argmax(precision_array > 0.99)] * 100
        data_dict[name]['recall_at_99_precision'] = recall_at_99_precision
        for i, key in enumerate(table_key_list):
            # Round for display and mirror into the table array.
            data_dict[name][key] = float("{0:.2f}".format(data_dict[name][key]))
            j = names.index(name)
            data[i, j] = data_dict[name][key]
    table = plt.table(cellText=data, rowLabels=table_key_list, colLabels=names)
    fig = plt.gcf()
    fig.subplots_adjust(bottom=0.15)
    if plot:
        plt.show()
    if save_dir is not None and save:
        fig_filename = os.path.join(save_dir, prepend + 'summary.png')
        yaml_filename = os.path.join(save_dir, prepend + 'summary.yaml')
        yaml.dump(data_dict, open(yaml_filename, 'w'), default_flow_style=False)
        fig.savefig(fig_filename, bbox_inches="tight")
    return data_dict, fig
Makes a matplotlib table object with relevant data . Thanks to Lucas Manuelli for the contribution .
53,658
def app_score(self):
    """Compute the area under the precision vs. pct-predicted-positive curve.

    Uses trapezoidal integration of precision over the percentage of
    positively classified datapoints.
    """
    precisions, pct_pred_pos, taus = self.precision_pct_pred_pos_curve(interval=False)
    app = 0
    # Trapezoid rule over each adjacent pair of curve points.
    # Fix: removed dead locals (cur_tau/next_tau and a 'total' accumulator
    # that was computed but never used).
    for k in range(len(precisions) - 1):
        mid_prec = (precisions[k] + precisions[k + 1]) / 2.0
        width_pp = np.abs(pct_pred_pos[k + 1] - pct_pred_pos[k])
        app += mid_prec * width_pp
    return app
Computes the area under the app curve .
53,659
def accuracy_curve(self, delta_tau=0.01):
    """Sweep the classification threshold and record accuracy at each step.

    Thresholds are taken from the sorted prediction probabilities, plus a
    final evaluation at 1.0. The caller's threshold is restored before
    returning. Note: delta_tau is unused; kept for interface compatibility.

    Returns
    -------
    tuple
        (accuracy scores, thresholds).
    """
    orig_thresh = self.threshold
    sorted_labels, sorted_probs = self.sorted_values
    scores = []
    taus = []
    tau = 0
    for k in range(len(sorted_labels)):
        self.threshold = tau
        scores.append(self.accuracy)
        taus.append(tau)
        tau = sorted_probs[k]
    # One final evaluation at the maximum threshold.
    self.threshold = 1.0
    scores.append(self.accuracy)
    taus.append(1.0)
    # Restore the caller's threshold.
    self.threshold = orig_thresh
    return scores, taus
Computes the relationship between probability threshold and classification accuracy .
53,660
def f1_curve(self, delta_tau=0.01):
    """Sweep the classification threshold and record the F1 score at each step.

    Thresholds are taken from the sorted prediction probabilities, plus a
    final evaluation at 1.0. The caller's threshold is restored before
    returning. Note: delta_tau is unused; kept for interface compatibility.

    Returns
    -------
    tuple
        (F1 scores, thresholds).
    """
    orig_thresh = self.threshold
    sorted_labels, sorted_probs = self.sorted_values
    scores = []
    taus = []
    tau = 0
    for k in range(len(sorted_labels)):
        self.threshold = tau
        scores.append(self.f1_score)
        taus.append(tau)
        tau = sorted_probs[k]
    # One final evaluation at the maximum threshold.
    self.threshold = 1.0
    scores.append(self.f1_score)
    taus.append(1.0)
    # Restore the caller's threshold.
    self.threshold = orig_thresh
    return scores, taus
Computes the relationship between probability threshold and classification F1 score .
53,661
def phi_coef_curve(self, delta_tau=0.01):
    """Sweep the classification threshold and record the phi coefficient at each step.

    Thresholds are taken from the sorted prediction probabilities, plus a
    final evaluation at 1.0. The caller's threshold is restored before
    returning. Note: delta_tau is unused; kept for interface compatibility.

    Returns
    -------
    tuple
        (phi coefficients, thresholds).
    """
    orig_thresh = self.threshold
    sorted_labels, sorted_probs = self.sorted_values
    scores = []
    taus = []
    tau = 0
    for k in range(len(sorted_labels)):
        self.threshold = tau
        scores.append(self.phi_coef)
        taus.append(tau)
        tau = sorted_probs[k]
    # One final evaluation at the maximum threshold.
    self.threshold = 1.0
    scores.append(self.phi_coef)
    taus.append(1.0)
    # Restore the caller's threshold.
    self.threshold = orig_thresh
    return scores, taus
Computes the relationship between probability threshold and classification phi coefficient .
53,662
def precision_pct_pred_pos_curve(self, interval=False, delta_tau=0.001):
    """Relate precision to the percentage of positively classified datapoints.

    Parameters
    ----------
    interval : bool
        If False, thresholds come from the sorted prediction probabilities;
        if True, thresholds step uniformly by delta_tau.
    delta_tau : float
        Step size when interval is True.

    Returns
    -------
    tuple
        (precisions, pct_pred_pos, thresholds); the caller's threshold is
        restored before returning.
    """
    orig_thresh = self.threshold
    sorted_labels, sorted_probs = self.sorted_values
    precisions = []
    pct_pred_pos = []
    taus = []
    tau = 0
    if not interval:
        # Thresholds drawn from the observed probabilities.
        for k in range(len(sorted_labels)):
            self.threshold = tau
            precisions.append(self.precision)
            pct_pred_pos.append(self.pct_pred_pos)
            taus.append(tau)
            tau = sorted_probs[k]
    else:
        # Uniform threshold sweep.
        while tau < 1.0:
            self.threshold = tau
            precisions.append(self.precision)
            pct_pred_pos.append(self.pct_pred_pos)
            taus.append(tau)
            tau += delta_tau
    # Evaluate at the maximum threshold.
    self.threshold = 1.0
    precisions.append(self.precision)
    pct_pred_pos.append(self.pct_pred_pos)
    taus.append(1.0)
    # Append the ideal endpoint so the curve closes at (0, 1).
    precisions.append(1.0)
    pct_pred_pos.append(0.0)
    taus.append(1.0 + 1e-12)
    self.threshold = orig_thresh
    return precisions, pct_pred_pos, taus
Computes the relationship between precision and the percent of positively classified datapoints .
53,663
def gen_experiment_id(n=10):
    """Generate a random string of n lowercase ASCII letters."""
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    picks = np.random.randint(0, len(alphabet), size=n)
    return ''.join(alphabet[i] for i in picks)
Generate a random string with n characters .
53,664
def histogram(values, num_bins, bounds, normalized=True, plot=False, color='b'):
    """Compute (and optionally plot) a histogram of values.

    Parameters
    ----------
    values : array-like
        Samples to histogram.
    num_bins : int
        Number of bins.
    bounds : tuple
        (lower, upper) range of the histogram.
    normalized : bool
        If True, normalize counts to sum to 1 (when any counts exist).
    plot : bool
        If True, draw a bar plot with matplotlib.
    color : str
        Bar color for the plot.

    Returns
    -------
    tuple
        (histogram values, bin edges).
    """
    hist, bins = np.histogram(values, bins=num_bins, range=bounds)
    width = bins[1] - bins[0]
    if normalized and np.sum(hist) > 0:
        hist = hist.astype(np.float32) / np.sum(hist)
    if plot:
        import matplotlib.pyplot as plt
        plt.bar(bins[:-1], hist, width=width, color=color)
    return hist, bins
Generate a histogram plot .
53,665
def skew(xi):
    """Return the skew-symmetric matrix S such that S @ v == cross(xi, v)."""
    return np.array([[0, -xi[2], xi[1]],
                     [xi[2], 0, -xi[0]],
                     [-xi[1], xi[0], 0]])
Return the skew - symmetric matrix that can be used to calculate cross - products with vector xi .
53,666
def deskew(S):
    """Recover the 3-vector from a 3x3 skew-symmetric cross-product matrix."""
    return np.array([S[2, 1], S[0, 2], S[1, 0]], dtype=float)
Converts a skew - symmetric cross - product matrix to its corresponding vector . Only works for 3x3 matrices .
53,667
def reverse_dictionary(d):
    """Return a new dict with keys and values swapped.

    Later duplicate values overwrite earlier ones, matching the original
    update-based behavior.
    """
    # Fix: the original abused a list comprehension for its side effects
    # (building a throwaway list of Nones); a dict comprehension is the
    # idiomatic equivalent.
    return {v: k for k, v in d.items()}
Reverses the key value pairs for a given dictionary .
53,668
def filenames(directory, tag='', sorted=False, recursive=False):
    """Collect filenames under directory whose names contain the substring tag.

    With ``recursive`` the tree is walked with os.walk; with ``sorted`` the
    resulting paths are sorted in place before being returned.
    """
    if recursive:
        matches = [os.path.join(root, name)
                   for root, _, files in os.walk(directory)
                   for name in files if name.find(tag) > -1]
    else:
        matches = [os.path.join(directory, name)
                   for name in os.listdir(directory) if name.find(tag) > -1]
    if sorted:
        matches.sort()
    return matches
Reads in all filenames from a directory that contain a specified substring .
53,669
def sph2cart(r, az, elev):
    """Convert spherical coordinates (radius, azimuth, elevation-from-+z) to Cartesian."""
    sin_elev = np.sin(elev)
    x = r * np.cos(az) * sin_elev
    y = r * np.sin(az) * sin_elev
    z = r * np.cos(elev)
    return x, y, z
Convert spherical to cartesian coordinates .
53,670
def cart2sph(x, y, z):
    """Convert Cartesian coordinates to spherical (r, azimuth, elevation).

    Azimuth lies in [0, 2*pi); elevation is the angle from the +z axis.

    Fix: the original eight-way if/elif ladder left ``az`` unbound when
    x == 0 and y == 0 (UnboundLocalError). numpy.arctan2 reproduces every
    branch of the ladder exactly (verified quadrant by quadrant) and
    returns 0 for the degenerate on-axis case.
    """
    r = np.sqrt(x**2 + y**2 + z**2)
    # arctan2 is quadrant-aware; fold its (-pi, pi] range into [0, 2*pi).
    az = np.arctan2(y, x) % (2 * np.pi)
    elev = np.arccos(z / r)
    return r, az, elev
Convert cartesian to spherical coordinates .
53,671
def keyboard_input(message, yesno=False):
    """Prompt a human for keyboard input, optionally re-asking until 'y' or 'n'."""
    prompt = message + ' '
    if yesno:
        prompt += '[y/n] '
    response = input(prompt)
    if yesno:
        # Keep asking until we get an unambiguous yes/no answer.
        while response.lower() not in ('y', 'n'):
            logging.info('Did not understand input. Please answer \'y\' or \'n\'')
            response = input(prompt)
    return response
Get keyboard input from a human, optionally re-asking until a valid yes-or-no answer is given.
53,672
def interpolate(dq0, dq1, t):
    """Linearly blend two DualQuaternions at parameter t in [0, 1] and normalize."""
    if not 0 <= t <= 1:
        raise ValueError("Interpolation step must be between 0 and 1! Got {0}".format(t))
    blended = dq0 * (1 - t) + dq1 * t
    return blended.normalized
Return the interpolation of two DualQuaternions .
53,673
def _save(self):
    """Persist the in-memory table to the .csv file, backing up the old file first."""
    # Keep a backup copy of the existing file before overwriting it.
    if os.path.isfile(self._full_filename):
        shutil.copyfile(self._full_filename, self._full_backup_filename)
    with open(self._full_filename, 'w') as file:
        writer = csv.DictWriter(file, fieldnames=self._headers)
        writer.writeheader()
        # _table includes the type-schema row, so it is written out too.
        for row in self._table:
            writer.writerow(row)
Save the model to a . csv file
53,674
def insert(self, data):
    """Insert a row into the model and save it to disk.

    Missing columns are filled with the default entry; '_uid' and
    '_default' cannot be set manually; values must match the declared
    column types. Returns the new row's '_uid'.
    """
    # Start from a fully-defaulted row, then overlay the provided values.
    row = {key: self._default_entry for key in self._headers}
    row['_uid'] = self._get_new_uid()
    for key, val in data.items():
        if key in ('_uid', '_default'):
            logging.warn("Cannot manually set columns _uid or _default of a row! Given data: {0}".format(data))
            continue
        # Enforce the declared type for this column.
        if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]):
            raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val)))
        row[key] = val
    self._table.append(row)
    self._save()
    return row['_uid']
Insert a row into the . csv file .
53,675
def update_by_uid(self, uid, data):
    """Update the row with the given uid using the values in data, then save.

    '_uid' and '_default' are silently skipped; unknown column names are
    warned about and skipped; type mismatches raise an Exception.
    """
    # +1 skips the type-schema row stored at index 0 of the table.
    row = self._table[uid + 1]
    for key, val in data.items():
        if key == '_uid' or key == '_default':
            continue
        if key not in self._headers:
            logging.warn("Unknown column name: {0}".format(key))
            continue
        # Enforce the declared type for this column.
        if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]):
            raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val)))
        row[key] = val
    self._save()
Update a row with the given data .
53,676
def get_col(self, col_name, filter=lambda _: True):
    """Return every value of column col_name for which filter(value) is truthy."""
    if col_name not in self._headers:
        raise ValueError("{} not found! Model has headers: {}".format(col_name, self._headers))
    values = []
    # Offset by 1: index 0 of the table holds the type-schema row.
    for idx in range(self.num_rows):
        entry = self._table[idx + 1][col_name]
        if filter(entry):
            values.append(entry)
    return values
Return all values in the column corresponding to col_name that satisfy filter, which is a function that takes in a value of the column's type and returns True or False.
53,677
def get_by_cols(self, cols, direction=1):
    """Return a copy of the first (direction=1) or last (direction=-1) row
    matching every key/value constraint in cols, or None if nothing matches."""
    if direction == 1:
        iterator = range(self.num_rows)
    elif direction == -1:
        iterator = range(self.num_rows - 1, -1, -1)
    else:
        raise ValueError("Direction can only be 1 (first) or -1 (last). Got: {0}".format(direction))
    for idx in iterator:
        # Offset by 1: index 0 of the table holds the type-schema row.
        candidate = self._table[idx + 1]
        if all(candidate[key] == val for key, val in cols.items()):
            return candidate.copy()
    return None
Return the first or last row that satisfies the given col value constraints or None if no row contains the given value .
53,678
def get_rows_by_cols(self, matching_dict):
    """Return all rows (by reference, not copied) whose columns match matching_dict."""
    matches = []
    for idx in range(self.num_rows):
        # Offset by 1: index 0 of the table holds the type-schema row.
        candidate = self._table[idx + 1]
        if all(candidate[key] == val for key, val in matching_dict.items()):
            matches.append(candidate)
    return matches
Return all rows where the cols match the elements given in the matching_dict
53,679
def next(self):
    """Return a copy of the next row for iteration, raising StopIteration at the end."""
    if self._cur_row >= len(self._table):
        raise StopIteration
    row = self._table[self._cur_row].copy()
    self._cur_row += 1
    return row
Returns the next row in the CSV for iteration
53,680
def load(full_filename):
    """Load a .csv file into a CSVModel.

    The on-disk layout stores a type-schema row first; it becomes row 0 of
    the in-memory table, with data rows after it. The returned model's uid
    counter is set just past the highest existing '_uid'.
    """
    with open(full_filename, 'r') as file:
        reader = csv.DictReader(file)
        headers = reader.fieldnames
        # Every CSVModel file carries these two bookkeeping columns.
        if '_uid' not in headers or '_default' not in headers:
            raise Exception("Malformed CSVModel file!")
        all_rows = [row for row in reader]
    # First data row of the file is the per-column type-name schema.
    types = all_rows[0]
    table = [types]
    default_entry = table[0]['_default']
    for i in range(1, len(all_rows)):
        raw_row = all_rows[i]
        row = {}
        for column_name in headers:
            if raw_row[column_name] != default_entry and column_name != '':
                if types[column_name] == 'bool':
                    # bool('False') is truthy, so booleans need special parsing.
                    row[column_name] = CSVModel._str_to_bool(raw_row[column_name])
                else:
                    try:
                        row[column_name] = CSVModel._KNOWN_TYPES_MAP[types[column_name]](raw_row[column_name])
                    except:
                        # Conversion failed: log and fall back to converting the truthiness.
                        logging.error('{}, {}, {}'.format(column_name, types[column_name], raw_row[column_name]))
                        row[column_name] = CSVModel._KNOWN_TYPES_MAP[types[column_name]](bool(raw_row[column_name]))
            else:
                row[column_name] = default_entry
        table.append(row)
    if len(table) == 1:
        # Only the schema row exists: start uids from zero.
        next_valid_uid = 0
    else:
        next_valid_uid = int(table[-1]['_uid']) + 1
    # Strip the leading '_uid' and trailing '_default' bookkeeping columns.
    headers_init = headers[1:-1]
    types_init = [types[column_name] for column_name in headers_init]
    headers_types_list = zip(headers_init, types_init)
    csv_model = CSVModel(full_filename, headers_types_list, default_entry=default_entry)
    csv_model._uid = next_valid_uid
    csv_model._table = table
    csv_model._save()
    return csv_model
Load a . csv file into a CSVModel .
53,681
def get_or_create(full_filename, headers_types=None, default_entry=''):
    """Load the CSVModel at full_filename if it exists, else create a fresh one."""
    # Accept a dict of {header: type} as well as a sequence of pairs.
    if isinstance(headers_types, dict):
        headers_types = [(k, v) for k, v in headers_types.items()]
    if os.path.isfile(full_filename):
        return CSVModel.load(full_filename)
    return CSVModel(full_filename, headers_types, default_entry=default_entry)
Load a . csv file into a CSVModel if the file exists or create a new CSVModel with the given filename if the file does not exist .
53,682
def projection_matrix(point, normal, direction=None, perspective=None, pseudo=False):
    """Return 4x4 matrix projecting onto the plane defined by point and normal.

    Uses a perspective point, a parallel projection direction, or — if
    neither is given — an orthogonal projection. With ``pseudo`` the
    perspective projection preserves relative depth.
    """
    M = numpy.identity(4)
    point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
    normal = unit_vector(normal[:3])
    if perspective is not None:
        # perspective projection
        perspective = numpy.array(perspective[:3], dtype=numpy.float64, copy=False)
        M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective - point, normal)
        M[:3, :3] -= numpy.outer(perspective, normal)
        if pseudo:
            # preserve relative depth
            M[:3, :3] -= numpy.outer(normal, normal)
            M[:3, 3] = numpy.dot(point, normal) * (perspective + normal)
        else:
            M[:3, 3] = numpy.dot(point, normal) * perspective
        M[3, :3] = -normal
        M[3, 3] = numpy.dot(perspective, normal)
    elif direction is not None:
        # parallel projection along the given direction
        direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
        scale = numpy.dot(direction, normal)
        M[:3, :3] -= numpy.outer(direction, normal) / scale
        M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
    else:
        # orthogonal projection onto the plane
        M[:3, :3] -= numpy.outer(normal, normal)
        M[:3, 3] = numpy.dot(point, normal) * normal
    return M
Return matrix to project onto plane defined by point and normal .
53,683
def projection_from_matrix(matrix, pseudo=False):
    """Return projection plane and perspective point from a projection matrix.

    Return values mirror the arguments of projection_matrix:
    (point, normal, direction, perspective, pseudo).
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    l, V = numpy.linalg.eig(M)
    # Eigenvectors of M with eigenvalue 1 lie on the projection plane.
    i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
    if not pseudo and len(i):
        # point: any eigenvector of M corresponding to eigenvalue 1
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # direction: unit eigenvector of M33 corresponding to eigenvalue 0
        l, V = numpy.linalg.eig(M33)
        i = numpy.where(abs(numpy.real(l)) < 1e-8)[0]
        if not len(i):
            raise ValueError("no eigenvector corresponding to eigenvalue 0")
        direction = numpy.real(V[:, i[0]]).squeeze()
        direction /= vector_norm(direction)
        # normal: unit eigenvector of M33.T corresponding to eigenvalue 0
        l, V = numpy.linalg.eig(M33.T)
        i = numpy.where(abs(numpy.real(l)) < 1e-8)[0]
        if len(i):
            # parallel projection
            normal = numpy.real(V[:, i[0]]).squeeze()
            normal /= vector_norm(normal)
            return point, normal, direction, None, False
        else:
            # orthogonal projection: the normal equals the direction vector
            return point, direction, None, None, False
    else:
        # perspective projection
        i = numpy.where(abs(numpy.real(l)) > 1e-8)[0]
        if not len(i):
            raise ValueError("no eigenvector not corresponding to eigenvalue 0")
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        normal = -M[3, :3]
        perspective = M[:3, 3] / numpy.dot(point[:3], normal)
        if pseudo:
            perspective -= normal
        return point, normal, None, perspective, pseudo
Return projection plane and perspective point from projection matrix .
53,684
def unit_vector(data, axis=None, out=None):
    """Return ndarray normalized by length (Euclidean norm) along axis.

    When ``out`` is given, normalization happens in place into ``out``
    (which may alias ``data``) and the function returns None.
    """
    if out is None:
        data = numpy.array(data, dtype=numpy.float64, copy=True)
        if data.ndim == 1:
            # Fast path for flat vectors.
            data /= math.sqrt(numpy.dot(data, data))
            return data
    else:
        if out is not data:
            out[:] = numpy.array(data, copy=False)
        # From here on operate on the caller-supplied output buffer.
        data = out
    # Sum of squares along the requested axis, then in-place sqrt.
    length = numpy.atleast_1d(numpy.sum(data * data, axis))
    numpy.sqrt(length, length)
    if axis is not None:
        # Reinsert the reduced axis so broadcasting divides correctly.
        length = numpy.expand_dims(length, axis)
    data /= length
    if out is None:
        return data
Return ndarray normalized by length, i.e. the Euclidean norm, along axis.
53,685
def json_numpy_obj_hook(dct):
    """JSON object hook that rebuilds ndarrays encoded as {'__ndarray__', 'dtype', 'shape'}."""
    if isinstance(dct, dict) and '__ndarray__' in dct:
        arr = np.asarray(dct['__ndarray__'], dtype=dct['dtype'])
        return arr.reshape(dct['shape'])
    # Anything else passes through unchanged.
    return dct
Decodes a previously encoded numpy ndarray with proper shape and dtype .
53,686
def dump(*args, **kwargs):
    """json.dump wrapper that forces the numpy-aware encoder and pretty output."""
    # These settings always win over caller-supplied values, as before.
    kwargs['cls'] = NumpyEncoder
    kwargs['sort_keys'] = True
    kwargs['indent'] = 4
    kwargs['separators'] = (',', ': ')
    return _json.dump(*args, **kwargs)
Dump a numpy . ndarray to file stream .
53,687
def load(*args, **kwargs):
    """json.load wrapper that reconstructs numpy ndarrays via json_numpy_obj_hook."""
    kwargs['object_hook'] = json_numpy_obj_hook
    return _json.load(*args, **kwargs)
Load an numpy . ndarray from a file stream .
53,688
def default ( self , obj ) : if isinstance ( obj , np . ndarray ) : return dict ( __ndarray__ = obj . tolist ( ) , dtype = str ( obj . dtype ) , shape = obj . shape ) return _json . JSONEncoder ( self , obj )
Converts an ndarray into a dictionary for efficient serialization .
53,689
def _preallocate_samples ( self ) : self . prealloc_samples_ = [ ] for i in range ( self . num_prealloc_samples_ ) : self . prealloc_samples_ . append ( self . sample ( ) )
Preallocate samples for faster adaptive sampling .
53,690
def rvs(self, size=1, iteration=1):
    """Sample the random variable, reusing preallocated samples when available.

    Preallocated samples are indexed cyclically starting at ``iteration``;
    with size == 1 a single sample is returned rather than a list.
    """
    if self.num_prealloc_samples_ > 0:
        drawn = [self.prealloc_samples_[(iteration + k) % self.num_prealloc_samples_]
                 for k in range(size)]
        return drawn[0] if size == 1 else drawn
    # No cache available: draw fresh samples.
    return self.sample(size=size)
Sample the random variable using the preallocated samples if possible .
53,691
def sample(self, size=1):
    """Draw rigid-transform samples: rotation via matrix exponential of a
    sampled axis-angle vector, translation sampled directly.

    Returns a single RigidTransform when size == 1, else a list.
    """
    drawn = []
    for _ in range(size):
        xi = self._r_xi_rv.rvs(size=1)
        # exp of the skew matrix maps the axis-angle sample onto SO(3).
        R_sample = scipy.linalg.expm(skew(xi))
        t_sample = self._t_rv.rvs(size=1)
        drawn.append(RigidTransform(rotation=R_sample, translation=t_sample,
                                    from_frame=self._from_frame,
                                    to_frame=self._to_frame))
    if size == 1 and len(drawn) > 0:
        return drawn[0]
    return drawn
Sample rigid transform random variables .
53,692
def _flush ( self ) : if self . _recording : raise Exception ( "Cannot flush data queue while recording!" ) if self . _saving_cache : logging . warn ( "Flush when using cache means unsaved data will be lost and not returned!" ) self . _cmds_q . put ( ( "reset_data_segment" , ) ) else : data = self . _extract_q ( 0 ) return data
Returns a list of all current data
53,693
def _stop(self):
    """Stop recording for good: pause, signal the worker to stop, kill the process."""
    self._pause()
    self._cmds_q.put(("stop",))
    # The recorder process may already have exited; ignore terminate errors.
    try:
        self._recorder.terminate()
    except Exception:
        pass
    self._recording = False
Stops recording . Returns all recorded data and their timestamps . Destroys recorder process .
53,694
def _listdir ( self , root ) : "List directory 'root' appending the path separator to subdirs." res = [ ] for name in os . listdir ( root ) : path = os . path . join ( root , name ) if os . path . isdir ( path ) : name += os . sep res . append ( name ) return res
List directory root appending the path separator to subdirs .
53,695
def complete_extra(self, args):
    """Path completions for the 'extra' command: current dir when no args,
    otherwise complete the last argument as a path."""
    if not args:
        return self._listdir('./')
    return self._complete_path(args[-1])
Completions for the extra command .
53,696
def complete(self, text, state):
    "Generic readline completion entry point."
    # Try word-list completions first. The original computed this block
    # twice back-to-back; the second pass could never return anything the
    # first did not (same self.words, same text), so it was dead code and
    # has been collapsed into a single pass here.
    results = [w for w in self.words if w.startswith(text)] + [None]
    if results != [None]:
        return results[state]
    line = readline.get_line_buffer().split()
    # A trailing space means the user is starting a new (empty) token.
    if RE_SPACE.match(readline.get_line_buffer()):
        line.append('')
    return (self.complete_extra(line) + [None])[state]
Generic readline completion entry point .
53,697
def stop(self):
    """Shut down the syncer: signal stop, stop every stream recorder, kill the process."""
    self._cmds_q.put(("stop",))
    for stream_recorder in self._data_stream_recorders:
        stream_recorder._stop()
    # The syncer process may already be dead; ignore terminate errors.
    try:
        self._syncer.terminate()
    except Exception:
        pass
Stops syncer operations . Destroys syncer process .
53,698
def configure_root():
    """Configure the root logger with a colorized stream handler.

    Existing StreamHandlers (which includes FileHandlers, a StreamHandler
    subclass) are removed first so messages are not duplicated.

    Fix: the original removed handlers while iterating the live
    ``root_logger.handlers`` list, which skips entries; iterating a copy
    removes them all reliably.
    """
    root_logger = logging.getLogger()
    for hdlr in list(root_logger.handlers):
        if isinstance(hdlr, logging.StreamHandler):
            root_logger.removeHandler(hdlr)
    root_logger.setLevel(ROOT_LOG_LEVEL)
    hdlr = logging.StreamHandler(ROOT_LOG_STREAM)
    formatter = colorlog.ColoredFormatter(
        '%(purple)s%(name)-10s %(log_color)s%(levelname)-8s%(reset)s %(white)s%(message)s',
        reset=True,
        log_colors={
            'DEBUG': 'cyan',
            'INFO': 'green',
            'WARNING': 'yellow',
            'ERROR': 'red',
            'CRITICAL': 'red,bg_white',
        })
    hdlr.setFormatter(formatter)
    root_logger.addHandler(hdlr)
Configure the root logger .
53,699
def add_root_log_file(log_file):
    """Attach a timestamped FileHandler writing to log_file to the root logger."""
    root_logger = logging.getLogger()
    file_hdlr = logging.FileHandler(log_file)
    file_hdlr.setFormatter(logging.Formatter(
        '%(asctime)s %(name)-10s %(levelname)-8s %(message)s',
        datefmt='%m-%d %H:%M:%S'))
    root_logger.addHandler(file_hdlr)
    root_logger.info('Root logger now logging to {}'.format(log_file))
Add a log file to the root logger .