idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
52,600
def tournament(self, individuals, tourn_size, num_selections=None):
    """Tournament selection of size ``tourn_size``.

    For each selection, draws ``tourn_size`` contestants uniformly at random
    (with replacement) and keeps the one with the lowest mean fitness.

    Parameters
    ----------
    individuals : sequence of individuals, each with a ``fitness`` sequence.
    tourn_size : int, number of contestants per tournament.
    num_selections : int or None, number of winners; defaults to
        ``len(individuals)``.

    Returns
    -------
    (winners, locs) : list of deep-copied winning individuals and the list of
        their indices into ``individuals``.
    """
    winners = []
    locs = []
    if num_selections is None:
        num_selections = len(individuals)
    for _ in np.arange(num_selections):
        pool_i = self.random_state.choice(len(individuals), size=tourn_size)
        # FIX: the original reused loop variable ``i`` here, shadowing the
        # outer selection counter; use a distinct name for the contestants.
        pool = [np.mean(individuals[j].fitness) for j in pool_i]
        # lower mean fitness wins
        locs.append(pool_i[np.argmin(pool)])
        winners.append(copy.deepcopy(individuals[locs[-1]]))
    return winners, locs
conducts tournament selection of size tourn_size
52,601
def lexicase(self, F, num_selections=None, survival=False):
    """Lexicase selection over a de-aggregated fitness matrix.

    ``F`` is a (num_individuals, num_cases) matrix of per-case errors
    (lower is better). Cases are considered in a random order; only
    individuals that are elite on each case survive to the next case.

    Returns the list of selected row indices into ``F``.
    """
    if num_selections is None:
        num_selections = F.shape[0]
    selected = []
    candidate_pool = np.arange(F.shape[0])
    for _ in np.arange(num_selections):
        survivors = candidate_pool
        case_order = list(np.arange(F.shape[1]))
        self.random_state.shuffle(case_order)
        # winnow survivors case by case until one remains or cases run out
        while case_order and len(survivors) > 1:
            case = case_order.pop(0)
            best = np.min(F[survivors, case])
            survivors = [s for s in survivors if F[s, case] <= best]
        pick = self.random_state.randint(len(survivors))
        selected.append(survivors[pick])
        if survival:
            # survival mode: a winner may only be chosen once
            candidate_pool = [c for c in candidate_pool if c != survivors[pick]]
    # pad if the candidate pool was exhausted before num_selections was met
    while len(selected) < num_selections:
        selected.append(candidate_pool[0])
    return selected
conducts lexicase selection for de - aggregated fitness vectors
52,602
def epsilon_lexicase(self, F, sizes, num_selections=None, survival=False):
    """Epsilon-lexicase selection for de-aggregated fitness vectors.

    F : (num_individuals, num_cases) matrix of per-case errors (lower is
        better). An individual survives a case if its error is within one
    median-absolute-deviation (epsilon) of the best error on that case.
    sizes : per-individual sizes forwarded to the compiled backend.
    Returns selected row indices into F.
    """
    if num_selections is None:
        num_selections = F.shape[0]
    if self.c:
        # compiled path: delegate to the external ep_lex routine, which
        # fills ``locs`` in place. NOTE(review): ep_lex semantics are not
        # visible here — assumed to mirror the Python branch below; confirm.
        locs = np.empty(num_selections, dtype='int32', order='F')
        if self.lex_size:
            ep_lex(F, F.shape[0], F.shape[1], num_selections, locs, self.lex_size, np.array(sizes))
        else:
            ep_lex(F, F.shape[0], F.shape[1], num_selections, locs, self.lex_size, np.array([]))
        return locs
    else:
        locs = []
        individual_locs = np.arange(F.shape[0])
        # epsilon per case = median absolute deviation of that case's errors
        mad_for_case = np.array([self.mad(f) for f in F.transpose()])
        for i in np.arange(num_selections):
            can_locs = individual_locs
            cases = list(np.arange(F.shape[1]))
            self.random_state.shuffle(cases)
            # winnow candidates case by case; keep those within epsilon of best
            while len(cases) > 0 and len(can_locs) > 1:
                best_val_for_case = np.min(F[can_locs, cases[0]])
                can_locs = [l for l in can_locs if F[l, cases[0]] <= best_val_for_case + mad_for_case[cases[0]]]
                cases.pop(0)
            # tie-break uniformly among remaining candidates
            choice = self.random_state.randint(len(can_locs))
            locs.append(can_locs[choice])
            if survival:
                # survival mode: each individual may be selected only once
                individual_locs = [i for i in individual_locs if i != can_locs[choice]]
        # pad if the pool was exhausted before num_selections was reached
        while len(locs) < num_selections:
            locs.append(individual_locs[0])
        return locs
conducts epsilon lexicase selection for de - aggregated fitness vectors
52,603
def mad(self, x, axis=None):
    """Median absolute deviation of ``x`` along ``axis``."""
    center = np.median(x, axis)
    return np.median(np.abs(x - center), axis)
median absolute deviation statistic
52,604
def cross(self, p_i, p_j, max_depth=2):
    """Subtree-like swap crossover between postfix programs p_i and p_j.

    Picks a crossover point in each parent with a compatible output type,
    walks backwards to find the start of the corresponding subtree (postfix
    encoding), swaps the two subtrees, and writes a child back into a parent
    in place only if the child fits within ``2**max_depth - 1`` nodes.

    Raises ValueError if a produced child is not a valid program.
    """
    types_p_i = [t for t in [p.out_type for p in p_i]]
    types_p_j = [t for t in [p.out_type for p in p_j]]
    # only nodes whose output type exists in both parents are swappable
    types = set(types_p_i).intersection(types_p_j)
    p_i_sub = [i for i, n in enumerate(p_i) if n.out_type in types]
    x_i_end = self.random_state.choice(p_i_sub)
    x_i_begin = x_i_end
    arity_sum = p_i[x_i_end].arity[p_i[x_i_end].in_type]
    # walk left until the subtree rooted at x_i_end is complete
    # (arity bookkeeping: each node consumes arity operands, produces one)
    while (arity_sum > 0):
        if x_i_begin == 0:
            # debug trace: reaching index 0 with operands still owed means
            # the program encoding is inconsistent
            print("arity_sum:", arity_sum, "x_i_begin:", x_i_begin, "x_i_end:", x_i_end)
        x_i_begin -= 1
        arity_sum += p_i[x_i_begin].arity[p_i[x_i_begin].in_type] - 1
    # choose a subtree in p_j with the same output type as the p_i subtree
    p_j_sub = [i for i, n in enumerate(p_j) if n.out_type == p_i[x_i_end].out_type]
    x_j_end = self.random_state.choice(p_j_sub)
    x_j_begin = x_j_end
    arity_sum = p_j[x_j_end].arity[p_j[x_j_end].in_type]
    while (arity_sum > 0):
        if x_j_begin == 0:
            print("arity_sum:", arity_sum, "x_j_begin:", x_j_begin, "x_j_end:", x_j_end)
            print("p_j:", p_j)
        x_j_begin -= 1
        arity_sum += p_j[x_j_begin].arity[p_j[x_j_begin].in_type] - 1
    # swap the two subtree slices on copies of the parents
    tmpi = p_i[:]
    tmpj = p_j[:]
    tmpi[x_i_begin:x_i_end + 1:], tmpj[x_j_begin:x_j_end + 1:] = tmpj[x_j_begin:x_j_end + 1:], tmpi[x_i_begin:x_i_end + 1:]
    # NOTE(review): this validates the (unmodified) parents, not the
    # children tmpi/tmpj — presumably a sanity check on inputs; confirm.
    if not self.is_valid_program(p_i) or not self.is_valid_program(p_j):
        print("parent 1:", p_i, "x_i_begin:", x_i_begin, "x_i_end:", x_i_end)
        print("parent 2:", p_j, "x_j_begin:", x_j_begin, "x_j_end:", x_j_end)
        print("child 1:", tmpi)
        print("child 2:", tmpj)
        raise ValueError('Crossover produced an invalid program.')
    # only commit children that respect the size limit for max_depth
    if len(tmpi) <= 2 ** max_depth - 1:
        p_i[:] = tmpi
    if len(tmpj) <= 2 ** max_depth - 1:
        p_j[:] = tmpj
subtree - like swap crossover between programs p_i and p_j .
52,605
def mutate(self, p_i, func_set, term_set):
    """Mutate program ``p_i`` in place.

    Currently delegates to point mutation; kept as a separate entry point so
    other mutation operators can be swapped in later.
    """
    self.point_mutate(p_i, func_set, term_set)
point mutation addition removal
52,606
def point_mutate(self, p_i, func_set, term_set):
    """Point mutation: replace one random node of ``p_i`` in place.

    The replacement is drawn from nodes with identical arity, input type,
    and output type, so tree shape and typing are preserved.
    Raises ValueError if the mutated program fails validation.
    """
    # pick a random position to mutate
    x = self.random_state.randint(len(p_i))
    arity = p_i[x].arity[p_i[x].in_type]
    # candidate replacements must be signature-compatible with the old node
    reps = [n for n in func_set + term_set if n.arity[n.in_type] == arity and n.out_type == p_i[x].out_type and n.in_type == p_i[x].in_type]
    tmp = reps[self.random_state.randint(len(reps))]
    # keep a copy of the pre-mutation program for the error report below
    tmp_p = p_i[:]
    p_i[x] = tmp
    if not self.is_valid_program(p_i):
        print("old:", tmp_p)
        print("new:", p_i)
        raise ValueError('Mutation produced an invalid program.')
point mutation on individual p_i
52,607
def is_valid_program(self, p):
    """Check whether ``p`` encodes a syntactically valid program tree.

    ``p`` is a postfix node list. Validity requires that, scanning left to
    right, the running sum of arities never exceeds the number of operands
    already available, and that the total arity is exactly ``len(p) - 1``
    (a single connected tree). Empty programs are invalid.
    """
    if not p:
        return False
    running = 0
    for pos, nd in enumerate(p):
        running += nd.arity[nd.in_type]
        # at position ``pos`` there are only ``pos`` earlier nodes to consume
        if running > pos:
            return False
    return running + 1 == len(p)
checks whether program p makes a syntactically valid tree .
52,608
def run_MDR(n, stack_float, labels=None):
    """Run utility for MDR (multifactor dimensionality reduction) nodes.

    Pops two operand vectors off ``stack_float`` and feeds them to the
    node's MDR model. With ``labels`` the model is (re)fit; without, it only
    transforms. MDR requires few-valued (<= 3 unique) discrete inputs —
    otherwise a zero vector is returned as a null result.
    """
    x1 = stack_float.pop()
    x2 = stack_float.pop()
    if len(np.unique(x1)) <= 3 and len(np.unique(x2)) <= 3:
        # stack the two operands as a (n_samples, 2) feature matrix
        tmp = np.vstack((x1, x2)).transpose()
        if labels is None:
            # prediction path: model assumed already fit
            return n.model.transform(tmp)[:, 0]
        else:
            # training path: fit the node's MDR model on these labels
            out = n.model.fit_transform(tmp, labels)[:, 0]
            return out
    else:
        # inputs not discrete enough for MDR; emit a neutral zero vector
        return np.zeros(x1.shape[0])
run utility function for MDR nodes .
52,609
def stack_2_eqn(self, p):
    """Return the equation string for program ``p``.

    Evaluates the node stack symbolically via ``eval_eqn``; the final entry
    on the equation stack is the full expression. Falsy ``p`` yields ``[]``.
    """
    if not p:
        return []
    eqn_stack = []
    for nd in p.stack:
        self.eval_eqn(nd, eqn_stack)
    # the last element is the root expression
    return eqn_stack[-1]
returns equation string for program stack
52,610
def stacks_2_eqns(self, stacks):
    """Return equation strings for every program in ``stacks``.

    Empty/falsy input yields an empty list.
    """
    if not stacks:
        return []
    # list comprehension replaces map(lambda ...) — same result, clearer
    return [self.stack_2_eqn(p) for p in stacks]
returns equation strings from stacks
52,611
def make_program(self, stack, func_set, term_set, max_d, ntype):
    """Recursively build a program of depth ``max_d`` onto ``stack``.

    Nodes are appended root-first; callers reverse the stack afterwards to
    obtain postfix order. ``ntype`` is the required output type ('f'/'b').

    Raises ValueError when no terminal (or function) of the requested output
    type is available.
    """
    if max_d == 0:
        ts = [t for t in term_set if t.out_type == ntype]
        if not ts:
            raise ValueError('no ts. ntype:' + ntype + '. term_set out_types:' +
                             ','.join([t.out_type for t in term_set]))
        stack.append(ts[self.random_state.choice(len(ts))])
    else:
        # at depth 1 only functions with float inputs can still be satisfied
        fs = [f for f in func_set if (f.out_type == ntype and (f.in_type == 'f' or max_d > 1))]
        if len(fs) == 0:
            # FIX: previously this only printed and then crashed on the empty
            # random choice below; raise a descriptive error instead,
            # mirroring the terminal-set branch above.
            raise ValueError('no fs. ntype:' + ntype + '. func_set names:' +
                             ','.join([f.name for f in func_set]))
        stack.append(fs[self.random_state.choice(len(fs))])
        tmp = copy.copy(stack[-1])
        # recurse once per float operand, then once per boolean operand
        for i in np.arange(tmp.arity['f']):
            self.make_program(stack, func_set, term_set, max_d - 1, 'f')
        for i in np.arange(tmp.arity['b']):
            self.make_program(stack, func_set, term_set, max_d - 1, 'b')
makes a program stack
52,612
def init_pop(self):
    """Initialize a population of candidate features as GP stacks.

    When ``seed_with_ml`` is set, the first individuals are seeded from the
    raw features the underlying ML model found important (via ``coef_`` or
    ``feature_importances_``); remaining individuals are random programs.
    Otherwise the whole population is random.

    Returns the initialized Pop container.
    """
    pop = Pop(self.population_size)
    seed_with_raw_features = False
    if self.seed_with_ml:
        if (self.ml_type == 'SVC' or self.ml_type == 'SVR'):
            # SVMs expose no usable importances; fall back to raw features
            seed_with_raw_features = True
        elif (hasattr(self.pipeline.named_steps['ml'], 'coef_') or hasattr(self.pipeline.named_steps['ml'], 'feature_importances_')):
            coef = (self.pipeline.named_steps['ml'].coef_ if hasattr(self.pipeline.named_steps['ml'], 'coef_') else self.pipeline.named_steps['ml'].feature_importances_)
            # multiclass coef_ is (n_classes, n_features): average magnitudes
            if len(coef.shape) > 1:
                coef = [np.mean(abs(c)) for c in coef.transpose()]
            # drop zero-weight features, then rank by |importance| descending
            coef = [c for c in coef if c != 0]
            locs = np.arange(len(coef))
            locs = locs[np.argsort(np.abs(coef))[::-1]]
            for i, p in enumerate(pop.individuals):
                if i < len(locs):
                    # seed one individual per important raw feature
                    p.stack = [node('x', loc=locs[i])]
                else:
                    # fill the rest with random programs
                    self.make_program(p.stack, self.func_set, self.term_set, self.random_state.randint(self.min_depth, self.max_depth + 1), self.otype)
                    # make_program builds root-first; reverse to postfix order
                    p.stack = list(reversed(p.stack))
        else:
            seed_with_raw_features = True
        if seed_with_raw_features:
            for i, p in enumerate(pop.individuals):
                if i < self.n_features:
                    # NOTE(review): uses a random feature index rather than
                    # feature i — possibly intentional sampling; confirm.
                    p.stack = [node('x', loc=self.random_state.randint(self.n_features))]
                else:
                    self.make_program(p.stack, self.func_set, self.term_set, self.random_state.randint(self.min_depth, self.max_depth + 1), self.otype)
                    p.stack = list(reversed(p.stack))
        if self.verbosity > 2:
            print("seeded initial population:", self.stacks_2_eqns(pop.individuals))
    else:
        # unseeded: every individual is a random program
        for I in pop.individuals:
            depth = self.random_state.randint(self.min_depth, self.max_depth_init)
            self.make_program(I.stack, self.func_set, self.term_set, depth, self.otype)
            I.stack = list(reversed(I.stack))
    return pop
initializes population of features as GP stacks .
52,613
def transform(self, x, inds=None, labels=None):
    """Transform ``x`` into the feature space defined by a set of programs.

    Uses ``inds`` when given, otherwise the stored best individuals; if
    neither is available, returns ``x`` unchanged. Output columns are the
    per-individual program outputs.
    """
    pool = inds if inds else self._best_inds
    if not pool:
        return x
    outputs = [self.out(I, x, labels, self.otype) for I in pool]
    # rows of ``outputs`` are per-program vectors; transpose to samples-first
    return np.asarray(outputs).transpose()
return a transformation of x using population outputs
52,614
def impute_data(self, x):
    """Impute missing (NaN) entries of ``x`` with column means.

    NOTE(review): sklearn's ``Imputer`` was deprecated in 0.20 and removed
    in 0.22 (replaced by ``SimpleImputer``) — confirm the pinned sklearn
    version before upgrading.
    """
    imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
    return imp.fit_transform(x)
Imputes data set containing Nan values
52,615
def clean(self, x):
    """Return ``x`` with every row containing NaN or inf removed."""
    bad = np.isnan(x) | np.isinf(x)
    keep_rows = ~bad.any(axis=1)
    return x[keep_rows]
remove nan and inf rows from x
52,616
def clean_with_zeros(self, x):
    """Set rows of ``x`` containing NaN or inf to zero, in place.

    Returns the (mutated) array.
    """
    bad_rows = np.any(np.isnan(x) | np.isinf(x), axis=1)
    # FIX: the previous version negated this mask, which zeroed the *finite*
    # rows and left the NaN/inf rows untouched — the opposite of the
    # documented behavior (and of the sibling ``clean`` method).
    x[bad_rows] = 0
    return x
set nan and inf rows from x to zero
52,617
def predict(self, testing_features):
    """Predict labels for a holdout set.

    Optionally imputes missing values, maps the raw features through the
    evolved feature transform (when best individuals exist), and delegates
    to the fitted estimator. On shape errors the diagnostics are printed
    before re-raising.
    """
    if self.clean:
        # NOTE(review): ``clean`` is also a method on this class; presumably
        # a boolean attribute set in __init__ shadows it — confirm, since a
        # bound method is always truthy.
        testing_features = self.impute_data(testing_features)
    if not self._best_inds:
        return self._best_estimator.predict(testing_features)
    X_transform = self.transform(testing_features)
    try:
        # FIX: reuse X_transform instead of running the (expensive)
        # transform a second time as the original did
        return self._best_estimator.predict(X_transform)
    except ValueError as detail:
        print('shape of X:', testing_features.shape)
        print('shape of X_transform:', X_transform.transpose().shape)
        print('best inds:', self.stacks_2_eqns(self._best_inds))
        print('valid locs:', self.valid_loc(self._best_inds))
        raise ValueError(detail)
predict on a holdout data set .
52,618
def fit_predict(self, features, labels):
    """Fit the pipeline on ``features``/``labels``, then predict on the
    same features (convenience wrapper)."""
    self.fit(features, labels)
    return self.predict(features)
Convenience function that fits a pipeline then predicts on the provided features
52,619
def score(self, testing_features, testing_labels):
    """Score predictions on a test set with the configured scoring
    function."""
    predictions = self.predict(testing_features)
    return self.scoring_function(testing_labels, predictions)
estimates accuracy on testing set
52,620
def export(self, output_file_name):
    """Write the engineered-feature model to ``output_file_name``.

    Raises ValueError when called before fitting. Decision-tree models
    additionally get a graphviz dump written next to the text file.
    """
    if self._best_estimator is None:
        raise ValueError('A model has not been optimized. Please call fit() first.')
    with open(output_file_name, 'w') as out_fh:
        out_fh.write(self.print_model())
    if 'DecisionTree' in self.ml_type:
        feat_names = self.stacks_2_eqns(self._best_inds) if self._best_inds else None
        export_graphviz(self._best_estimator,
                        out_file=output_file_name + '.dot',
                        feature_names=feat_names,
                        class_names=['True', 'False'],
                        filled=False, impurity=True, rotate=True)
exports engineered features
52,621
def print_model(self, sep='\n'):
    """Return a printable model string.

    If the underlying ML model exposes coefficients or feature importances,
    renders a weighted combination of the evolved feature equations sorted
    by |weight|; otherwise just lists the evolved features. Before fitting,
    returns 'original features'.
    """
    model = ''
    if self._best_inds:
        # GridSearchCV wraps the real estimator one level deeper
        if self.ml_type == 'GridSearchCV':
            ml = self._best_estimator.named_steps['ml'].best_estimator_
        else:
            ml = self._best_estimator.named_steps['ml']
        if self.ml_type != 'SVC' and self.ml_type != 'SVR':
            if hasattr(ml, 'coef_'):
                if len(ml.coef_.shape) == 1:
                    # single-output linear model: sort terms by |coef| desc
                    s = np.argsort(np.abs(ml.coef_))[::-1]
                    scoef = ml.coef_[s]
                    bi = [self._best_inds[k] for k in s]
                    model = (' +' + sep).join([str(round(c, 3)) + '*' + self.stack_2_eqn(f) for i, (f, c) in enumerate(zip(bi, scoef)) if round(scoef[i], 3) != 0])
                else:
                    # multiclass: one weighted expression per class row
                    for j, coef in enumerate(ml.coef_):
                        s = np.argsort(np.abs(coef))[::-1]
                        scoef = coef[s]
                        bi = [self._best_inds[k] for k in s]
                        # NOTE(review): zips sorted ``bi`` with *unsorted*
                        # ``coef`` (not ``scoef``) — looks inconsistent with
                        # the 1-D branch above; confirm intended pairing.
                        model += sep + 'class' + str(j) + ' :' + ' + '.join([str(round(c, 3)) + '*' + self.stack_2_eqn(f) for i, (f, c) in enumerate(zip(bi, coef)) if coef[i] != 0])
            elif hasattr(ml, 'feature_importances_'):
                # tree/ensemble models: importance-ranked feature list
                s = np.argsort(ml.feature_importances_)[::-1]
                sfi = ml.feature_importances_[s]
                bi = [self._best_inds[k] for k in s]
                model += sep.join([str(round(c, 3)) + ':' + self.stack_2_eqn(f) for i, (f, c) in enumerate(zip(bi, sfi)) if round(sfi[i], 3) != 0])
            else:
                # no weights exposed; just list the evolved features
                return sep.join(self.stacks_2_eqns(self._best_inds))
        else:
            return sep.join(self.stacks_2_eqns(self._best_inds))
    else:
        return 'original features'
    return model
prints model contained in best inds if ml has a coefficient property . otherwise prints the features generated by FEW .
52,622
def valid_loc(self, F=None):
    """Return indices of individuals whose fitness vectors are valid.

    A fitness row is valid when every entry is in ``[0, self.max_fit)``.
    Uses ``self.F`` when ``F`` is not supplied.
    """
    if F is None:
        F = self.F
    # single comprehension replaces the duplicated branches of the original
    return [i for i, f in enumerate(F) if np.all(f < self.max_fit) and np.all(f >= 0)]
returns the indices of individuals with valid fitness .
52,623
def valid ( self , individuals = None , F = None ) : if F : valid_locs = self . valid_loc ( F ) else : valid_locs = self . valid_loc ( self . F ) if individuals : return [ ind for i , ind in enumerate ( individuals ) if i in valid_locs ] else : return [ ind for i , ind in enumerate ( self . pop . individuals ) if i in valid_locs ]
returns the sublist of individuals with valid fitness .
52,624
def get_diversity(self, X):
    """Append the mean output diversity of ``X``'s rows to self.diversity.

    Diversity is 1 minus the mean (clipped-at-zero) R^2 between the first
    individual's output ``X[0]`` and each other row.
    """
    feature_correlations = np.zeros(X.shape[0] - 1)
    # NOTE(review): the loop starts at 1 and stops before X.shape[0] - 1, so
    # feature_correlations[0] is never written (stays 0) and the last row of
    # X is never compared — looks like an off-by-one; confirm intent.
    for i in np.arange(1, X.shape[0] - 1):
        # negative R^2 (worse than mean predictor) is clipped to 0
        feature_correlations[i] = max(0.0, r2_score(X[0], X[i]))
    self.diversity.append(1 - np.mean(feature_correlations))
compute mean diversity of individual outputs
52,625
def roc_auc_cv(self, features, labels):
    """Return a cross-validated ROC AUC for the underlying estimator.

    Uses ``decision_function`` scores when available, else positive-class
    ``predict_proba``; raises ValueError when the learner supports neither.
    """
    if callable(getattr(self.ml, "decision_function", None)):
        # margin-based scores feed the scoring function directly
        return np.mean([self.scoring_function(labels[test], self.pipeline.fit(features[train], labels[train]).decision_function(features[test])) for train, test in KFold().split(features, labels)])
    elif callable(getattr(self.ml, "predict_proba", None)):
        # column 1 is the positive-class probability
        return np.mean([self.scoring_function(labels[test], self.pipeline.fit(features[train], labels[train]).predict_proba(features[test])[:, 1]) for train, test in KFold().split(features, labels)])
    else:
        raise ValueError("ROC AUC score won't work with " + self.ml_type + ". No " "decision_function or predict_proba method found for this learner.")
returns an roc auc score depending on the underlying estimator .
52,626
def r2_score_vec(y_true, y_pred):
    """Elementwise (non-aggregated) R^2 score.

    Per element: 1 - squared_error / squared_deviation_from_mean. Elements
    with zero error score 1; elements with nonzero error but zero deviation
    score 0.
    """
    sq_err = (y_true - y_pred) ** 2
    sq_dev = (y_true - np.average(y_true)) ** 2
    dev_nz = sq_dev != 0
    err_nz = sq_err != 0
    both_nz = dev_nz & err_nz
    scores = np.ones([y_true.shape[0]])
    scores[both_nz] = 1 - (sq_err[both_nz] / sq_dev[both_nz])
    # nonzero error with zero deviation: worst possible score
    scores[err_nz & ~dev_nz] = 0.
    return scores
returns non - aggregate version of r2 score .
52,627
def inertia(X, y, samples=False):
    """Within-class squared distance from each class centroid.

    With ``samples=True`` returns a per-sample array of squared deviations
    from the sample's own class mean; otherwise returns the class-size-
    normalized total averaged over classes.
    """
    labels = np.unique(y)
    if samples:
        per_sample = np.zeros(y.shape)
        for lbl in labels:
            members = X[y == lbl]
            per_sample[y == lbl] = (members - np.mean(members)) ** 2
        return per_sample
    total = 0
    for lbl in labels:
        members = X[y == lbl]
        total += np.sum((members - np.mean(members)) ** 2) / len(y[y == lbl])
    return total / len(labels)
return the within - class squared distance from the centroid
52,628
def separation(X, y, samples=False):
    """Between-class squared distance from other classes' centroids.

    With ``samples=True`` returns a per-sample array of accumulated squared
    distances to every other class's mean; otherwise returns the class-size-
    normalized total averaged over classes.
    """
    labels = np.unique(y)
    # FIX: removed leftover debug prints, the dead no-op assignment
    # ``separation = separation``, and the unused ``num_classes`` /
    # ``total_dist`` normalization remnants.
    if samples:
        per_sample = np.zeros(y.shape)
        for label in labels:
            for outsider in np.unique(y[y != label]):
                per_sample[y == label] += (X[y == label] - np.mean(X[y == outsider])) ** 2
        return per_sample
    total = 0
    for label in labels:
        for outsider in np.unique(y[y != label]):
            total += np.sum((X[y == label] - np.mean(X[y == outsider])) ** 2) / len(y[y == label])
    return total / len(labels)
return the sum of the between - class squared distance
52,629
def proper(self, x):
    """Clean a fitness vector in place.

    Negative, NaN, and infinite entries are clamped to ``self.max_fit``.
    Returns the mutated array.
    """
    invalid = (x < 0) | np.isnan(x) | np.isinf(x)
    x[invalid] = self.max_fit
    return x
cleans fitness vector
52,630
def safe(self, x):
    """Replace non-finite (NaN / +-inf) entries of ``x`` with 1, in place."""
    x[~np.isfinite(x)] = 1
    return x
removes nans and infs from outputs .
52,631
def evaluate(self, n, features, stack_float, stack_bool, labels=None):
    """Evaluate node ``n`` against the value stacks, in place.

    The node's operator (looked up in ``self.eval_dict``) consumes operands
    from the stacks and its (sanitized) result is pushed onto the stack
    matching the node's output type. Evaluation is skipped silently when
    the stacks cannot satisfy the node's arity.
    """
    # numpy warnings (div-by-zero etc.) are expected during GP evaluation
    np.seterr(all='ignore')
    if len(stack_float) >= n.arity['f'] and len(stack_bool) >= n.arity['b']:
        if n.out_type == 'f':
            # safe() clamps non-finite outputs before pushing
            stack_float.append(self.safe(self.eval_dict[n.name](n, features, stack_float, stack_bool, labels)))
            if (np.isnan(stack_float[-1]).any() or np.isinf(stack_float[-1]).any()):
                print("problem operator:", n)
        else:
            stack_bool.append(self.safe(self.eval_dict[n.name](n, features, stack_float, stack_bool, labels)))
            if np.isnan(stack_bool[-1]).any() or np.isinf(stack_bool[-1]).any():
                print("problem operator:", n)
evaluate node in program
52,632
def all_finite(self, X):
    """Return True when float array ``X`` contains no NaN/inf.

    Non-float dtypes are considered finite. The float32 cast mirrors the
    original check and may flag float64 values that overflow float32.
    """
    if X.dtype.char not in np.typecodes['AllFloat']:
        return True
    as32 = np.asarray(X, dtype='float32')
    # cheap sum test first; only fall back to the elementwise scan when the
    # sum itself is non-finite
    if np.isfinite(as32.sum()):
        return True
    return bool(np.isfinite(as32).all())
returns true if X is finite false otherwise
52,633
def out(self, I, features, labels=None, otype='f'):
    """Compute the output vector of individual ``I`` on ``features``.

    Executes the program stack, then returns the top of the float or bool
    stack depending on ``otype``; non-finite results are replaced by zeros.
    """
    float_stack = []
    bool_stack = []
    for nd in I.stack:
        self.evaluate(nd, features, float_stack, bool_stack, labels)
    if otype == 'f':
        result = float_stack[-1]
        return result if self.all_finite(result) else np.zeros(len(features))
    result = bool_stack[-1]
    # boolean outputs are exposed to the ML layer as floats
    return result.astype(float) if self.all_finite(result) else np.zeros(len(features))
computes the output for individual I
52,634
def imagedatadict_to_ndarray(imdict):
    """Convert a DM3 ImageData dictionary to a numpy ndarray.

    Supports both plain typed arrays and struct arrays; dimensions are
    stored little-endian-last in the file, hence the reversed reshape.
    DataType 23 is packed RGB(A); the trailing byte is dropped.
    """
    arr = imdict['Data']
    im = None
    if isinstance(arr, parse_dm3.array.array):
        im = numpy.asarray(arr, dtype=arr.typecode)
    elif isinstance(arr, parse_dm3.structarray):
        t = tuple(arr.typecodes)
        im = numpy.frombuffer(arr.raw_data, dtype=structarray_to_np_map[t])
    # sanity-check the stored dtype/pixel size against what we decoded
    assert dm_image_dtypes[imdict["DataType"]][1] == im.dtype
    assert imdict['PixelDepth'] == im.dtype.itemsize
    # DM stores dimensions fastest-first; numpy wants slowest-first
    im = im.reshape(imdict['Dimensions'][::-1])
    if imdict["DataType"] == 23:
        # packed int32 RGBA -> uint8 channels, dropping the alpha byte
        im = im.view(numpy.uint8).reshape(im.shape + (-1,))[..., :-1]
    return im
Converts the ImageData dictionary imdict to an nd image .
52,635
def ndarray_to_imagedatadict(nparr):
    """Convert numpy array ``nparr`` to a DM3 ImageList entry dictionary.

    Produces a dict with Data, DataType, PixelDepth and Dimensions suitable
    for writing into a dm3 tag dictionary. RGB(A) uint8 images map to the
    packed DataType 23; other dtypes map via ``dm_image_dtypes``.
    """
    ret = {}
    dm_type = None
    # find the DM data type whose numpy dtype matches the array's
    for k, v in iter(dm_image_dtypes.items()):
        if v[1] == nparr.dtype.type:
            dm_type = k
            break
    if dm_type is None and nparr.dtype == numpy.uint8 and nparr.shape[-1] in (3, 4):
        # RGB/RGBA uint8 image -> packed 32-bit DM type 23
        ret["DataType"] = 23
        ret["PixelDepth"] = 4
        if nparr.shape[2] == 4:
            rgb_view = nparr.view(numpy.int32).reshape(nparr.shape[:-1])
        else:
            assert nparr.shape[2] == 3
            # pad RGB to RGBA with an opaque alpha channel before packing
            rgba_image = numpy.empty(nparr.shape[:-1] + (4,), numpy.uint8)
            rgba_image[:, :, 0:3] = nparr
            rgba_image[:, :, 3] = 255
            rgb_view = rgba_image.view(numpy.int32).reshape(rgba_image.shape[:-1])
        # DM stores dimensions fastest-first, hence the reversal
        ret["Dimensions"] = list(rgb_view.shape[::-1])
        ret["Data"] = parse_dm3.array.array(platform_independent_char(rgb_view.dtype), rgb_view.flatten())
    else:
        ret["DataType"] = dm_type
        ret["PixelDepth"] = nparr.dtype.itemsize
        ret["Dimensions"] = list(nparr.shape[::-1])
        if nparr.dtype.type in np_to_structarray_map:
            # complex/compound dtypes are written as struct arrays
            types = np_to_structarray_map[nparr.dtype.type]
            ret["Data"] = parse_dm3.structarray(types)
            ret["Data"].raw_data = bytes(numpy.array(nparr, copy=False).data)
        else:
            ret["Data"] = parse_dm3.array.array(platform_independent_char(nparr.dtype), numpy.array(nparr, copy=False).flatten())
    return ret
Convert the numpy array nparr into a suitable ImageList entry dictionary . Returns a dictionary with the appropriate Data DataType PixelDepth to be inserted into a dm3 tag dictionary and written to a file .
52,636
def parse_dm_header(f, outdata=None):
    """Read or write the top-level DM file header and tag root.

    With ``outdata`` the header and tag tree are written (version 3 only);
    the total size field is back-patched after the tag root is written.
    Without ``outdata`` the header is read (versions 3 and 4 supported) and
    the parsed tag root is returned.
    """
    if outdata is not None:
        if verbose:
            print("write_dm_header start", f.tell())
        # file_size is unknown up front; written as -1 and patched below
        ver, file_size, endianness = 3, -1, 1
        put_into_file(f, "> l l l", ver, file_size, endianness)
        start = f.tell()
        parse_dm_tag_root(f, outdata)
        end = f.tell()
        # back-patch the size field (8 bytes before the tag root)
        f.seek(start - 8)
        put_into_file(f, "> l", end - start + 4)
        f.seek(end)
        # trailing zero pair marks end-of-file
        enda, endb = 0, 0
        put_into_file(f, "> l l", enda, endb)
        if verbose:
            print("write_dm_header end", f.tell())
    else:
        if verbose:
            print("read_dm_header start", f.tell())
        ver = get_from_file(f, "> l")
        assert ver in [3, 4], "Version must be 3 or 4, not %s" % ver
        # DM4 uses 64-bit sizes; record the choice in module globals used
        # by the rest of the parser
        global size_type, version
        if ver == 3:
            size_type = 'L'
            version = 3
        if ver == 4:
            size_type = 'Q'
            version = 4
        file_size, endianness = get_from_file(f, ">%c l" % size_type)
        assert endianness == 1, "Endianness must be 1, not %s" % endianness
        start = f.tell()
        ret = parse_dm_tag_root(f, outdata)
        end = f.tell()
        # expect the trailing zero pair written by the writer branch above
        enda, endb = get_from_file(f, "> l l")
        assert (enda == endb == 0)
        if verbose:
            print("read_dm_header end", f.tell())
        return ret
This is the start of the DM file . We check for some magic values and then treat the next entry as a tag_root
52,637
def imwrite(file, data=None, shape=None, dtype=None, **kwargs):
    """Write a numpy array (or an empty image of shape/dtype) to a TIFF file.

    Writer-level kwargs (append/bigtiff/byteorder/imagej) are split off and
    passed to TiffWriter; the rest go to ``TiffWriter.save``. BigTIFF is
    enabled automatically for data larger than ~4 GB unless imagej/truncate
    mode is requested.
    """
    tifargs = parse_kwargs(kwargs, 'append', 'bigtiff', 'byteorder', 'imagej')
    if data is None:
        # empty-image mode: size is derived from shape and dtype
        dtype = numpy.dtype(dtype)
        size = product(shape) * dtype.itemsize
        byteorder = dtype.byteorder
    else:
        try:
            size = data.nbytes
            byteorder = data.dtype.byteorder
        except Exception:
            # data may be an iterable of pages; size is then unknown
            size = 0
            byteorder = None
    # threshold below the 4 GB classic-TIFF limit, leaving header room
    bigsize = kwargs.pop('bigsize', 2 ** 32 - 2 ** 25)
    if 'bigtiff' not in tifargs and size > bigsize and not (tifargs.get('imagej', False) or tifargs.get('truncate', False)):
        tifargs['bigtiff'] = True
    if 'byteorder' not in tifargs:
        tifargs['byteorder'] = byteorder
    with TiffWriter(file, **tifargs) as tif:
        return tif.save(data, shape, dtype, **kwargs)
Write numpy array to TIFF file .
52,638
def memmap(filename, shape=None, dtype=None, page=None, series=0, mode='r+', **kwargs):
    """Return a memory-mapped numpy array of image data in a TIFF file.

    With ``shape`` and ``dtype`` a new contiguous file is created via
    ``imwrite`` and mapped. Otherwise an existing file is opened and either
    the given page or the given series is mapped; both must be stored
    contiguously, else ValueError is raised.
    """
    if shape is not None and dtype is not None:
        # create a new, empty, memory-mappable file and get the data offset
        kwargs.update(data=None, shape=shape, dtype=dtype, returnoffset=True, align=TIFF.ALLOCATIONGRANULARITY)
        result = imwrite(filename, **kwargs)
        if result is None:
            raise ValueError('image data are not memory-mappable')
        offset = result[0]
    else:
        with TiffFile(filename, **kwargs) as tif:
            if page is not None:
                page = tif.pages[page]
                if not page.is_memmappable:
                    raise ValueError('image data are not memory-mappable')
                # is_contiguous yields (offset, size) for contiguous pages
                offset, _ = page.is_contiguous
                shape = page.shape
                dtype = page.dtype
            else:
                series = tif.series[series]
                if series.offset is None:
                    raise ValueError('image data are not memory-mappable')
                shape = series.shape
                dtype = series.dtype
                offset = series.offset
            # prefix the dtype char with the file's byte order
            dtype = tif.byteorder + dtype.char
    return numpy.memmap(filename, dtype, mode, offset, shape, 'C')
Return memory - mapped numpy array stored in TIFF file .
52,639
def read_exif_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read EXIF tags from an EXIF IFD and return them as a dict.

    Version tags are decoded to str when possible, and UserComment is
    decoded according to its 8-byte character-code prefix (ASCII/UNICODE).
    Decoding failures are ignored, leaving raw bytes in place.
    """
    exif = read_tags(fh, byteorder, offsetsize, TIFF.EXIF_TAGS, maxifds=1)
    for name in ('ExifVersion', 'FlashpixVersion'):
        try:
            exif[name] = bytes2str(exif[name])
        except Exception:
            # best-effort decode; keep raw value on failure
            pass
    if 'UserComment' in exif:
        # first 8 bytes identify the comment's character code
        idcode = exif['UserComment'][:8]
        try:
            if idcode == b'ASCII\x00\x00\x00':
                exif['UserComment'] = bytes2str(exif['UserComment'][8:])
            elif idcode == b'UNICODE\x00':
                exif['UserComment'] = exif['UserComment'][8:].decode('utf-16')
        except Exception:
            pass
    return exif
Read EXIF tags from file and return as dict .
52,640
def read_gps_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read GPS tags from a GPS IFD and return them as a dict."""
    return read_tags(fh, byteorder, offsetsize, TIFF.GPS_TAGS, maxifds=1)
Read GPS tags from file and return as dict .
52,641
def read_interoperability_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read Interoperability IFD tags and return them as a dict."""
    # only tag 1 (InteroperabilityIndex) is defined for this IFD
    names = {1: 'InteroperabilityIndex'}
    return read_tags(fh, byteorder, offsetsize, names, maxifds=1)
Read Interoperability tags from file and return as dict .
52,642
def read_utf8(fh, byteorder, dtype, count, offsetsize):
    """Read ``count`` bytes of tag data and decode them as a UTF-8 string."""
    raw = fh.read(count)
    return raw.decode('utf-8')
Read tag data from file and return as unicode string .
52,643
def read_colormap(fh, byteorder, dtype, count, offsetsize):
    """Read TIFF ColorMap data and return it as a (3, N) numpy array."""
    # the last dtype character carries the element type; prefix byte order
    values = fh.read_array(byteorder + dtype[-1], count)
    return values.reshape(3, -1)
Read ColorMap data from file and return as numpy array .
52,644
def read_json(fh, byteorder, dtype, count, offsetsize):
    """Read JSON tag data from file and return the parsed object.

    Returns None (after logging a warning) when the payload is not valid
    JSON.
    """
    data = fh.read(count)
    try:
        # FIX: ``bytes.decode`` works on Python 2 and 3; the old code called
        # the Python-2-only builtin ``unicode``, a NameError on Python 3.
        return json.loads(stripnull(data).decode('utf-8'))
    except ValueError:
        log.warning('read_json: invalid JSON')
Read JSON tag data from file and return as object .
52,645
def read_mm_header(fh, byteorder, dtype, count, offsetsize):
    """Read a FluoView mm_header tag and return it as a dict.

    The Dimensions and GrayChannel records contain byte-string name fields
    that are decoded and stripped for convenience.
    """
    mmh = fh.read_record(TIFF.MM_HEADER, byteorder=byteorder)
    mmh = recarray2dict(mmh)
    # each dimension record is (name, size, origin, resolution, unit)
    mmh['Dimensions'] = [(bytes2str(d[0]).strip(), d[1], d[2], d[3], bytes2str(d[4]).strip()) for d in mmh['Dimensions']]
    d = mmh['GrayChannel']
    mmh['GrayChannel'] = (bytes2str(d[0]).strip(), d[1], d[2], d[3], bytes2str(d[4]).strip())
    return mmh
Read FluoView mm_header tag from file and return as dict .
52,646
def read_uic1tag(fh, byteorder, dtype, count, offsetsize, planecount=None):
    """Read a MetaMorph STK UIC1Tag and return it as a dict.

    dtype '2I' encodes Z distances as rational pairs; dtype '1I' encodes a
    sequence of (tagid, value/offset) entries, requiring ``planecount``.
    """
    assert dtype in ('2I', '1I') and byteorder == '<'
    result = {}
    if dtype == '2I':
        # rational pairs: numerator/denominator per plane
        values = fh.read_array('<u4', 2 * count).reshape(count, 2)
        result = {'ZDistance': values[:, 0] / values[:, 1]}
    elif planecount:
        for _ in range(count):
            tagid = struct.unpack('<I', fh.read(4))[0]
            if tagid in (28, 29, 37, 40, 41):
                # skip tags that are handled via UIC2/3/4 instead
                fh.read(4)
                continue
            name, value = read_uic_tag(fh, tagid, planecount, offset=True)
            result[name] = value
    return result
Read MetaMorph STK UIC1Tag from file and return as dict .
52,647
def read_uic2tag(fh, byteorder, dtype, planecount, offsetsize):
    """Read a MetaMorph STK UIC2Tag and return it as a dict.

    Each plane stores six uint32 values: a Z-distance rational pair plus
    creation/modification date and time stamps.
    """
    assert dtype == '2I' and byteorder == '<'
    table = fh.read_array('<u4', 6 * planecount).reshape(planecount, 6)
    return {
        'ZDistance': table[:, 0] / table[:, 1],
        'DateCreated': table[:, 2],
        'TimeCreated': table[:, 3],
        'DateModified': table[:, 4],
        'TimeModified': table[:, 5],
    }
Read MetaMorph STK UIC2Tag from file and return as dict .
52,648
def read_uic4tag(fh, byteorder, dtype, planecount, offsetsize):
    """Read a MetaMorph STK UIC4Tag and return it as a dict.

    The tag is a sequence of (uint16 id, value) entries terminated by a
    zero id.
    """
    assert dtype == '1I' and byteorder == '<'
    tags = {}
    while True:
        (tag_id,) = struct.unpack('<H', fh.read(2))
        if tag_id == 0:
            # a zero id terminates the tag list
            break
        key, val = read_uic_tag(fh, tag_id, planecount, offset=False)
        tags[key] = val
    return tags
Read MetaMorph STK UIC4Tag from file and return as dict .
52,649
def read_uic_image_property(fh):
    """Read one UIC ImagePropertyEx entry and return it as a dict.

    Layout: length-prefixed, null-terminated name; uint32 flags; uint8
    property type. Type 1 carries a rational value; other types carry a
    length-prefixed byte string.
    """
    name_len = struct.unpack('B', fh.read(1))[0]
    # drop the trailing null terminator from the name
    name = struct.unpack('%is' % name_len, fh.read(name_len))[0][:-1]
    flags, prop = struct.unpack('<IB', fh.read(5))
    if prop == 1:
        numerator_denominator = struct.unpack('II', fh.read(8))
        value = numerator_denominator[0] / numerator_denominator[1]
    else:
        value_len = struct.unpack('B', fh.read(1))[0]
        value = struct.unpack('%is' % value_len, fh.read(value_len))[0]
    return dict(name=name, flags=flags, value=value)
Read UIC ImagePropertyEx tag from file and return as dict .
52,650
def read_cz_lsminfo(fh, byteorder, dtype, count, offsetsize):
    """Read a CZ_LSMINFO tag and return it as a dict.

    Validates the magic number, reads as much of the CZ_LSMINFO record as
    the stored structure size allows (older files have shorter records),
    then resolves each embedded sub-block via its Offset* field.
    """
    assert byteorder == '<'
    magic_number, structure_size = struct.unpack('<II', fh.read(8))
    if magic_number not in (50350412, 67127628):
        raise ValueError('invalid CZ_LSMINFO structure')
    # rewind: the record read below includes the 8 bytes just consumed
    fh.seek(-8, 1)
    if structure_size < numpy.dtype(TIFF.CZ_LSMINFO).itemsize:
        # older LSM file: truncate the field list to the stored size
        # (note: loop variable ``dtype`` shadows the function parameter,
        # which is unused past this point)
        lsminfo = []
        size = 0
        for name, dtype in TIFF.CZ_LSMINFO:
            size += numpy.dtype(dtype).itemsize
            if size > structure_size:
                break
            lsminfo.append((name, dtype))
    else:
        lsminfo = TIFF.CZ_LSMINFO
    lsminfo = fh.read_record(lsminfo, byteorder=byteorder)
    lsminfo = recarray2dict(lsminfo)
    # resolve sub-blocks referenced by Offset<name> fields
    for name, reader in TIFF.CZ_LSMINFO_READERS.items():
        if reader is None:
            continue
        offset = lsminfo.get('Offset' + name, 0)
        # offsets below 8 point into the header and mean "absent"
        if offset < 8:
            continue
        fh.seek(offset)
        try:
            lsminfo[name] = reader(fh)
        except ValueError:
            pass
    return lsminfo
Read CZ_LSMINFO tag from file and return as dict .
52,651
def read_lsm_floatpairs(fh):
    """Read an LSM sequence of float pairs (length-prefixed) from file."""
    (count,) = struct.unpack('<i', fh.read(4))
    return fh.read_array('<2f8', count=count)
Read LSM sequence of float pairs from file and return as list .
52,652
def read_lsm_positions(fh):
    """Read LSM stage positions (length-prefixed pairs) from file."""
    (count,) = struct.unpack('<I', fh.read(4))
    return fh.read_array('<2f8', count=count)
Read LSM positions from file and return as list .
52,653
def read_lsm_channelcolors(fh):
    """Read LSM ChannelColors structure from file and return as dict."""
    result = {'Mono': False, 'Colors': [], 'ColorNames': []}
    pos = fh.tell()
    (size, ncolors, nnames,
     coffset, noffset, mono) = struct.unpack('<IIIIII', fh.read(24))
    if ncolors != nnames:
        log.warning(
            'read_lsm_channelcolors: invalid LSM ChannelColors structure')
        return result
    result['Mono'] = bool(mono)
    # read RGBA color values
    fh.seek(pos + coffset)
    colors = fh.read_array('uint8', count=ncolors * 4).reshape((ncolors, 4))
    result['Colors'] = colors.tolist()
    # read channel names: each is a uint32 length followed by the string
    fh.seek(pos + noffset)
    buffer = fh.read(size - noffset)
    names = []
    while len(buffer) > 4:
        size = struct.unpack('<I', buffer[:4])[0]
        # 3 + size drops the trailing null byte of the name
        names.append(bytes2str(buffer[4:3 + size]))
        buffer = buffer[4 + size:]
    result['ColorNames'] = names
    return result
Read LSM ChannelColors structure from file and return as dict .
52,654
def read_lsm_scaninfo(fh):
    """Read LSM ScanInfo structure from file and return as dict."""
    block = {}
    blocks = [block]
    unpack = struct.unpack
    if struct.unpack('<I', fh.read(4))[0] != 0x10000000:
        # expected SUBBLOCK_RECORDING entry
        log.warning('read_lsm_scaninfo: invalid LSM ScanInfo structure')
        return block
    fh.read(8)
    while True:
        entry, dtype, size = unpack('<III', fh.read(12))
        if dtype == 2:
            # ascii string
            value = bytes2str(stripnull(fh.read(size)))
        elif dtype == 4:
            # int32
            value = unpack('<i', fh.read(4))[0]
        elif dtype == 5:
            # float64
            value = unpack('<d', fh.read(8))[0]
        else:
            value = 0
        if entry in TIFF.CZ_LSMINFO_SCANINFO_ARRAYS:
            # open a new array sub-block
            blocks.append(block)
            name = TIFF.CZ_LSMINFO_SCANINFO_ARRAYS[entry]
            newobj = []
            block[name] = newobj
            block = newobj
        elif entry in TIFF.CZ_LSMINFO_SCANINFO_STRUCTS:
            # open a new struct sub-block inside the current array
            blocks.append(block)
            newobj = {}
            block.append(newobj)
            block = newobj
        elif entry in TIFF.CZ_LSMINFO_SCANINFO_ATTRIBUTES:
            name = TIFF.CZ_LSMINFO_SCANINFO_ATTRIBUTES[entry]
            block[name] = value
        elif entry == 0xffffffff:
            # end of sub-block
            block = blocks.pop()
        else:
            # unknown entry
            block['Entry0x%x' % entry] = value
        if not blocks:
            break
    return block
Read LSM ScanInfo structure from file and return as dict .
52,655
def read_sis(fh, byteorder, dtype, count, offsetsize):
    """Read OlympusSIS structure and return as dict."""
    result = {}
    (magic, _, minute, hour, day, month, year, _, name, tagcount
     ) = struct.unpack('<4s6shhhhh6s32sh', fh.read(60))
    if magic != b'SIS0':
        raise ValueError('invalid OlympusSIS structure')
    result['name'] = bytes2str(stripnull(name))
    try:
        result['datetime'] = datetime.datetime(
            1900 + year, month + 1, day, hour, minute)
    except ValueError:
        pass
    # tag directory: 8 bytes per entry
    data = fh.read(8 * tagcount)
    for i in range(0, tagcount * 8, 8):
        tagtype, count, offset = struct.unpack('<hhI', data[i:i + 8])
        fh.seek(offset)
        if tagtype == 1:
            # general data block
            (_, lenexp, xcal, ycal, _, mag, _, camname, pictype,
             ) = struct.unpack('<10shdd8sd2s34s32s', fh.read(112))
            m = math.pow(10, lenexp)
            result['pixelsizex'] = xcal * m
            result['pixelsizey'] = ycal * m
            result['magnification'] = mag
            result['cameraname'] = bytes2str(stripnull(camname))
            result['picturetype'] = bytes2str(stripnull(pictype))
        elif tagtype == 10:
            # channel data; not parsed
            continue
    return result
Read OlympusSIS structure and return as dict .
52,656
def read_sis_ini(fh, byteorder, dtype, count, offsetsize):
    """Read OlympusSIS INI string and return as dict."""
    inistr = bytes2str(stripnull(fh.read(count)))
    try:
        return olympusini_metadata(inistr)
    except Exception as exc:
        # best effort: log and return empty metadata on parse failure
        log.warning('olympusini_metadata: %s: %s',
                    exc.__class__.__name__, exc)
        return {}
Read OlympusSIS INI string and return as dict .
52,657
def read_tvips_header(fh, byteorder, dtype, count, offsetsize):
    """Read TVIPS EM-MENU headers and return as dict.

    Returns an empty dict (after logging a warning) for an invalid v2
    magic number or an unknown header version.
    """
    result = {}
    header = fh.read_record(TIFF.TVIPS_HEADER_V1, byteorder=byteorder)
    for name, typestr in TIFF.TVIPS_HEADER_V1:
        result[name] = header[name].tolist()
    if header['Version'] == 2:
        header = fh.read_record(TIFF.TVIPS_HEADER_V2, byteorder=byteorder)
        if header['Magic'] != int(0xaaaaaaaa):
            log.warning('read_tvips_header: invalid TVIPS v2 magic number')
            return {}
        # decode utf16 strings
        for name, typestr in TIFF.TVIPS_HEADER_V2:
            if typestr.startswith('V'):
                s = header[name].tostring().decode('utf16', errors='ignore')
                result[name] = stripnull(s, null='\0')
            else:
                result[name] = header[name].tolist()
        # convert nanometers to meters
        for axis in 'XY':
            header['PhysicalPixelSize' + axis] /= 1e9
            header['PixelSize' + axis] /= 1e9
    elif header['Version'] != 1:
        # BUG FIX: was `header.version`; numpy record field access is
        # case sensitive ('Version'), so the attribute lookup raised
        # AttributeError instead of logging the warning.
        log.warning('read_tvips_header: unknown TVIPS header version')
        return {}
    return result
Read TVIPS EM - MENU headers and return as dict .
52,658
def read_cz_sem(fh, byteorder, dtype, count, offsetsize):
    """Read Zeiss SEM tag and return as dict."""
    result = {'': ()}
    key = None
    data = bytes2str(stripnull(fh.read(count)))
    for line in data.splitlines():
        if line.isupper():
            # an all-uppercase line names the following entry
            key = line.lower()
        elif key:
            try:
                name, value = line.split('=')
            except ValueError:
                try:
                    name, value = line.split(':', 1)
                except Exception:
                    continue
            value = value.strip()
            unit = ''
            try:
                # try 'number unit' form first
                v, u = value.split()
                number = astype(v, (int, float))
                if number != v:
                    value = number
                    unit = u
            except Exception:
                number = astype(value, (int, float))
                if number != value:
                    value = number
                if value in ('No', 'Off'):
                    value = False
                elif value in ('Yes', 'On'):
                    value = True
            result[key] = (name.strip(), value)
            if unit:
                result[key] += (unit,)
            key = None
        else:
            # lines before the first key accumulate under ''
            result[''] += (astype(line, (int, float)),)
    return result
Read Zeiss SEM tag and return as dict .
52,659
def read_nih_image_header(fh, byteorder, dtype, count, offsetsize):
    """Read NIH_IMAGE_HEADER tag from file and return as dict."""
    header = fh.read_record(TIFF.NIH_IMAGE_HEADER, byteorder=byteorder)
    header = header.newbyteorder(byteorder)
    header = recarray2dict(header)
    # truncate fixed-size string fields to their stored lengths
    header['XUnit'] = header['XUnit'][:header['XUnitSize']]
    header['UM'] = header['UM'][:header['UMsize']]
    return header
Read NIH_IMAGE_HEADER tag from file and return as dict .
52,660
def read_scanimage_metadata(fh):
    """Read ScanImage BigTIFF v3 static and ROI metadata from open file.

    Return tuple of (frame_data, roi_data). Raise ValueError if the file
    is not a ScanImage BigTIFF v3 file.
    """
    fh.seek(0)
    try:
        byteorder, version = struct.unpack('<2sH', fh.read(4))
        if byteorder != b'II' or version != 43:
            raise Exception
        fh.seek(16)
        magic, version, size0, size1 = struct.unpack('<IIII', fh.read(16))
        if magic != 117637889 or version != 3:
            raise Exception
    except Exception:
        raise ValueError('not a ScanImage BigTIFF v3 file')
    # static frame data is a null-terminated MATLAB string
    frame_data = matlabstr2py(bytes2str(fh.read(size0)[:-1]))
    roi_data = read_json(fh, '<', None, size1, None) if size1 > 1 else {}
    return frame_data, roi_data
Read ScanImage BigTIFF v3 static and ROI metadata from open file .
52,661
def read_micromanager_metadata(fh):
    """Read MicroManager non-TIFF settings from open file and return as dict."""
    fh.seek(0)
    try:
        byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
    except IndexError:
        raise ValueError('not a MicroManager TIFF file')
    result = {}
    fh.seek(8)
    (index_header, index_offset, display_header, display_offset,
     comments_header, comments_offset, summary_header, summary_length
     ) = struct.unpack(byteorder + 'IIIIIIII', fh.read(32))

    # summary settings (JSON)
    if summary_header != 2355492:
        raise ValueError('invalid MicroManager summary header')
    result['Summary'] = read_json(fh, byteorder, None, summary_length, None)

    # index map: five uint32 per entry
    if index_header != 54773648:
        raise ValueError('invalid MicroManager index header')
    fh.seek(index_offset)
    header, count = struct.unpack(byteorder + 'II', fh.read(8))
    if header != 3453623:
        raise ValueError('invalid MicroManager index header')
    data = struct.unpack(byteorder + 'IIIII' * count, fh.read(20 * count))
    result['IndexMap'] = {'Channel': data[::5],
                          'Slice': data[1::5],
                          'Frame': data[2::5],
                          'Position': data[3::5],
                          'Offset': data[4::5]}

    # display settings (JSON)
    if display_header != 483765892:
        raise ValueError('invalid MicroManager display header')
    fh.seek(display_offset)
    header, count = struct.unpack(byteorder + 'II', fh.read(8))
    if header != 347834724:
        raise ValueError('invalid MicroManager display header')
    result['DisplaySettings'] = read_json(fh, byteorder, None, count, None)

    # comments (JSON)
    if comments_header != 99384722:
        raise ValueError('invalid MicroManager comments header')
    fh.seek(comments_offset)
    header, count = struct.unpack(byteorder + 'II', fh.read(8))
    if header != 84720485:
        raise ValueError('invalid MicroManager comments header')
    result['Comments'] = read_json(fh, byteorder, None, count, None)
    return result
Read MicroManager non - TIFF settings from open file and return as dict .
52,662
def imagej_metadata_tag(metadata, byteorder):
    """Return IJMetadata and IJMetadataByteCounts tags from metadata dict.

    Returns an empty tuple if no recognized metadata keys are present.
    """
    header = [{'>': b'IJIJ', '<': b'JIJI'}[byteorder]]
    bytecounts = [0]
    body = []

    def _string(data, byteorder):
        return data.encode('utf-16' + {'>': 'be', '<': 'le'}[byteorder])

    def _doubles(data, byteorder):
        return struct.pack(byteorder + ('d' * len(data)), *data)

    def _ndarray(data, byteorder):
        return data.tobytes()

    def _bytes(data, byteorder):
        return data

    metadata_types = (
        ('Info', b'info', 1, _string),
        ('Labels', b'labl', None, _string),
        ('Ranges', b'rang', 1, _doubles),
        ('LUTs', b'luts', None, _ndarray),
        ('Plot', b'plot', 1, _bytes),
        ('ROI', b'roi ', 1, _bytes),
        ('Overlays', b'over', None, _bytes))

    for key, mtype, count, func in metadata_types:
        # accept lowercase keys too
        if key.lower() in metadata:
            key = key.lower()
        elif key not in metadata:
            continue
        if byteorder == '<':
            mtype = mtype[::-1]
        values = metadata[key]
        if count is None:
            count = len(values)
        else:
            values = [values]
        header.append(mtype + struct.pack(byteorder + 'I', count))
        for value in values:
            data = func(value, byteorder)
            body.append(data)
            bytecounts.append(len(data))

    if not body:
        return ()
    body = b''.join(body)
    header = b''.join(header)
    data = header + body
    bytecounts[0] = len(header)
    bytecounts = struct.pack(byteorder + ('I' * len(bytecounts)), *bytecounts)
    return ((50839, 'B', len(data), data, True),
            (50838, 'I', len(bytecounts) // 4, bytecounts, True))
Return IJMetadata and IJMetadataByteCounts tags from metadata dict .
52,663
def imagej_metadata(data, bytecounts, byteorder):
    """Return IJMetadata tag value as dict."""
    def _string(data, byteorder):
        return data.decode('utf-16' + {'>': 'be', '<': 'le'}[byteorder])

    def _doubles(data, byteorder):
        return struct.unpack(byteorder + ('d' * (len(data) // 8)), data)

    def _lut(data, byteorder):
        return numpy.frombuffer(data, 'uint8').reshape(-1, 256)

    def _bytes(data, byteorder):
        return data

    metadata_types = {
        b'info': ('Info', _string),
        b'labl': ('Labels', _string),
        b'rang': ('Ranges', _doubles),
        b'luts': ('LUTs', _lut),
        b'plot': ('Plots', _bytes),
        b'roi ': ('ROI', _bytes),
        b'over': ('Overlays', _bytes)}
    # also accept byte-reversed type codes (little-endian files)
    metadata_types.update(
        dict((k[::-1], v) for k, v in metadata_types.items()))

    if not bytecounts:
        raise ValueError('no ImageJ metadata')
    if not data[:4] in (b'IJIJ', b'JIJI'):
        raise ValueError('invalid ImageJ metadata')

    header_size = bytecounts[0]
    if header_size < 12 or header_size > 804:
        raise ValueError('invalid ImageJ metadata header size')

    ntypes = (header_size - 4) // 8
    header = struct.unpack(byteorder + '4sI' * ntypes, data[4:4 + ntypes * 8])
    pos = 4 + ntypes * 8
    counter = 0
    result = {}
    for mtype, count in zip(header[::2], header[1::2]):
        values = []
        name, func = metadata_types.get(mtype, (bytes2str(mtype), read_bytes))
        for _ in range(count):
            counter += 1
            pos1 = pos + bytecounts[counter]
            values.append(func(data[pos:pos1], byteorder))
            pos = pos1
        result[name.strip()] = values[0] if count == 1 else values
    return result
Return IJMetadata tag value as dict .
52,664
def imagej_description_metadata(description):
    """Return metadata from ImageJ image description as dict.

    Raise ValueError if the description does not contain an 'ImageJ' key.
    """
    def _bool(val):
        return {'true': True, 'false': False}[val.lower()]

    result = {}
    for line in description.splitlines():
        try:
            key, val = line.split('=')
        except Exception:
            continue
        key = key.strip()
        val = val.strip()
        # coerce to int, float, or bool; fall back to string
        for dtype in (int, float, _bool):
            try:
                val = dtype(val)
                break
            except Exception:
                pass
        result[key] = val
    if 'ImageJ' not in result:
        raise ValueError('not a ImageJ image description')
    return result
Return metadata from ImageJ image description as dict.
52,665
def imagej_description(shape, rgb=None, colormaped=False, version=None,
                       hyperstack=None, mode=None, loop=None, **kwargs):
    """Return ImageJ image description from data shape."""
    if colormaped:
        raise NotImplementedError('ImageJ colormapping not supported')
    if version is None:
        version = '1.11a'
    shape = imagej_shape(shape, rgb=rgb)
    rgb = shape[-1] in (3, 4)

    result = ['ImageJ=%s' % version]
    append = []
    result.append('images=%i' % product(shape[:-3]))
    if hyperstack is None:
        hyperstack = True
        append.append('hyperstack=true')
    else:
        append.append('hyperstack=%s' % bool(hyperstack))
    if shape[2] > 1:
        result.append('channels=%i' % shape[2])
    if mode is None and not rgb:
        mode = 'grayscale'
    if hyperstack and mode:
        append.append('mode=%s' % mode)
    if shape[1] > 1:
        result.append('slices=%i' % shape[1])
    if shape[0] > 1:
        result.append('frames=%i' % shape[0])
        if loop is None:
            append.append('loop=false')
    if loop is not None:
        append.append('loop=%s' % bool(loop))
    for key, value in kwargs.items():
        append.append('%s=%s' % (key.lower(), value))
    return '\n'.join(result + append + [''])
Return ImageJ image description from data shape .
52,666
def imagej_shape(shape, rgb=None):
    """Return shape normalized to 6D ImageJ hyperstack TZCYXS.

    Raise ValueError if the shape is not a valid ImageJ hyperstack shape.
    """
    shape = tuple(int(i) for i in shape)
    ndim = len(shape)
    # BUG FIX: the original `if 1 > ndim > 6` chained comparison can never
    # be true, so invalid dimensionalities were silently accepted.
    if ndim < 2 or ndim > 6:
        raise ValueError('invalid ImageJ hyperstack: not 2 to 6 dimensional')
    if rgb is None:
        rgb = shape[-1] in (3, 4) and ndim > 2
    if rgb and shape[-1] not in (3, 4):
        raise ValueError('invalid ImageJ hyperstack: not a RGB image')
    if not rgb and ndim == 6 and shape[-1] != 1:
        raise ValueError('invalid ImageJ hyperstack: not a non-RGB image')
    if rgb or shape[-1] == 1:
        return (1,) * (6 - ndim) + shape
    return (1,) * (5 - ndim) + shape + (1,)
Return shape normalized to 6D ImageJ hyperstack TZCYXS .
52,667
def json_description(shape, **metadata):
    """Return JSON image description from data shape and other metadata."""
    # the shape key always reflects the data shape, overriding any kwarg
    metadata['shape'] = shape
    return json.dumps(metadata)
Return JSON image description from data shape and other metadata .
52,668
def json_description_metadata(description):
    """Return metadata from JSON formatted image description as dict.

    Also accepts the legacy 'shape=(...)' style description.
    """
    if description[:6] == 'shape=':
        # legacy non-JSON form: 'shape=(d0, d1, ...)'
        shape = tuple(int(i) for i in description[7:-1].split(','))
        return dict(shape=shape)
    if description[:1] == '{' and description[-1:] == '}':
        return json.loads(description)
    raise ValueError('invalid JSON image description', description)
Return metadata from JSON formatted image description as dict.
52,669
def fluoview_description_metadata(description, ignoresections=None):
    """Return metadata from FluoView image description as dict."""
    if not description.startswith('['):
        raise ValueError('invalid FluoView image description')
    if ignoresections is None:
        # these sections are kept as raw multi-line text
        ignoresections = {'Region Info (Fields)', 'Protocol Description'}

    result = {}
    sections = [result]
    comment = False
    for line in description.splitlines():
        if not comment:
            line = line.strip()
        if not line:
            continue
        if line[0] == '[':
            if line[-5:] == ' End]':
                # close the current section
                del sections[-1]
                section = sections[-1]
                name = line[1:-5]
                if comment:
                    section[name] = '\n'.join(section[name])
                if name[:4] == 'LUT ':
                    a = numpy.array(section[name], dtype='uint8')
                    a.shape = -1, 3
                    section[name] = a
                continue
            # open a new section
            comment = False
            name = line[1:-1]
            if name[:4] == 'LUT ':
                section = []
            elif name in ignoresections:
                section = []
                comment = True
            else:
                section = {}
            sections.append(section)
            result[name] = section
            continue
        # section content
        if comment:
            section.append(line)
            continue
        line = line.split('=', 1)
        if len(line) == 1:
            section[line[0].strip()] = None
            continue
        key, value = line
        if key[:4] == 'RGB ':
            section.extend(int(rgb) for rgb in value.split())
        else:
            section[key.strip()] = astype(value.strip())
    return result
Return metadata from FluoView image description as dict.
52,670
def pilatus_description_metadata(description):
    """Return metadata from Pilatus image description as dict."""
    result = {}
    if not description.startswith('# '):
        return result
    # strip separator characters so lines split cleanly on whitespace
    for c in '#:=,()':
        description = description.replace(c, ' ')
    for line in description.split('\n'):
        if line[:2] != '  ':
            continue
        line = line.split()
        name = line[0]
        if line[0] not in TIFF.PILATUS_HEADER:
            try:
                result['DateTime'] = datetime.datetime.strptime(
                    ' '.join(line), '%Y-%m-%dT%H %M %S.%f')
            except Exception:
                result[name] = ' '.join(line[1:])
            continue
        indices, dtype = TIFF.PILATUS_HEADER[line[0]]
        if isinstance(indices[0], slice):
            # NOTE(review): assumes a single slice
            values = line[indices[0]]
        else:
            values = [line[i] for i in indices]
        if dtype is float and values[0] == 'not':
            # 'not measured' etc.
            values = ['NaN']
        values = tuple(dtype(v) for v in values)
        if dtype == str:
            values = ' '.join(values)
        elif len(values) == 1:
            values = values[0]
        result[name] = values
    return result
Return metadata from Pilatus image description as dict.
52,671
def svs_description_metadata(description):
    """Return metadata from Aperio image description as dict."""
    if not description.startswith('Aperio Image Library '):
        raise ValueError('invalid Aperio image description')
    result = {}
    lines = description.split('\n')
    # first line holds library name and version
    key, value = lines[0].strip().rsplit(None, 1)
    result[key.strip()] = value.strip()
    if len(lines) == 1:
        return result
    # second line: '|'-separated 'key = value' items; first item is free text
    items = lines[1].split('|')
    result[''] = items[0].strip()
    for item in items[1:]:
        key, value = item.split(' = ')
        result[key.strip()] = astype(value.strip())
    return result
Return metadata from Aperio image description as dict.
52,672
def stk_description_metadata(description):
    """Return metadata from MetaMorph image description as list of dict."""
    description = description.strip()
    if not description:
        return []
    try:
        description = bytes2str(description)
    except UnicodeDecodeError as exc:
        log.warning('stk_description_metadata: %s: %s',
                    exc.__class__.__name__, exc)
        return []
    result = []
    # one null-separated description per plane
    for plane in description.split('\x00'):
        d = {}
        for line in plane.split('\r\n'):
            line = line.split(':', 1)
            if len(line) > 1:
                name, value = line
                d[name.strip()] = astype(value.strip())
            else:
                # lines without a key accumulate under ''
                value = line[0].strip()
                if value:
                    if '' in d:
                        d[''].append(value)
                    else:
                        d[''] = [value]
        result.append(d)
    return result
Return metadata from MetaMorph image description as list of dict .
52,673
def metaseries_description_metadata(description):
    """Return metadata from MetaSeries image description as dict."""
    if not description.startswith('<MetaData>'):
        raise ValueError('invalid MetaSeries image description')

    from xml.etree import cElementTree as etree  # delayed import

    root = etree.fromstring(description)
    types = {'float': float, 'int': int,
             'bool': lambda x: asbool(x, 'on', 'off')}

    def parse(root, result):
        # recursively convert XML elements into nested dicts
        for child in root:
            attrib = child.attrib
            if not attrib:
                result[child.tag] = parse(child, {})
                continue
            if 'id' in attrib:
                i = attrib['id']
                t = attrib['type']
                v = attrib['value']
                if t in types:
                    result[i] = types[t](v)
                else:
                    result[i] = v
        return result

    adict = parse(root, {})
    if 'Description' in adict:
        adict['Description'] = adict['Description'].replace(
            '&#13;&#10;', '\n')
    return adict
Return metadata from MetaSeries image description as dict.
52,674
def scanimage_artist_metadata(artist):
    """Return metadata from ScanImage artist tag as dict.

    Return None (after logging a warning) if the tag is not valid JSON.
    """
    try:
        return json.loads(artist)
    except ValueError as exc:
        log.warning('scanimage_artist_metadata: %s: %s',
                    exc.__class__.__name__, exc)
Return metadata from ScanImage artist tag as dict.
52,675
def tile_decode(tile, tileindex, tileshape, tiledshape,
                lsb2msb, decompress, unpack, unpredict, out):
    """Decode tile segment bytes into 5D output array."""
    _, imagedepth, imagelength, imagewidth, _ = out.shape
    tileddepth, tiledlength, tiledwidth = tiledshape
    tiledepth, tilelength, tilewidth, samples = tileshape
    tilesize = tiledepth * tilelength * tilewidth * samples
    # position of this tile in the output array
    pl = tileindex // (tiledwidth * tiledlength * tileddepth)
    td = (tileindex // (tiledwidth * tiledlength)) % tileddepth * tiledepth
    tl = (tileindex // tiledwidth) % tiledlength * tilelength
    tw = tileindex % tiledwidth * tilewidth
    if tile:
        if lsb2msb:
            tile = bitorder_decode(tile, out=tile)
        tile = decompress(tile)
        tile = unpack(tile)
        # decompression / unpacking may return too many values
        tile = tile[:tilesize]
        try:
            tile.shape = tileshape
        except ValueError:
            # incomplete tile; try the clipped shape at the image border
            s = (min(imagedepth - td, tiledepth),
                 min(imagelength - tl, tilelength),
                 min(imagewidth - tw, tilewidth),
                 samples)
            try:
                tile.shape = s
            except ValueError:
                # pad incomplete data with zeros as a last resort
                log.warning('tile_decode: incomplete tile %s %s',
                            tile.shape, tileshape)
                t = numpy.zeros(tilesize, tile.dtype)
                s = min(tile.size, tilesize)
                t[:s] = tile[:s]
                tile = t.reshape(tileshape)
        tile = unpredict(tile, axis=-2, out=tile)
        out[pl,
            td:td + tiledepth,
            tl:tl + tilelength,
            tw:tw + tilewidth] = tile[:imagedepth - td,
                                      :imagelength - tl,
                                      :imagewidth - tw]
    else:
        # empty segment: zero-fill the tile region
        out[pl,
            td:td + tiledepth,
            tl:tl + tilelength,
            tw:tw + tilewidth] = 0
Decode tile segment bytes into 5D output array .
52,676
def unpack_rgb(data, dtype=None, bitspersample=None, rescale=True):
    """Return array from byte string containing packed samples.

    Use to unpack RGB565 or RGB555 to RGB888 format. If rescale is True,
    upscale extracted values to the number of bits in dtype.
    """
    if bitspersample is None:
        bitspersample = 5, 6, 5
    if dtype is None:
        dtype = '<B'
    dtype = numpy.dtype(dtype)
    bits = int(numpy.sum(bitspersample))
    if not (bits <= 32 and
            all(i <= dtype.itemsize * 8 for i in bitspersample)):
        raise ValueError('sample size not supported: %s' % str(bitspersample))
    # smallest unsigned type that holds one packed pixel
    dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize * 8 >= bits)
    data = numpy.frombuffer(data, dtype.byteorder + dt)
    result = numpy.empty((data.size, len(bitspersample)), dtype.char)
    for i, bps in enumerate(bitspersample):
        # shift sample into position and mask it out
        t = data >> int(numpy.sum(bitspersample[i + 1:]))
        t &= int('0b' + '1' * bps, 2)
        if rescale:
            o = ((dtype.itemsize * 8) // bps + 1) * bps
            if o > data.dtype.itemsize * 8:
                t = t.astype('I')
            t *= (2**o - 1) // (2**bps - 1)
            t //= 2**(o - (dtype.itemsize * 8))
        result[:, i] = t
    return result.reshape(-1)
Return array from byte string containing packed samples .
52,677
def delta_encode(data, axis=-1, out=None):
    """Encode Delta.

    Replace each value by its difference to the previous one along axis;
    bytes input is treated as a flat uint8 sequence and returned as bytes.
    """
    if isinstance(data, (bytes, bytearray)):
        data = numpy.frombuffer(data, dtype='u1')
        diff = numpy.diff(data, axis=0)
        return numpy.insert(diff, 0, data[0]).tobytes()
    dtype = data.dtype
    if dtype.kind == 'f':
        # difference the raw bit patterns of floats
        data = data.view('u%i' % dtype.itemsize)
    diff = numpy.diff(data, axis=axis)
    # keep the first element along axis unchanged
    key = [slice(None)] * data.ndim
    key[axis] = 0
    diff = numpy.insert(diff, 0, data[tuple(key)], axis=axis)
    if dtype.kind == 'f':
        return diff.view(dtype)
    return diff
Encode Delta .
52,678
def delta_decode(data, axis=-1, out=None):
    """Decode Delta.

    Inverse of delta_encode: cumulative sum along axis; bytes input is
    treated as a flat uint8 sequence and returned as bytes.
    """
    if out is not None and not out.flags.writeable:
        out = None
    if isinstance(data, (bytes, bytearray)):
        data = numpy.frombuffer(data, dtype='u1')
        return numpy.cumsum(data, axis=0, dtype='u1', out=out).tobytes()
    if data.dtype.kind == 'f':
        # accumulate the raw bit patterns of floats
        view = data.view('u%i' % data.dtype.itemsize)
        view = numpy.cumsum(view, axis=axis, dtype=view.dtype)
        return view.view(data.dtype)
    return numpy.cumsum(data, axis=axis, dtype=data.dtype, out=out)
Decode Delta .
52,679
def bitorder_decode(data, out=None, _bitorder=[]):
    """Reverse bits in each byte of byte string or numpy array.

    Numpy arrays are decoded in-place when possible. The mutable default
    `_bitorder` deliberately caches the 256-entry bit-reversal table
    across calls.
    """
    if not _bitorder:
        # translation table: _bitorder[0] as bytes, [1] as uint8 array
        _bitorder.append(
            b'\x00\x80@\xc0 \xa0`\xe0\x10\x90P\xd00\xb0p\xf0\x08\x88H\xc8('
            b'\xa8h\xe8\x18\x98X\xd88\xb8x\xf8\x04\x84D\xc4$\xa4d\xe4\x14'
            b'\x94T\xd44\xb4t\xf4\x0c\x8cL\xcc,\xacl\xec\x1c\x9c\\\xdc<\xbc|'
            b'\xfc\x02\x82B\xc2"\xa2b\xe2\x12\x92R\xd22\xb2r\xf2\n\x8aJ\xca*'
            b'\xaaj\xea\x1a\x9aZ\xda:\xbaz\xfa\x06\x86F\xc6&\xa6f\xe6\x16'
            b'\x96V\xd66\xb6v\xf6\x0e\x8eN\xce.\xaen\xee\x1e\x9e^\xde>\xbe~'
            b'\xfe\x01\x81A\xc1!\xa1a\xe1\x11\x91Q\xd11\xb1q\xf1\t\x89I\xc9)'
            b'\xa9i\xe9\x19\x99Y\xd99\xb9y\xf9\x05\x85E\xc5%\xa5e\xe5\x15'
            b'\x95U\xd55\xb5u\xf5\r\x8dM\xcd-\xadm\xed\x1d\x9d]\xdd=\xbd}'
            b'\xfd\x03\x83C\xc3#\xa3c\xe3\x13\x93S\xd33\xb3s\xf3\x0b\x8bK'
            b'\xcb+\xabk\xeb\x1b\x9b[\xdb;\xbb{\xfb\x07\x87G\xc7\'\xa7g\xe7'
            b'\x17\x97W\xd77\xb7w\xf7\x0f\x8fO\xcf/\xafo\xef\x1f\x9f_'
            b'\xdf?\xbf\x7f\xff')
        _bitorder.append(numpy.frombuffer(_bitorder[0], dtype='uint8'))
    try:
        view = data.view('uint8')
        numpy.take(_bitorder[1], view, out=view)
        return data
    except AttributeError:
        # bytes-like input: use str/bytes translate
        return data.translate(_bitorder[0])
    except ValueError:
        raise NotImplementedError('slices of arrays not supported')
    return None
Reverse bits in each byte of byte string or numpy array .
52,680
def packints_decode(data, dtype, numbits, runlen=0, out=None):
    """Decompress byte string to array of integers.

    Only 1-bit and whole-byte sample sizes are supported.
    """
    if numbits == 1:
        # bit array
        data = numpy.frombuffer(data, '|B')
        data = numpy.unpackbits(data)
        if runlen % 8:
            # drop the pad bits at the end of each row
            data = data.reshape(-1, runlen + (8 - runlen % 8))
            data = data[:, :runlen].reshape(-1)
        return data.astype(dtype)
    if numbits in (8, 16, 32, 64):
        return numpy.frombuffer(data, dtype)
    raise NotImplementedError('unpacking %s-bit integers to %s not supported'
                              % (numbits, numpy.dtype(dtype)))
Decompress byte string to array of integers .
52,681
def apply_colormap(image, colormap, contig=True):
    """Return palette-colored image.

    Image values index the colormap along axis 1; the palette axis is
    moved to the last position.
    """
    image = numpy.take(colormap, image, axis=1)
    image = numpy.rollaxis(image, 0, image.ndim)
    if contig:
        image = numpy.ascontiguousarray(image)
    return image
Return palette - colored image .
52,682
def repeat_nd(a, repeats):
    """Return read-only view into input array with elements repeated.

    Zoom an array by integer factors using nearest neighbor interpolation
    without copying data.
    """
    a = numpy.asarray(a)
    reshape = []
    shape = []
    strides = []
    # interleave a zero-stride axis of length k after each original axis
    for stride, dim, k in zip(a.strides, a.shape, repeats):
        shape.extend((dim, k))
        strides.extend((stride, 0))
        reshape.append(dim * k)
    return numpy.lib.stride_tricks.as_strided(
        a, shape, strides, writeable=False).reshape(reshape)
Return read - only view into input array with elements repeated .
52,683
def reshape_nd(data_or_shape, ndim):
    """Return image array or shape with at least ndim dimensions.

    Prepend singleton dimensions as necessary; accepts either an array
    or a shape tuple.
    """
    is_shape = isinstance(data_or_shape, tuple)
    shape = data_or_shape if is_shape else data_or_shape.shape
    if len(shape) >= ndim:
        return data_or_shape
    shape = (1,) * (ndim - len(shape)) + shape
    return shape if is_shape else data_or_shape.reshape(shape)
Return image array or shape with at least ndim dimensions .
52,684
def squeeze_axes(shape, axes, skip=None):
    """Return shape and axes with single-dimensional entries removed.

    Axes listed in `skip` (default 'XY') are always kept.
    """
    if len(shape) != len(axes):
        raise ValueError('dimensions of axes and shape do not match')
    if skip is None:
        skip = 'XY'
    shape, axes = zip(*(i for i in zip(shape, axes)
                        if i[0] > 1 or i[1] in skip))
    return tuple(shape), ''.join(axes)
Return shape and axes with single - dimensional entries removed .
52,685
def transpose_axes(image, axes, asaxes=None):
    """Return image with its axes permuted to match specified axes.

    Missing axes are inserted with length 1.
    """
    # BUG FIX: apply the default before validating; the original iterated
    # `ax not in asaxes` while asaxes was still None, raising TypeError
    # for every call relying on the default.
    if asaxes is None:
        asaxes = 'CTZYX'
    for ax in axes:
        if ax not in asaxes:
            raise ValueError('unknown axis %s' % ax)
    # prepend missing axes to image
    shape = image.shape
    for ax in reversed(asaxes):
        if ax not in axes:
            axes = ax + axes
            shape = (1,) + shape
    image = image.reshape(shape)
    # transpose axes into requested order
    image = image.transpose([axes.index(ax) for ax in asaxes])
    return image
Return image with its axes permuted to match specified axes .
52,686
def reshape_axes(axes, shape, newshape, unknown=None):
    """Return axes matching new shape.

    Unmatched dimensions are labeled with `unknown` ('Q' by default).
    """
    shape = tuple(shape)
    newshape = tuple(newshape)
    if len(axes) != len(shape):
        raise ValueError('axes do not match shape')

    size = product(shape)
    newsize = product(newshape)
    if size != newsize:
        raise ValueError('cannot reshape %s to %s' % (shape, newshape))
    if not axes or not newshape:
        return ''

    lendiff = max(0, len(shape) - len(newshape))
    if lendiff:
        newshape = newshape + (1,) * lendiff

    # walk both shapes from the end, matching dimensions
    i = len(shape) - 1
    prodns = 1
    prods = 1
    result = []
    for ns in newshape[::-1]:
        prodns *= ns
        # skip over squeezed singleton dimensions
        while i > 0 and shape[i] == 1 and ns != 1:
            i -= 1
        if ns == shape[i] and prodns == prods * shape[i]:
            prods *= shape[i]
            result.append(axes[i])
            i -= 1
        elif unknown:
            result.append(unknown)
        else:
            unknown = 'Q'
            result.append(unknown)
    return ''.join(reversed(result[lendiff:]))
Return axes matching new shape .
52,687
def stack_pages(pages, out=None, maxworkers=None, **kwargs):
    """Read data from sequence of TiffPage and stack them vertically.

    Additional parameters are passed to the TiffPage.asarray function.
    """
    npages = len(pages)
    if npages == 0:
        raise ValueError('no pages')

    if npages == 1:
        kwargs['maxworkers'] = maxworkers
        return pages[0].asarray(out=out, **kwargs)

    page0 = next(p for p in pages if p is not None).keyframe
    # validate the first page up front; worker threads swallow exceptions
    page0.asarray(validate=None)
    shape = (npages,) + page0.shape
    dtype = page0.dtype
    out = create_output(out, shape, dtype)

    # pick a worker count based on compression and tiling
    if maxworkers is None:
        if page0.compression > 1:
            if page0.is_tiled:
                maxworkers = 1
                kwargs['maxworkers'] = 0
            else:
                maxworkers = 0
        else:
            maxworkers = 1
    if maxworkers == 0:
        import multiprocessing
        maxworkers = multiprocessing.cpu_count() // 2
    if maxworkers > 1:
        kwargs['maxworkers'] = 1

    page0.parent.filehandle.lock = maxworkers > 1
    filecache = OpenFileCache(size=max(4, maxworkers),
                              lock=page0.parent.filehandle.lock)

    def func(page, index, out=out, filecache=filecache, kwargs=kwargs):
        # read and decode one page into its slot in the output array
        if page is not None:
            filecache.open(page.parent.filehandle)
            out[index] = page.asarray(lock=filecache.lock, reopen=False,
                                      validate=False, **kwargs)
            filecache.close(page.parent.filehandle)

    if maxworkers < 2:
        for i, page in enumerate(pages):
            func(page, i)
    else:
        with ThreadPoolExecutor(maxworkers) as executor:
            executor.map(func, pages, range(npages))

    filecache.clear()
    page0.parent.filehandle.lock = None
    return out
Read data from sequence of TiffPage and stack them vertically .
52,688
def clean_offsetscounts(offsets, counts):
    """Return cleaned offsets and byte counts.

    Remove entries with zero byte count, compacting valid entries to the
    front. Raise ValueError on length mismatch or a zero offset with a
    nonzero byte count.
    """
    offsets = list(offsets)
    counts = list(counts)
    size = len(offsets)
    if size != len(counts):
        raise ValueError('StripOffsets and StripByteCounts mismatch')
    j = 0
    for i, (o, b) in enumerate(zip(offsets, counts)):
        if b > 0:
            if o > 0:
                if i > j:
                    # shift valid entry down over removed ones
                    offsets[j] = o
                    counts[j] = b
                j += 1
                continue
            raise ValueError('invalid offset')
        log.warning('clean_offsetscounts: empty bytecount')
    # BUG FIX: the original compared `size == len(offsets)`, which is
    # always true because the lists are never resized, so the compacted
    # slice below was dead code; compare the number of valid entries.
    if j == size:
        return offsets, counts
    if j == 0:
        return [offsets[0]], [counts[0]]
    return offsets[:j], counts[:j]
Return cleaned offsets and byte counts .
52,689
def buffered_read(fh, lock, offsets, bytecounts, buffersize=None):
    """Return iterator over segments read from file.

    Segments are read in batches of up to `buffersize` bytes while
    holding `lock`, then yielded one at a time.
    """
    if buffersize is None:
        buffersize = 2**26  # 64 MiB
    length = len(offsets)
    i = 0
    while i < length:
        data = []
        with lock:
            size = 0
            while size < buffersize and i < length:
                fh.seek(offsets[i])
                bytecount = bytecounts[i]
                data.append(fh.read(bytecount))
                size += bytecount
                i += 1
        for segment in data:
            yield segment
Return iterator over segments read from file .
52,690
def create_output(out, shape, dtype, mode='w+', suffix=None):
    """Return numpy array where image data of shape and dtype can be copied.

    `out` selects the destination: None for a new zeroed array, a string
    starting with 'memmap' for an anonymous memory-mapped temporary file
    (optionally 'memmap:directory'), an existing ndarray to reuse, or a
    file name / open file for a named memory-map.
    """
    if out is None:
        return numpy.zeros(shape, dtype)
    if isinstance(out, str) and out.startswith('memmap'):
        # memory-map a temporary file; the file is unlinked on close but
        # the returned memmap keeps its own mapping alive
        import tempfile
        tempdir = out[7:] or None
        if suffix is None:
            suffix = '.memmap'
        with tempfile.NamedTemporaryFile(dir=tempdir, suffix=suffix) as fh:
            return numpy.memmap(fh, shape=shape, dtype=dtype, mode=mode)
    if isinstance(out, numpy.ndarray):
        # reuse caller-provided array after checking size and dtype
        if product(shape) != product(out.shape):
            raise ValueError('incompatible output shape')
        if not numpy.can_cast(dtype, out.dtype):
            raise ValueError('incompatible output dtype')
        return out.reshape(shape)
    if isinstance(out, pathlib.Path):
        out = str(out)
    return numpy.memmap(out, shape=shape, dtype=dtype, mode=mode)
Return numpy array where image data of shape and dtype can be copied .
52,691
def stripascii(string):
    """Return string truncated at last byte that is 7-bit ASCII."""
    # scan backwards for the last byte in the printable ASCII range
    pos = len(string) - 1
    while pos >= 0:
        if 8 < byte2int(string[pos]) < 127:
            return string[:pos + 1]
        pos -= 1
    # no printable byte found; return an empty slice of the same type
    return string[:0]
Return string truncated at the last byte that is 7-bit ASCII.
52,692
def asbool(value, true=(b'true', u'true'), false=(b'false', u'false')):
    """Return string as bool if possible, else raise TypeError.

    The value is stripped and lowercased before comparison against the
    `true` and `false` token tuples.
    """
    normalized = value.strip().lower()
    for result, tokens in ((True, true), (False, false)):
        if normalized in tokens:
            return result
    raise TypeError()
Return string as bool if possible, else raise TypeError.
52,693
def astype ( value , types = None ) : if types is None : types = int , float , asbool , bytes2str for typ in types : try : return typ ( value ) except ( ValueError , AttributeError , TypeError , UnicodeEncodeError ) : pass return value
Return argument as one of types if possible .
52,694
def format_size(size, threshold=1536):
    """Return file size as string from byte size.

    Sizes below `threshold` are shown in bytes; larger sizes are
    repeatedly divided by 1024 and labeled KiB..PiB.
    """
    if size < threshold:
        return '{} B'.format(int(size))
    for unit in ('KiB', 'MiB', 'GiB', 'TiB', 'PiB'):
        size /= 1024.0
        if size < threshold:
            return '{:.2f} {}'.format(size, unit)
    return 'ginormous'
Return file size as string from byte size .
52,695
def natural_sorted(iterable):
    """Return human sorted list of strings.

    Digit runs are compared numerically, so 'f2' sorts before 'f10'.
    """
    pattern = re.compile(r'(\d+)')

    def key(value):
        # split into text and digit chunks; convert digits to int
        return [int(chunk) if chunk.isdigit() else chunk
                for chunk in pattern.split(value)]

    return sorted(iterable, key=key)
Return human sorted list of strings .
52,696
def byteorder_isnative(byteorder):
    """Return True if byteorder matches the system's byteorder.

    Accepts '=', '<', '>', 'little', or 'big'.
    """
    if byteorder == '=' or byteorder == sys.byteorder:
        return True
    symbols = {'big': '>', 'little': '<'}
    native = symbols[sys.byteorder]
    return symbols.get(byteorder, byteorder) == native
Return True if byteorder matches the system's byteorder.
52,697
def recarray2dict(recarray):
    """Return numpy.recarray as dict.

    Byte-string fields ('S' dtype) are null-stripped and decoded;
    scalar and 1-D fields are converted to Python objects.
    """
    converted = {}
    for field, value in zip(recarray.dtype.descr, recarray):
        name, typestr = field[0], field[1]
        if typestr[1] == 'S':
            value = bytes2str(stripnull(value))
        elif value.ndim < 2:
            value = value.tolist()
        converted[name] = value
    return converted
Return numpy . recarray as dict .
52,698
def xml2dict ( xml , sanitize = True , prefix = None ) : from xml . etree import cElementTree as etree at = tx = '' if prefix : at , tx = prefix def astype ( value ) : for t in ( int , float , asbool ) : try : return t ( value ) except Exception : pass return value def etree2dict ( t ) : key = t . tag if sanitize : key = key . rsplit ( '}' , 1 ) [ - 1 ] d = { key : { } if t . attrib else None } children = list ( t ) if children : dd = collections . defaultdict ( list ) for dc in map ( etree2dict , children ) : for k , v in dc . items ( ) : dd [ k ] . append ( astype ( v ) ) d = { key : { k : astype ( v [ 0 ] ) if len ( v ) == 1 else astype ( v ) for k , v in dd . items ( ) } } if t . attrib : d [ key ] . update ( ( at + k , astype ( v ) ) for k , v in t . attrib . items ( ) ) if t . text : text = t . text . strip ( ) if children or t . attrib : if text : d [ key ] [ tx + 'value' ] = astype ( text ) else : d [ key ] = astype ( text ) return d return etree2dict ( etree . fromstring ( xml ) )
Return XML as dict .
52,699
def isprintable(string):
    """Return if all characters in string are printable.

    Accepts str or bytes; surrounding whitespace is ignored and an
    empty (or all-whitespace) string counts as printable.
    """
    stripped = string.strip()
    if not stripped:
        return True
    if sys.version_info[0] == 3:
        # try str.isprintable first; decode bytes to str if necessary
        try:
            return stripped.isprintable()
        except Exception:
            pass
        try:
            return stripped.decode('utf-8').isprintable()
        except Exception:
            pass
    else:
        # Python 2: compare against an explicit set of printable chars
        if stripped.isalnum():
            return True
        printable = ('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST'
                     'UVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c')
        return all(c in printable for c in stripped)
Return if all characters in string are printable .