idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
52,600 | def tournament ( self , individuals , tourn_size , num_selections = None ) : winners = [ ] locs = [ ] if num_selections is None : num_selections = len ( individuals ) for i in np . arange ( num_selections ) : pool_i = self . random_state . choice ( len ( individuals ) , size = tourn_size ) pool = [ ] for i in pool_i : ... | conducts tournament selection of size tourn_size |
52,601 | def lexicase ( self , F , num_selections = None , survival = False ) : if num_selections is None : num_selections = F . shape [ 0 ] winners = [ ] locs = [ ] individual_locs = np . arange ( F . shape [ 0 ] ) for i in np . arange ( num_selections ) : can_locs = individual_locs cases = list ( np . arange ( F . shape [ 1 ]... | conducts lexicase selection for de - aggregated fitness vectors |
52,602 | def epsilon_lexicase ( self , F , sizes , num_selections = None , survival = False ) : if num_selections is None : num_selections = F . shape [ 0 ] if self . c : locs = np . empty ( num_selections , dtype = 'int32' , order = 'F' ) if self . lex_size : ep_lex ( F , F . shape [ 0 ] , F . shape [ 1 ] , num_selections , lo... | conducts epsilon lexicase selection for de - aggregated fitness vectors |
52,603 | def mad ( self , x , axis = None ) : return np . median ( np . abs ( x - np . median ( x , axis ) ) , axis ) | median absolute deviation statistic |
52,604 | def cross ( self , p_i , p_j , max_depth = 2 ) : types_p_i = [ t for t in [ p . out_type for p in p_i ] ] types_p_j = [ t for t in [ p . out_type for p in p_j ] ] types = set ( types_p_i ) . intersection ( types_p_j ) p_i_sub = [ i for i , n in enumerate ( p_i ) if n . out_type in types ] x_i_end = self . random_state ... | subtree - like swap crossover between programs p_i and p_j . |
52,605 | def mutate ( self , p_i , func_set , term_set ) : self . point_mutate ( p_i , func_set , term_set ) | point mutation addition removal |
52,606 | def point_mutate ( self , p_i , func_set , term_set ) : x = self . random_state . randint ( len ( p_i ) ) arity = p_i [ x ] . arity [ p_i [ x ] . in_type ] reps = [ n for n in func_set + term_set if n . arity [ n . in_type ] == arity and n . out_type == p_i [ x ] . out_type and n . in_type == p_i [ x ] . in_type ] tmp ... | point mutation on individual p_i |
52,607 | def is_valid_program ( self , p ) : arities = list ( a . arity [ a . in_type ] for a in p ) accu_arities = list ( accumulate ( arities ) ) accu_len = list ( np . arange ( len ( p ) ) + 1 ) check = list ( a < b for a , b in zip ( accu_arities , accu_len ) ) return all ( check ) and sum ( a . arity [ a . in_type ] for a ... | checks whether program p makes a syntactically valid tree . |
52,608 | def run_MDR ( n , stack_float , labels = None ) : x1 = stack_float . pop ( ) x2 = stack_float . pop ( ) if len ( np . unique ( x1 ) ) <= 3 and len ( np . unique ( x2 ) ) <= 3 : tmp = np . vstack ( ( x1 , x2 ) ) . transpose ( ) if labels is None : return n . model . transform ( tmp ) [ : , 0 ] else : out = n . model . f... | run utility function for MDR nodes . |
52,609 | def stack_2_eqn ( self , p ) : stack_eqn = [ ] if p : for n in p . stack : self . eval_eqn ( n , stack_eqn ) return stack_eqn [ - 1 ] return [ ] | returns equation string for program stack |
52,610 | def stacks_2_eqns ( self , stacks ) : if stacks : return list ( map ( lambda p : self . stack_2_eqn ( p ) , stacks ) ) else : return [ ] | returns equation strings from stacks |
52,611 | def make_program ( self , stack , func_set , term_set , max_d , ntype ) : if max_d == 0 : ts = [ t for t in term_set if t . out_type == ntype ] if not ts : raise ValueError ( 'no ts. ntype:' + ntype + '. term_set out_types:' + ',' . join ( [ t . out_type for t in term_set ] ) ) stack . append ( ts [ self . random_state... | makes a program stack |
52,612 | def init_pop ( self ) : pop = Pop ( self . population_size ) seed_with_raw_features = False if self . seed_with_ml : if ( self . ml_type == 'SVC' or self . ml_type == 'SVR' ) : seed_with_raw_features = True elif ( hasattr ( self . pipeline . named_steps [ 'ml' ] , 'coef_' ) or hasattr ( self . pipeline . named_steps [ ... | initializes population of features as GP stacks . |
52,613 | def transform ( self , x , inds = None , labels = None ) : if inds : return np . asarray ( [ self . out ( I , x , labels , self . otype ) for I in inds ] ) . transpose ( ) elif self . _best_inds : return np . asarray ( [ self . out ( I , x , labels , self . otype ) for I in self . _best_inds ] ) . transpose ( ) else : ... | return a transformation of x using population outputs |
52,614 | def impute_data ( self , x ) : imp = Imputer ( missing_values = 'NaN' , strategy = 'mean' , axis = 0 ) return imp . fit_transform ( x ) | Imputes data set containing Nan values |
52,615 | def clean ( self , x ) : return x [ ~ np . any ( np . isnan ( x ) | np . isinf ( x ) , axis = 1 ) ] | remove nan and inf rows from x |
52,616 | def clean_with_zeros ( self , x ) : x [ ~ np . any ( np . isnan ( x ) | np . isinf ( x ) , axis = 1 ) ] = 0 return x | set nan and inf rows from x to zero |
52,617 | def predict ( self , testing_features ) : if self . clean : testing_features = self . impute_data ( testing_features ) if self . _best_inds : X_transform = self . transform ( testing_features ) try : return self . _best_estimator . predict ( self . transform ( testing_features ) ) except ValueError as detail : print ( ... | predict on a holdout data set . |
52,618 | def fit_predict ( self , features , labels ) : self . fit ( features , labels ) return self . predict ( features ) | Convenience function that fits a pipeline then predicts on the provided features |
52,619 | def score ( self , testing_features , testing_labels ) : yhat = self . predict ( testing_features ) return self . scoring_function ( testing_labels , yhat ) | estimates accuracy on testing set |
52,620 | def export ( self , output_file_name ) : if self . _best_estimator is None : raise ValueError ( 'A model has not been optimized. Please call fit()' ' first.' ) with open ( output_file_name , 'w' ) as output_file : output_file . write ( self . print_model ( ) ) if 'DecisionTree' in self . ml_type : export_graphviz ( sel... | exports engineered features |
52,621 | def print_model ( self , sep = '\n' ) : model = '' if self . _best_inds : if self . ml_type == 'GridSearchCV' : ml = self . _best_estimator . named_steps [ 'ml' ] . best_estimator_ else : ml = self . _best_estimator . named_steps [ 'ml' ] if self . ml_type != 'SVC' and self . ml_type != 'SVR' : if hasattr ( ml , 'coef_... | prints model contained in best inds if ml has a coefficient property . otherwise prints the features generated by FEW . |
52,622 | def valid_loc ( self , F = None ) : if F is not None : return [ i for i , f in enumerate ( F ) if np . all ( f < self . max_fit ) and np . all ( f >= 0 ) ] else : return [ i for i , f in enumerate ( self . F ) if np . all ( f < self . max_fit ) and np . all ( f >= 0 ) ] | returns the indices of individuals with valid fitness . |
52,623 | def valid ( self , individuals = None , F = None ) : if F : valid_locs = self . valid_loc ( F ) else : valid_locs = self . valid_loc ( self . F ) if individuals : return [ ind for i , ind in enumerate ( individuals ) if i in valid_locs ] else : return [ ind for i , ind in enumerate ( self . pop . individuals ) if i in ... | returns the sublist of individuals with valid fitness . |
52,624 | def get_diversity ( self , X ) : feature_correlations = np . zeros ( X . shape [ 0 ] - 1 ) for i in np . arange ( 1 , X . shape [ 0 ] - 1 ) : feature_correlations [ i ] = max ( 0.0 , r2_score ( X [ 0 ] , X [ i ] ) ) self . diversity . append ( 1 - np . mean ( feature_correlations ) ) | compute mean diversity of individual outputs |
52,625 | def roc_auc_cv ( self , features , labels ) : if callable ( getattr ( self . ml , "decision_function" , None ) ) : return np . mean ( [ self . scoring_function ( labels [ test ] , self . pipeline . fit ( features [ train ] , labels [ train ] ) . decision_function ( features [ test ] ) ) for train , test in KFold ( ) . ... | returns an roc auc score depending on the underlying estimator . |
52,626 | def r2_score_vec ( y_true , y_pred ) : numerator = ( y_true - y_pred ) ** 2 denominator = ( y_true - np . average ( y_true ) ) ** 2 nonzero_denominator = denominator != 0 nonzero_numerator = numerator != 0 valid_score = nonzero_denominator & nonzero_numerator output_scores = np . ones ( [ y_true . shape [ 0 ] ] ) outpu... | returns non - aggregate version of r2 score . |
52,627 | def inertia ( X , y , samples = False ) : if samples : inertia = np . zeros ( y . shape ) for label in np . unique ( y ) : inertia [ y == label ] = ( X [ y == label ] - np . mean ( X [ y == label ] ) ) ** 2 else : inertia = 0 for i , label in enumerate ( np . unique ( y ) ) : inertia += np . sum ( ( X [ y == label ] - ... | return the within - class squared distance from the centroid |
52,628 | def separation ( X , y , samples = False ) : num_classes = len ( np . unique ( y ) ) total_dist = ( X . max ( ) - X . min ( ) ) ** 2 if samples : separation = np . zeros ( y . shape ) for label in np . unique ( y ) : for outsider in np . unique ( y [ y != label ] ) : separation [ y == label ] += ( X [ y == label ] - np... | return the sum of the between - class squared distance |
52,629 | def proper ( self , x ) : x [ x < 0 ] = self . max_fit x [ np . isnan ( x ) ] = self . max_fit x [ np . isinf ( x ) ] = self . max_fit return x | cleans fitness vector |
52,630 | def safe ( self , x ) : x [ np . isinf ( x ) ] = 1 x [ np . isnan ( x ) ] = 1 return x | removes nans and infs from outputs . |
52,631 | def evaluate ( self , n , features , stack_float , stack_bool , labels = None ) : np . seterr ( all = 'ignore' ) if len ( stack_float ) >= n . arity [ 'f' ] and len ( stack_bool ) >= n . arity [ 'b' ] : if n . out_type == 'f' : stack_float . append ( self . safe ( self . eval_dict [ n . name ] ( n , features , stack_fl... | evaluate node in program |
52,632 | def all_finite ( self , X ) : if ( X . dtype . char in np . typecodes [ 'AllFloat' ] and not np . isfinite ( np . asarray ( X , dtype = 'float32' ) . sum ( ) ) and not np . isfinite ( np . asarray ( X , dtype = 'float32' ) ) . all ( ) ) : return False return True | returns true if X is finite false otherwise |
52,633 | def out ( self , I , features , labels = None , otype = 'f' ) : stack_float = [ ] stack_bool = [ ] for n in I . stack : self . evaluate ( n , features , stack_float , stack_bool , labels ) if otype == 'f' : return ( stack_float [ - 1 ] if self . all_finite ( stack_float [ - 1 ] ) else np . zeros ( len ( features ) ) ) ... | computes the output for individual I |
52,634 | def imagedatadict_to_ndarray ( imdict ) : arr = imdict [ 'Data' ] im = None if isinstance ( arr , parse_dm3 . array . array ) : im = numpy . asarray ( arr , dtype = arr . typecode ) elif isinstance ( arr , parse_dm3 . structarray ) : t = tuple ( arr . typecodes ) im = numpy . frombuffer ( arr . raw_data , dtype = struc... | Converts the ImageData dictionary imdict to an nd image . |
52,635 | def ndarray_to_imagedatadict ( nparr ) : ret = { } dm_type = None for k , v in iter ( dm_image_dtypes . items ( ) ) : if v [ 1 ] == nparr . dtype . type : dm_type = k break if dm_type is None and nparr . dtype == numpy . uint8 and nparr . shape [ - 1 ] in ( 3 , 4 ) : ret [ "DataType" ] = 23 ret [ "PixelDepth" ] = 4 if ... | Convert the numpy array nparr into a suitable ImageList entry dictionary . Returns a dictionary with the appropriate Data DataType PixelDepth to be inserted into a dm3 tag dictionary and written to a file . |
52,636 | def parse_dm_header ( f , outdata = None ) : if outdata is not None : if verbose : print ( "write_dm_header start" , f . tell ( ) ) ver , file_size , endianness = 3 , - 1 , 1 put_into_file ( f , "> l l l" , ver , file_size , endianness ) start = f . tell ( ) parse_dm_tag_root ( f , outdata ) end = f . tell ( ) f . seek... | This is the start of the DM file . We check for some magic values and then treat the next entry as a tag_root |
52,637 | def imwrite ( file , data = None , shape = None , dtype = None , ** kwargs ) : tifargs = parse_kwargs ( kwargs , 'append' , 'bigtiff' , 'byteorder' , 'imagej' ) if data is None : dtype = numpy . dtype ( dtype ) size = product ( shape ) * dtype . itemsize byteorder = dtype . byteorder else : try : size = data . nbytes b... | Write numpy array to TIFF file . |
52,638 | def memmap ( filename , shape = None , dtype = None , page = None , series = 0 , mode = 'r+' , ** kwargs ) : if shape is not None and dtype is not None : kwargs . update ( data = None , shape = shape , dtype = dtype , returnoffset = True , align = TIFF . ALLOCATIONGRANULARITY ) result = imwrite ( filename , ** kwargs )... | Return memory - mapped numpy array stored in TIFF file . |
52,639 | def read_exif_ifd ( fh , byteorder , dtype , count , offsetsize ) : exif = read_tags ( fh , byteorder , offsetsize , TIFF . EXIF_TAGS , maxifds = 1 ) for name in ( 'ExifVersion' , 'FlashpixVersion' ) : try : exif [ name ] = bytes2str ( exif [ name ] ) except Exception : pass if 'UserComment' in exif : idcode = exif [ '... | Read EXIF tags from file and return as dict . |
52,640 | def read_gps_ifd ( fh , byteorder , dtype , count , offsetsize ) : return read_tags ( fh , byteorder , offsetsize , TIFF . GPS_TAGS , maxifds = 1 ) | Read GPS tags from file and return as dict . |
52,641 | def read_interoperability_ifd ( fh , byteorder , dtype , count , offsetsize ) : tag_names = { 1 : 'InteroperabilityIndex' } return read_tags ( fh , byteorder , offsetsize , tag_names , maxifds = 1 ) | Read Interoperability tags from file and return as dict . |
52,642 | def read_utf8 ( fh , byteorder , dtype , count , offsetsize ) : return fh . read ( count ) . decode ( 'utf-8' ) | Read tag data from file and return as unicode string . |
52,643 | def read_colormap ( fh , byteorder , dtype , count , offsetsize ) : cmap = fh . read_array ( byteorder + dtype [ - 1 ] , count ) cmap . shape = ( 3 , - 1 ) return cmap | Read ColorMap data from file and return as numpy array . |
52,644 | def read_json ( fh , byteorder , dtype , count , offsetsize ) : data = fh . read ( count ) try : return json . loads ( unicode ( stripnull ( data ) , 'utf-8' ) ) except ValueError : log . warning ( 'read_json: invalid JSON' ) | Read JSON tag data from file and return as object . |
52,645 | def read_mm_header ( fh , byteorder , dtype , count , offsetsize ) : mmh = fh . read_record ( TIFF . MM_HEADER , byteorder = byteorder ) mmh = recarray2dict ( mmh ) mmh [ 'Dimensions' ] = [ ( bytes2str ( d [ 0 ] ) . strip ( ) , d [ 1 ] , d [ 2 ] , d [ 3 ] , bytes2str ( d [ 4 ] ) . strip ( ) ) for d in mmh [ 'Dimensions... | Read FluoView mm_header tag from file and return as dict . |
52,646 | def read_uic1tag ( fh , byteorder , dtype , count , offsetsize , planecount = None ) : assert dtype in ( '2I' , '1I' ) and byteorder == '<' result = { } if dtype == '2I' : values = fh . read_array ( '<u4' , 2 * count ) . reshape ( count , 2 ) result = { 'ZDistance' : values [ : , 0 ] / values [ : , 1 ] } elif planecoun... | Read MetaMorph STK UIC1Tag from file and return as dict . |
52,647 | def read_uic2tag ( fh , byteorder , dtype , planecount , offsetsize ) : assert dtype == '2I' and byteorder == '<' values = fh . read_array ( '<u4' , 6 * planecount ) . reshape ( planecount , 6 ) return { 'ZDistance' : values [ : , 0 ] / values [ : , 1 ] , 'DateCreated' : values [ : , 2 ] , 'TimeCreated' : values [ : , ... | Read MetaMorph STK UIC2Tag from file and return as dict . |
52,648 | def read_uic4tag ( fh , byteorder , dtype , planecount , offsetsize ) : assert dtype == '1I' and byteorder == '<' result = { } while True : tagid = struct . unpack ( '<H' , fh . read ( 2 ) ) [ 0 ] if tagid == 0 : break name , value = read_uic_tag ( fh , tagid , planecount , offset = False ) result [ name ] = value retu... | Read MetaMorph STK UIC4Tag from file and return as dict . |
52,649 | def read_uic_image_property ( fh ) : size = struct . unpack ( 'B' , fh . read ( 1 ) ) [ 0 ] name = struct . unpack ( '%is' % size , fh . read ( size ) ) [ 0 ] [ : - 1 ] flags , prop = struct . unpack ( '<IB' , fh . read ( 5 ) ) if prop == 1 : value = struct . unpack ( 'II' , fh . read ( 8 ) ) value = value [ 0 ] / valu... | Read UIC ImagePropertyEx tag from file and return as dict . |
52,650 | def read_cz_lsminfo ( fh , byteorder , dtype , count , offsetsize ) : assert byteorder == '<' magic_number , structure_size = struct . unpack ( '<II' , fh . read ( 8 ) ) if magic_number not in ( 50350412 , 67127628 ) : raise ValueError ( 'invalid CZ_LSMINFO structure' ) fh . seek ( - 8 , 1 ) if structure_size < numpy .... | Read CZ_LSMINFO tag from file and return as dict . |
52,651 | def read_lsm_floatpairs ( fh ) : size = struct . unpack ( '<i' , fh . read ( 4 ) ) [ 0 ] return fh . read_array ( '<2f8' , count = size ) | Read LSM sequence of float pairs from file and return as list . |
52,652 | def read_lsm_positions ( fh ) : size = struct . unpack ( '<I' , fh . read ( 4 ) ) [ 0 ] return fh . read_array ( '<2f8' , count = size ) | Read LSM positions from file and return as list . |
52,653 | def read_lsm_channelcolors ( fh ) : result = { 'Mono' : False , 'Colors' : [ ] , 'ColorNames' : [ ] } pos = fh . tell ( ) ( size , ncolors , nnames , coffset , noffset , mono ) = struct . unpack ( '<IIIIII' , fh . read ( 24 ) ) if ncolors != nnames : log . warning ( 'read_lsm_channelcolors: invalid LSM ChannelColors st... | Read LSM ChannelColors structure from file and return as dict . |
52,654 | def read_lsm_scaninfo ( fh ) : block = { } blocks = [ block ] unpack = struct . unpack if struct . unpack ( '<I' , fh . read ( 4 ) ) [ 0 ] != 0x10000000 : log . warning ( 'read_lsm_scaninfo: invalid LSM ScanInfo structure' ) return block fh . read ( 8 ) while True : entry , dtype , size = unpack ( '<III' , fh . read ( ... | Read LSM ScanInfo structure from file and return as dict . |
52,655 | def read_sis ( fh , byteorder , dtype , count , offsetsize ) : result = { } ( magic , _ , minute , hour , day , month , year , _ , name , tagcount ) = struct . unpack ( '<4s6shhhhh6s32sh' , fh . read ( 60 ) ) if magic != b'SIS0' : raise ValueError ( 'invalid OlympusSIS structure' ) result [ 'name' ] = bytes2str ( strip... | Read OlympusSIS structure and return as dict . |
52,656 | def read_sis_ini ( fh , byteorder , dtype , count , offsetsize ) : inistr = fh . read ( count ) inistr = bytes2str ( stripnull ( inistr ) ) try : return olympusini_metadata ( inistr ) except Exception as exc : log . warning ( 'olympusini_metadata: %s: %s' , exc . __class__ . __name__ , exc ) return { } | Read OlympusSIS INI string and return as dict . |
52,657 | def read_tvips_header ( fh , byteorder , dtype , count , offsetsize ) : result = { } header = fh . read_record ( TIFF . TVIPS_HEADER_V1 , byteorder = byteorder ) for name , typestr in TIFF . TVIPS_HEADER_V1 : result [ name ] = header [ name ] . tolist ( ) if header [ 'Version' ] == 2 : header = fh . read_record ( TIFF ... | Read TVIPS EM - MENU headers and return as dict . |
52,658 | def read_cz_sem ( fh , byteorder , dtype , count , offsetsize ) : result = { '' : ( ) } key = None data = bytes2str ( stripnull ( fh . read ( count ) ) ) for line in data . splitlines ( ) : if line . isupper ( ) : key = line . lower ( ) elif key : try : name , value = line . split ( '=' ) except ValueError : try : name... | Read Zeiss SEM tag and return as dict . |
52,659 | def read_nih_image_header ( fh , byteorder , dtype , count , offsetsize ) : a = fh . read_record ( TIFF . NIH_IMAGE_HEADER , byteorder = byteorder ) a = a . newbyteorder ( byteorder ) a = recarray2dict ( a ) a [ 'XUnit' ] = a [ 'XUnit' ] [ : a [ 'XUnitSize' ] ] a [ 'UM' ] = a [ 'UM' ] [ : a [ 'UMsize' ] ] return a | Read NIH_IMAGE_HEADER tag from file and return as dict . |
52,660 | def read_scanimage_metadata ( fh ) : fh . seek ( 0 ) try : byteorder , version = struct . unpack ( '<2sH' , fh . read ( 4 ) ) if byteorder != b'II' or version != 43 : raise Exception fh . seek ( 16 ) magic , version , size0 , size1 = struct . unpack ( '<IIII' , fh . read ( 16 ) ) if magic != 117637889 or version != 3 :... | Read ScanImage BigTIFF v3 static and ROI metadata from open file . |
52,661 | def read_micromanager_metadata ( fh ) : fh . seek ( 0 ) try : byteorder = { b'II' : '<' , b'MM' : '>' } [ fh . read ( 2 ) ] except IndexError : raise ValueError ( 'not a MicroManager TIFF file' ) result = { } fh . seek ( 8 ) ( index_header , index_offset , display_header , display_offset , comments_header , comments_of... | Read MicroManager non - TIFF settings from open file and return as dict . |
52,662 | def imagej_metadata_tag ( metadata , byteorder ) : header = [ { '>' : b'IJIJ' , '<' : b'JIJI' } [ byteorder ] ] bytecounts = [ 0 ] body = [ ] def _string ( data , byteorder ) : return data . encode ( 'utf-16' + { '>' : 'be' , '<' : 'le' } [ byteorder ] ) def _doubles ( data , byteorder ) : return struct . pack ( byteor... | Return IJMetadata and IJMetadataByteCounts tags from metadata dict . |
52,663 | def imagej_metadata ( data , bytecounts , byteorder ) : def _string ( data , byteorder ) : return data . decode ( 'utf-16' + { '>' : 'be' , '<' : 'le' } [ byteorder ] ) def _doubles ( data , byteorder ) : return struct . unpack ( byteorder + ( 'd' * ( len ( data ) // 8 ) ) , data ) def _lut ( data , byteorder ) : retur... | Return IJMetadata tag value as dict . |
52,664 | def imagej_description_metadata ( description ) : def _bool ( val ) : return { 'true' : True , 'false' : False } [ val . lower ( ) ] result = { } for line in description . splitlines ( ) : try : key , val = line . split ( '=' ) except Exception : continue key = key . strip ( ) val = val . strip ( ) for dtype in ( int ,... | Return metadata from ImageJ image description as dict . |
52,665 | def imagej_description ( shape , rgb = None , colormaped = False , version = None , hyperstack = None , mode = None , loop = None , ** kwargs ) : if colormaped : raise NotImplementedError ( 'ImageJ colormapping not supported' ) if version is None : version = '1.11a' shape = imagej_shape ( shape , rgb = rgb ) rgb = shap... | Return ImageJ image description from data shape . |
52,666 | def imagej_shape ( shape , rgb = None ) : shape = tuple ( int ( i ) for i in shape ) ndim = len ( shape ) if 1 > ndim > 6 : raise ValueError ( 'invalid ImageJ hyperstack: not 2 to 6 dimensional' ) if rgb is None : rgb = shape [ - 1 ] in ( 3 , 4 ) and ndim > 2 if rgb and shape [ - 1 ] not in ( 3 , 4 ) : raise ValueError... | Return shape normalized to 6D ImageJ hyperstack TZCYXS . |
52,667 | def json_description ( shape , ** metadata ) : metadata . update ( shape = shape ) return json . dumps ( metadata ) | Return JSON image description from data shape and other metadata . |
52,668 | def json_description_metadata ( description ) : if description [ : 6 ] == 'shape=' : shape = tuple ( int ( i ) for i in description [ 7 : - 1 ] . split ( ',' ) ) return dict ( shape = shape ) if description [ : 1 ] == '{' and description [ - 1 : ] == '}' : return json . loads ( description ) raise ValueError ( 'invalid... | Return metadata from JSON formatted image description as dict . |
52,669 | def fluoview_description_metadata ( description , ignoresections = None ) : if not description . startswith ( '[' ) : raise ValueError ( 'invalid FluoView image description' ) if ignoresections is None : ignoresections = { 'Region Info (Fields)' , 'Protocol Description' } result = { } sections = [ result ] comment = Fa... | Return metadata from FluoView image description as dict . |
52,670 | def pilatus_description_metadata ( description ) : result = { } if not description . startswith ( '# ' ) : return result for c in '#:=,()' : description = description . replace ( c , ' ' ) for line in description . split ( '\n' ) : if line [ : 2 ] != ' ' : continue line = line . split ( ) name = line [ 0 ] if line [ 0... | Return metadata from Pilatus image description as dict . |
52,671 | def svs_description_metadata ( description ) : if not description . startswith ( 'Aperio Image Library ' ) : raise ValueError ( 'invalid Aperio image description' ) result = { } lines = description . split ( '\n' ) key , value = lines [ 0 ] . strip ( ) . rsplit ( None , 1 ) result [ key . strip ( ) ] = value . strip ( ... | Return metadata from Aperio image description as dict . |
52,672 | def stk_description_metadata ( description ) : description = description . strip ( ) if not description : return [ ] try : description = bytes2str ( description ) except UnicodeDecodeError as exc : log . warning ( 'stk_description_metadata: %s: %s' , exc . __class__ . __name__ , exc ) return [ ] result = [ ] for plane ... | Return metadata from MetaMorph image description as list of dict . |
52,673 | def metaseries_description_metadata ( description ) : if not description . startswith ( '<MetaData>' ) : raise ValueError ( 'invalid MetaSeries image description' ) from xml . etree import cElementTree as etree root = etree . fromstring ( description ) types = { 'float' : float , 'int' : int , 'bool' : lambda x : asboo... | Return metadata from MetaSeries image description as dict . |
52,674 | def scanimage_artist_metadata ( artist ) : try : return json . loads ( artist ) except ValueError as exc : log . warning ( 'scanimage_artist_metadata: %s: %s' , exc . __class__ . __name__ , exc ) | Return metadata from ScanImage artist tag as dict . |
52,675 | def tile_decode ( tile , tileindex , tileshape , tiledshape , lsb2msb , decompress , unpack , unpredict , out ) : _ , imagedepth , imagelength , imagewidth , _ = out . shape tileddepth , tiledlength , tiledwidth = tiledshape tiledepth , tilelength , tilewidth , samples = tileshape tilesize = tiledepth * tilelength * ti... | Decode tile segment bytes into 5D output array . |
52,676 | def unpack_rgb ( data , dtype = None , bitspersample = None , rescale = True ) : if bitspersample is None : bitspersample = ( 5 , 6 , 5 ) if dtype is None : dtype = '<B' dtype = numpy . dtype ( dtype ) bits = int ( numpy . sum ( bitspersample ) ) if not ( bits <= 32 and all ( i <= dtype . itemsize * 8 for i in bitspers... | Return array from byte string containing packed samples . |
52,677 | def delta_encode ( data , axis = - 1 , out = None ) : if isinstance ( data , ( bytes , bytearray ) ) : data = numpy . frombuffer ( data , dtype = 'u1' ) diff = numpy . diff ( data , axis = 0 ) return numpy . insert ( diff , 0 , data [ 0 ] ) . tobytes ( ) dtype = data . dtype if dtype . kind == 'f' : data = data . view ... | Encode Delta . |
52,678 | def delta_decode ( data , axis = - 1 , out = None ) : if out is not None and not out . flags . writeable : out = None if isinstance ( data , ( bytes , bytearray ) ) : data = numpy . frombuffer ( data , dtype = 'u1' ) return numpy . cumsum ( data , axis = 0 , dtype = 'u1' , out = out ) . tobytes ( ) if data . dtype . ki... | Decode Delta . |
52,679 | def bitorder_decode ( data , out = None , _bitorder = [ ] ) : if not _bitorder : _bitorder . append ( b'\x00\x80@\xc0 \xa0`\xe0\x10\x90P\xd00\xb0p\xf0\x08\x88H\xc8(' b'\xa8h\xe8\x18\x98X\xd88\xb8x\xf8\x04\x84D\xc4$\xa4d\xe4\x14' b'\x94T\xd44\xb4t\xf4\x0c\x8cL\xcc,\xacl\xec\x1c\x9c\\\xdc<\xbc|' b'\xfc\x02\x82B\xc2"\xa2b... | Reverse bits in each byte of byte string or numpy array . |
52,680 | def packints_decode ( data , dtype , numbits , runlen = 0 , out = None ) : if numbits == 1 : data = numpy . frombuffer ( data , '|B' ) data = numpy . unpackbits ( data ) if runlen % 8 : data = data . reshape ( - 1 , runlen + ( 8 - runlen % 8 ) ) data = data [ : , : runlen ] . reshape ( - 1 ) return data . astype ( dtyp... | Decompress byte string to array of integers . |
52,681 | def apply_colormap ( image , colormap , contig = True ) : image = numpy . take ( colormap , image , axis = 1 ) image = numpy . rollaxis ( image , 0 , image . ndim ) if contig : image = numpy . ascontiguousarray ( image ) return image | Return palette - colored image . |
52,682 | def repeat_nd ( a , repeats ) : a = numpy . asarray ( a ) reshape = [ ] shape = [ ] strides = [ ] for i , j , k in zip ( a . strides , a . shape , repeats ) : shape . extend ( ( j , k ) ) strides . extend ( ( i , 0 ) ) reshape . append ( j * k ) return numpy . lib . stride_tricks . as_strided ( a , shape , strides , wr... | Return read - only view into input array with elements repeated . |
52,683 | def reshape_nd ( data_or_shape , ndim ) : is_shape = isinstance ( data_or_shape , tuple ) shape = data_or_shape if is_shape else data_or_shape . shape if len ( shape ) >= ndim : return data_or_shape shape = ( 1 , ) * ( ndim - len ( shape ) ) + shape return shape if is_shape else data_or_shape . reshape ( shape ) | Return image array or shape with at least ndim dimensions . |
52,684 | def squeeze_axes ( shape , axes , skip = None ) : if len ( shape ) != len ( axes ) : raise ValueError ( 'dimensions of axes and shape do not match' ) if skip is None : skip = 'XY' shape , axes = zip ( * ( i for i in zip ( shape , axes ) if i [ 0 ] > 1 or i [ 1 ] in skip ) ) return tuple ( shape ) , '' . join ( axes ) | Return shape and axes with single - dimensional entries removed . |
52,685 | def transpose_axes ( image , axes , asaxes = None ) : for ax in axes : if ax not in asaxes : raise ValueError ( 'unknown axis %s' % ax ) if asaxes is None : asaxes = 'CTZYX' shape = image . shape for ax in reversed ( asaxes ) : if ax not in axes : axes = ax + axes shape = ( 1 , ) + shape image = image . reshape ( shape... | Return image with its axes permuted to match specified axes . |
52,686 | def reshape_axes ( axes , shape , newshape , unknown = None ) : shape = tuple ( shape ) newshape = tuple ( newshape ) if len ( axes ) != len ( shape ) : raise ValueError ( 'axes do not match shape' ) size = product ( shape ) newsize = product ( newshape ) if size != newsize : raise ValueError ( 'cannot reshape %s to %s... | Return axes matching new shape . |
52,687 | def stack_pages ( pages , out = None , maxworkers = None , ** kwargs ) : npages = len ( pages ) if npages == 0 : raise ValueError ( 'no pages' ) if npages == 1 : kwargs [ 'maxworkers' ] = maxworkers return pages [ 0 ] . asarray ( out = out , ** kwargs ) page0 = next ( p for p in pages if p is not None ) . keyframe page... | Read data from sequence of TiffPage and stack them vertically . |
52,688 | def clean_offsetscounts ( offsets , counts ) : offsets = list ( offsets ) counts = list ( counts ) size = len ( offsets ) if size != len ( counts ) : raise ValueError ( 'StripOffsets and StripByteCounts mismatch' ) j = 0 for i , ( o , b ) in enumerate ( zip ( offsets , counts ) ) : if b > 0 : if o > 0 : if i > j : offs... | Return cleaned offsets and byte counts . |
52,689 | def buffered_read ( fh , lock , offsets , bytecounts , buffersize = None ) : if buffersize is None : buffersize = 2 ** 26 length = len ( offsets ) i = 0 while i < length : data = [ ] with lock : size = 0 while size < buffersize and i < length : fh . seek ( offsets [ i ] ) bytecount = bytecounts [ i ] data . append ( fh... | Return iterator over segments read from file . |
52,690 | def create_output ( out , shape , dtype , mode = 'w+' , suffix = None ) : if out is None : return numpy . zeros ( shape , dtype ) if isinstance ( out , str ) and out [ : 6 ] == 'memmap' : import tempfile tempdir = out [ 7 : ] if len ( out ) > 7 else None if suffix is None : suffix = '.memmap' with tempfile . NamedTempo... | Return numpy array where image data of shape and dtype can be copied . |
52,691 | def stripascii ( string ) : i = len ( string ) while i : i -= 1 if 8 < byte2int ( string [ i ] ) < 127 : break else : i = - 1 return string [ : i + 1 ] | Return string truncated at last byte that is 7 - bit ASCII . |
52,692 | def asbool ( value , true = ( b'true' , u'true' ) , false = ( b'false' , u'false' ) ) : value = value . strip ( ) . lower ( ) if value in true : return True if value in false : return False raise TypeError ( ) | Return string as bool if possible else raise TypeError . |
52,693 | def astype ( value , types = None ) : if types is None : types = int , float , asbool , bytes2str for typ in types : try : return typ ( value ) except ( ValueError , AttributeError , TypeError , UnicodeEncodeError ) : pass return value | Return argument as one of types if possible . |
52,694 | def format_size ( size , threshold = 1536 ) : if size < threshold : return "%i B" % size for unit in ( 'KiB' , 'MiB' , 'GiB' , 'TiB' , 'PiB' ) : size /= 1024.0 if size < threshold : return "%.2f %s" % ( size , unit ) return 'ginormous' | Return file size as string from byte size . |
52,695 | def natural_sorted ( iterable ) : def sortkey ( x ) : return [ ( int ( c ) if c . isdigit ( ) else c ) for c in re . split ( numbers , x ) ] numbers = re . compile ( r'(\d+)' ) return sorted ( iterable , key = sortkey ) | Return human sorted list of strings . |
52,696 | def byteorder_isnative ( byteorder ) : if byteorder in ( '=' , sys . byteorder ) : return True keys = { 'big' : '>' , 'little' : '<' } return keys . get ( byteorder , byteorder ) == keys [ sys . byteorder ] | Return if byteorder matches the system's byteorder . |
52,697 | def recarray2dict ( recarray ) : result = { } for descr , value in zip ( recarray . dtype . descr , recarray ) : name , dtype = descr [ : 2 ] if dtype [ 1 ] == 'S' : value = bytes2str ( stripnull ( value ) ) elif value . ndim < 2 : value = value . tolist ( ) result [ name ] = value return result | Return numpy . recarray as dict . |
52,698 | def xml2dict ( xml , sanitize = True , prefix = None ) : from xml . etree import cElementTree as etree at = tx = '' if prefix : at , tx = prefix def astype ( value ) : for t in ( int , float , asbool ) : try : return t ( value ) except Exception : pass return value def etree2dict ( t ) : key = t . tag if sanitize : key... | Return XML as dict . |
52,699 | def isprintable ( string ) : string = string . strip ( ) if not string : return True if sys . version_info [ 0 ] == 3 : try : return string . isprintable ( ) except Exception : pass try : return string . decode ( 'utf-8' ) . isprintable ( ) except Exception : pass else : if string . isalnum ( ) : return True printable ... | Return if all characters in string are printable . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.