idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
9,400 | def dict_stack ( dict_list , key_prefix = '' ) : r dict_stacked_ = defaultdict ( list ) for dict_ in dict_list : for key , val in six . iteritems ( dict_ ) : dict_stacked_ [ key_prefix + key ] . append ( val ) dict_stacked = dict ( dict_stacked_ ) return dict_stacked | r stacks values from two dicts into a new dict where the values are list of the input values . the keys are the same . |
9,401 | def dict_stack2 ( dict_list , key_suffix = None , default = None ) : if len ( dict_list ) > 0 : dict_list_ = [ map_dict_vals ( lambda x : [ x ] , kw ) for kw in dict_list ] default1 = [ ] default2 = [ default ] accum_ = dict_list_ [ 0 ] for dict_ in dict_list_ [ 1 : ] : default1 . append ( default ) accum_ = dict_union_combine ( accum_ , dict_ , default = default1 , default2 = default2 ) stacked_dict = accum_ else : stacked_dict = { } if key_suffix is not None : stacked_dict = map_dict_keys ( lambda x : x + key_suffix , stacked_dict ) return stacked_dict | Stacks vals from a list of dicts into a dict of lists . Inserts Nones in place of empty items to preserve order . |
9,402 | def invert_dict ( dict_ , unique_vals = True ) : if unique_vals : inverted_items = [ ( val , key ) for key , val in six . iteritems ( dict_ ) ] inverted_dict = type ( dict_ ) ( inverted_items ) else : inverted_dict = group_items ( dict_ . keys ( ) , dict_ . values ( ) ) return inverted_dict | Reverses the keys and values in a dictionary . Set unique_vals to False if the values in the dict are not unique . |
9,403 | def iter_all_dict_combinations_ordered ( varied_dict ) : tups_list = [ [ ( key , val ) for val in val_list ] for ( key , val_list ) in six . iteritems ( varied_dict ) ] dict_iter = ( OrderedDict ( tups ) for tups in it . product ( * tups_list ) ) return dict_iter | Same as all_dict_combinations but preserves order |
9,404 | def all_dict_combinations_lbls ( varied_dict , remove_singles = True , allow_lone_singles = False ) : is_lone_single = all ( [ isinstance ( val_list , ( list , tuple ) ) and len ( val_list ) == 1 for key , val_list in iteritems_sorted ( varied_dict ) ] ) if not remove_singles or ( allow_lone_singles and is_lone_single ) : multitups_list = [ [ ( key , val ) for val in val_list ] for key , val_list in iteritems_sorted ( varied_dict ) ] else : multitups_list = [ [ ( key , val ) for val in val_list ] for key , val_list in iteritems_sorted ( varied_dict ) if isinstance ( val_list , ( list , tuple ) ) and len ( val_list ) > 1 ] combtup_list = list ( it . product ( * multitups_list ) ) combtup_list2 = [ [ ( key , val ) if isinstance ( val , six . string_types ) else ( key , repr ( val ) ) for ( key , val ) in combtup ] for combtup in combtup_list ] comb_lbls = [ ',' . join ( [ '%s=%s' % ( key , val ) for ( key , val ) in combtup ] ) for combtup in combtup_list2 ] return comb_lbls | returns a label for each variation in a varydict . |
9,405 | def build_conflict_dict ( key_list , val_list ) : key_to_vals = defaultdict ( list ) for key , val in zip ( key_list , val_list ) : key_to_vals [ key ] . append ( val ) return key_to_vals | Builds dict where a list of values is associated with more than one key |
9,406 | def update_existing ( dict1 , dict2 , copy = False , assert_exists = False , iswarning = False , alias_dict = None ) : r if assert_exists : try : assert_keys_are_subset ( dict1 , dict2 ) except AssertionError as ex : from utool import util_dbg util_dbg . printex ( ex , iswarning = iswarning , N = 1 ) if not iswarning : raise if copy : dict1 = dict ( dict1 ) if alias_dict is None : alias_dict = { } for key , val in six . iteritems ( dict2 ) : key = alias_dict . get ( key , key ) if key in dict1 : dict1 [ key ] = val return dict1 | r updates vals in dict1 using vals from dict2 only if the key is already in dict1 . |
9,407 | def dict_update_newkeys ( dict_ , dict2 ) : for key , val in six . iteritems ( dict2 ) : if key not in dict_ : dict_ [ key ] = val | Like dict . update but does not overwrite items |
9,408 | def is_dicteq ( dict1_ , dict2_ , almosteq_ok = True , verbose_err = True ) : import utool as ut assert len ( dict1_ ) == len ( dict2_ ) , 'dicts are not of same length' try : for ( key1 , val1 ) , ( key2 , val2 ) in zip ( dict1_ . items ( ) , dict2_ . items ( ) ) : assert key1 == key2 , 'key mismatch' assert type ( val1 ) == type ( val2 ) , 'vals are not same type' if HAVE_NUMPY and np . iterable ( val1 ) : if almosteq_ok and ut . is_float ( val1 ) : assert np . all ( ut . almost_eq ( val1 , val2 ) ) , 'float vals are not within thresh' else : assert all ( [ np . all ( x1 == x2 ) for ( x1 , x2 ) in zip ( val1 , val2 ) ] ) , 'np vals are different' elif isinstance ( val1 , dict ) : is_dicteq ( val1 , val2 , almosteq_ok = almosteq_ok , verbose_err = verbose_err ) else : assert val1 == val2 , 'vals are different' except AssertionError as ex : if verbose_err : ut . printex ( ex ) return False return True | Checks to see if dicts are the same . Performs recursion . Handles numpy |
9,409 | def dict_setdiff ( dict_ , negative_keys ) : r keys = [ key for key in six . iterkeys ( dict_ ) if key not in set ( negative_keys ) ] subdict_ = dict_subset ( dict_ , keys ) return subdict_ | r returns a copy of dict_ without keys in the negative_keys list |
9,410 | def delete_dict_keys ( dict_ , key_list ) : r invalid_keys = set ( key_list ) - set ( dict_ . keys ( ) ) valid_keys = set ( key_list ) - invalid_keys for key in valid_keys : del dict_ [ key ] return dict_ | r Removes items from a dictionary inplace . Keys that do not exist are ignored . |
9,411 | def dict_take_gen ( dict_ , keys , * d ) : r if isinstance ( keys , six . string_types ) : keys = keys . split ( ', ' ) if len ( d ) == 0 : dictget = dict_ . __getitem__ elif len ( d ) == 1 : dictget = dict_ . get else : raise ValueError ( 'len(d) must be 1 or 0' ) for key in keys : if HAVE_NUMPY and isinstance ( key , np . ndarray ) : yield list ( dict_take_gen ( dict_ , key , * d ) ) else : yield dictget ( key , * d ) | r generate multiple values from a dictionary |
9,412 | def dict_take ( dict_ , keys , * d ) : try : return list ( dict_take_gen ( dict_ , keys , * d ) ) except TypeError : return list ( dict_take_gen ( dict_ , keys , * d ) ) [ 0 ] | get multiple values from a dictionary |
9,413 | def dict_take_pop ( dict_ , keys , * d ) : if len ( d ) == 0 : return [ dict_ . pop ( key ) for key in keys ] elif len ( d ) == 1 : default = d [ 0 ] return [ dict_ . pop ( key , default ) for key in keys ] else : raise ValueError ( 'len(d) must be 1 or 0' ) | like dict_take but pops values off |
9,414 | def dict_assign ( dict_ , keys , vals ) : for key , val in zip ( keys , vals ) : dict_ [ key ] = val | simple method for assigning or setting values with a similar interface to dict_take |
9,415 | def dict_where_len0 ( dict_ ) : keys = np . array ( dict_ . keys ( ) ) flags = np . array ( list ( map ( len , dict_ . values ( ) ) ) ) == 0 indices = np . where ( flags ) [ 0 ] return keys [ indices ] | Accepts a dict of lists . Returns keys that have vals with no length |
9,416 | def dict_hist ( item_list , weight_list = None , ordered = False , labels = None ) : r if labels is None : hist_ = defaultdict ( int ) else : hist_ = { k : 0 for k in labels } if weight_list is None : for item in item_list : hist_ [ item ] += 1 else : for item , weight in zip ( item_list , weight_list ) : hist_ [ item ] += weight if ordered : getval = op . itemgetter ( 1 ) key_order = [ key for ( key , value ) in sorted ( hist_ . items ( ) , key = getval ) ] hist_ = order_dict_by ( hist_ , key_order ) return hist_ | r Builds a histogram of items in item_list |
9,417 | def dict_isect_combine ( dict1 , dict2 , combine_op = op . add ) : keys3 = set ( dict1 . keys ( ) ) . intersection ( set ( dict2 . keys ( ) ) ) dict3 = { key : combine_op ( dict1 [ key ] , dict2 [ key ] ) for key in keys3 } return dict3 | Intersection of dict keys and combination of dict values |
9,418 | def dict_union_combine ( dict1 , dict2 , combine_op = op . add , default = util_const . NoParam , default2 = util_const . NoParam ) : keys3 = set ( dict1 . keys ( ) ) . union ( set ( dict2 . keys ( ) ) ) if default is util_const . NoParam : dict3 = { key : combine_op ( dict1 [ key ] , dict2 [ key ] ) for key in keys3 } else : if default2 is util_const . NoParam : default2 = default dict3 = { key : combine_op ( dict1 . get ( key , default ) , dict2 . get ( key , default2 ) ) for key in keys3 } return dict3 | Combine of dict keys and uses dfault value when key does not exist |
9,419 | def dict_filter_nones ( dict_ ) : r dict2_ = { key : val for key , val in six . iteritems ( dict_ ) if val is not None } return dict2_ | r Removes None values |
9,420 | def groupby_tags ( item_list , tags_list ) : r groupid_to_items = defaultdict ( list ) for tags , item in zip ( tags_list , item_list ) : for tag in tags : groupid_to_items [ tag ] . append ( item ) return groupid_to_items | r case where an item can belong to multiple groups |
9,421 | def group_pairs ( pair_list ) : groupid_to_items = defaultdict ( list ) for item , groupid in pair_list : groupid_to_items [ groupid ] . append ( item ) return groupid_to_items | Groups a list of items using the first element in each pair as the item and the second element as the groupid . |
9,422 | def group_items ( items , by = None , sorted_ = True ) : if by is not None : pairs = list ( zip ( by , items ) ) if sorted_ : try : pairs = sorted ( pairs , key = op . itemgetter ( 0 ) ) except TypeError : pairs = sorted ( pairs , key = lambda tup : str ( tup [ 0 ] ) ) else : pairs = items groupid_to_items = defaultdict ( list ) for groupid , item in pairs : groupid_to_items [ groupid ] . append ( item ) return groupid_to_items | Groups a list of items by group id . |
9,423 | def hierarchical_group_items ( item_list , groupids_list ) : num_groups = len ( groupids_list ) leaf_type = partial ( defaultdict , list ) if num_groups > 1 : node_type = leaf_type for _ in range ( len ( groupids_list ) - 2 ) : node_type = partial ( defaultdict , node_type ) root_type = node_type elif num_groups == 1 : root_type = list else : raise ValueError ( 'must suply groupids' ) tree = defaultdict ( root_type ) groupid_tuple_list = list ( zip ( * groupids_list ) ) for groupid_tuple , item in zip ( groupid_tuple_list , item_list ) : node = tree for groupid in groupid_tuple : node = node [ groupid ] node . append ( item ) return tree | Generalization of group_item . Convert a flast list of ids into a heirarchical dictionary . |
9,424 | def hierarchical_map_vals ( func , node , max_depth = None , depth = 0 ) : if not hasattr ( node , 'items' ) : return func ( node ) elif max_depth is not None and depth >= max_depth : return map_dict_vals ( func , node ) else : keyval_list = [ ( key , hierarchical_map_vals ( func , val , max_depth , depth + 1 ) ) for key , val in node . items ( ) ] if isinstance ( node , OrderedDict ) : return OrderedDict ( keyval_list ) else : return dict ( keyval_list ) | node is a dict tree like structure with leaves of type list |
9,425 | def sort_dict ( dict_ , part = 'keys' , key = None , reverse = False ) : if part == 'keys' : index = 0 elif part in { 'vals' , 'values' } : index = 1 else : raise ValueError ( 'Unknown method part=%r' % ( part , ) ) if key is None : _key = op . itemgetter ( index ) else : def _key ( item ) : return key ( item [ index ] ) sorted_items = sorted ( six . iteritems ( dict_ ) , key = _key , reverse = reverse ) sorted_dict = OrderedDict ( sorted_items ) return sorted_dict | sorts a dictionary by its values or its keys |
9,426 | def order_dict_by ( dict_ , key_order ) : r dict_keys = set ( dict_ . keys ( ) ) other_keys = dict_keys - set ( key_order ) key_order = it . chain ( key_order , other_keys ) sorted_dict = OrderedDict ( ( key , dict_ [ key ] ) for key in key_order if key in dict_keys ) return sorted_dict | r Reorders items in a dictionary according to a custom key order |
9,427 | def iteritems_sorted ( dict_ ) : if isinstance ( dict_ , OrderedDict ) : return six . iteritems ( dict_ ) else : return iter ( sorted ( six . iteritems ( dict_ ) ) ) | change to iteritems ordered |
9,428 | def flatten_dict_vals ( dict_ ) : if isinstance ( dict_ , dict ) : return dict ( [ ( ( key , augkey ) , augval ) for key , val in dict_ . items ( ) for augkey , augval in flatten_dict_vals ( val ) . items ( ) ] ) else : return { None : dict_ } | Flattens only values in a heirarchical dictionary keys are nested . |
9,429 | def depth_atleast ( list_ , depth ) : r if depth == 0 : return True else : try : return all ( [ depth_atleast ( item , depth - 1 ) for item in list_ ] ) except TypeError : return False | r Returns if depth of list is at least depth |
9,430 | def get_splitcolnr ( header , bioset , splitcol ) : if bioset : return header . index ( mzidtsvdata . HEADER_SETNAME ) elif splitcol is not None : return splitcol - 1 else : raise RuntimeError ( 'Must specify either --bioset or --splitcol' ) | Returns column nr on which to split PSM table . Chooses from flags given via bioset and splitcol |
9,431 | def generate_psms_split ( fn , oldheader , bioset , splitcol ) : try : splitcolnr = get_splitcolnr ( oldheader , bioset , splitcol ) except IndexError : raise RuntimeError ( 'Cannot find bioset header column in ' 'input file {}, though --bioset has ' 'been passed' . format ( fn ) ) for psm in tsvreader . generate_tsv_psms ( fn , oldheader ) : yield { 'psm' : psm , 'split_pool' : psm [ oldheader [ splitcolnr ] ] } | Loops PSMs and outputs dictionaries passed to writer . Dictionaries contain the PSMs and info to which split pool the respective PSM belongs |
9,432 | def rnumlistwithoutreplacement ( min , max ) : if checkquota ( ) < 1 : raise Exception ( "Your www.random.org quota has already run out." ) requestparam = build_request_parameterNR ( min , max ) request = urllib . request . Request ( requestparam ) request . add_header ( 'User-Agent' , 'randomwrapy/0.1 very alpha' ) opener = urllib . request . build_opener ( ) numlist = opener . open ( request ) . read ( ) return numlist . split ( ) | Returns a randomly ordered list of the integers between min and max |
9,433 | def rnumlistwithreplacement ( howmany , max , min = 0 ) : if checkquota ( ) < 1 : raise Exception ( "Your www.random.org quota has already run out." ) requestparam = build_request_parameterWR ( howmany , min , max ) request = urllib . request . Request ( requestparam ) request . add_header ( 'User-Agent' , 'randomwrapy/0.1 very alpha' ) opener = urllib . request . build_opener ( ) numlist = opener . open ( request ) . read ( ) return numlist . split ( ) | Returns a list of howmany integers with a maximum value = max . The minimum value defaults to zero . |
9,434 | def num_fmt ( num , max_digits = None ) : r if num is None : return 'None' def num_in_mag ( num , mag ) : return mag > num and num > ( - 1 * mag ) if max_digits is None : if num_in_mag ( num , 1 ) : if num_in_mag ( num , .1 ) : max_digits = 4 else : max_digits = 3 else : max_digits = 1 if util_type . is_float ( num ) : num_str = ( '%.' + str ( max_digits ) + 'f' ) % num num_str = num_str . rstrip ( '0' ) . lstrip ( '0' ) if num_str . startswith ( '.' ) : num_str = '0' + num_str if num_str . endswith ( '.' ) : num_str = num_str + '0' return num_str elif util_type . is_int ( num ) : return int_comma_str ( num ) else : return '%r' | r Weird function . Not very well written . Very special case - y |
9,435 | def load_feature_lists ( self , feature_lists ) : column_names = [ ] feature_ranges = [ ] running_feature_count = 0 for list_id in feature_lists : feature_list_names = load_lines ( self . features_dir + 'X_train_{}.names' . format ( list_id ) ) column_names . extend ( feature_list_names ) start_index = running_feature_count end_index = running_feature_count + len ( feature_list_names ) - 1 running_feature_count += len ( feature_list_names ) feature_ranges . append ( [ list_id , start_index , end_index ] ) X_train = np . hstack ( [ load ( self . features_dir + 'X_train_{}.pickle' . format ( list_id ) ) for list_id in feature_lists ] ) X_test = np . hstack ( [ load ( self . features_dir + 'X_test_{}.pickle' . format ( list_id ) ) for list_id in feature_lists ] ) df_train = pd . DataFrame ( X_train , columns = column_names ) df_test = pd . DataFrame ( X_test , columns = column_names ) return df_train , df_test , feature_ranges | Load pickled features for train and test sets assuming they are saved in the features folder along with their column names . |
9,436 | def save_features ( self , train_features , test_features , feature_names , feature_list_id ) : self . save_feature_names ( feature_names , feature_list_id ) self . save_feature_list ( train_features , 'train' , feature_list_id ) self . save_feature_list ( test_features , 'test' , feature_list_id ) | Save features for the training and test sets to disk along with their metadata . |
9,437 | def discover ( ) : candidate_path = os . path . abspath ( os . path . join ( os . curdir , os . pardir , 'data' ) ) if os . path . exists ( candidate_path ) : return Project ( os . path . abspath ( os . path . join ( candidate_path , os . pardir ) ) ) candidate_path = os . path . abspath ( os . path . join ( os . curdir , 'data' ) ) if os . path . exists ( candidate_path ) : return Project ( os . path . abspath ( os . curdir ) ) candidate_path = os . path . abspath ( os . path . join ( os . curdir , os . pardir , 'data' ) ) if os . path . exists ( candidate_path ) : return Project ( os . path . abspath ( os . path . join ( candidate_path , os . pardir , os . pardir ) ) ) raise ValueError ( 'Cannot discover the structure of the project. Make sure that the data directory exists' ) | Automatically discover the paths to various data folders in this project and compose a Project instance . |
9,438 | def init ( ) : project = Project ( os . path . abspath ( os . getcwd ( ) ) ) paths_to_create = [ project . data_dir , project . notebooks_dir , project . aux_dir , project . features_dir , project . preprocessed_data_dir , project . submissions_dir , project . trained_model_dir , project . temp_dir , ] for path in paths_to_create : os . makedirs ( path , exist_ok = True ) | Creates the project infrastructure assuming the current directory is the project root . Typically used as a command - line entry point called by pygoose init . |
9,439 | def unique_justseen ( iterable , key = None ) : "List unique elements, preserving order. Remember only the element just seen." return imap ( next , imap ( operator . itemgetter ( 1 ) , groupby ( iterable , key ) ) ) | List unique elements preserving order . Remember only the element just seen . |
9,440 | def _get_module ( module_name = None , module = None , register = True ) : if module is None and module_name is not None : try : module = sys . modules [ module_name ] except KeyError as ex : print ( ex ) raise KeyError ( ( 'module_name=%r must be loaded before ' + 'receiving injections' ) % module_name ) elif module is not None and module_name is None : pass else : raise ValueError ( 'module_name or module must be exclusively specified' ) if register is True : _add_injected_module ( module ) return module | finds module in sys . modules based on module name unless the module has already been found and is passed in |
9,441 | def inject_colored_exceptions ( ) : if VERBOSE : print ( '[inject] injecting colored exceptions' ) if not sys . platform . startswith ( 'win32' ) : if VERYVERBOSE : print ( '[inject] injecting colored exceptions' ) if '--noinject-color' in sys . argv : print ( 'Not injecting color' ) else : sys . excepthook = colored_pygments_excepthook else : if VERYVERBOSE : print ( '[inject] cannot inject colored exceptions' ) | Causes exceptions to be colored if not already |
9,442 | def inject_print_functions ( module_name = None , module_prefix = '[???]' , DEBUG = False , module = None ) : module = _get_module ( module_name , module ) if SILENT : def print ( * args ) : pass def printDBG ( * args ) : pass def print_ ( * args ) : pass else : if DEBUG_PRINT : def print ( * args ) : from utool . _internal . meta_util_dbg import get_caller_name calltag = '' . join ( ( '[caller:' , get_caller_name ( N = DEBUG_PRINT_N ) , ']' ) ) util_logging . _utool_print ( ) ( calltag , * args ) else : def print ( * args ) : util_logging . _utool_print ( ) ( * args ) if __AGGROFLUSH__ : def print_ ( * args ) : util_logging . _utool_write ( ) ( * args ) util_logging . _utool_flush ( ) ( ) else : def print_ ( * args ) : util_logging . _utool_write ( ) ( * args ) dotpos = module . __name__ . rfind ( '.' ) if dotpos == - 1 : module_name = module . __name__ else : module_name = module . __name__ [ dotpos + 1 : ] def _replchars ( str_ ) : return str_ . replace ( '_' , '-' ) . replace ( ']' , '' ) . replace ( '[' , '' ) flag1 = '--debug-%s' % _replchars ( module_name ) flag2 = '--debug-%s' % _replchars ( module_prefix ) DEBUG_FLAG = any ( [ flag in sys . argv for flag in [ flag1 , flag2 ] ] ) for curflag in ARGV_DEBUG_FLAGS : if curflag in module_prefix : DEBUG_FLAG = True if __DEBUG_ALL__ or DEBUG or DEBUG_FLAG : print ( 'INJECT_PRINT: %r == %r' % ( module_name , module_prefix ) ) def printDBG ( * args ) : msg = ', ' . join ( map ( str , args ) ) util_logging . __UTOOL_PRINTDBG__ ( module_prefix + ' DEBUG ' + msg ) else : def printDBG ( * args ) : pass print_funcs = ( print , print_ , printDBG ) return print_funcs | makes print functions to be injected into the module |
9,443 | def make_module_reload_func ( module_name = None , module_prefix = '[???]' , module = None ) : module = _get_module ( module_name , module , register = False ) if module_name is None : module_name = str ( module . __name__ ) def rrr ( verbose = True ) : if not __RELOAD_OK__ : raise Exception ( 'Reloading has been forced off' ) try : import imp if verbose and not QUIET : builtins . print ( 'RELOAD: ' + str ( module_prefix ) + ' __name__=' + module_name ) imp . reload ( module ) except Exception as ex : print ( ex ) print ( '%s Failed to reload' % module_prefix ) raise return rrr | Injects dynamic module reloading |
9,444 | def noinject ( module_name = None , module_prefix = '[???]' , DEBUG = False , module = None , N = 0 , via = None ) : if PRINT_INJECT_ORDER : from utool . _internal import meta_util_dbg callername = meta_util_dbg . get_caller_name ( N = N + 1 , strict = False ) lineno = meta_util_dbg . get_caller_lineno ( N = N + 1 , strict = False ) suff = ' via %s' % ( via , ) if via else '' fmtdict = dict ( N = N , lineno = lineno , callername = callername , modname = module_name , suff = suff ) msg = '[util_inject] N={N} {modname} is imported by {callername} at lineno={lineno}{suff}' . format ( ** fmtdict ) if DEBUG_SLOW_IMPORT : global PREV_MODNAME seconds = tt . toc ( ) import_times [ ( PREV_MODNAME , module_name ) ] = seconds PREV_MODNAME = module_name builtins . print ( msg ) if DEBUG_SLOW_IMPORT : tt . tic ( ) if EXIT_ON_INJECT_MODNAME == module_name : builtins . print ( '...exiting' ) assert False , 'exit in inject requested' | Use in modules that do not have inject in them |
9,445 | def inject ( module_name = None , module_prefix = '[???]' , DEBUG = False , module = None , N = 1 ) : noinject ( module_name , module_prefix , DEBUG , module , N = N ) module = _get_module ( module_name , module ) rrr = make_module_reload_func ( None , module_prefix , module ) profile_ = make_module_profile_func ( None , module_prefix , module ) print_funcs = inject_print_functions ( None , module_prefix , DEBUG , module ) ( print , print_ , printDBG ) = print_funcs return ( print , print_ , printDBG , rrr , profile_ ) | Injects your module with utool magic |
9,446 | def inject2 ( module_name = None , module_prefix = None , DEBUG = False , module = None , N = 1 ) : if module_prefix is None : module_prefix = '[%s]' % ( module_name , ) noinject ( module_name , module_prefix , DEBUG , module , N = N ) module = _get_module ( module_name , module ) rrr = make_module_reload_func ( None , module_prefix , module ) profile_ = make_module_profile_func ( None , module_prefix , module ) print = make_module_print_func ( module ) return print , rrr , profile_ | wrapper that depricates print_ and printDBG |
9,447 | def inject_python_code2 ( fpath , patch_code , tag ) : import utool as ut text = ut . readfrom ( fpath ) start_tag = '# <%s>' % tag end_tag = '# </%s>' % tag new_text = ut . replace_between_tags ( text , patch_code , start_tag , end_tag ) ut . writeto ( fpath , new_text ) | Does autogeneration stuff |
9,448 | def inject_python_code ( fpath , patch_code , tag = None , inject_location = 'after_imports' ) : import utool as ut assert tag is not None , 'TAG MUST BE SPECIFIED IN INJECTED CODETEXT' text = ut . read_from ( fpath ) comment_start_tag = '# <util_inject:%s>' % tag comment_end_tag = '# </util_inject:%s>' % tag tagstart_txtpos = text . find ( comment_start_tag ) tagend_txtpos = text . find ( comment_end_tag ) text_lines = ut . split_python_text_into_lines ( text ) if tagstart_txtpos != - 1 or tagend_txtpos != - 1 : assert tagstart_txtpos != - 1 , 'both tags must not be found' assert tagend_txtpos != - 1 , 'both tags must not be found' for pos , line in enumerate ( text_lines ) : if line . startswith ( comment_start_tag ) : tagstart_pos = pos if line . startswith ( comment_end_tag ) : tagend_pos = pos part1 = text_lines [ 0 : tagstart_pos ] part2 = text_lines [ tagend_pos + 1 : ] else : if inject_location == 'after_imports' : first_nonimport_pos = 0 for line in text_lines : list_ = [ 'import ' , 'from ' , '#' , ' ' ] isvalid = ( len ( line ) == 0 or any ( line . startswith ( str_ ) for str_ in list_ ) ) if not isvalid : break first_nonimport_pos += 1 part1 = text_lines [ 0 : first_nonimport_pos ] part2 = text_lines [ first_nonimport_pos : ] else : raise AssertionError ( 'Unknown inject location' ) newtext = ( '\n' . join ( part1 + [ comment_start_tag ] ) + '\n' + patch_code + '\n' + '\n' . join ( [ comment_end_tag ] + part2 ) ) text_backup_fname = fpath + '.' + ut . get_timestamp ( ) + '.bak' ut . write_to ( text_backup_fname , text ) ut . write_to ( fpath , newtext ) | DEPRICATE puts code into files on disk |
9,449 | def printableType ( val , name = None , parent = None ) : import numpy as np if parent is not None and hasattr ( parent , 'customPrintableType' ) : _typestr = parent . customPrintableType ( name ) if _typestr is not None : return _typestr if isinstance ( val , np . ndarray ) : info = npArrInfo ( val ) _typestr = info . dtypestr elif isinstance ( val , object ) : _typestr = val . __class__ . __name__ else : _typestr = str ( type ( val ) ) _typestr = _typestr . replace ( 'type' , '' ) _typestr = re . sub ( '[\'><]' , '' , _typestr ) _typestr = re . sub ( ' *' , ' ' , _typestr ) _typestr = _typestr . strip ( ) return _typestr | Tries to make a nice type string for a value . Can also pass in a Printable parent object |
9,450 | def printableVal ( val , type_bit = True , justlength = False ) : from utool import util_dev import numpy as np if type ( val ) is np . ndarray : info = npArrInfo ( val ) if info . dtypestr . startswith ( 'bool' ) : _valstr = '{ shape:' + info . shapestr + ' bittotal: ' + info . bittotal + '}' elif info . dtypestr . startswith ( 'float' ) : _valstr = util_dev . get_stats_str ( val ) else : _valstr = '{ shape:' + info . shapestr + ' mM:' + info . minmaxstr + ' }' elif isinstance ( val , ( str , unicode ) ) : _valstr = '\'%s\'' % val elif isinstance ( val , list ) : if justlength or len ( val ) > 30 : _valstr = 'len=' + str ( len ( val ) ) else : _valstr = '[ ' + ( ', \n ' . join ( [ str ( v ) for v in val ] ) ) + ' ]' elif hasattr ( val , 'get_printable' ) and type ( val ) != type : _valstr = val . get_printable ( type_bit = type_bit ) elif isinstance ( val , dict ) : _valstr = '{\n' for val_key in val . keys ( ) : val_val = val [ val_key ] _valstr += ' ' + str ( val_key ) + ' : ' + str ( val_val ) + '\n' _valstr += '}' else : _valstr = str ( val ) if _valstr . find ( '\n' ) > 0 : _valstr = _valstr . replace ( '\n' , '\n ' ) _valstr = '\n ' + _valstr _valstr = re . sub ( '\n *$' , '' , _valstr ) return _valstr | Very old way of doing pretty printing . Need to update and refactor . DEPRICATE |
9,451 | def npArrInfo ( arr ) : from utool . DynamicStruct import DynStruct info = DynStruct ( ) info . shapestr = '[' + ' x ' . join ( [ str ( x ) for x in arr . shape ] ) + ']' info . dtypestr = str ( arr . dtype ) if info . dtypestr == 'bool' : info . bittotal = 'T=%d, F=%d' % ( sum ( arr ) , sum ( 1 - arr ) ) elif info . dtypestr == 'object' : info . minmaxstr = 'NA' elif info . dtypestr [ 0 ] == '|' : info . minmaxstr = 'NA' else : if arr . size > 0 : info . minmaxstr = '(%r, %r)' % ( arr . min ( ) , arr . max ( ) ) else : info . minmaxstr = '(None)' return info | OLD update and refactor |
9,452 | def get_isobaric_ratios ( psmfn , psmheader , channels , denom_channels , min_int , targetfn , accessioncol , normalize , normratiofn ) : psm_or_feat_ratios = get_psmratios ( psmfn , psmheader , channels , denom_channels , min_int , accessioncol ) if normalize and normratiofn : normheader = reader . get_tsv_header ( normratiofn ) normratios = get_ratios_from_fn ( normratiofn , normheader , channels ) ch_medians = get_medians ( channels , normratios , report = True ) outratios = calculate_normalized_ratios ( psm_or_feat_ratios , ch_medians , channels ) elif normalize : flatratios = [ [ feat [ ch ] for ch in channels ] for feat in psm_or_feat_ratios ] ch_medians = get_medians ( channels , flatratios , report = True ) outratios = calculate_normalized_ratios ( psm_or_feat_ratios , ch_medians , channels ) else : outratios = psm_or_feat_ratios if accessioncol and targetfn : outratios = { x [ ISOQUANTRATIO_FEAT_ACC ] : x for x in outratios } return output_to_target_accession_table ( targetfn , outratios , channels ) elif not accessioncol and not targetfn : return paste_to_psmtable ( psmfn , psmheader , outratios ) elif accessioncol and not targetfn : return ( { ( k if not k == ISOQUANTRATIO_FEAT_ACC else prottabledata . HEADER_ACCESSION ) : v for k , v in ratio . items ( ) } for ratio in outratios ) | Main function to calculate ratios for PSMs peptides proteins genes . Can do simple ratios median - of - ratios and median - centering normalization . |
9,453 | def sanitize ( value ) : value = unicodedata . normalize ( 'NFKD' , value ) value = value . strip ( ) value = re . sub ( '[^./\w\s-]' , '' , value ) value = re . sub ( '[-\s]+' , '-' , value ) return value | Strips all undesirable characters out of potential file paths . |
9,454 | def remove_on_exception ( dirname , remove = True ) : os . makedirs ( dirname ) try : yield except : if remove : shutil . rmtree ( dirname , ignore_errors = True ) raise | Creates a directory yields to the caller and removes that directory if an exception is thrown . |
9,455 | def add_percolator_to_mzidtsv ( mzidfn , tsvfn , multipsm , oldheader ) : namespace = readers . get_mzid_namespace ( mzidfn ) try : xmlns = '{%s}' % namespace [ 'xmlns' ] except TypeError : xmlns = '' specfnids = readers . get_mzid_specfile_ids ( mzidfn , namespace ) mzidpepmap = { } for peptide in readers . generate_mzid_peptides ( mzidfn , namespace ) : pep_id , seq = readers . get_mzid_peptidedata ( peptide , xmlns ) mzidpepmap [ pep_id ] = seq mzidpercomap = { } for specid_data in readers . generate_mzid_spec_id_items ( mzidfn , namespace , xmlns , specfnids ) : scan , fn , pepid , spec_id = specid_data percodata = readers . get_specidentitem_percolator_data ( spec_id , xmlns ) try : mzidpercomap [ fn ] [ scan ] [ mzidpepmap [ pepid ] ] = percodata except KeyError : try : mzidpercomap [ fn ] [ scan ] = { mzidpepmap [ pepid ] : percodata } except KeyError : mzidpercomap [ fn ] = { scan : { mzidpepmap [ pepid ] : percodata } } for line in tsvreader . generate_tsv_psms ( tsvfn , oldheader ) : outline = { k : v for k , v in line . items ( ) } fn = line [ mzidtsvdata . HEADER_SPECFILE ] scan = line [ mzidtsvdata . HEADER_SCANNR ] seq = line [ mzidtsvdata . HEADER_PEPTIDE ] outline . update ( mzidpercomap [ fn ] [ scan ] [ seq ] ) yield outline | Takes a MSGF + tsv and corresponding mzId adds percolatordata to tsv lines . Generator yields the lines . Multiple PSMs per scan can be delivered in which case rank is also reported . |
9,456 | def parse_rawprofile_blocks ( text ) : delim = 'Total time: ' delim2 = 'Pystone time: ' profile_block_list = ut . regex_split ( '^' + delim , text ) for ix in range ( 1 , len ( profile_block_list ) ) : profile_block_list [ ix ] = delim2 + profile_block_list [ ix ] return profile_block_list | Split the file into blocks along delimters and and put delimeters back in the list |
9,457 | def parse_timemap_from_blocks ( profile_block_list ) : prefix_list = [ ] timemap = ut . ddict ( list ) for ix in range ( len ( profile_block_list ) ) : block = profile_block_list [ ix ] total_time = get_block_totaltime ( block ) if total_time is None : prefix_list . append ( block ) elif total_time != 0 : timemap [ total_time ] . append ( block ) return prefix_list , timemap | Build a map from times to line_profile blocks |
9,458 | def clean_line_profile_text ( text ) : profile_block_list = parse_rawprofile_blocks ( text ) prefix_list , timemap = parse_timemap_from_blocks ( profile_block_list ) sorted_lists = sorted ( six . iteritems ( timemap ) , key = operator . itemgetter ( 0 ) ) newlist = prefix_list [ : ] for key , val in sorted_lists : newlist . extend ( val ) output_text = '\n' . join ( newlist ) summary_text = get_summary ( profile_block_list ) output_text = output_text return output_text , summary_text | Sorts the output from line profile by execution time Removes entries which were not run |
9,459 | def clean_lprof_file ( input_fname , output_fname = None ) : text = ut . read_from ( input_fname ) output_text = clean_line_profile_text ( text ) return output_text | Reads a . lprof file and cleans it |
9,460 | def get_class_weights ( y , smooth_factor = 0 ) : from collections import Counter counter = Counter ( y ) if smooth_factor > 0 : p = max ( counter . values ( ) ) * smooth_factor for k in counter . keys ( ) : counter [ k ] += p majority = max ( counter . values ( ) ) return { cls : float ( majority / count ) for cls , count in counter . items ( ) } | Returns the weights for each class based on the frequencies of the samples . |
9,461 | def plot_loss_history ( history , figsize = ( 15 , 8 ) ) : plt . figure ( figsize = figsize ) plt . plot ( history . history [ "loss" ] ) plt . plot ( history . history [ "val_loss" ] ) plt . xlabel ( "# Epochs" ) plt . ylabel ( "Loss" ) plt . legend ( [ "Training" , "Validation" ] ) plt . title ( "Loss over time" ) plt . show ( ) | Plots the learning history for a Keras model assuming the validation data was provided to the fit function . |
9,462 | def iter_window ( iterable , size = 2 , step = 1 , wrap = False ) : r iter_list = it . tee ( iterable , size ) if wrap : iter_list = [ iter_list [ 0 ] ] + list ( map ( it . cycle , iter_list [ 1 : ] ) ) try : for count , iter_ in enumerate ( iter_list [ 1 : ] , start = 1 ) : for _ in range ( count ) : six . next ( iter_ ) except StopIteration : return iter ( ( ) ) else : _window_iter = zip ( * iter_list ) window_iter = it . islice ( _window_iter , 0 , None , step ) return window_iter | r iterates through iterable with a window size generalizeation of itertwo |
9,463 | def iter_compress ( item_iter , flag_iter ) : true_items = ( item for ( item , flag ) in zip ( item_iter , flag_iter ) if flag ) return true_items | iter_compress - like numpy compress |
9,464 | def ichunks ( iterable , chunksize , bordermode = None ) : r if bordermode is None : return ichunks_noborder ( iterable , chunksize ) elif bordermode == 'cycle' : return ichunks_cycle ( iterable , chunksize ) elif bordermode == 'replicate' : return ichunks_replicate ( iterable , chunksize ) else : raise ValueError ( 'unknown bordermode=%r' % ( bordermode , ) ) | r generates successive n - sized chunks from iterable . |
9,465 | def ichunks_list ( list_ , chunksize ) : return ( list_ [ ix : ix + chunksize ] for ix in range ( 0 , len ( list_ ) , chunksize ) ) | input must be a list . |
9,466 | def interleave ( args ) : r arg_iters = list ( map ( iter , args ) ) cycle_iter = it . cycle ( arg_iters ) for iter_ in cycle_iter : yield six . next ( iter_ ) | r zip followed by flatten |
9,467 | def random_product ( items , num = None , rng = None ) : import utool as ut rng = ut . ensure_rng ( rng , 'python' ) seen = set ( ) items = [ list ( g ) for g in items ] max_num = ut . prod ( map ( len , items ) ) if num is None : num = max_num if num > max_num : raise ValueError ( 'num exceedes maximum number of products' ) if num > max_num // 2 : for prod in ut . shuffle ( list ( it . product ( * items ) ) , rng = rng ) : yield prod else : while len ( seen ) < num : idxs = tuple ( rng . randint ( 0 , len ( g ) - 1 ) for g in items ) if idxs not in seen : seen . add ( idxs ) prod = tuple ( g [ x ] for g , x in zip ( items , idxs ) ) yield prod | Yields num items from the cartesian product of items in a random order . |
9,468 | def random_combinations ( items , size , num = None , rng = None ) : import scipy . misc import numpy as np import utool as ut rng = ut . ensure_rng ( rng , impl = 'python' ) num_ = np . inf if num is None else num n_max = int ( scipy . misc . comb ( len ( items ) , size ) ) num_ = min ( n_max , num_ ) if num is not None and num_ > n_max // 2 : combos = list ( it . combinations ( items , size ) ) rng . shuffle ( combos ) for combo in combos [ : num ] : yield combo else : items = list ( items ) combos = set ( ) while len ( combos ) < num_ : combo = tuple ( sorted ( rng . sample ( items , size ) ) ) if combo not in combos : combos . add ( combo ) yield combo | Yields num combinations of length size from items in random order |
9,469 | def parse_dsn ( dsn_string ) : dsn = urlparse ( dsn_string ) scheme = dsn . scheme . split ( '+' ) [ 0 ] username = password = host = port = None host = dsn . netloc if '@' in host : username , host = host . split ( '@' ) if ':' in username : username , password = username . split ( ':' ) password = unquote ( password ) username = unquote ( username ) if ':' in host : host , port = host . split ( ':' ) port = int ( port ) database = dsn . path . split ( '?' ) [ 0 ] [ 1 : ] query = dsn . path . split ( '?' ) [ 1 ] if '?' in dsn . path else dsn . query kwargs = dict ( parse_qsl ( query , True ) ) if scheme == 'sqlite' : return SQLiteDriver , [ dsn . path ] , { } elif scheme == 'mysql' : kwargs [ 'user' ] = username or 'root' kwargs [ 'db' ] = database if port : kwargs [ 'port' ] = port if host : kwargs [ 'host' ] = host if password : kwargs [ 'passwd' ] = password return MySQLDriver , [ ] , kwargs elif scheme == 'postgresql' : kwargs [ 'user' ] = username or 'postgres' kwargs [ 'database' ] = database if port : kwargs [ 'port' ] = port if 'unix_socket' in kwargs : kwargs [ 'host' ] = kwargs . pop ( 'unix_socket' ) elif host : kwargs [ 'host' ] = host if password : kwargs [ 'password' ] = password return PostgreSQLDriver , [ ] , kwargs else : raise ValueError ( 'Unknown driver %s' % dsn_string ) | Parse a connection string and return the associated driver |
9,470 | def db_for_write ( self , model , ** hints ) : try : if model . sf_access == READ_ONLY : raise WriteNotSupportedError ( "%r is a read-only model." % model ) except AttributeError : pass return None | Prevent write actions on read - only tables . |
9,471 | def run_hook ( self , hook , * args , ** kwargs ) : for plugin in self . raw_plugins : if hasattr ( plugin , hook ) : self . logger . debug ( 'Calling hook {0} in plugin {1}' . format ( hook , plugin . __name__ ) ) getattr ( plugin , hook ) ( * args , ** kwargs ) | Loop over all plugins and invoke function hook with args and kwargs in each of them . If the plugin does not have the function it is skipped . |
9,472 | def write_tsv ( headerfields , features , outfn ) : with open ( outfn , 'w' ) as fp : write_tsv_line_from_list ( headerfields , fp ) for line in features : write_tsv_line_from_list ( [ str ( line [ field ] ) for field in headerfields ] , fp ) | Writes header and generator of lines to tab separated file . |
9,473 | def write_tsv_line_from_list ( linelist , outfp ) : line = '\t' . join ( linelist ) outfp . write ( line ) outfp . write ( '\n' ) | Utility method to convert list to tsv line with carriage return |
9,474 | def replace_between_tags ( text , repl_ , start_tag , end_tag = None ) : r new_lines = [ ] editing = False lines = text . split ( '\n' ) for line in lines : if not editing : new_lines . append ( line ) if line . strip ( ) . startswith ( start_tag ) : new_lines . append ( repl_ ) editing = True if end_tag is not None and line . strip ( ) . startswith ( end_tag ) : editing = False new_lines . append ( line ) new_text = '\n' . join ( new_lines ) return new_text | r Replaces text between sentinal lines in a block of text . |
9,475 | def theta_str ( theta , taustr = TAUSTR , fmtstr = '{coeff:,.1f}{taustr}' ) : r coeff = theta / TAU theta_str = fmtstr . format ( coeff = coeff , taustr = taustr ) return theta_str | r Format theta so it is interpretable in base 10 |
9,476 | def bbox_str ( bbox , pad = 4 , sep = ', ' ) : r if bbox is None : return 'None' fmtstr = sep . join ( [ '%' + six . text_type ( pad ) + 'd' ] * 4 ) return '(' + fmtstr % tuple ( bbox ) + ')' | r makes a string from an integer bounding box |
9,477 | def verts_str ( verts , pad = 1 ) : r if verts is None : return 'None' fmtstr = ', ' . join ( [ '%' + six . text_type ( pad ) + 'd' + ', %' + six . text_type ( pad ) + 'd' ] * 1 ) return ', ' . join ( [ '(' + fmtstr % vert + ')' for vert in verts ] ) | r makes a string from a list of integer verticies |
9,478 | def remove_chars ( str_ , char_list ) : outstr = str_ [ : ] for char in char_list : outstr = outstr . replace ( char , '' ) return outstr | removes all chars in char_list from str_ |
9,479 | def get_minimum_indentation ( text ) : r lines = text . split ( '\n' ) indentations = [ get_indentation ( line_ ) for line_ in lines if len ( line_ . strip ( ) ) > 0 ] if len ( indentations ) == 0 : return 0 return min ( indentations ) | r returns the number of preceding spaces |
9,480 | def indentjoin ( strlist , indent = '\n ' , suffix = '' ) : r indent_ = indent strlist = list ( strlist ) if len ( strlist ) == 0 : return '' return indent_ + indent_ . join ( [ six . text_type ( str_ ) + suffix for str_ in strlist ] ) | r Convineince indentjoin |
9,481 | def truncate_str ( str_ , maxlen = 110 , truncmsg = ' ~~~TRUNCATED~~~ ' ) : if NO_TRUNCATE : return str_ if maxlen is None or maxlen == - 1 or len ( str_ ) < maxlen : return str_ else : maxlen_ = maxlen - len ( truncmsg ) lowerb = int ( maxlen_ * .8 ) upperb = maxlen_ - lowerb tup = ( str_ [ : lowerb ] , truncmsg , str_ [ - upperb : ] ) return '' . join ( tup ) | Removes the middle part of any string over maxlen characters . |
9,482 | def packstr ( instr , textwidth = 160 , breakchars = ' ' , break_words = True , newline_prefix = '' , indentation = '' , nlprefix = None , wordsep = ' ' , remove_newlines = True ) : if not isinstance ( instr , six . string_types ) : instr = repr ( instr ) if nlprefix is not None : newline_prefix = nlprefix str_ = pack_into ( instr , textwidth , breakchars , break_words , newline_prefix , wordsep , remove_newlines ) if indentation != '' : str_ = indent ( str_ , indentation ) return str_ | alias for pack_into . has more up to date kwargs |
9,483 | def byte_str ( nBytes , unit = 'bytes' , precision = 2 ) : if unit . lower ( ) . startswith ( 'b' ) : nUnit = nBytes elif unit . lower ( ) . startswith ( 'k' ) : nUnit = nBytes / ( 2.0 ** 10 ) elif unit . lower ( ) . startswith ( 'm' ) : nUnit = nBytes / ( 2.0 ** 20 ) elif unit . lower ( ) . startswith ( 'g' ) : nUnit = nBytes / ( 2.0 ** 30 ) elif unit . lower ( ) . startswith ( 't' ) : nUnit = nBytes / ( 2.0 ** 40 ) else : raise NotImplementedError ( 'unknown nBytes=%r unit=%r' % ( nBytes , unit ) ) return repr2 ( nUnit , precision = precision ) + ' ' + unit | representing the number of bytes with the chosen unit |
9,484 | def func_str ( func , args = [ ] , kwargs = { } , type_aliases = [ ] , packed = False , packkw = None , truncate = False ) : import utool as ut truncatekw = { } argrepr_list = ( [ ] if args is None else ut . get_itemstr_list ( args , nl = False , truncate = truncate , truncatekw = truncatekw ) ) kwrepr_list = ( [ ] if kwargs is None else ut . dict_itemstr_list ( kwargs , explicit = True , nl = False , truncate = truncate , truncatekw = truncatekw ) ) repr_list = argrepr_list + kwrepr_list argskwargs_str = ', ' . join ( repr_list ) _str = '%s(%s)' % ( meta_util_six . get_funcname ( func ) , argskwargs_str ) if packed : packkw_ = dict ( textwidth = 80 , nlprefix = ' ' , break_words = False ) if packkw is not None : packkw_ . update ( packkw_ ) _str = packstr ( _str , ** packkw_ ) return _str | string representation of function definition |
9,485 | def func_defsig ( func , with_name = True ) : import inspect argspec = inspect . getargspec ( func ) ( args , varargs , varkw , defaults ) = argspec defsig = inspect . formatargspec ( * argspec ) if with_name : defsig = get_callable_name ( func ) + defsig return defsig | String of function definition signature |
9,486 | def func_callsig ( func , with_name = True ) : import inspect argspec = inspect . getargspec ( func ) ( args , varargs , varkw , defaults ) = argspec callsig = inspect . formatargspec ( * argspec [ 0 : 3 ] ) if with_name : callsig = get_callable_name ( func ) + callsig return callsig | String of function call signature |
9,487 | def numpy_str ( arr , strvals = False , precision = None , pr = None , force_dtype = False , with_dtype = None , suppress_small = None , max_line_width = None , threshold = None , ** kwargs ) : itemsep = kwargs . get ( 'itemsep' , ' ' ) newlines = kwargs . pop ( 'nl' , kwargs . pop ( 'newlines' , 1 ) ) data = arr separator = ',' + itemsep if strvals : prefix = '' suffix = '' else : modname = type ( data ) . __module__ np_nice = 'np' modname = re . sub ( '\\bnumpy\\b' , np_nice , modname ) modname = re . sub ( '\\bma.core\\b' , 'ma' , modname ) class_name = type ( data ) . __name__ if class_name == 'ndarray' : class_name = 'array' prefix = modname + '.' + class_name + '(' if with_dtype : dtype_repr = data . dtype . name suffix = ',{}dtype={}.{})' . format ( itemsep , np_nice , dtype_repr ) else : suffix = ')' if not strvals and data . size == 0 and data . shape != ( 0 , ) : prefix = modname + '.empty(' body = repr ( tuple ( map ( int , data . shape ) ) ) else : body = np . array2string ( data , precision = precision , separator = separator , suppress_small = suppress_small , prefix = prefix , max_line_width = max_line_width ) if not newlines : body = re . sub ( '\n *' , '' , body ) formatted = prefix + body + suffix return formatted | suppress_small = False turns off scientific representation |
9,488 | def list_str_summarized ( list_ , list_name , maxlen = 5 ) : if len ( list_ ) > maxlen : return 'len(%s)=%d' % ( list_name , len ( list_ ) ) else : return '%s=%r' % ( list_name , list_ ) | prints the list members when the list is small and the length when it is large |
9,489 | def _rectify_countdown_or_bool ( count_or_bool ) : if count_or_bool is True or count_or_bool is False : count_or_bool_ = count_or_bool elif isinstance ( count_or_bool , int ) : if count_or_bool == 0 : return 0 sign_ = math . copysign ( 1 , count_or_bool ) count_or_bool_ = int ( count_or_bool - sign_ ) else : count_or_bool_ = False return count_or_bool_ | used by recrusive functions to specify which level to turn a bool on in counting down yeilds True True ... False conting up yeilds False False False ... True |
9,490 | def repr2 ( obj_ , ** kwargs ) : kwargs [ 'nl' ] = kwargs . pop ( 'nl' , kwargs . pop ( 'newlines' , False ) ) val_str = _make_valstr ( ** kwargs ) return val_str ( obj_ ) | Attempt to replace repr more configurable pretty version that works the same in both 2 and 3 |
9,491 | def repr2_json ( obj_ , ** kwargs ) : import utool as ut kwargs [ 'trailing_sep' ] = False json_str = ut . repr2 ( obj_ , ** kwargs ) json_str = str ( json_str . replace ( '\'' , '"' ) ) json_str = json_str . replace ( '(' , '[' ) json_str = json_str . replace ( ')' , ']' ) json_str = json_str . replace ( 'None' , 'null' ) return json_str | hack for json reprs |
9,492 | def list_str ( list_ , ** listkw ) : r import utool as ut newlines = listkw . pop ( 'nl' , listkw . pop ( 'newlines' , 1 ) ) packed = listkw . pop ( 'packed' , False ) truncate = listkw . pop ( 'truncate' , False ) listkw [ 'nl' ] = _rectify_countdown_or_bool ( newlines ) listkw [ 'truncate' ] = _rectify_countdown_or_bool ( truncate ) listkw [ 'packed' ] = _rectify_countdown_or_bool ( packed ) nobraces = listkw . pop ( 'nobr' , listkw . pop ( 'nobraces' , False ) ) itemsep = listkw . get ( 'itemsep' , ' ' ) trailing_sep = listkw . get ( 'trailing_sep' , True ) with_comma = True itemstr_list = get_itemstr_list ( list_ , ** listkw ) is_tuple = isinstance ( list_ , tuple ) is_set = isinstance ( list_ , ( set , frozenset , ut . oset ) ) is_onetup = isinstance ( list_ , ( tuple ) ) and len ( list_ ) <= 1 if nobraces : lbr , rbr = '' , '' elif is_tuple : lbr , rbr = '(' , ')' elif is_set : lbr , rbr = '{' , '}' else : lbr , rbr = '[' , ']' if len ( itemstr_list ) == 0 : newlines = False if newlines is not False and ( newlines is True or newlines > 0 ) : sep = ',\n' if with_comma else '\n' if nobraces : body_str = sep . join ( itemstr_list ) if trailing_sep : body_str += ',' retstr = body_str else : if packed : joinstr = sep + itemsep * len ( lbr ) body_str = joinstr . join ( [ itemstr for itemstr in itemstr_list ] ) if trailing_sep : body_str += ',' braced_body_str = ( lbr + '' + body_str + '' + rbr ) else : body_str = sep . join ( [ ut . indent ( itemstr ) for itemstr in itemstr_list ] ) if trailing_sep : body_str += ',' braced_body_str = ( lbr + '\n' + body_str + '\n' + rbr ) retstr = braced_body_str else : sep = ',' + itemsep if with_comma else itemsep body_str = sep . join ( itemstr_list ) if is_onetup : body_str += ',' retstr = ( lbr + body_str + rbr ) do_truncate = truncate is not False and ( truncate is True or truncate == 0 ) if do_truncate : truncatekw = listkw . get ( 'truncatekw' , { } ) retstr = truncate_str ( retstr , ** truncatekw ) return retstr | r Makes a pretty list string |
9,493 | def horiz_string ( * args , ** kwargs ) : import unicodedata precision = kwargs . get ( 'precision' , None ) sep = kwargs . get ( 'sep' , '' ) if len ( args ) == 1 and not isinstance ( args [ 0 ] , six . string_types ) : val_list = args [ 0 ] else : val_list = args val_list = [ unicodedata . normalize ( 'NFC' , ensure_unicode ( val ) ) for val in val_list ] all_lines = [ ] hpos = 0 for sx in range ( len ( val_list ) ) : val = val_list [ sx ] str_ = None if precision is not None : if util_type . HAVE_NUMPY : try : if isinstance ( val , np . ndarray ) : str_ = np . array_str ( val , precision = precision , suppress_small = True ) except ImportError : pass if str_ is None : str_ = six . text_type ( val_list [ sx ] ) lines = str_ . split ( '\n' ) line_diff = len ( lines ) - len ( all_lines ) if line_diff > 0 : all_lines += [ ' ' * hpos ] * line_diff for lx , line in enumerate ( lines ) : all_lines [ lx ] += line hpos = max ( hpos , len ( all_lines [ lx ] ) ) for lx in range ( len ( all_lines ) ) : hpos_diff = hpos - len ( all_lines [ lx ] ) all_lines [ lx ] += ' ' * hpos_diff + sep all_lines = [ line . rstrip ( ' ' ) for line in all_lines ] ret = '\n' . join ( all_lines ) return ret | Horizontally concatenates strings reprs preserving indentation |
9,494 | def str_between ( str_ , startstr , endstr ) : r if startstr is None : startpos = 0 else : startpos = str_ . find ( startstr ) + len ( startstr ) if endstr is None : endpos = None else : endpos = str_ . find ( endstr ) if endpos == - 1 : endpos = None newstr = str_ [ startpos : endpos ] return newstr | r gets substring between two sentianl strings |
9,495 | def get_callable_name ( func ) : try : return meta_util_six . get_funcname ( func ) except AttributeError : if isinstance ( func , type ) : return repr ( func ) . replace ( '<type \'' , '' ) . replace ( '\'>' , '' ) elif hasattr ( func , '__name__' ) : return func . __name__ else : raise NotImplementedError ( ( 'cannot get func_name of func=%r' 'type(func)=%r' ) % ( func , type ( func ) ) ) | Works on must functionlike objects including str which has no func_name |
9,496 | def multi_replace ( str_ , search_list , repl_list ) : r if isinstance ( repl_list , six . string_types ) : repl_list_ = [ repl_list ] * len ( search_list ) else : repl_list_ = repl_list newstr = str_ assert len ( search_list ) == len ( repl_list_ ) , 'bad lens' for search , repl in zip ( search_list , repl_list_ ) : newstr = newstr . replace ( search , repl ) return newstr | r Performs multiple replace functions foreach item in search_list and repl_list . |
9,497 | def pluralize ( wordtext , num = 2 , plural_suffix = 's' ) : r if num == 1 : return wordtext else : if wordtext . endswith ( '\'s' ) : return wordtext [ : - 2 ] + 's\'' else : return wordtext + plural_suffix return ( wordtext + plural_suffix ) if num != 1 else wordtext | r Heuristically changes a word to its plural form if num is not 1 |
9,498 | def quantstr ( typestr , num , plural_suffix = 's' ) : r return six . text_type ( num ) + ' ' + pluralize ( typestr , num , plural_suffix ) | r Heuristically generates an english phrase relating to the quantity of something . This is useful for writing user messages . |
9,499 | def msgblock ( key , text , side = '|' ) : blocked_text = '' . join ( [ ' + --- ' , key , ' ---\n' ] + [ ' ' + side + ' ' + line + '\n' for line in text . split ( '\n' ) ] + [ ' L ' , key , ' \n' ] ) return blocked_text | puts text inside a visual ascii block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.