idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
9,300
def length_hint(obj, default=0):
    """Return an estimate of the number of items in *obj*.

    Uses len() when available, falling back to the __length_hint__
    protocol, and finally to *default*.

    Raises TypeError if the hint is not an int, ValueError if negative.
    """
    try:
        return len(obj)
    except TypeError:
        try:
            get_hint = type(obj).__length_hint__
        except AttributeError:
            return default
        try:
            hint = get_hint(obj)
        except TypeError:
            return default
        if hint is NotImplemented:
            return default
        if not isinstance(hint, int):
            raise TypeError("Length hint must be an integer, not %r" %
                            type(hint))
        if hint < 0:
            raise ValueError("__length_hint__() should return >= 0")
        return hint
Return an estimate of the number of items in obj .
9,301
def add_parser_arguments(parser, args, group=None, prefix=DATA_PREFIX):
    """Populate *parser* with the options described in *args*.

    Each entry maps an argument name to its add_argument kwargs; the
    values can later be retrieved with extract_arguments().
    """
    if group:
        parser = parser.add_argument_group(group)
    for arg, kwargs in iteritems(args):
        option = kwargs.pop('arg', arg.replace('_', '-'))
        kwargs.setdefault('metavar', arg.upper())
        # the dest is namespaced with the prefix for later extraction
        if 'dest' in kwargs:
            kwargs['dest'] = prefix + kwargs['dest']
        else:
            kwargs['dest'] = prefix + arg
        parser.add_argument('--' + option, **kwargs)
Helper method that populates parser arguments . The argument values can be later retrieved with extract_arguments method .
9,302
def add_mutually_exclusive_args(parser, args, required=False,
                                prefix=DATA_PREFIX):
    """Populate *parser* with a mutually exclusive argument group.

    The values can later be retrieved with extract_arguments().
    """
    parser = parser.add_mutually_exclusive_group(required=required)
    for arg, kwargs in iteritems(args):
        option = kwargs.pop('arg', arg.replace('_', '-'))
        kwargs.setdefault('metavar', arg.upper())
        parser.add_argument('--' + option, dest=prefix + arg, **kwargs)
Helper method that populates mutually exclusive arguments . The argument values can be later retrieved with extract_arguments method .
9,303
def add_create_update_args(parser, required_args, optional_args,
                           create=False):
    """Wrapper around add_parser_arguments for create/update commands.

    On create, required arguments are flagged required and grouped;
    otherwise everything is merged and added as optional.
    """
    if not create:
        optional_args.update(required_args)
        add_parser_arguments(parser, optional_args)
        return
    for key in required_args:
        required_args[key]['required'] = True
    add_parser_arguments(parser, required_args, group='required arguments')
Wrapper around add_parser_arguments .
9,304
def extract_arguments(args, prefix=DATA_PREFIX):
    """Return a nested dict of values collected by add_parser_arguments.

    Keys containing '__' become nested dictionaries; empty-string values
    are normalized to None; unset (None) values are skipped.
    """
    data = {}
    for key, value in iteritems(args.__dict__):
        if not key.startswith(prefix) or value is None:
            continue
        parts = key[len(prefix):].split('__')
        node = data
        for part in parts[:-1]:
            assert part not in node or isinstance(node[part], dict)
            node = node.setdefault(part, {})
        node[parts[-1]] = value if value != '' else None
    return data
Return a dict of arguments created by add_parser_arguments .
9,305
def create_searchspace(lookup, fastafn, proline_cut=False, reverse_seqs=True,
                       do_trypsinize=True):
    """Digest a FASTA database and store the resulting peptides in *lookup*.

    Peptide sequences are L->I normalized and flushed in batches of one
    million to bound memory use; finally the peptide table is indexed.
    """
    batch = []
    for record in SeqIO.parse(fastafn, 'fasta'):
        if do_trypsinize:
            seqs = trypsinize(record.seq, proline_cut)
        else:
            seqs = [record.seq]
        batch.extend((str(seq).replace('L', 'I'),) for seq in seqs)
        if len(batch) > 1000000:
            lookup.write_peps(batch, reverse_seqs)
            batch = []
    lookup.write_peps(batch, reverse_seqs)
    lookup.index_peps(reverse_seqs)
    lookup.close_connection()
Given a FASTA database, proteins are trypsinized and the resulting peptides are stored in a database or dict for lookups
9,306
def hashid_arr(arr, label='arr', hashlen=16):
    """Build a short identifier '<label>-<shape>-<hash>' for an array.

    Lists/tuples use their length as the shape part; anything else is
    assumed to expose a numpy-style .shape.
    """
    digest = hash_data(arr)[:hashlen]
    if isinstance(arr, (list, tuple)):
        shape = len(arr)
    else:
        shape = ','.join(map(str, arr.shape))
    return '{}-{}-{}'.format(label, shape, digest)
newer version of hashstr_arr2
9,307
def _update_hasher(hasher, data):
    """Feed *data* into *hasher*, recursing into iterable containers.

    Tuples, lists, zips, and object-dtype numpy arrays are walked item by
    item with separator bytes; anything else is converted to bytes via
    _covert_to_hashable. Used by hash_data.
    """
    if isinstance(data, (tuple, list, zip)):
        needs_iteration = True
    elif (util_type.HAVE_NUMPY and isinstance(data, np.ndarray) and
          data.dtype.kind == 'O'):
        # object arrays cannot be hashed directly; iterate their items
        needs_iteration = True
    else:
        needs_iteration = False
    if needs_iteration:
        SEP = b'SEP'
        iter_ = iter(data)
        hasher.update(b'ITER')
        try:
            for item in iter_:
                # BUG FIX: the original converted `data` (the whole
                # container) on every iteration instead of `item`.
                prefix, hashable = _covert_to_hashable(item)
                hasher.update(SEP + prefix + hashable)
        except TypeError:
            # `item` was not directly convertible: recurse into it, then
            # continue consuming the remaining items of the iterator.
            _update_hasher(hasher, item)
            for item in iter_:
                hasher.update(SEP)
                _update_hasher(hasher, item)
    else:
        prefix, hashable = _covert_to_hashable(data)
        hasher.update(prefix + hashable)
This is the clear winner over the generate version . Used by hash_data
9,308
def combine_hashes(bytes_list, hasher=None):
    """Digest a sequence of byte strings into a single hash digest.

    Only works on bytes; each chunk is followed by SEP_BYTE so that
    different partitions of the same bytes hash differently.
    """
    if hasher is None:
        hasher = hashlib.sha256()
    for chunk in bytes_list:
        hasher.update(chunk)
        hasher.update(SEP_BYTE)
    return hasher.digest()
Only works on bytes
9,309
def hash_data(data, hashlen=None, alphabet=None):
    r"""Get a unique hash depending on the state of *data*.

    Empty strings hash to a constant run of the alphabet's zero symbol;
    everything else is sha512-hashed and re-encoded into the alphabet.
    """
    if alphabet is None:
        alphabet = ALPHABET_27
    if hashlen is None:
        hashlen = HASH_LEN2
    if isinstance(data, stringlike) and len(data) == 0:
        text = alphabet[0] * hashlen
    else:
        hasher = hashlib.sha512()
        _update_hasher(hasher, data)
        hexstr = hasher.hexdigest()
        text = convert_hexstr_to_bigbase(
            hexstr, alphabet, bigbase=len(alphabet))[:hashlen]
    return text
Get a unique hash depending on the state of the data.
9,310
def convert_hexstr_to_bigbase(hexstr, alphabet=ALPHABET, bigbase=BIGBASE):
    r"""Pack a long hex string into a shorter string over a larger base.

    Returns the literal '0' for zero; a leading '-' marks negatives.
    """
    x = int(hexstr, 16)
    if x == 0:
        return '0'
    sign = 1 if x > 0 else -1
    x *= sign
    digits = []
    while x:
        x, rem = divmod(x, bigbase)
        digits.append(alphabet[rem])
    if sign < 0:
        digits.append('-')
    return ''.join(reversed(digits))
r Packs a long hexstr into a shorter length string with a larger base
9,311
def get_file_hash(fpath, blocksize=65536, hasher=None, stride=1,
                  hexdigest=False):
    r"""Hash the contents of the file at *fpath*.

    Reads in *blocksize* chunks; when stride > 1, skips (stride - 1)
    blocks between reads (a cheap sampling hash). For better hashes use
    hasher=hashlib.sha256 and keep stride=1.
    """
    if hasher is None:
        hasher = hashlib.sha1()
    with open(fpath, 'rb') as infile:
        chunk = infile.read(blocksize)
        while chunk:
            hasher.update(chunk)
            if stride > 1:
                # seek relative to the current position past skipped blocks
                infile.seek(blocksize * (stride - 1), 1)
            chunk = infile.read(blocksize)
    return hasher.hexdigest() if hexdigest else hasher.digest()
r For better hashes use hasher = hashlib . sha256 and keep stride = 1
9,312
def get_file_uuid(fpath, hasher=None, stride=1):
    """Create a deterministic UUID from the hash of a file."""
    if hasher is None:
        hasher = hashlib.sha1()
    digest = get_file_hash(fpath, hasher=hasher, stride=stride)
    # a UUID needs exactly 16 bytes; sha1 digests are 20, so truncate
    return uuid.UUID(bytes=digest[0:16])
Creates a uuid from the hash of a file
9,313
def combine_uuids(uuids, ordered=True, salt=''):
    """Create one uuid identifying a group of UUIDs.

    When ordered is False the group is order-insensitive (sorted first).
    The salt and group size are mixed in so different groupings of the
    same members do not collide.
    """
    if len(uuids) == 0:
        return get_zero_uuid()
    if len(uuids) == 1:
        return uuids[0]
    if not ordered:
        uuids = sorted(uuids)
    sep = six.binary_type(six.b('-'))
    prefix = six.binary_type(six.b('{}{}{}'.format(salt, '-', len(uuids))))
    combined = prefix + sep.join(u.bytes for u in uuids)
    return hashable_to_uuid(combined)
Creates a uuid that specifies a group of UUIDS
9,314
def __start_connection(self, context, node, ccallbacks=None):
    """Start one server connection and manage it from a new greenlet.

    Raises EnvironmentError (after killing the greenlet) when the
    negotiation does not finish within the configured timeout.
    """
    _logger.debug("Creating connection object: CONTEXT=[%s] NODE=[%s]",
                  context, node)
    conn = nsq.connection.Connection(
        context, node, self.__identify, self.__message_handler,
        self.__quit_ev, ccallbacks,
        ignore_quit=self.__connection_ignore_quit)
    greenlet = gevent.spawn(conn.run)
    timeout_s = nsq.config.client.NEW_CONNECTION_NEGOTIATE_TIMEOUT_S
    if conn.connected_ev.wait(timeout_s) is False:
        _logger.error("New connection to server [%s] timed-out. Cleaning-"
                      "up thread.", node)
        greenlet.kill()
        greenlet.join()
        raise EnvironmentError("Connection to server [%s] failed." % (node,))
    self.__connections.append((node, conn, greenlet))
Start a new connection and manage it from a new greenlet .
9,315
def __audit_connections(self, ccallbacks):
    """Monitor the state of all connections and the utility of all servers.

    Loops until the quit event is set: prunes finished connection
    greenlets, warns about connections whose server is no longer
    advertised, reconnects advertised-but-unconnected servers, and stops
    the client when no servers remain.
    """
    while self.__quit_ev.is_set() is False:
        # COMPAT FIX: the original used `filter(lambda (n, c, g): ...)`,
        # Python-2-only tuple-parameter syntax (a SyntaxError on 3).
        # This comprehension is equivalent on 2 and valid on 3.
        self.__connections = [
            (n, c, g)
            for (n, c, g) in self.__connections
            if not g.ready()]
        connected_node_couplets_s = set([
            (c.managed_connection.context, node)
            for (node, c, g) in self.__connections])
        lingering_nodes_s = (connected_node_couplets_s -
                             self.__node_couplets_s)
        if lingering_nodes_s:
            _logger.warning("Server(s) are connected but no longer "
                            "advertised: %s", lingering_nodes_s)
        unused_nodes_s = (self.__node_couplets_s -
                          connected_node_couplets_s)
        for (context, node) in unused_nodes_s:
            _logger.info("Trying to connect unconnected server: "
                         "CONTEXT=[%s] NODE=[%s]", context, node)
            self.__start_connection(context, node, ccallbacks)
        else:
            # for-else (no break above, so this always runs): if nothing
            # at all is connected, stop the client.
            if not connected_node_couplets_s:
                _logger.error("All servers have gone away. Stopping "
                              "client.")
                try:
                    self.set_servers([])
                except EnvironmentError:
                    pass
                self.__quit_ev.set()
                return
        interval_s = \
            nsq.config.client.GRANULAR_CONNECTION_AUDIT_SLEEP_STEP_TIME_S
        audit_wait_s = float(nsq.config.client.CONNECTION_AUDIT_WAIT_S)
        # sleep in small steps so a quit event is noticed promptly
        while audit_wait_s > 0 and self.__quit_ev.is_set() is False:
            gevent.sleep(interval_s)
            audit_wait_s -= interval_s
Monitor state of all connections and utility of all servers .
9,316
def __join_connections(self):
    """Wait (bounded) for all connections to close after a quit.

    No side effects; purely a graceful-shutdown wait. Logs an error when
    some connections are still up at timeout.
    """
    interval_s = nsq.config.client.CONNECTION_CLOSE_AUDIT_WAIT_S
    graceful_wait_s = nsq.config.client.CONNECTION_QUIT_CLOSE_TIMEOUT_S
    graceful = False
    while graceful_wait_s > 0:
        if not self.__connections:
            break
        flags = [c.is_connected for (n, c, g) in self.__connections]
        if any(flags) is False:
            graceful = True
            break
        gevent.sleep(interval_s)
        graceful_wait_s -= interval_s
    if graceful is False:
        still_open = [c for (n, c, g) in self.__connections
                      if c.is_connected]
        _logger.error("We were told to terminate, but not all "
                      "connections were stopped: [%s]", still_open)
Wait for all connections to close. There are no side effects here; we just try to leave after everything has closed, generally speaking.
9,317
def __manage_connections(self, ccallbacks=None):
    """Main connection-management greenlet body.

    Instantiates the message handler, starts one connection per known
    server, waits for the first to come up, then audits until shutdown.
    """
    _logger.info("Running client.")
    if self.__message_handler_cls is not None:
        self.__message_handler = self.__message_handler_cls(
            self.__election, ccallbacks)
    for (context, node) in self.__node_couplets_s:
        self.__start_connection(context, node, ccallbacks)
    self.__wait_for_one_server_connection()
    self.__is_alive = True
    # signal start() that the client is ready for use
    self.__ready_ev.set()
    self.__audit_connections(ccallbacks)
    self.__join_connections()
    _logger.info("Connection management has stopped.")
    self.__is_alive = False
This runs as the main connection management greenlet .
9,318
def set_servers(self, node_couplets):
    """Replace the current server collection.

    Entries are 2-tuples of (context, node). Raises EnvironmentError
    when the new collection is empty.
    """
    incoming = set(node_couplets)
    if incoming != self.__node_couplets_s:
        _logger.info("Servers have changed. NEW: %s REMOVED: %s",
                     incoming - self.__node_couplets_s,
                     self.__node_couplets_s - incoming)
    if not incoming:
        raise EnvironmentError("No servers available.")
    self.__node_couplets_s = incoming
Set the current collection of servers . The entries are 2 - tuples of contexts and nodes .
9,319
def start(self, ccallbacks=None):
    """Establish and maintain connections; block until the client is ready."""
    self.__manage_g = gevent.spawn(self.__manage_connections, ccallbacks)
    self.__ready_ev.wait()
Establish and maintain connections .
9,320
def stop(self):
    """Signal all connections to quit, then wait for the manager to end."""
    _logger.debug("Emitting quit signal for connections.")
    self.__quit_ev.set()
    _logger.info("Waiting for connection manager to stop.")
    self.__manage_g.join()
Stop all of the connections .
9,321
def run(file_path, include_dirs=[], dlems=False, nogui=False):
    """Entry point for running from a script or shell.

    NOTE(review): the mutable default `include_dirs=[]` is shared across
    calls; it is only read here, so behavior is unaffected.
    """
    import argparse
    args = argparse.Namespace(lems_file=file_path, I=include_dirs,
                              dlems=dlems, nogui=nogui)
    main(args=args)
Function for running from a script or shell .
9,322
def connect(self, nice_quit_ev):
    """Connect to a server discovered via a lookup node, with backoff.

    Retries (with exponentially growing waits) until the attempt period
    expires or the quit event fires, then raises NsqConnectGiveUpError.
    """
    _logger.debug("Connecting to discovered node: [%s]", self.server_host)
    stop_epoch = (time.time() +
                  nsq.config.client.MAXIMUM_CONNECT_ATTEMPT_PERIOD_S)
    timeout_s = nsq.config.client.INITIAL_CONNECT_FAIL_WAIT_S
    backoff_rate = nsq.config.client.CONNECT_FAIL_WAIT_BACKOFF_RATE
    while stop_epoch >= time.time() and nice_quit_ev.is_set() is False:
        try:
            conn = self.primitive_connect()
        except gevent.socket.error:
            _logger.exception("Could not connect to discovered server: "
                              "[%s]", self.server_host)
        else:
            _logger.info("Discovered server-node connected: [%s]",
                         self.server_host)
            return conn
        # failed attempt: back off before trying again (capped)
        timeout_s = min(timeout_s * backoff_rate,
                        nsq.config.client.MAXIMUM_CONNECT_FAIL_WAIT_S)
        _logger.info("Waiting for (%d) seconds before reconnecting.",
                     timeout_s)
        gevent.sleep(timeout_s)
    raise nsq.exceptions.NsqConnectGiveUpError(
        "Could not connect to the nsqlookupd server: [%s]" %
        (self.server_host,))
Connect the server . We expect this to implement backoff and all connection logistics for servers that were discovered via a lookup node .
9,323
def connect(self, nice_quit_ev):
    """Connect to an explicitly prescribed server node (single attempt).

    Raises NsqConnectGiveUpError immediately on a socket failure.
    """
    _logger.debug("Connecting to explicit server node: [%s]",
                  self.server_host)
    try:
        conn = self.primitive_connect()
    except gevent.socket.error:
        _logger.exception("Could not connect to explicit server: [%s]",
                          self.server_host)
        raise nsq.exceptions.NsqConnectGiveUpError(
            "Could not connect to the nsqd server: [%s]" %
            (self.server_host,))
    _logger.info("Explicit server-node connected: [%s]", self.server_host)
    return conn
Connect the server . We expect this to implement connection logistics for servers that were explicitly prescribed to us .
9,324
def prepare(self):
    """Read TSV headers for target and decoy files.

    No percolator XML is involved for protein tables.
    """
    self.target = self.fn
    self.targetheader = reader.get_tsv_header(self.target)
    self.decoyheader = reader.get_tsv_header(self.decoyfn)
No percolator XML for protein tables
9,325
def obtain_token(self):
    """Try every historical token end-point until one yields a token.

    A 404 falls through to the next (older) URL; any other HTTP error is
    re-raised immediately.
    """
    end_points = ('token/obtain', 'obtain-token', 'obtain_token')
    for end_point in end_points:
        try:
            return self.auth[end_point]._(page_size=None)['token']
        except BeanBagException as exc:
            if exc.response.status_code != 404:
                raise
    raise Exception('Could not obtain token from any known URL.')
Try to obtain token from all end - points that were ever used to serve the token . If the request returns 404 NOT FOUND retry with older version of the URL .
9,326
def results(self, *args, **kwargs):
    """Return an iterator over all pages of data, flattened.

    Raises NoResultsError with the response when unexpected data comes
    back (neither a list nor a paginated payload).
    """
    def pages():
        kwargs['page'] = 1
        while True:
            response = self.client(*args, **kwargs)
            if isinstance(response, list):
                # non-paginated result: a single batch
                yield response
                return
            if _is_page(response):
                yield response['results']
                if not response['next']:
                    return
                kwargs['page'] += 1
            else:
                raise NoResultsError(response)
    return itertools.chain.from_iterable(pages())
Return an iterator with all pages of data . Return NoResultsError with response if there is unexpected data .
9,327
def get_isoquant_fields(pqdb=False, poolnames=False):
    """Return a headerfield dict for isobaric quant channels.

    Channels are taken from the DB; there isn't a pool-independent
    version of this yet. Returns an empty dict when no DB is given or
    the DB has no isobaric channel table.
    """
    # BUG FIX: the default is False but the original guard only tested
    # `pqdb is None`, so calling with the default crashed with
    # AttributeError. Any falsy pqdb now means "no database".
    if not pqdb:
        return {}
    try:
        channels_psms = pqdb.get_isoquant_amountpsms_channels()
    except OperationalError:
        return {}
    quantheader, psmsheader = OrderedDict(), OrderedDict()
    for chan_name, amnt_psms_name in channels_psms:
        quantheader[chan_name] = poolnames
        if amnt_psms_name:
            psmsheader[amnt_psms_name] = poolnames
    # amount-psms columns go after all channel columns
    quantheader.update(psmsheader)
    return quantheader
Returns a headerfield dict for isobaric quant channels. Channels are taken from the DB, and there isn't a pool-independent version of this yet.
9,328
def watch_for_events():
    """Wait for inotify close-write events on /tmp and print them forever.

    The inotify fd is always closed on the way out.
    """
    fd = inotify.init()
    try:
        inotify.add_watch(fd, '/tmp', inotify.IN_CLOSE_WRITE)
        while True:
            for event in inotify.get_events(fd):
                print("event:", event.name, event.get_mask_description())
    finally:
        os.close(fd)
Wait for events and print them to stdout .
9,329
def format_body(self, description, sys_info=None, traceback=None):
    """Format a report body using markdown.

    The traceback is truncated to the last NB_LINES_MAX lines and code-
    fenced; system information is rendered as a bullet list.
    """
    body = BODY_ITEM_TEMPLATE % {'name': 'Description',
                                 'value': description}
    if traceback:
        tail = '\n'.join(traceback.splitlines()[-NB_LINES_MAX:])
        body += BODY_ITEM_TEMPLATE % {'name': 'Traceback',
                                      'value': '```\n%s\n```' % tail}
    if sys_info:
        bullets = '- %s' % '\n- '.join(sys_info.splitlines())
        body += BODY_ITEM_TEMPLATE % {'name': 'System information',
                                      'value': bullets}
    return body
Formats the body using markdown .
9,330
def list():
    "List EC2 name and public and private ip address"
    # COMPAT FIX: the original used the Python-2-only print statement;
    # printing one parenthesized argument behaves identically on 2 and 3.
    # NOTE(review): this task name shadows the builtin `list`, but it is
    # the public (fabric-style) task name, so it is kept.
    for node in env.nodes:
        print("%s (%s, %s)" % (node.tags["Name"], node.ip_address,
                               node.private_ip_address))
List EC2 name and public and private ip address
9,331
def quick_search(self, name, platform=None, sort_by=None, desc=True):
    """Search for a game by title, optionally restricted to a platform.

    sort_by is validated before use; desc selects descending order.
    """
    if platform is None:
        query_filter = "name:{0}".format(name)
    else:
        query_filter = "name:{0},platforms:{1}".format(name, platform)
    search_params = {"filter": query_filter}
    if sort_by is not None:
        self._validate_sort_field(sort_by)
        direction = (self.SORT_ORDER_DESCENDING if desc
                     else self.SORT_ORDER_ASCENDING)
        search_params["sort"] = "{0}:{1}".format(sort_by, direction)
    return self._query(search_params)
Quick search method that allows you to search for a game using only the title and the platform
9,332
def send_ping(self, payload=None):
    """Send a ping after the configured interval, then restart the timer.

    Generator-based coroutine (pre-async/await style).
    """
    yield from asyncio.sleep(self._interval)
    self._handler.send_ping(payload=payload)
    self._start_timer(payload=payload)
Sends the ping after the interval specified when initializing
9,333
def pong_received(self, payload=None):
    """Handle a received pong: cancel the timeout timer and re-ping.

    Resets the failure counter and schedules the next ping cycle.
    """
    if self._timer is not None:
        self._timer.cancel()
    self._failures = 0
    # COMPAT FIX: `asyncio.async()` was removed when `async` became a
    # keyword (SyntaxError on Python 3.7+); ensure_future() is the
    # drop-in replacement available since Python 3.4.4.
    asyncio.ensure_future(self.send_ping(payload=payload))
Called when a pong is received . So the timer is cancelled
9,334
def is_comparable_type(var, type_):
    """Check whether *var* is an instance of a type known to be
    compatible with *type_* (falling back to type_ itself)."""
    return isinstance(var, COMPARABLE_TYPES.get(type_, type_))
Check to see if var is an instance of known compatible types for type_
9,335
def smart_cast(var, type_):
    """Cast *var* to *type_*, being clever when *var* is a string.

    Handles bool strings, slice strings ('a:b'), comma lists, and the
    special string types 'fuzzy_subset' and 'eval'; otherwise calls
    type_(var) directly.
    """
    if type_ is None or var is None:
        return var
    try:
        if issubclass(type_, type(None)):
            return var
    except TypeError:
        # type_ was not a class; fall through to the string handling
        pass
    if is_str(var):
        if type_ in VALID_BOOL_TYPES:
            return bool_from_str(var)
        elif type_ is slice:
            pieces = [None if len(piece) == 0 else int(piece)
                      for piece in var.split(':')]
            return slice(*pieces)
        elif type_ is list:
            return [smart_cast2(piece) for piece in var.split(',')]
        elif isinstance(type_, six.string_types):
            if type_ == 'fuzzy_subset':
                return fuzzy_subset(var)
            if type_ == 'eval':
                # SECURITY: eval of an arbitrary string; globals/locals
                # are emptied but this is still unsafe on untrusted input.
                return eval(var, {}, {})
            else:
                raise NotImplementedError('Uknown smart type_=%r'
                                          % (type_,))
    return type_(var)
Casts var to type_ and tries to be clever when var is a string.
9,336
def fuzzy_subset(str_):
    """Convert a string into an argument suitable for list_take.

    Colon strings become slices; bracketed or plain strings become lists.
    """
    if str_ is None:
        return str_
    if ':' in str_:
        return smart_cast(str_, slice)
    if str_.startswith('['):
        # strip the surrounding brackets before parsing the list body
        return smart_cast(str_[1:-1], list)
    return smart_cast(str_, list)
converts a string into an argument to list_take
9,337
def fuzzy_int(str_):
    """Let some special strings be interpreted as ints.

    'a,b,c' becomes a tuple of ints; 'a:b[:c]' becomes
    tuple(range(a, b[, c])); anything else re-raises the int() error.
    """
    try:
        return int(str_)
    except Exception:
        if re.match(r'\d*,\d*,?\d*', str_):
            return tuple(int(part) for part in str_.split(','))
        if re.match(r'\d*:\d*:?\d*', str_):
            return tuple(range(*map(int, str_.split(':'))))
        raise
lets some special strings be interpreted as ints
9,338
def get_type(var):
    """Get the type of *var*, accounting for numpy/pandas dtypes.

    On win32 the dtype object itself is used instead of dtype.type
    (mirroring the original platform special-case).
    """
    if HAVE_NUMPY and isinstance(var, np.ndarray):
        return var.dtype if _WIN32 else var.dtype.type
    if HAVE_PANDAS and isinstance(var, pd.Index):
        return var.dtype if _WIN32 else var.dtype.type
    return type(var)
Gets types accounting for numpy
9,339
def get_homogenous_list_type(list_):
    """Return the best-matching python type for the items of *list_*.

    Assumes all items share one type (not checked): only the first
    element is inspected (or the array itself for ndarrays). Returns
    None for empty or unrecognized inputs.
    """
    if HAVE_NUMPY and isinstance(list_, np.ndarray):
        item = list_
    elif isinstance(list_, list) and len(list_) > 0:
        item = list_[0]
    else:
        item = None
    if item is None:
        return None
    if is_float(item):
        return float
    if is_int(item):
        return int
    if is_bool(item):
        return bool
    if is_str(item):
        return str
    return get_type(item)
Returns the best matching python type even if it is an ndarray assumes all items in the list are of the same type . does not check this
9,340
def pop(self):
    """Pop a value off the top of the stack.

    The top is stored at index 0 of self.stack. Raises StackError when
    the stack is empty.
    """
    if not self.stack:
        raise StackError('Stack empty')
    head, self.stack = self.stack[0], self.stack[1:]
    return head
Pops a value off the top of the stack .
9,341
def create_spectra_lookup(lookup, fn_spectra):
    """Store all spectra (rt, injection time, scan nr, charge, mz) in db.

    Records are buffered and flushed in DB_STORE_CHUNK-sized batches,
    then the mzml table is indexed.
    """
    buffered = []
    mzmlmap = lookup.get_mzmlfile_map()
    for fn, spectrum in fn_spectra:
        fnid = mzmlmap[fn]
        spec_id = '{}_{}'.format(fnid, spectrum['scan'])
        buffered.append((spec_id, fnid, spectrum['scan'],
                         spectrum['charge'], float(spectrum['mz']),
                         round(float(spectrum['rt']), 12),
                         round(float(spectrum['iit']), 12)))
        if len(buffered) == DB_STORE_CHUNK:
            lookup.store_mzmls(buffered)
            buffered = []
    lookup.store_mzmls(buffered)
    lookup.index_mzml()
Stores all spectra rt injection time and scan nr in db
9,342
def assert_raises(ex_type, func, *args, **kwargs):
    r"""Check that func(*args, **kwargs) raises an *ex_type* error.

    Returns True on success; raises AssertionError when nothing is
    raised or the raised error has the wrong type.
    """
    try:
        func(*args, **kwargs)
    except Exception as ex:
        assert isinstance(ex, ex_type), (
            'Raised %r but type should have been %r' % (ex, ex_type))
        return True
    raise AssertionError('No error was raised')
r Checks that a function raises an error when given specific arguments .
9,343
def command_for_all_connections(self, cb):
    """Invoke *cb* with the command object of every master connection."""
    for conn in self.__master.connections:
        cb(conn.command)
Invoke the callback with a command - object for each connection .
9,344
def dump_autogen_code(fpath, autogen_text, codetype='python',
                      fullprint=None, show_diff=None, dowrite=None):
    """Write an autogenerated file if -w is on the command line;
    otherwise print it, with the option of diffing against the existing
    file (--diff).
    """
    import utool as ut
    if dowrite is None:
        dowrite = ut.get_argflag(('-w', '--write'))
    if show_diff is None:
        show_diff = ut.get_argflag('--diff')
        num_context_lines = ut.get_argval('--diff', type_=int, default=None)
        show_diff = show_diff or num_context_lines is not None
    num_context_lines = ut.get_argval('--diff', type_=int, default=None)
    if fullprint is None:
        fullprint = True
    if fullprint is False:
        fullprint = ut.get_argflag('--print')
    print('[autogen] Autogenerated %s...\n+---\n' % (fpath,))
    if not dowrite:
        if fullprint:
            ut.print_code(autogen_text, lexer_name=codetype)
            print('\nL ')
        else:
            print('specify --print to write to stdout')
        print('specify -w to write, or --diff to compare')
        print('...would write to: %s' % fpath)
    if show_diff:
        if ut.checkpath(fpath, verbose=True):
            prev_text = ut.read_from(fpath)
            textdiff = ut.get_textdiff(prev_text, autogen_text,
                                       num_context_lines=num_context_lines)
            try:
                ut.print_difftext(textdiff)
            except UnicodeDecodeError:
                # normalize away non-ascii characters and retry
                import unicodedata
                textdiff = unicodedata.normalize(
                    'NFKD', textdiff).encode('ascii', 'ignore')
                ut.print_difftext(textdiff)
        if dowrite:
            print('WARNING: Not writing. Remove --diff from command line')
    elif dowrite:
        ut.write_to(fpath, autogen_text)
Helper that write a file if - w is given on command line otherwise it just prints it out . It has the opption of comparing a diff to the file .
9,345
def autofix_codeblock(codeblock, max_line_len=80, aggressive=False,
                      very_aggressive=False, experimental=False):
    r"""Use autopep8 to reformat a block of code.

    Aggressiveness flags map to autopep8's -a / -a -a / --experimental.
    """
    import autopep8
    # BUG FIX: the original hard-coded '80' here, silently ignoring the
    # max_line_len parameter; the default behavior is unchanged.
    arglist = ['--max-line-length', str(max_line_len)]
    if aggressive:
        arglist.extend(['-a'])
    if very_aggressive:
        arglist.extend(['-a', '-a'])
    if experimental:
        arglist.extend(['--experimental'])
    # autopep8.parse_args expects a trailing filename slot
    arglist.extend([''])
    autopep8_options = autopep8.parse_args(arglist)
    return autopep8.fix_code(codeblock, options=autopep8_options)
r Uses autopep8 to format a block of code
9,346
def auto_docstr(modname, funcname, verbose=True, moddir=None, modpath=None,
                **kwargs):
    r"""Build a default docstring for a function; called from vim.

    Locates the function from its module/function name strings; on any
    failure the error text is returned instead of a docstring.
    """
    func, module, error_str = load_func_from_module(
        modname, funcname, verbose=verbose, moddir=moddir, modpath=modpath)
    if error_str is not None:
        return error_str
    try:
        return make_default_docstr(func, **kwargs)
    except Exception as ex:
        import utool as ut
        error_str = ut.formatex(ex, 'Caught Error in parsing docstr',
                                tb=True)
        error_str += ('\n\nReplicateCommand:\n '
                      'python -m utool --tf auto_docstr '
                      '--modname={modname} --funcname={funcname} '
                      '--moddir={moddir}').format(
                          modname=modname, funcname=funcname,
                          moddir=moddir)
        error_str += '\n kwargs=' + ut.repr4(kwargs)
        return error_str
r called from vim . Uses strings of filename and modnames to build docstr
9,347
def make_args_docstr(argname_list, argtype_list, argdesc_list, ismethod,
                     va_name=None, kw_name=None, kw_keys=[]):
    r"""Build the Args: section of a docstring.

    For methods, the implicit first (self/cls) argument is dropped.
    *args / **kwargs entries are appended when their names are given;
    the kw_keys listing is wrapped to the line width.
    """
    import utool as ut
    if ismethod:
        argname_list = argname_list[1:]
        argtype_list = argtype_list[1:]
        argdesc_list = argdesc_list[1:]
    argdoc_list = [arg + ' (%s): %s' % (_type, desc)
                   for arg, _type, desc in zip(argname_list, argtype_list,
                                               argdesc_list)]
    if va_name is not None:
        argdoc_list.append('*' + va_name + ':')
    if kw_name is not None:
        import textwrap
        prefix = '**' + kw_name + ': '
        wrapped_lines = textwrap.wrap(', '.join(kw_keys),
                                      width=70 - len(prefix))
        sep = '\n' + (' ' * len(prefix))
        argdoc_list.append((prefix + sep.join(wrapped_lines)).strip())
    # alignment on '(' is disabled; kept for parity with the original
    align_args = False
    if align_args:
        argdoc_list = ut.align_lines(argdoc_list, character='(')
    return '\n'.join(argdoc_list)
r Builds the argument docstring
9,348
# Build a sensible default docstring (header/Args/Returns/CommandLine/
# Example/Debug sections) so the user can fill in details without typing
# boilerplate. Returns the assembled, indented docstring text.
# NOTE(review): this block appears corrupted by extraction and is left
# byte-identical rather than restyled:
#  - `docstr_parts = [ 'r' ]` in the needs_surround branch discards all
#    accumulated parts; upstream utool wraps the parts in an
#    r-triple-quoted surround here, and the quote characters were likely
#    stripped — confirm against utool.util_autogen.
#  - `ut . codeblock ( )` is called without its template argument, which
#    was probably a triple-quoted string lost in extraction.
def make_default_docstr ( func , with_args = True , with_ret = True , with_commandline = True , with_example = True , with_header = False , with_debug = False ) : r import utool as ut funcinfo = ut . util_inspect . infer_function_info ( func ) argname_list = funcinfo . argname_list argtype_list = funcinfo . argtype_list argdesc_list = funcinfo . argdesc_list return_header = funcinfo . return_header return_type = funcinfo . return_type return_name = funcinfo . return_name return_desc = funcinfo . return_desc funcname = funcinfo . funcname modname = funcinfo . modname defaults = funcinfo . defaults num_indent = funcinfo . num_indent needs_surround = funcinfo . needs_surround funcname = funcinfo . funcname ismethod = funcinfo . ismethod va_name = funcinfo . va_name kw_name = funcinfo . kw_name kw_keys = funcinfo . kw_keys docstr_parts = [ ] if with_header : header_block = funcname docstr_parts . append ( header_block ) if with_args and len ( argname_list ) > 0 : argheader = 'Args' arg_docstr = make_args_docstr ( argname_list , argtype_list , argdesc_list , ismethod , va_name , kw_name , kw_keys ) argsblock = make_docstr_block ( argheader , arg_docstr ) docstr_parts . append ( argsblock ) if with_ret and return_header is not None : if return_header is not None : return_doctr = make_returns_or_yeilds_docstr ( return_type , return_name , return_desc ) returnblock = make_docstr_block ( return_header , return_doctr ) docstr_parts . append ( returnblock ) if with_commandline : cmdlineheader = 'CommandLine' cmdlinecode = make_cmdline_docstr ( funcname , modname ) cmdlineblock = make_docstr_block ( cmdlineheader , cmdlinecode ) docstr_parts . append ( cmdlineblock ) if with_example : exampleheader = 'Example' examplecode = make_example_docstr ( funcname , modname , argname_list , defaults , return_type , return_name , ismethod ) examplecode_ = ut . indent ( examplecode , '>>> ' ) exampleblock = make_docstr_block ( exampleheader , examplecode_ ) docstr_parts . 
append ( exampleblock ) if with_debug : debugheader = 'Debug' debugblock = ut . codeblock ( ) . format ( num_indent = num_indent ) debugblock = make_docstr_block ( debugheader , debugblock ) docstr_parts . append ( debugblock ) if needs_surround : docstr_parts = [ 'r' ] default_docstr = '\n' . join ( docstr_parts ) else : default_docstr = '\n\n' . join ( docstr_parts ) docstr_indent = ' ' * ( num_indent + 4 ) default_docstr = ut . indent ( default_docstr , docstr_indent ) return default_docstr
r Tries to make a sensible default docstr so the user can fill things in without typing too much
9,349
def remove_codeblock_syntax_sentinals(code_text):
    r"""Strip template comments and vim sentinel markers from code text.

    Removes `# REM ...` lines and `# STARTBLOCK` / `# ENDBLOCK` marker
    lines, then trims trailing whitespace.
    """
    flags = re.MULTILINE | re.DOTALL
    stripped = code_text
    stripped = re.sub(r'^ *# *REM [^\n]*$\n?', '', stripped, flags=flags)
    stripped = re.sub(r'^ *# STARTBLOCK *$\n', '', stripped, flags=flags)
    stripped = re.sub(r'^ *# ENDBLOCK *$\n?', '', stripped, flags=flags)
    return stripped.rstrip()
r Removes template comments and vim sentinals
9,350
def sort_protein_group(pgroup, sortfunctions, sortfunc_index):
    """Recursively sort a protein group with a cascade of sort functions.

    Each function partitions the group into ordered subgroups; any
    subgroup with more than one member is re-sorted by the next function
    in the cascade until functions run out.
    """
    ordered = []
    subgroups = sortfunctions[sortfunc_index](pgroup)
    next_index = sortfunc_index + 1
    for subgroup in subgroups:
        if len(subgroup) > 1 and next_index < len(sortfunctions):
            ordered.extend(
                sort_protein_group(subgroup, sortfunctions, next_index))
        else:
            ordered.extend(subgroup)
    return ordered
Recursive function that sorts protein group by a number of sorting functions .
9,351
def sort_amounts(proteins, sort_index):
    """Group records by the value at *sort_index* and return the groups
    ordered from highest to lowest value.

    Generic for peptides and psms too; assumes a higher number is better.
    """
    grouped = {}
    for protein in proteins:
        grouped.setdefault(protein[sort_index], []).append(protein)
    return [group for _, group in sorted(grouped.items(), reverse=True)]
Generic function for sorting peptides and psms . Assumes a higher number is better for what is passed at sort_index position in protein .
9,352
def free(self):
    """Free the underlying native map (idempotent)."""
    if self._ptr is None:
        return
    Gauged.map_free(self.ptr)
    SparseMap.ALLOCATIONS -= 1
    # mark as freed so a second call is a no-op
    self._ptr = None
Free the map
9,353
def append(self, position, array):
    """Append *array* at *position* (must exceed all existing positions).

    Raises MemoryError when the native append fails.
    """
    if not Gauged.map_append(self.ptr, position, array.ptr):
        raise MemoryError
Append an array to the end of the map . The position must be greater than any positions in the map
9,354
def slice(self, start=0, end=0):
    """Return a new SparseMap holding [start, end) of this map."""
    tmp = Gauged.map_new()
    if tmp is None:
        raise MemoryError
    if not Gauged.map_concat(tmp, self.ptr, start, end, 0):
        # free the partially-built native map before failing
        Gauged.map_free(tmp)
        raise MemoryError
    return SparseMap(tmp)
Slice the map from [ start end )
9,355
def concat(self, operand, start=0, end=0, offset=0):
    """Concatenate *operand* onto this map.

    The operand may optionally be sliced to [start, end) and each of its
    positions shifted by *offset* first.
    """
    if not Gauged.map_concat(self.ptr, operand.ptr, start, end, offset):
        raise MemoryError
Concat a map . You can also optionally slice the operand map and apply an offset to each position before concatting
9,356
def buffer(self, byte_offset=0):
    """Return a copy of the map buffer from *byte_offset* onward.

    Returns None when the requested length is zero. (Uses the Python 2
    `buffer` builtin.)
    """
    contents = self.ptr.contents
    start = addressof(contents.buffer.contents) + byte_offset
    # the native buffer length is in 4-byte words
    nbytes = contents.length * 4 - byte_offset
    if not nbytes:
        return None
    return buffer((c_char * nbytes).from_address(start).raw)
Get a copy of the map buffer
9,357
def matches(target, entry):
    """Return whether *target* matches the whitelist *entry*.

    Falsy entry positions act as wildcards. The first two entry fields
    must be truthy for a match; their and-value is what gets returned.
    """
    for want, have in itertools.zip_longest(target, entry):
        if have and want != have:
            return False
    return entry[0] and entry[1]
Does the target match the whitelist entry?
9,358
def check_entry(*entry):
    """Validate *entry* against the whitelist, recording it when newly
    allowed.

    NOTE(review): the original description says this throws when the
    entry isn't whitelisted; here any rejection must come from
    check_allow_prompt — confirm against its implementation.
    """
    whitelist = read_whitelist()
    if not check_allow_prompt(entry, whitelist):
        whitelist.append(entry)
        write_whitelist(whitelist)
Throws an exception if the entry isn't on the whitelist.
9,359
def load_uncached(location, use_json=None):
    """Load data from a local file or from the raw form of a URL.

    location: a file path or a URL.
    use_json: force (True) or suppress (False) JSON/YAML parsing; when
        None it is inferred from the location's suffix.
    Raises ValueError on a failed HTTP fetch; read and parse errors are
    re-raised with context prepended to ``args``.
    """
    if not whitelist.is_file(location):
        r = requests.get(raw.raw(location))
        if not r.ok:
            raise ValueError('Couldn\'t read %s with code %s:\n%s'
                             % (location, r.status_code, r.text))
        data = r.text
    else:
        f = location  # keep f bound for the error handler below
        try:
            f = os.path.realpath(os.path.abspath(os.path.expanduser(location)))
            with open(f) as fp:  # close the handle instead of leaking it
                data = fp.read()
        except Exception as e:
            e.args = ('There was an error reading the file', location, f) + e.args
            raise
    if use_json is None:
        use_json = any(location.endswith(s) for s in SUFFIXES)
    if not use_json:
        return data
    try:
        # safe_load: the payload is expected to be JSON (a YAML subset),
        # so the code-executing full yaml.load is unnecessary and unsafe.
        return yaml.safe_load(data)
    except Exception as e:
        e.args = ('There was a JSON error in the file', location) + e.args
        raise
Return data at either a file location or at the raw version of a URL or raise an exception .
9,360
def find_group_differences(groups1, groups2):
    r"""Return a total count of per-item neighbour mismatches between two
    groupings; 0 means the groupings agree exactly."""
    import utool as ut
    # map each item to the other members of its group, per grouping
    item_to_others1 = {item: set(_group) - {item}
                       for _group in groups1 for item in _group}
    item_to_others2 = {item: set(_group) - {item}
                       for _group in groups2 for item in _group}
    flat_items1 = ut.flatten(groups1)
    flat_items2 = ut.flatten(groups2)
    flat_items = list(set(flat_items1 + flat_items2))
    errors = []
    item_to_error = {}
    for item in flat_items:
        others1 = item_to_others1.get(item, set([]))
        others2 = item_to_others2.get(item, set([]))
        # neighbours present on one side but not the other
        missing1 = others1 - others2
        missing2 = others2 - others1
        error = len(missing1) + len(missing2)
        if error > 0:
            item_to_error[item] = error
        errors.append(error)
    total_error = sum(errors)
    return total_error
Returns a measure of how dissimilar two groupings are.
9,361
def find_group_consistencies(groups1, groups2):
    r"""Return the groups (as sorted tuples) common to both groupings."""
    def canonical(groups):
        # sorted tuples make groups hashable and order-insensitive
        return {tuple(sorted(members)) for members in groups}
    return list(canonical(groups1) & canonical(groups2))
r Returns a measure of group consistency
9,362
def compare_groups(true_groups, pred_groups):
    r"""Categorize how *pred_groups* must change to match *true_groups*.

    Returns a dict with the exactly-matching groups ('common'), the pure
    splits and merges, and leftover 'hybrid' cases, each from both the
    true and predicted perspectives.
    """
    import utool as ut
    true = {frozenset(_group) for _group in true_groups}
    pred = {frozenset(_group) for _group in pred_groups}
    # groups that already agree exactly
    common = true.intersection(pred)
    true_sets = true.difference(common)
    pred_sets = pred.difference(common)
    # item -> its containing group, for each side
    pred_conn = {p: frozenset(ps) for ps in pred for p in ps}
    true_conn = {t: frozenset(ts) for ts in true for t in ts}
    # a true group is a pure merge when whole predicted groups union to it
    pred_merges = []
    true_merges = []
    for ts in true_sets:
        ccs = set([pred_conn.get(t, frozenset()) for t in ts])
        if frozenset.union(*ccs) == ts:
            pred_merges.append(ccs)
            true_merges.append(ts)
    # a predicted group is a pure split when whole true groups union to it
    true_splits = []
    pred_splits = []
    for ps in pred_sets:
        ccs = set([true_conn.get(p, frozenset()) for p in ps])
        if frozenset.union(*ccs) == ps:
            true_splits.append(ccs)
            pred_splits.append(ps)
    pred_merges_flat = ut.flatten(pred_merges)
    true_splits_flat = ut.flatten(true_splits)
    # everything that is neither a pure split nor a pure merge is hybrid
    pred_hybrid = frozenset(map(frozenset, pred_sets)).difference(
        set(pred_splits + pred_merges_flat))
    true_hybrid = frozenset(map(frozenset, true_sets)).difference(
        set(true_merges + true_splits_flat))
    comparisons = {
        'common': common,
        'true_splits': true_splits,
        'true_merges': true_merges,
        'true_hybrid': true_hybrid,
        'pred_splits': pred_splits,
        'pred_merges': pred_merges,
        'pred_hybrid': pred_hybrid,
    }
    return comparisons
r Finds how predictions need to be modified to match the true grouping .
9,363
def grouping_delta_stats(old, new):
    """Summarize, as a pandas DataFrame, the group-size statistics of each
    category of change between the *old* and *new* groupings."""
    import pandas as pd
    import utool as ut
    group_delta = ut.grouping_delta(old, new)
    stats = ut.odict()
    unchanged = group_delta['unchanged']
    splits = group_delta['splits']
    merges = group_delta['merges']
    hybrid = group_delta['hybrid']
    # size statistics over each group in a list of groups
    statsmap = ut.partial(lambda x: ut.stats_dict(map(len, x), size=True))
    stats['unchanged'] = statsmap(unchanged)
    stats['old_split'] = statsmap(splits['old'])
    stats['new_split'] = statsmap(ut.flatten(splits['new']))
    stats['old_merge'] = statsmap(ut.flatten(merges['old']))
    stats['new_merge'] = statsmap(merges['new'])
    stats['old_hybrid'] = statsmap(hybrid['old'])
    stats['new_hybrid'] = statsmap(hybrid['new'])
    df = pd.DataFrame.from_dict(stats, orient='index')
    # keep the categories in insertion order
    df = df.loc[list(stats.keys())]
    return df
Returns statistics about grouping changes
9,364
def upper_diag_self_prodx(list_):
    """Return ordered pairs (list_[i], list_[j]) for every i < j -- the
    upper triangle of the self cross product, excluding the diagonal."""
    # combinations yields exactly the i < j pairs in the same order as the
    # nested-enumerate formulation it replaces
    return list(itertools.combinations(list_, 2))
Upper diagonal of the Cartesian product of the list with itself. Weird name; fixme.
9,365
def colwise_diag_idxs(size, num=2):
    r"""Return strictly-increasing index tuples of a num-dimensional square
    array of the given size, enumerated column-wise.

    NOTE(review): the original author explicitly warns not to trust this
    implementation or its name.
    """
    import utool as ut
    diag_idxs = ut.iprod(*[range(size) for _ in range(num)])
    # keep strictly-decreasing tuples, then reverse each so the output
    # tuples are strictly increasing, in column-major enumeration order
    upper_diag_idxs = [tup[::-1] for tup in diag_idxs
                       if all([a > b for a, b in ut.itertwo(tup)])]
    return upper_diag_idxs
r dont trust this implementation or this function name
9,366
def product_nonsame(list1, list2):
    """Yield each pair from the cross product of list1 and list2 whose two
    members are not equal."""
    for pair in itertools.product(list1, list2):
        if pair[0] != pair[1]:
            yield pair
product of list1 and list2 where items are non equal
9,367
def greedy_max_inden_setcover(candidate_sets_dict, items, max_covers=None):
    """Greedily choose mutually disjoint candidate sets that cover *items*.

    Only candidates fully contained in the still-uncovered items are
    eligible, which keeps accepted sets independent.  Stops after
    *max_covers* accepted sets when that limit is given.

    Returns (uncovered_items, covered_items_list, accepted_keys).
    """
    uncovered_set = set(items)
    rejected_keys = set()
    accepted_keys = set()
    covered_items_list = []
    while True:
        if max_covers is not None and len(covered_items_list) >= max_covers:
            break
        # find the largest still-eligible candidate set
        maxkey = None
        maxlen = -1
        for key, candidate_items in six.iteritems(candidate_sets_dict):
            if key in rejected_keys or key in accepted_keys:
                continue
            lenval = len(candidate_items)
            # must not overlap anything already covered
            if uncovered_set.issuperset(candidate_items):
                if lenval > maxlen:
                    maxkey = key
                    maxlen = lenval
            else:
                # once it overlaps covered items it can never be eligible
                rejected_keys.add(key)
        if maxkey is None:
            break
        maxval = candidate_sets_dict[maxkey]
        accepted_keys.add(maxkey)
        covered_items_list.append(list(maxval))
        uncovered_set.difference_update(maxval)
    uncovered_items = list(uncovered_set)
    covertup = uncovered_items, covered_items_list, accepted_keys
    return covertup
greedy algorithm for maximum independent set cover
9,368
def setcover_greedy(candidate_sets_dict, items=None, set_weights=None,
                    item_values=None, max_weight=None):
    r"""Greedy approximation for (weighted) set-cover problems.

    candidate_sets_dict: mapping from candidate key to an iterable of items.
    set_weights: optional per-key cost (default: cost of the solution is
        the number of chosen sets, via ``len``).
    item_values: optional per-item value (default: each item is worth 1).
    max_weight: weight budget at which to stop (default: the weight of
        choosing everything).
    Returns a dict mapping the chosen keys to their original item sets.
    """
    import utool as ut
    solution_cover = {}
    if items is None:
        items = ut.flatten(candidate_sets_dict.values())
    if set_weights is None:
        get_weight = len
    else:
        def get_weight(solution_cover):
            # BUG FIX: the original computed this sum but never returned it
            return sum([set_weights[key] for key in solution_cover.keys()])
    if item_values is None:
        get_value = len
    else:
        def get_value(vals):
            # BUG FIX: the original computed this sum but never returned it
            return sum([item_values[v] for v in vals])
    if max_weight is None:
        max_weight = get_weight(candidate_sets_dict)
    # track the still-uncovered portion of every candidate set
    avail_covers = {key: set(val) for key, val in candidate_sets_dict.items()}
    while get_weight(solution_cover) < max_weight and len(avail_covers) > 0:
        # choose the candidate covering the most remaining value
        uncovered_values = list(map(get_value, avail_covers.values()))
        chosen_idx = ut.argmax(uncovered_values)
        if uncovered_values[chosen_idx] <= 0:
            break  # nothing useful left to cover
        chosen_key = list(avail_covers.keys())[chosen_idx]
        chosen_set = avail_covers[chosen_key]
        solution_cover[chosen_key] = candidate_sets_dict[chosen_key]
        del avail_covers[chosen_key]
        # the chosen items no longer count toward other candidates
        for vals in avail_covers.values():
            vals.difference_update(chosen_set)
    return solution_cover
Greedy algorithm for various covering problems, with approximation guarantees depending on specifications such as set_weights and item_values.
9,369
def item_hist(list_):
    """Return a dict mapping each item in *list_* to its occurrence count."""
    from collections import Counter
    # Counter does the counting in C; converting back to a plain dict keeps
    # the original return type for callers comparing against dicts
    return dict(Counter(list_))
Counts the number of times each item appears in the list.
9,370
def get_nth_prime(n, max_prime=4100, safe=True):
    """Return the nth prime, 1-indexed (get_nth_prime(1) == 2).

    Uses a hard-coded table for n <= 100.  Otherwise, trial-divides up to
    max_prime when safe=True, or brute-forces when safe=False.
    """
    if n <= 100:
        first_100_primes = (
            2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
            61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
            131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
            197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269,
            271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
            353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,
            433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
            509, 521, 523, 541,
        )
        nth_prime = first_100_primes[n - 1]
    else:
        if safe:
            primes = [num for num in range(2, max_prime) if is_prime(num)]
            # BUG FIX: primes[n] returned the (n + 1)th prime; use n - 1 to
            # stay consistent with the 1-indexed table branch above
            nth_prime = primes[n - 1]
        else:
            nth_prime = get_nth_prime_bruteforce(n)
    return nth_prime
hacky but still brute force algorithm for finding nth prime for small tests
9,371
def knapsack(items, maxweight, method='recursive'):
    r"""Solve the knapsack problem: find the most valuable subsequence of
    *items* whose total weight does not exceed *maxweight*.

    method: one of 'recursive', 'iterative', or 'ilp'.
    Raises NotImplementedError for an unknown method.
    """
    solvers = {
        'recursive': knapsack_recursive,
        'iterative': knapsack_iterative,
        'ilp': knapsack_ilp,
    }
    if method not in solvers:
        raise NotImplementedError('[util_alg] knapsack method=%r' % (method,))
    return solvers[method](items, maxweight)
r Solve the knapsack problem by finding the most valuable subsequence of items subject that weighs no more than maxweight .
9,372
def knapsack_ilp(items, maxweight, verbose=False):
    """Solve knapsack exactly as an integer linear program using pulp.

    items: sequence of (value, weight, index) triples.
    Returns (total_value, items_subset).
    """
    import pulp
    values = [t[0] for t in items]
    weights = [t[1] for t in items]
    indices = [t[2] for t in items]
    prob = pulp.LpProblem("Knapsack", pulp.LpMaximize)
    # one binary take/leave decision variable per item
    x = pulp.LpVariable.dicts(name='x', indexs=indices, lowBound=0,
                              upBound=1, cat=pulp.LpInteger)
    prob.objective = sum(v * x[i] for v, i in zip(values, indices))
    # the single weight-budget constraint
    prob.add(sum(w * x[i] for w, i in zip(weights, indices)) <= maxweight)
    pulp.PULP_CBC_CMD().solve(prob)
    flags = [x[i].varValue for i in indices]
    total_value = sum([val for val, flag in zip(values, flags) if flag])
    items_subset = [item for item, flag in zip(items, flags) if flag]
    if verbose:
        print(prob)
        print('OPT:')
        print('\n'.join([' %s = %s' % (x[i].name, x[i].varValue)
                         for i in indices]))
        print('total_value = %r' % (total_value,))
    return total_value, items_subset
solves knapsack using an integer linear program
9,373
def knapsack_iterative(items, maxweight):
    """Solve knapsack with possibly-fractional weights by scaling every
    weight (and the budget) to an integer, then delegating to
    knapsack_iterative_int."""
    weights = [t[1] for t in items]
    # the largest number of decimal places determines the scaling factor
    max_exp = max([number_of_decimals(w_) for w_ in weights])
    coeff = 10 ** max_exp
    int_maxweight = int(maxweight * coeff)
    int_items = [(v, int(w * coeff), idx) for v, w, idx in items]
    return knapsack_iterative_int(int_items, int_maxweight)
items = int_items maxweight = int_maxweight
9,374
def knapsack_iterative_int(items, maxweight):
    r"""Bottom-up dynamic-programming knapsack over integer weights.

    items: sequence of (value, int_weight, index) triples.
    Returns (total_value, items_subset).
    """
    values = [t[0] for t in items]
    weights = [t[1] for t in items]
    maxsize = maxweight + 1
    # dpmat[idx][w]: best value using items[0..idx] within capacity w
    # kmat[idx][w]: whether items[idx] is taken in that optimum
    dpmat = defaultdict(lambda: defaultdict(lambda: np.inf))
    kmat = defaultdict(lambda: defaultdict(lambda: False))
    idx_subset = []
    for w in range(maxsize):
        dpmat[0][w] = 0
    for idx in range(len(items)):
        item_val = values[idx]
        item_weight = weights[idx]
        for w in range(maxsize):
            valid_item = item_weight <= w
            if idx > 0:
                prev_val = dpmat[idx - 1][w]
                prev_noitem_val = dpmat[idx - 1][w - item_weight]
            else:
                prev_val = 0
                prev_noitem_val = 0
            withitem_val = item_val + prev_noitem_val
            more_valuable = withitem_val > prev_val
            if valid_item and more_valuable:
                dpmat[idx][w] = withitem_val
                kmat[idx][w] = True
            else:
                dpmat[idx][w] = prev_val
                kmat[idx][w] = False
    # backtrack through kmat to recover which items were chosen
    K = maxweight
    for idx in reversed(range(len(items))):
        if kmat[idx][K]:
            idx_subset.append(idx)
            K = K - weights[idx]
    idx_subset = sorted(idx_subset)
    items_subset = [items[i] for i in idx_subset]
    total_value = dpmat[len(items) - 1][maxweight]
    return total_value, items_subset
r Iterative knapsack method
9,375
def knapsack_iterative_numpy(items, maxweight):
    """Bottom-up knapsack using dense numpy DP tables.

    items: sequence of (value, weight, index) triples; fractional weights
    are scaled up to integers first.
    Returns (total_value, items_subset).

    NOTE(review): the DP loop starts at index 1, so items[0] is never
    selected -- confirm whether callers rely on a sentinel first item.
    """
    items = np.array(items)
    weights = items.T[1]
    # scale fractional weights up to integers
    max_exp = max([number_of_decimals(w_) for w_ in weights])
    coeff = 10 ** max_exp
    # np.int / np.bool were removed from numpy; use the builtin types
    weights = (weights * coeff).astype(int)
    values = items.T[0]
    MAXWEIGHT = int(maxweight * coeff)
    W_SIZE = MAXWEIGHT + 1
    # dpmat[idx][w]: best value using items[1..idx] within capacity w
    # kmat[idx][w]: whether items[idx] is taken in that optimum
    dpmat = np.full((len(items), W_SIZE), np.inf)
    kmat = np.full((len(items), W_SIZE), 0, dtype=bool)
    idx_subset = []
    for w in range(W_SIZE):
        dpmat[0][w] = 0
    for idx in range(1, len(items)):
        item_val = values[idx]
        item_weight = weights[idx]
        for w in range(W_SIZE):
            valid_item = item_weight <= w
            prev_val = dpmat[idx - 1][w]
            if valid_item:
                prev_noitem_val = dpmat[idx - 1][w - item_weight]
                withitem_val = item_val + prev_noitem_val
                more_valuable = withitem_val > prev_val
            else:
                more_valuable = False
            dpmat[idx][w] = withitem_val if more_valuable else prev_val
            kmat[idx][w] = more_valuable
    # backtrack to recover the chosen items
    K = MAXWEIGHT
    for idx in reversed(range(1, len(items))):
        if kmat[idx, K]:
            idx_subset.append(idx)
            K = K - weights[idx]
    idx_subset = sorted(idx_subset)
    items_subset = [items[i] for i in idx_subset]
    total_value = dpmat[len(items) - 1][MAXWEIGHT]
    return total_value, items_subset
Iterative knapsack method
9,376
def knapsack_greedy(items, maxweight):
    r"""Non-optimal greedy knapsack: scan *items* in the given order and
    take each (value, weight, ...) item that still fits under *maxweight*.

    This function does not sort its input; sort by largest value first if
    a better greedy result is desired.
    Returns (total_value, items_subset).
    """
    chosen = []
    weight_used = 0
    value_total = 0
    for candidate in items:
        value, weight = candidate[0:2]
        if weight_used + weight <= maxweight:
            chosen.append(candidate)
            weight_used += weight
            value_total += value
    return value_total, chosen
r non - optimal greedy version of knapsack algorithm does not sort input . Sort the input by largest value first if desired .
9,377
def choose(n, k):
    """Return the exact binomial coefficient n choose k."""
    # scipy.misc.comb was removed in SciPy 1.0; scipy.special.comb is the
    # maintained location with an identical signature
    import scipy.special
    return scipy.special.comb(n, k, exact=True, repetition=False)
N choose k
9,378
def almost_eq(arr1, arr2, thresh=1E-11, ret_error=False):
    """Elementwise floating-point equality within *thresh*.

    Returns the boolean result, or (result, absolute_error) when
    ret_error is True.
    """
    abs_error = np.abs(arr1 - arr2)
    within = abs_error < thresh
    return (within, abs_error) if ret_error else within
checks if floating point number are equal to a threshold
9,379
def norm_zero_one(array, dim=None):
    """Linearly rescale *array* into [0, 1] based on its extent along *dim*
    (the whole-array extent when dim is None)."""
    if not util_type.is_float(array):
        # work in floating point so the division below is not integer math
        array = array.astype(np.float32)
    array_max = array.max(dim)
    array_min = array.min(dim)
    array_exnt = np.subtract(array_max, array_min)
    array_norm = np.divide(np.subtract(array, array_min), array_exnt)
    return array_norm
normalizes a numpy array from 0 to 1 based in its extent
9,380
def group_indices(groupid_list):
    """Group the indices of *groupid_list* by value.

    Returns (keys, groupxs): the sorted unique group ids, and for each one
    the list of indices where it occurs.
    """
    item_list = range(len(groupid_list))
    grouped_dict = util_dict.group_items(item_list, groupid_list)
    keys_ = list(grouped_dict.keys())
    try:
        keys = sorted(keys_)
    except TypeError:
        # fall back for unorderable / mixed-type keys
        keys = util_list.sortedby2(keys_, keys_)
    groupxs = util_dict.dict_take(grouped_dict, keys)
    return keys, groupxs
Groups the indices of each item in groupid_list.
9,381
def ungroup_gen(grouped_items, groupxs, fill=None):
    """Lazily invert a grouping, yielding items back in their original flat
    index order; None is yielded for positions no group covers.

    Slower than the list-based ungroup and, per the original note, not
    guaranteed to have better memory usage.

    NOTE(review): the *fill* parameter is accepted but the gaps are always
    filled with None -- confirm whether *fill* was meant to be used.
    """
    import utool as ut
    minpergroup = [min(xs) if len(xs) else 0 for xs in groupxs]
    minval = min(minpergroup) if len(minpergroup) else 0
    flat_groupx = ut.flatten(groupxs)
    sortx = ut.argsort(flat_groupx)
    groupx_sorted = ut.take(flat_groupx, sortx)
    flat_items = ut.iflatten(grouped_items)
    # items encountered before their index is due get parked here
    toyeild = {}
    items_yeilded = 0
    current_index = 0
    # gap (fill) positions that precede each yielded item
    num_fills_before = [minval] + (np.diff(groupx_sorted) - 1).tolist() + [0]
    fills = num_fills_before[items_yeilded]
    if fills > 0:
        for _ in range(fills):
            yield None
            current_index += 1
    for yeild_at, item in zip(flat_groupx, flat_items):
        if yeild_at > current_index:
            # not this item's turn yet; park it until its index comes up
            toyeild[yeild_at] = item
        elif yeild_at == current_index:
            yield item
            current_index += 1
            items_yeilded += 1
            fills = num_fills_before[items_yeilded]
            if fills > 0:
                for _ in range(fills):
                    yield None
                    current_index += 1
            # drain any parked items that are now due
            while current_index in toyeild:
                item = toyeild.pop(current_index)
                yield item
                current_index += 1
                items_yeilded += 1
                fills = num_fills_before[items_yeilded]
                if fills > 0:
                    for _ in range(fills):
                        yield None
                        current_index += 1
Ungroups items, returning a generator. Note that this is much slower than the list version and is not guaranteed to have better memory usage.
9,382
def ungroup_unique(unique_items, groupxs, maxval=None):
    """Scatter each unique item back to every index of its group, producing
    the original non-unique flat list; uncovered positions stay None."""
    if maxval is None:
        group_maxes = [max(xs) if len(xs) else 0 for xs in groupxs]
        maxval = max(group_maxes) if len(group_maxes) else 0
    flat = [None] * (maxval + 1)
    for item, idxs in zip(unique_items, groupxs):
        for idx in idxs:
            flat[idx] = item
    return flat
Ungroups unique items to correspond to original non - unique list
9,383
def edit_distance(string1, string2):
    """Levenshtein edit distance between strings or lists of strings.

    Returns a scalar, a list, or a matrix of distances depending on
    whether each argument is a single string or an iterable of strings.
    """
    import utool as ut
    try:
        import Levenshtein
    except ImportError as ex:
        ut.printex(ex, 'pip install python-Levenshtein')
        raise
    import utool as ut
    isiter1 = ut.isiterable(string1)
    isiter2 = ut.isiterable(string2)
    # normalize both sides to lists, remembering which were scalars
    strs1 = string1 if isiter1 else [string1]
    strs2 = string2 if isiter2 else [string2]
    distmat = [[Levenshtein.distance(str1, str2) for str2 in strs2]
               for str1 in strs1]
    # unwrap the dimensions that corresponded to scalar inputs
    if not isiter2:
        distmat = ut.take_column(distmat, 0)
    if not isiter1:
        distmat = distmat[0]
    return distmat
Edit distance algorithm . String1 and string2 can be either strings or lists of strings
9,384
def standardize_boolexpr(boolexpr_, parens=False):
    r"""Simplify a python boolean expression string into a minimal
    or-of-ands (sum-of-products) form via Quine-McCluskey.

    NOTE(review): evaluates boolexpr_ with eval over every truth
    assignment of its variables -- only pass trusted expressions.
    """
    import utool as ut
    import re
    # strip operators and parens to recover the bare variable names
    onlyvars = boolexpr_
    onlyvars = re.sub('\\bnot\\b', '', onlyvars)
    onlyvars = re.sub('\\band\\b', '', onlyvars)
    onlyvars = re.sub('\\bor\\b', '', onlyvars)
    onlyvars = re.sub('\\(', '', onlyvars)
    onlyvars = re.sub('\\)', '', onlyvars)
    varnames = ut.remove_doublspaces(onlyvars).strip().split(' ')
    # evaluate the expression over every truth assignment
    varied_dict = {var: [True, False] for var in varnames}
    bool_states = ut.all_dict_combinations(varied_dict)
    outputs = [eval(boolexpr_, state.copy(), state.copy())
               for state in bool_states]
    # encode the satisfying assignments as binary minterms
    true_states = ut.compress(bool_states, outputs)
    true_tuples = ut.take_column(true_states, varnames)
    true_cases = [str(''.join([str(int(t)) for t in tup]))
                  for tup in true_tuples]
    ones_bin = [int(x, 2) for x in true_cases]
    from quine_mccluskey.qm import QuineMcCluskey
    qm = QuineMcCluskey()
    result = qm.simplify(ones=ones_bin, num_bits=len(varnames))
    # map each simplified term's symbols back onto the variable names
    grouped_terms = [dict(ut.group_items(varnames, rs)) for rs in result]

    def parenjoin(char, list_):
        if len(list_) == 0:
            return ''
        else:
            if parens:
                return '(' + char.join(list_) + ')'
            else:
                return char.join(list_)
    # '1' -> plain variable, '0' -> negated, '^'/'~' -> xor/xnor groups
    if parens:
        expanded_terms = [
            (term.get('1', []) +
             ['(not ' + b + ')' for b in term.get('0', [])] +
             [parenjoin(' ^ ', term.get('^', [])),
              parenjoin(' ~ ', term.get('~', [])),
              ])
            for term in grouped_terms
        ]
    else:
        expanded_terms = [
            (term.get('1', []) +
             ['not ' + b for b in term.get('0', [])] +
             [parenjoin(' ^ ', term.get('^', [])),
              parenjoin(' ~ ', term.get('~', [])),
              ])
            for term in grouped_terms
        ]
    final_terms = [[t for t in term if t] for term in expanded_terms]
    products = [parenjoin(' and ', [f for f in form if f])
                for form in final_terms]
    final_expr = ' or '.join(products)
    return final_expr
r Standardizes a boolean expression into an or - ing of and - ed variables
9,385
def expensive_task_gen(num=8700):
    r"""Yield the wall-clock time of a primality check for each x in
    [0, num) -- a simple source of time-consuming tasks for testing."""
    import utool as ut
    for x in range(0, num):
        with ut.Timer(verbose=False) as t:
            ut.is_prime(x)
        yield t.ellapsed
r Runs a task that takes some time
9,386
def factors(n):
    """Return the set of all positive integer divisors of *n*."""
    divisors = set()
    # every divisor pairs with a codivisor; scanning up to sqrt(n) finds both
    for small in range(1, int(n ** 0.5) + 1):
        if n % small == 0:
            divisors.add(small)
            divisors.add(n // small)
    return divisors
Computes all the integer factors of the number n
9,387
def add_protein_data(proteins, pgdb, headerfields, genecentric=False,
                     pool_to_output=False):
    """Yield protein tsv-row dicts enriched with data from the lookup DB.

    First builds a map of all master proteins with their feature data,
    then emits one output dict per input protein row.  When a pool is
    given via pool_to_output, only that pool's data appears in the table.
    """
    proteindata = create_featuredata_map(pgdb, genecentric=genecentric,
                                         psm_fill_fun=add_psms_to_proteindata,
                                         pgene_fill_fun=add_protgene_to_protdata,
                                         count_fun=count_peps_psms,
                                         pool_to_output=pool_to_output,
                                         get_uniques=True)
    # pick the gene-centric or protein-group-centric data extractor
    dataget_fun = {True: get_protein_data_genecentric,
                   False: get_protein_data_pgrouped}[genecentric is not False]
    firstfield = prottabledata.ACCESSIONS[genecentric]
    for protein in proteins:
        outprotein = {k: v for k, v in protein.items()}
        # rename the accession column to the centric-specific header
        outprotein[firstfield] = outprotein.pop(prottabledata.HEADER_PROTEIN)
        protein_acc = protein[prottabledata.HEADER_PROTEIN]
        outprotein.update(dataget_fun(proteindata, protein_acc, headerfields))
        # stringify all values for tsv output
        outprotein = {k: str(v) for k, v in outprotein.items()}
        yield outprotein
First creates a map with all master proteins with data then outputs protein data dicts for rows of a tsv . If a pool is given then only output for that pool will be shown in the protein table .
9,388
def get_protein_data_pgrouped(proteindata, p_acc, headerfields):
    """Parse grouped protein data for one protein accession into a tsv
    output dictionary (base fields plus coverage/protein numbers)."""
    report = get_protein_data_base(proteindata, p_acc, headerfields)
    return get_cov_protnumbers(proteindata, p_acc, report)
Parses protein data for a certain protein into tsv output dictionary
9,389
def keys(self, namespace, prefix=None, limit=None, offset=None):
    """Return the keys in *namespace*, optionally filtered by *prefix*
    and paginated with *limit*/*offset*.  Uses parameterized SQL."""
    params = [namespace]
    query = 'SELECT key FROM gauged_keys WHERE namespace = %s'
    if prefix is not None:
        query += ' AND key LIKE %s'
        params.append(prefix + '%')
    if limit is not None:
        query += ' LIMIT %s'
        params.append(limit)
    if offset is not None:
        query += ' OFFSET %s'
        params.append(offset)
    cursor = self.cursor
    cursor.execute(query, params)
    # each row is a 1-tuple; unpack in the loop target
    return [key for key, in cursor]
Get keys from a namespace
9,390
def get_block(self, namespace, offset, key):
    """Return (data, flags) for the block at (namespace, offset, key),
    or (None, None) when no such block exists."""
    cursor = self.cursor
    cursor.execute('SELECT data, flags FROM gauged_data '
                   'WHERE namespace = %s AND "offset" = %s AND key = %s',
                   (namespace, offset, key))
    row = cursor.fetchone()
    return (None, None) if row is None else row
Get the block identified by namespace offset key and value
9,391
def block_offset_bounds(self, namespace):
    """Return (min_offset, max_offset) over the statistics blocks of the
    given namespace."""
    cursor = self.cursor
    cursor.execute('SELECT MIN("offset"), MAX("offset") '
                   'FROM gauged_statistics WHERE namespace = %s',
                   (namespace,))
    return cursor.fetchone()
Get the minimum and maximum block offset for the specified namespace
9,392
def set_writer_position(self, name, timestamp):
    """Record the writer's current position, replacing any previous row
    for the same writer id (delete-then-insert upsert)."""
    execute = self.cursor.execute
    execute('DELETE FROM gauged_writer_history WHERE id = %s',
            (name,))
    execute('INSERT INTO gauged_writer_history (id, timestamp) '
            'VALUES (%s, %s)', (name, timestamp,))
Insert a timestamp to keep track of the current writer position
9,393
def add_cache(self, namespace, key, query_hash, length, cache):
    """Insert cached (timestamp, value) rows for a query, batching the
    inserts in chunks of self.bulk_insert rows per statement."""
    start = 0
    bulk_insert = self.bulk_insert
    cache_len = len(cache)
    row = '(%s,%s,%s,%s,%s,%s)'
    query = 'INSERT INTO gauged_cache ' \
        '(namespace, key, "hash", length, start, value) VALUES '
    execute = self.cursor.execute
    # the hash must be wrapped for the binary column
    query_hash = self.psycopg2.Binary(query_hash)
    while start < cache_len:
        rows = cache[start:start + bulk_insert]
        params = []
        for timestamp, value in rows:
            params.extend((namespace, key, query_hash, length,
                           timestamp, value))
        # one VALUES tuple per row in this chunk
        insert = (row + ',') * (len(rows) - 1) + row
        execute(query + insert, params)
        start += bulk_insert
    self.db.commit()
Add cached values for the specified date range and query
9,394
def get_environment_vars(filename):
    """Return the environment variables needed to run a service under
    libfaketime, using *filename* as the timestamp file.

    Raises RuntimeError on platforms other than linux and darwin.
    """
    if sys.platform == "linux" or sys.platform == "linux2":
        return {
            'LD_PRELOAD': path.join(LIBFAKETIME_DIR, "libfaketime.so.1"),
            'FAKETIME_SKIP_CMDS': 'nodejs',
            'FAKETIME_TIMESTAMP_FILE': filename,
        }
    elif sys.platform == "darwin":
        return {
            'DYLD_INSERT_LIBRARIES': path.join(LIBFAKETIME_DIR,
                                               "libfaketime.1.dylib"),
            'DYLD_FORCE_FLAT_NAMESPACE': '1',
            'FAKETIME_TIMESTAMP_FILE': filename,
        }
    else:
        raise RuntimeError(
            "libfaketime does not support '{}' platform".format(sys.platform))
Return a dict of environment variables required to run a service under faketime .
9,395
def change_time(filename, newtime):
    """Point libfaketime at *newtime* by rewriting its timestamp file.

    Affects every process reading that file.
    """
    stamp = "@" + newtime.strftime("%Y-%m-%d %H:%M:%S")
    with open(filename, "w") as handle:
        handle.write(stamp)
Change the time of a process or group of processes by writing a new time to the time file .
9,396
def filter_unique_peptides(peptides, score, ns):
    """Yield the best-scoring element for each unique peptide sequence.

    peptides: iterable of Percolator XML peptide elements.
    score: 'q', 'pep', 'p' (lower is better) or 'svm' (higher is better).
    ns: XML namespace map used for the xpath lookups.
    """
    scores = {'q': 'q_value', 'pep': 'pep', 'p': 'p_value',
              'svm': 'svm_score'}
    highest = {}
    for el in peptides:
        featscore = float(el.xpath('xmlns:%s' % scores[score],
                                   namespaces=ns)[0].text)
        seq = reader.get_peptide_seq(el, ns)
        if seq not in highest:
            highest[seq] = {
                'pep_el': formatting.stringify_strip_namespace_declaration(
                    el, ns),
                'score': featscore}
        if score == 'svm':
            # svm scores: higher is better
            if featscore > highest[seq]['score']:
                highest[seq] = {
                    'pep_el':
                    formatting.stringify_strip_namespace_declaration(el, ns),
                    'score': featscore}
        else:
            # statistical scores: lower is better
            if featscore < highest[seq]['score']:
                highest[seq] = {
                    'pep_el':
                    formatting.stringify_strip_namespace_declaration(el, ns),
                    'score': featscore}
        # free the element to keep memory bounded while streaming
        formatting.clear_el(el)
    for pep in list(highest.values()):
        yield pep['pep_el']
Filters unique peptides from multiple Percolator output XML files . Takes a dir with a set of XMLs a score to filter on and a namespace . Outputs an ElementTree .
9,397
def import_symbol(name=None, path=None, typename=None, base_path=None):
    """Import and return a module or a typename within a module by name.

    name/path take precedence; typename/base_path act as fallbacks
    (presumably legacy aliases -- confirm against _import's contract).
    """
    _, symbol = _import(name or typename, path or base_path)
    return symbol
Import a module or a typename within a module from its name .
9,398
def add_to_win32_PATH(script_fpath, *add_path_list):
    r"""Write a Windows .reg script (to script_fpath) that appends
    add_path_list entries to the system PATH registry value.

    The generated script must be run by an administrator; a restart may
    be required for the change to take effect.
    """
    import utool as ut
    write_dir = dirname(script_fpath)
    key = '[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment]'
    rtype = 'REG_EXPAND_SZ'
    # merge the new paths into the current PATH, preserving order
    win_pathlist = list(os.environ['PATH'].split(os.path.pathsep))
    new_path_list = ut.unique_ordered(win_pathlist + list(add_path_list))
    print('\n'.join(new_path_list))
    pathtxt = pathsep.join(new_path_list)
    varval_list = [('Path', pathtxt)]
    regfile_str = make_regfile_str(key, varval_list, rtype)
    ut.view_directory(write_dir)
    print(regfile_str)
    ut.writeto(script_fpath, regfile_str, mode='wb')
    print('Please have an admin run the script. You may need to restart')
Writes a registry script that updates the PATH variable in the system registry.
9,399
def dzip(list1, list2):
    r"""Zip list1 and list2 elementwise into a dict.

    A single-element list2 is broadcast across every key of list1.
    Raises ValueError when the (post-broadcast) lengths differ.
    """
    def _as_sized(seq):
        # materialize iterables that do not support len()
        try:
            len(seq)
        except TypeError:
            seq = list(seq)
        return seq

    keys = _as_sized(list1)
    vals = _as_sized(list2)
    if len(keys) == 0 and len(vals) == 1:
        vals = []  # broadcasting onto nothing yields an empty dict
    if len(vals) == 1 and len(keys) > 1:
        vals = vals * len(keys)  # broadcast the single value to all keys
    if len(keys) != len(vals):
        raise ValueError('out of alignment len(list1)=%r, len(list2)=%r'
                         % (len(keys), len(vals)))
    return dict(zip(keys, vals))
r Zips elementwise pairs between list1 and list2 into a dictionary . Values from list2 can be broadcast onto list1 .