idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
9,500 | def get_textdiff ( text1 , text2 , num_context_lines = 0 , ignore_whitespace = False ) : r import difflib text1 = ensure_unicode ( text1 ) text2 = ensure_unicode ( text2 ) text1_lines = text1 . splitlines ( ) text2_lines = text2 . splitlines ( ) if ignore_whitespace : text1_lines = [ t . rstrip ( ) for t in text1_lines ] text2_lines = [ t . rstrip ( ) for t in text2_lines ] ndiff_kw = dict ( linejunk = difflib . IS_LINE_JUNK , charjunk = difflib . IS_CHARACTER_JUNK ) else : ndiff_kw = { } all_diff_lines = list ( difflib . ndiff ( text1_lines , text2_lines , ** ndiff_kw ) ) if num_context_lines is None : diff_lines = all_diff_lines else : from utool import util_list ismarked_list = [ len ( line ) > 0 and line [ 0 ] in '+-?' for line in all_diff_lines ] isvalid_list = ismarked_list [ : ] for i in range ( 1 , num_context_lines + 1 ) : isvalid_list [ : - i ] = util_list . or_lists ( isvalid_list [ : - i ] , ismarked_list [ i : ] ) isvalid_list [ i : ] = util_list . or_lists ( isvalid_list [ i : ] , ismarked_list [ : - i ] ) USE_BREAK_LINE = True if USE_BREAK_LINE : diff_lines = [ ] prev = False visual_break = '\n <... FILTERED CONTEXT ...> \n' for line , valid in zip ( all_diff_lines , isvalid_list ) : if valid : diff_lines . append ( line ) elif prev : if False : diff_lines . append ( visual_break ) prev = valid else : diff_lines = util_list . compress ( all_diff_lines , isvalid_list ) return '\n' . join ( diff_lines ) | r Uses difflib to return a difference string between two similar texts |
9,501 | def conj_phrase ( list_ , cond = 'or' ) : if len ( list_ ) == 0 : return '' elif len ( list_ ) == 1 : return list_ [ 0 ] elif len ( list_ ) == 2 : return ' ' . join ( ( list_ [ 0 ] , cond , list_ [ 1 ] ) ) else : condstr = '' . join ( ( ', ' + cond , ' ' ) ) return ', ' . join ( ( ', ' . join ( list_ [ : - 2 ] ) , condstr . join ( list_ [ - 2 : ] ) ) ) | Joins a list of words using English conjunction rules |
9,502 | def bubbletext ( text , font = 'cybermedium' ) : r import utool as ut pyfiglet = ut . tryimport ( 'pyfiglet' , 'git+https://github.com/pwaller/pyfiglet' ) if pyfiglet is None : return text else : bubble_text = pyfiglet . figlet_format ( text , font = font ) return bubble_text | r Uses pyfiglet to create bubble text . |
9,503 | def is_url ( str_ ) : return any ( [ str_ . startswith ( 'http://' ) , str_ . startswith ( 'https://' ) , str_ . startswith ( 'www.' ) , '.org/' in str_ , '.com/' in str_ , ] ) | heuristic check if str is url formatted |
9,504 | def chr_range ( * args , ** kw ) : r if len ( args ) == 1 : stop , = args start , step = 0 , 1 elif len ( args ) == 2 : start , stop = args step = 1 elif len ( args ) == 3 : start , stop , step = args else : raise ValueError ( 'incorrect args' ) chr_ = six . unichr base = ord ( kw . get ( 'base' , 'a' ) ) if isinstance ( start , int ) : start = base + start if isinstance ( stop , int ) : stop = base + stop if isinstance ( start , six . string_types ) : start = ord ( start ) if isinstance ( stop , six . string_types ) : stop = ord ( stop ) if step is None : step = 1 list_ = list ( map ( six . text_type , map ( chr_ , range ( start , stop , step ) ) ) ) return list_ | r Like range but returns characters |
9,505 | def highlight_regex ( str_ , pat , reflags = 0 , color = 'red' ) : matches = list ( re . finditer ( pat , str_ , flags = reflags ) ) colored = str_ for match in reversed ( matches ) : start = match . start ( ) end = match . end ( ) colored_part = color_text ( colored [ start : end ] , color ) colored = colored [ : start ] + colored_part + colored [ end : ] return colored | FIXME Use pygments instead |
9,506 | def highlight_multi_regex ( str_ , pat_to_color , reflags = 0 ) : colored = str_ to_replace = [ ] for pat , color in pat_to_color . items ( ) : matches = list ( re . finditer ( pat , str_ , flags = reflags ) ) for match in matches : start = match . start ( ) end = match . end ( ) to_replace . append ( ( end , start , color ) ) for tup in reversed ( sorted ( to_replace ) ) : end , start , color = tup colored_part = color_text ( colored [ start : end ] , color ) colored = colored [ : start ] + colored_part + colored [ end : ] return colored | FIXME Use pygments instead . must be mututally exclusive |
9,507 | def find_block_end ( row , line_list , sentinal , direction = 1 ) : import re row_ = row line_ = line_list [ row_ ] flag1 = row_ == 0 or row_ == len ( line_list ) - 1 flag2 = re . match ( sentinal , line_ ) if not ( flag1 or flag2 ) : while True : if ( row_ == 0 or row_ == len ( line_list ) - 1 ) : break line_ = line_list [ row_ ] if re . match ( sentinal , line_ ) : break row_ += direction return row_ | Searches up and down until it finds the endpoints of a block Rectify with find_paragraph_end in pyvim_funcs |
9,508 | def compress_pdf ( pdf_fpath , output_fname = None ) : import utool as ut ut . assertpath ( pdf_fpath ) suffix = '_' + ut . get_datestamp ( False ) + '_compressed' print ( 'pdf_fpath = %r' % ( pdf_fpath , ) ) output_pdf_fpath = ut . augpath ( pdf_fpath , suffix , newfname = output_fname ) print ( 'output_pdf_fpath = %r' % ( output_pdf_fpath , ) ) gs_exe = find_ghostscript_exe ( ) cmd_list = ( gs_exe , '-sDEVICE=pdfwrite' , '-dCompatibilityLevel=1.4' , '-dNOPAUSE' , '-dQUIET' , '-dBATCH' , '-sOutputFile=' + output_pdf_fpath , pdf_fpath ) ut . cmd ( * cmd_list ) return output_pdf_fpath | uses ghostscript to write a pdf |
9,509 | def make_full_document ( text , title = None , preamp_decl = { } , preamb_extra = None ) : r import utool as ut doc_preamb = ut . codeblock ( ) if preamb_extra is not None : if isinstance ( preamb_extra , ( list , tuple ) ) : preamb_extra = '\n' . join ( preamb_extra ) doc_preamb += '\n' + preamb_extra + '\n' if title is not None : preamp_decl [ 'title' ] = title decl_lines = [ r'\{key}{{{val}}}' . format ( key = key , val = val ) for key , val in preamp_decl . items ( ) ] doc_decllines = '\n' . join ( decl_lines ) doc_header = ut . codeblock ( r ) if preamp_decl . get ( 'title' ) is not None : doc_header += r'\maketitle' doc_footer = ut . codeblock ( r ) text_ = '\n' . join ( ( doc_preamb , doc_decllines , doc_header , text , doc_footer ) ) return text_ | r dummy preamble and document to wrap around latex fragment |
9,510 | def render_latex_text ( input_text , nest_in_doc = False , preamb_extra = None , appname = 'utool' , verbose = None ) : import utool as ut if verbose is None : verbose = ut . VERBOSE dpath = ut . ensure_app_resource_dir ( appname , 'latex_tmp' ) fname = 'temp_render_latex' pdf_fpath = ut . compile_latex_text ( input_text , dpath = dpath , fname = fname , preamb_extra = preamb_extra , verbose = verbose ) ut . startfile ( pdf_fpath ) return pdf_fpath | compiles latex and shows the result |
9,511 | def render_latex ( input_text , dpath = None , fname = None , preamb_extra = None , verbose = 1 , ** kwargs ) : import utool as ut import vtool as vt input_text_ = '\pagenumbering{gobble}\n' + input_text img_fname = ut . ensure_ext ( fname , [ '.jpg' ] + list ( ut . IMG_EXTENSIONS ) ) img_fpath = join ( dpath , img_fname ) pdf_fpath = ut . compile_latex_text ( input_text_ , fname = fname , dpath = dpath , preamb_extra = preamb_extra , verbose = verbose , move = False ) ext = splitext ( img_fname ) [ 1 ] fpath_in = ut . convert_pdf_to_image ( pdf_fpath , ext = ext , verbose = verbose ) vt . clipwhite_ondisk ( fpath_in , fpath_out = img_fpath , verbose = verbose > 1 ) return img_fpath | Renders latex text into a jpeg . |
9,512 | def get_latex_figure_str2 ( fpath_list , cmdname , ** kwargs ) : import utool as ut from os . path import relpath if kwargs . pop ( 'relpath' , True ) : start = ut . truepath ( '~/latex/crall-candidacy-2015' ) fpath_list = [ relpath ( fpath , start ) for fpath in fpath_list ] cmdname = ut . latex_sanitize_command_name ( cmdname ) kwargs [ 'caption_str' ] = kwargs . get ( 'caption_str' , cmdname ) figure_str = ut . get_latex_figure_str ( fpath_list , ** kwargs ) latex_block = ut . latex_newcommand ( cmdname , figure_str ) return latex_block | hack for candidacy |
9,513 | def add ( self , data , value = None , timestamp = None , namespace = None , debug = False ) : if value is not None : return self . add ( ( ( data , value ) , ) , timestamp = timestamp , namespace = namespace , debug = debug ) writer = self . writer if writer is None : raise GaugedUseAfterFreeError if timestamp is None : timestamp = long ( time ( ) * 1000 ) config = self . config block_size = config . block_size this_block = timestamp // block_size this_array = ( timestamp % block_size ) // config . resolution if namespace is None : namespace = config . namespace if this_block < self . current_block or ( this_block == self . current_block and this_array < self . current_array ) : if config . append_only_violation == Writer . ERROR : msg = 'Gauged is append-only; timestamps must be increasing' raise GaugedAppendOnlyError ( msg ) elif config . append_only_violation == Writer . REWRITE : this_block = self . current_block this_array = self . current_array else : return if isinstance ( data , unicode ) : data = data . encode ( 'utf8' ) if debug : return self . debug ( timestamp , namespace , data ) if this_block > self . current_block : self . flush_blocks ( ) self . current_block = this_block self . current_array = this_array elif this_array > self . current_array : if not Gauged . writer_flush_arrays ( writer , self . current_array ) : raise MemoryError self . current_array = this_array data_points = 0 namespace_statistics = self . statistics [ namespace ] whitelist = config . key_whitelist skip_long_keys = config . key_overflow == Writer . IGNORE skip_gauge_nan = config . gauge_nan == Writer . IGNORE if isinstance ( data , str ) and skip_gauge_nan and skip_long_keys and whitelist is None : data_points = c_uint32 ( 0 ) if not Gauged . writer_emit_pairs ( writer , namespace , data , byref ( data_points ) ) : raise MemoryError data_points = data_points . value else : if isinstance ( data , dict ) : data = data . 
iteritems ( ) elif isinstance ( data , str ) : data = self . parse_query ( data ) emit = Gauged . writer_emit for key , value in data : key = to_bytes ( key ) if whitelist is not None and key not in whitelist : continue try : value = float ( value ) except ValueError : value = float ( 'nan' ) if value != value : if skip_gauge_nan : continue raise GaugedNaNError success = emit ( writer , namespace , key , c_float ( value ) ) if success != 1 : if not success : raise MemoryError elif success == Writer . KEY_OVERFLOW and not skip_long_keys : msg = 'Key is larger than the driver allows ' msg += '(%s)' % key raise GaugedKeyOverflowError ( msg ) data_points += 1 namespace_statistics . data_points += data_points if self . flush_now : self . flush ( ) | Queue a gauge or gauges to be written |
9,514 | def flush ( self ) : writer = self . writer if writer is None : raise GaugedUseAfterFreeError self . flush_writer_position ( ) keys = self . translate_keys ( ) blocks = [ ] current_block = self . current_block statistics = self . statistics driver = self . driver flags = 0 for namespace , key , block in self . pending_blocks ( ) : length = block . byte_length ( ) if not length : continue key_id = keys [ ( namespace , key ) ] statistics [ namespace ] . byte_count += length blocks . append ( ( namespace , current_block , key_id , block . buffer ( ) , flags ) ) if self . config . overwrite_blocks : driver . replace_blocks ( blocks ) else : driver . insert_or_append_blocks ( blocks ) if not Gauged . writer_flush_maps ( writer , True ) : raise MemoryError update_namespace = driver . add_namespace_statistics for namespace , stats in statistics . iteritems ( ) : update_namespace ( namespace , self . current_block , stats . data_points , stats . byte_count ) statistics . clear ( ) driver . commit ( ) self . flush_now = False | Flush all pending gauges |
9,515 | def resume_from ( self ) : position = self . driver . get_writer_position ( self . config . writer_name ) return position + self . config . resolution if position else 0 | Get a timestamp representing the position just after the last written gauge |
9,516 | def clear_from ( self , timestamp ) : block_size = self . config . block_size offset , remainder = timestamp // block_size , timestamp % block_size if remainder : raise ValueError ( 'Timestamp must be on a block boundary' ) self . driver . clear_from ( offset , timestamp ) | Clear all data from timestamp onwards . Note that the timestamp is rounded down to the nearest block boundary |
9,517 | def clear_key_before ( self , key , namespace = None , timestamp = None ) : block_size = self . config . block_size if namespace is None : namespace = self . config . namespace if timestamp is not None : offset , remainder = divmod ( timestamp , block_size ) if remainder : raise ValueError ( 'timestamp must be on a block boundary' ) if offset == 0 : raise ValueError ( 'cannot delete before offset zero' ) offset -= 1 self . driver . clear_key_before ( key , namespace , offset , timestamp ) else : self . driver . clear_key_before ( key , namespace ) | Clear all data before timestamp for a given key . Note that the timestamp is rounded down to the nearest block boundary |
9,518 | def _to_ctfile_counts_line ( self , key ) : counter = OrderedCounter ( self . counts_line_format ) self [ key ] [ 'number_of_atoms' ] = str ( len ( self . atoms ) ) self [ key ] [ 'number_of_bonds' ] = str ( len ( self . bonds ) ) counts_line = '' . join ( [ str ( value ) . rjust ( spacing ) for value , spacing in zip ( self [ key ] . values ( ) , counter . values ( ) ) ] ) return '{}\n' . format ( counts_line ) | Create counts line in CTfile format . |
9,519 | def _to_ctfile_atom_block ( self , key ) : counter = OrderedCounter ( Atom . atom_block_format ) ctab_atom_block = '\n' . join ( [ '' . join ( [ str ( value ) . rjust ( spacing ) for value , spacing in zip ( atom . _ctab_data . values ( ) , counter . values ( ) ) ] ) for atom in self [ key ] ] ) return '{}\n' . format ( ctab_atom_block ) | Create atom block in CTfile format . |
9,520 | def _to_ctfile_bond_block ( self , key ) : counter = OrderedCounter ( Bond . bond_block_format ) ctab_bond_block = '\n' . join ( [ '' . join ( [ str ( value ) . rjust ( spacing ) for value , spacing in zip ( bond . _ctab_data . values ( ) , counter . values ( ) ) ] ) for bond in self [ key ] ] ) return '{}\n' . format ( ctab_bond_block ) | Create bond block in CTfile format . |
9,521 | def _to_ctfile_property_block ( self ) : ctab_properties_data = defaultdict ( list ) for atom in self . atoms : for ctab_property_key , ctab_property_value in atom . _ctab_property_data . items ( ) : ctab_properties_data [ ctab_property_key ] . append ( OrderedDict ( zip ( self . ctab_conf [ self . version ] [ ctab_property_key ] [ 'values' ] , [ atom . atom_number , ctab_property_value ] ) ) ) ctab_property_lines = [ ] for ctab_property_key , ctab_property_value in ctab_properties_data . items ( ) : for entry in ctab_property_value : ctab_property_line = '{} {}{}' . format ( self . ctab_conf [ self . version ] [ ctab_property_key ] [ 'fmt' ] , 1 , '' . join ( [ str ( value ) . rjust ( 4 ) for value in entry . values ( ) ] ) ) ctab_property_lines . append ( ctab_property_line ) if ctab_property_lines : return '{}\n' . format ( '\n' . join ( ctab_property_lines ) ) return '' | Create ctab properties block in CTfile format from atom - specific properties . |
9,522 | def delete_atom ( self , * atom_numbers ) : for atom_number in atom_numbers : deletion_atom = self . atom_by_number ( atom_number = atom_number ) for atom in self . atoms : if int ( atom . atom_number ) > int ( atom_number ) : atom . atom_number = str ( int ( atom . atom_number ) - 1 ) for index , bond in enumerate ( self . bonds ) : bond . update_atom_numbers ( ) if atom_number in { bond . first_atom_number , bond . second_atom_number } : self . bonds . remove ( bond ) for atom in self . atoms : if deletion_atom in atom . neighbors : atom . neighbors . remove ( deletion_atom ) self . atoms . remove ( deletion_atom ) | Delete atoms by atom number . |
9,523 | def from_molfile ( cls , molfile , data = None ) : if not data : data = OrderedDict ( ) if not isinstance ( molfile , Molfile ) : raise ValueError ( 'Not a Molfile type: "{}"' . format ( type ( molfile ) ) ) if not isinstance ( data , dict ) : raise ValueError ( 'Not a dict type: "{}"' . format ( type ( data ) ) ) sdfile = cls ( ) sdfile [ '1' ] = OrderedDict ( ) sdfile [ '1' ] [ 'molfile' ] = molfile sdfile [ '1' ] [ 'data' ] = data return sdfile | Construct new SDfile object from Molfile object . |
9,524 | def add_data ( self , id , key , value ) : self [ str ( id ) ] [ 'data' ] . setdefault ( key , [ ] ) self [ str ( id ) ] [ 'data' ] [ key ] . append ( value ) | Add new data item . |
9,525 | def add_molfile ( self , molfile , data ) : if not isinstance ( molfile , Molfile ) : raise ValueError ( 'Not a Molfile type: "{}"' . format ( type ( molfile ) ) ) if not isinstance ( data , dict ) : raise ValueError ( 'Not a dict type: "{}"' . format ( type ( data ) ) ) entry_ids = sorted ( self . keys ( ) , key = lambda x : int ( x ) ) if entry_ids : last_entry_id = str ( entry_ids [ - 1 ] ) else : last_entry_id = '0' new_entry_id = str ( int ( last_entry_id ) + 1 ) self [ new_entry_id ] = OrderedDict ( ) self [ new_entry_id ] [ 'molfile' ] = molfile self [ new_entry_id ] [ 'data' ] = data | Add Molfile and data to SDfile object . |
9,526 | def add_sdfile ( self , sdfile ) : if not isinstance ( sdfile , SDfile ) : raise ValueError ( 'Not a SDfile type: "{}"' . format ( type ( sdfile ) ) ) for entry_id in sdfile : self . add_molfile ( molfile = sdfile [ entry_id ] [ 'molfile' ] , data = sdfile [ entry_id ] [ 'data' ] ) | Add new SDfile to current SDfile . |
9,527 | def neighbor_atoms ( self , atom_symbol = None ) : if not atom_symbol : return self . neighbors else : return [ atom for atom in self . neighbors if atom [ 'atom_symbol' ] == atom_symbol ] | Access neighbor atoms . |
9,528 | def update_atom_numbers ( self ) : self . _ctab_data [ 'first_atom_number' ] = self . first_atom . atom_number self . _ctab_data [ 'second_atom_number' ] = self . second_atom . atom_number | Update links first_atom_number - > second_atom_number |
9,529 | def default ( self , o ) : if isinstance ( o , Atom ) or isinstance ( o , Bond ) : return o . _ctab_data else : return o . __dict__ | Default encoder . |
9,530 | def get ( self , server ) : server_config = self . config . get ( server ) try : while server_config is None : new_config = self . _read_next_config ( ) server_config = new_config . get ( server ) new_config . update ( self . config ) self . config = new_config except StopIteration : return _default_server_configuration ( server ) if CONFIG_URL_KEY_NAME not in server_config : message = "'%s' must be specified in configuration for '%s'" % ( CONFIG_URL_KEY_NAME , server ) raise ServerConfigMissingUrlError ( message ) return ServerConfig ( server_config ) | Returns ServerConfig instance with configuration given server . |
9,531 | def free ( self ) : if self . _ptr is None : return Gauged . array_free ( self . ptr ) FloatArray . ALLOCATIONS -= 1 self . _ptr = None | Free the underlying C array |
9,532 | def generate_psms_quanted ( quantdb , tsvfn , isob_header , oldheader , isobaric = False , precursor = False ) : allquants , sqlfields = quantdb . select_all_psm_quants ( isobaric , precursor ) quant = next ( allquants ) for rownr , psm in enumerate ( readers . generate_tsv_psms ( tsvfn , oldheader ) ) : outpsm = { x : y for x , y in psm . items ( ) } if precursor : pquant = quant [ sqlfields [ 'precursor' ] ] if pquant is None : pquant = 'NA' outpsm . update ( { mzidtsvdata . HEADER_PRECURSOR_QUANT : str ( pquant ) } ) if isobaric : isoquants = { } while quant [ 0 ] == rownr : isoquants . update ( { quant [ sqlfields [ 'isochan' ] ] : str ( quant [ sqlfields [ 'isoquant' ] ] ) } ) try : quant = next ( allquants ) except StopIteration : break outpsm . update ( get_quant_NAs ( isoquants , isob_header ) ) else : try : quant = next ( allquants ) except StopIteration : yield outpsm break yield outpsm | Takes dbfn and connects gets quants for each line in tsvfn sorts them in line by using keys in quantheader list . |
9,533 | def t_escaped_BACKSPACE_CHAR ( self , t ) : r'\x62' t . lexer . pop_state ( ) t . value = unichr ( 0x0008 ) return t | r \ x62 |
9,534 | def t_escaped_FORM_FEED_CHAR ( self , t ) : r'\x66' t . lexer . pop_state ( ) t . value = unichr ( 0x000c ) return t | r \ x66 |
9,535 | def t_escaped_CARRIAGE_RETURN_CHAR ( self , t ) : r'\x72' t . lexer . pop_state ( ) t . value = unichr ( 0x000d ) return t | r \ x72 |
9,536 | def t_escaped_LINE_FEED_CHAR ( self , t ) : r'\x6E' t . lexer . pop_state ( ) t . value = unichr ( 0x000a ) return t | r \ x6E |
9,537 | def t_escaped_TAB_CHAR ( self , t ) : r'\x74' t . lexer . pop_state ( ) t . value = unichr ( 0x0009 ) return t | r \ x74 |
9,538 | def get_mzid_specfile_ids ( mzidfn , namespace ) : sid_fn = { } for specdata in mzid_specdata_generator ( mzidfn , namespace ) : sid_fn [ specdata . attrib [ 'id' ] ] = specdata . attrib [ 'name' ] return sid_fn | Returns mzid spectra data filenames and their IDs used in the mzIdentML file as a dict . Keys == IDs values == fns |
9,539 | def get_specidentitem_percolator_data ( item , xmlns ) : percomap = { '{0}userParam' . format ( xmlns ) : PERCO_HEADERMAP , } percodata = { } for child in item : try : percoscore = percomap [ child . tag ] [ child . attrib [ 'name' ] ] except KeyError : continue else : percodata [ percoscore ] = child . attrib [ 'value' ] outkeys = [ y for x in list ( percomap . values ( ) ) for y in list ( x . values ( ) ) ] for key in outkeys : try : percodata [ key ] except KeyError : percodata [ key ] = 'NA' return percodata | Loop through SpecIdentificationItem children . Find percolator data by matching to a dict lookup . Return a dict containing percolator data |
9,540 | def locate_path ( dname , recurse_down = True ) : tried_fpaths = [ ] root_dir = os . getcwd ( ) while root_dir is not None : dpath = join ( root_dir , dname ) if exists ( dpath ) : return dpath else : tried_fpaths . append ( dpath ) _new_root = dirname ( root_dir ) if _new_root == root_dir : root_dir = None break else : root_dir = _new_root if not recurse_down : break msg = 'Cannot locate dname=%r' % ( dname , ) msg = ( '\n[sysreq!] Checked: ' . join ( tried_fpaths ) ) print ( msg ) raise ImportError ( msg ) | Search for a path |
9,541 | def total_purge_developed_repo ( repodir ) : r assert repodir is not None import utool as ut import os repo = ut . util_git . Repo ( dpath = repodir ) user = os . environ [ 'USER' ] fmtdict = dict ( user = user , modname = repo . modname , reponame = repo . reponame , dpath = repo . dpath , global_site_pkgs = ut . get_global_dist_packages_dir ( ) , local_site_pkgs = ut . get_local_dist_packages_dir ( ) , venv_site_pkgs = ut . get_site_packages_dir ( ) , ) commands = [ _ . format ( ** fmtdict ) for _ in [ 'pip uninstall {modname}' , 'sudo -H pip uninstall {modname}' , 'sudo pip uninstall {modname}' , 'easy_install -m {modname}' , 'cd {dpath} && python setup.py develop --uninstall' , 'sudo chown -R {user}:{user} {dpath}' , ] ] print ( 'Normal uninstall commands' ) print ( '\n' . join ( commands ) ) possible_link_paths = [ _ . format ( ** fmtdict ) for _ in [ '{dpath}/{modname}.egg-info' , '{dpath}/build' , '{venv_site_pkgs}/{reponame}.egg-info' , '{local_site_pkgs}/{reponame}.egg-info' , '{venv_site_pkgs}/{reponame}.egg-info' , ] ] from os . path import exists , basename existing_link_paths = [ path for path in possible_link_paths ] print ( '# Delete paths and eggs' ) for path in existing_link_paths : if exists ( path ) : if ut . get_file_info ( path ) [ 'owner' ] != user : print ( 'sudo /bin/rm -rf {path}' . format ( path = path ) ) else : print ( '/bin/rm -rf {path}' . format ( path = path ) ) print ( '# Make sure nothing is in the easy install paths' ) easyinstall_paths = [ _ . format ( ** fmtdict ) for _ in [ '{venv_site_pkgs}/easy-install.pth' , '{local_site_pkgs}/easy-install.pth' , '{venv_site_pkgs}/easy-install.pth' , ] ] for path in easyinstall_paths : if exists ( path ) : easy_install_list = ut . readfrom ( path , verbose = False ) . strip ( ) . split ( '\n' ) easy_install_list_ = [ basename ( p ) for p in easy_install_list ] index1 = ut . listfind ( easy_install_list_ , repo . reponame ) index2 = ut . listfind ( easy_install_list_ , repo . 
modname ) if index1 is not None or index2 is not None : print ( 'Found at index1=%r, index=%r' % ( index1 , index2 ) ) if ut . get_file_info ( path ) [ 'owner' ] != user : print ( 'sudo gvim {path}' . format ( path = path ) ) else : print ( 'gvim {path}' . format ( path = path ) ) checkcmds = [ _ . format ( ** fmtdict ) for _ in [ 'python -c "import {modname}; print({modname}.__file__)"' ] ] import sys assert repo . modname not in sys . modules print ( "# CHECK STATUS" ) for cmd in checkcmds : print ( cmd ) | r Outputs commands to help purge a repo |
9,542 | def add_dict ( self , dyn_dict ) : 'Adds a dictionary to the prefs' if not isinstance ( dyn_dict , dict ) : raise Exception ( 'DynStruct.add_dict expects a dictionary.' + 'Recieved: ' + six . text_type ( type ( dyn_dict ) ) ) for ( key , val ) in six . iteritems ( dyn_dict ) : self [ key ] = val | Adds a dictionary to the prefs |
9,543 | def to_dict ( self ) : dyn_dict = { } for ( key , val ) in six . iteritems ( self . __dict__ ) : if key not in self . _printable_exclude : dyn_dict [ key ] = val return dyn_dict | Converts dynstruct to a dictionary . |
9,544 | def execstr ( self , local_name ) : execstr = '' for ( key , val ) in six . iteritems ( self . __dict__ ) : if key not in self . _printable_exclude : execstr += key + ' = ' + local_name + '.' + key + '\n' return execstr | returns a string which when evaluated will add the stored variables to the current namespace |
9,545 | def get_proteins_for_peptide ( self , psm_id ) : protsql = self . get_sql_select ( [ 'protein_acc' ] , 'protein_psm' ) protsql = '{0} WHERE psm_id=?' . format ( protsql ) cursor = self . get_cursor ( ) proteins = cursor . execute ( protsql , psm_id ) . fetchall ( ) return [ x [ 0 ] for x in proteins ] | Returns list of proteins for a passed psm_id |
9,546 | def raise_if_error ( frame ) : if "status" not in frame or frame [ "status" ] == b"\x00" : return codes_and_exceptions = { b"\x01" : exceptions . ZigBeeUnknownError , b"\x02" : exceptions . ZigBeeInvalidCommand , b"\x03" : exceptions . ZigBeeInvalidParameter , b"\x04" : exceptions . ZigBeeTxFailure } if frame [ "status" ] in codes_and_exceptions : raise codes_and_exceptions [ frame [ "status" ] ] ( ) raise exceptions . ZigBeeUnknownStatus ( ) | Checks a frame and raises the relevant exception if required . |
9,547 | def hex_to_int ( value ) : if version_info . major >= 3 : return int . from_bytes ( value , "big" ) return int ( value . encode ( "hex" ) , 16 ) | Convert hex string like \ x0A \ xE3 to 2787 . |
9,548 | def adc_to_percentage ( value , max_volts , clamp = True ) : percentage = ( 100.0 / const . ADC_MAX_VAL ) * value return max ( min ( 100 , percentage ) , 0 ) if clamp else percentage | Convert the ADC raw value to a percentage . |
9,549 | def convert_adc ( value , output_type , max_volts ) : return { const . ADC_RAW : lambda x : x , const . ADC_PERCENTAGE : adc_to_percentage , const . ADC_VOLTS : adc_to_volts , const . ADC_MILLIVOLTS : adc_to_millivolts } [ output_type ] ( value , max_volts ) | Converts the output from the ADC into the desired type . |
9,550 | def _frame_received ( self , frame ) : try : self . _rx_frames [ frame [ "frame_id" ] ] = frame except KeyError : pass _LOGGER . debug ( "Frame received: %s" , frame ) for handler in self . _rx_handlers : handler ( frame ) | Put the frame into the _rx_frames dict with a key of the frame_id . |
9,551 | def _send ( self , ** kwargs ) : if kwargs . get ( "dest_addr_long" ) is not None : self . zb . remote_at ( ** kwargs ) else : self . zb . at ( ** kwargs ) | Send a frame to either the local ZigBee or a remote device . |
9,552 | def _send_and_wait ( self , ** kwargs ) : frame_id = self . next_frame_id kwargs . update ( dict ( frame_id = frame_id ) ) self . _send ( ** kwargs ) timeout = datetime . now ( ) + const . RX_TIMEOUT while datetime . now ( ) < timeout : try : frame = self . _rx_frames . pop ( frame_id ) raise_if_error ( frame ) return frame except KeyError : sleep ( 0.1 ) continue _LOGGER . exception ( "Did not receive response within configured timeout period." ) raise exceptions . ZigBeeResponseTimeout ( ) | Send a frame to either the local ZigBee or a remote device and wait for a pre - defined amount of time for its response . |
9,553 | def _get_parameter ( self , parameter , dest_addr_long = None ) : frame = self . _send_and_wait ( command = parameter , dest_addr_long = dest_addr_long ) return frame [ "parameter" ] | Fetches and returns the value of the specified parameter . |
9,554 | def get_sample ( self , dest_addr_long = None ) : frame = self . _send_and_wait ( command = b"IS" , dest_addr_long = dest_addr_long ) if "parameter" in frame : return frame [ "parameter" ] [ 0 ] return { } | Initiate a sample and return its data . |
9,555 | def read_digital_pin ( self , pin_number , dest_addr_long = None ) : sample = self . get_sample ( dest_addr_long = dest_addr_long ) try : return sample [ const . DIGITAL_PINS [ pin_number ] ] except KeyError : raise exceptions . ZigBeePinNotConfigured ( "Pin %s (%s) is not configured as a digital input or output." % ( pin_number , const . IO_PIN_COMMANDS [ pin_number ] ) ) | Fetches a sample and returns the boolean value of the requested digital pin . |
9,556 | def set_gpio_pin ( self , pin_number , setting , dest_addr_long = None ) : assert setting in const . GPIO_SETTINGS . values ( ) self . _send_and_wait ( command = const . IO_PIN_COMMANDS [ pin_number ] , parameter = setting . value , dest_addr_long = dest_addr_long ) | Set a gpio pin setting . |
9,557 | def get_gpio_pin ( self , pin_number , dest_addr_long = None ) : frame = self . _send_and_wait ( command = const . IO_PIN_COMMANDS [ pin_number ] , dest_addr_long = dest_addr_long ) value = frame [ "parameter" ] return const . GPIO_SETTINGS [ value ] | Get a gpio pin setting . |
9,558 | def get_supply_voltage ( self , dest_addr_long = None ) : value = self . _get_parameter ( b"%V" , dest_addr_long = dest_addr_long ) return ( hex_to_int ( value ) * ( 1200 / 1024.0 ) ) / 1000 | Fetches the value of %V and returns it as volts . |
9,559 | def add ( self , key ) : if key not in self . _map : self . _map [ key ] = link = _Link ( ) root = self . _root last = root . prev link . prev , link . next , link . key = last , root , key last . next = root . prev = weakref . proxy ( link ) | Store new key in a new link at the end of the linked list |
9,560 | def index ( self , item ) : for count , other in enumerate ( self ) : if item == other : return count raise ValueError ( '%r is not in OrderedSet' % ( item , ) ) | Find the index of item in the OrderedSet |
9,561 | def value ( self , key , timestamp = None , namespace = None ) : return self . make_context ( key = key , end = timestamp , namespace = namespace ) . value ( ) | Get the value of a gauge at the specified time |
9,562 | def aggregate ( self , key , aggregate , start = None , end = None , namespace = None , percentile = None ) : return self . make_context ( key = key , aggregate = aggregate , start = start , end = end , namespace = namespace , percentile = percentile ) . aggregate ( ) | Get an aggregate of all gauge data stored in the specified date range |
9,563 | def value_series ( self , key , start = None , end = None , interval = None , namespace = None , cache = None ) : return self . make_context ( key = key , start = start , end = end , interval = interval , namespace = namespace , cache = cache ) . value_series ( ) | Get a time series of gauge values |
def aggregate_series(self, key, aggregate, start=None, end=None,
                     interval=None, namespace=None, cache=None,
                     percentile=None):
    """Get a time series of gauge aggregates."""
    context = self.make_context(key=key, aggregate=aggregate, start=start,
                                end=end, interval=interval,
                                namespace=namespace, cache=cache,
                                percentile=percentile)
    return context.aggregate_series()
def keys(self, prefix=None, limit=None, offset=None, namespace=None):
    """Get gauge keys."""
    context = self.make_context(prefix=prefix, limit=limit, offset=offset,
                                namespace=namespace)
    return context.keys()
def statistics(self, start=None, end=None, namespace=None):
    """Get write statistics for the specified namespace and date range."""
    context = self.make_context(start=start, end=end, namespace=namespace)
    return context.statistics()
def sync(self):
    """Create the necessary schema and seed its metadata.

    Existing metadata is preserved (``replace=False``).
    """
    self.driver.create_schema()
    self.driver.set_metadata({
        'current_version': Gauged.VERSION,
        'initial_version': Gauged.VERSION,
        'block_size': self.config.block_size,
        'resolution': self.config.resolution,
        # int() instead of the Py2-only long() builtin: Python 3 has no
        # long, and Python 2 auto-promotes ints past sys.maxint anyway,
        # so the stored millisecond timestamp is unchanged.
        'created_at': int(time() * 1000),
    }, replace=False)
def make_context(self, **kwargs):
    """Create a new context for reading data.

    Validates the schema first so every read path checks it exactly once.
    """
    self.check_schema()
    return Context(self.driver, self.config, **kwargs)
def check_schema(self):
    """Check the schema exists and matches configuration.

    Raises GaugedSchemaError / GaugedVersionMismatchError on hard
    mismatches; a block-size mismatch only emits a warning.
    """
    if self.valid_schema:
        return
    config = self.config
    metadata = self.metadata()
    if 'current_version' not in metadata:
        raise GaugedSchemaError('Gauged schema not found, '
                                'try a gauged.sync()')
    if metadata['current_version'] != Gauged.VERSION:
        msg = ('The schema is version %s while this Gauged is version %s. '
               'Try upgrading Gauged and/or running gauged_migrate.py')
        msg = msg % (metadata['current_version'], Gauged.VERSION)
        raise GaugedVersionMismatchError(msg)
    expected_block_size = '%s/%s' % (config.block_size, config.resolution)
    block_size = '%s/%s' % (metadata['block_size'], metadata['resolution'])
    if block_size != expected_block_size:
        warn('Expected %s and got %s' % (expected_block_size, block_size),
             GaugedBlockSizeMismatch)
    self.valid_schema = True
def nx_dag_node_rank(graph, nodes=None):
    """Returns the rank (topological level) of each node, as in Graphviz dot.

    Rank is the length of the longest path from the unique source node.
    """
    import utool as ut
    source = list(ut.nx_source_nodes(graph))[0]
    longest_paths = {target: dag_longest_path(graph, source, target)
                     for target in graph.nodes()}
    node_to_rank = ut.map_dict_vals(len, longest_paths)
    if nodes is None:
        return node_to_rank
    return ut.dict_take(node_to_rank, nodes)
def nx_all_nodes_between(graph, source, target, data=False):
    """Find all nodes lying on any simple path between source and target.

    If source/target is None the unique graph source/sink is used.
    Returns a sorted list of nodes; empty if no path connects them.
    """
    import utool as ut
    if source is None:
        sources = list(ut.nx_source_nodes(graph))
        assert len(sources) == 1, ('specify source if there is not only one')
        source = sources[0]
    if target is None:
        sinks = list(ut.nx_sink_nodes(graph))
        assert len(sinks) == 1, ('specify sink if there is not only one')
        target = sinks[0]
    all_simple_paths = list(nx.all_simple_paths(graph, source, target))
    if not all_simple_paths:
        # Previously this crashed with TypeError (set.union() needs at
        # least one argument); no connecting path means no nodes between.
        return []
    nodes = sorted(set.union(*map(set, all_simple_paths)))
    return nodes
def nx_all_simple_edge_paths(G, source, target, cutoff=None, keys=False,
                             data=False):
    """Yields each simple path from source to target as a list of edges.

    Depth-first search with an explicit stack of edge iterators; supports
    multigraphs (edge keys) when ``keys`` is True.
    """
    if cutoff is None:
        cutoff = len(G) - 1
    if cutoff < 1:
        return
    import utool as ut
    import six
    visited_nodes = [source]
    visited_edges = []
    if G.is_multigraph():
        get_neighbs = ut.partial(G.edges, keys=keys, data=data)
    else:
        get_neighbs = ut.partial(G.edges, data=data)
    edge_stack = [iter(get_neighbs(source))]
    while edge_stack:
        frontier = edge_stack[-1]
        edge = six.next(frontier, None)
        if edge is None:
            # Exhausted this node's edges: backtrack.
            edge_stack.pop()
            visited_nodes.pop()
            if visited_edges:
                visited_edges.pop()
        elif len(visited_nodes) < cutoff:
            child = edge[1]
            if child == target:
                yield visited_edges + [edge]
            elif child not in visited_nodes:
                visited_nodes.append(child)
                visited_edges.append(edge)
                edge_stack.append(iter(get_neighbs(child)))
        else:
            # At the cutoff depth only direct hops to the target count.
            for last_edge in [edge] + list(frontier):
                if last_edge[1] == target:
                    yield visited_edges + [last_edge]
            edge_stack.pop()
            visited_nodes.pop()
            if visited_edges:
                visited_edges.pop()
def nx_delete_node_attr(graph, name, nodes=None):
    """Removes node attributes.

    ``name`` may be a single attribute name or a list of names.
    Returns the number of attribute entries removed.
    """
    if nodes is None:
        nodes = list(graph.nodes())
    # Normalize to a list of names; the two near-identical branches of the
    # original collapse into one loop.  (A non-list ``name`` -- including a
    # tuple -- is treated as a single attribute name, as before.)
    names = name if isinstance(name, list) else [name]
    removed = 0
    node_dict = nx_node_dict(graph)
    for node in nodes:
        for name_ in names:
            try:
                del node_dict[node][name_]
                removed += 1
            except KeyError:
                pass
    return removed
def nx_delete_edge_attr(graph, name, edges=None):
    """Removes an attribute from specific edges in the graph.

    ``name`` may be a single name or a list/tuple of names.  Returns the
    number of attribute entries removed.
    """
    removed = 0
    keys = [name] if not isinstance(name, (list, tuple)) else name
    if edges is None:
        if graph.is_multigraph():
            edges = graph.edges(keys=True)
        else:
            edges = graph.edges()
    if graph.is_multigraph():
        # Multigraph edges carry an extra key index.
        for u, v, k in edges:
            for key_ in keys:
                try:
                    del graph[u][v][k][key_]
                    removed += 1
                except KeyError:
                    pass
    else:
        for u, v in edges:
            for key_ in keys:
                try:
                    del graph[u][v][key_]
                    removed += 1
                except KeyError:
                    pass
    return removed
def nx_gen_node_values(G, key, nodes, default=util_const.NoParam):
    """Generates attribute values of specific nodes.

    Without a default, a missing key raises KeyError lazily at iteration.
    """
    node_dict = nx_node_dict(G)
    if default is util_const.NoParam:
        return (node_dict[n][key] for n in nodes)
    return (node_dict[n].get(key, default) for n in nodes)
def nx_gen_node_attrs(G, key, nodes=None, default=util_const.NoParam,
                      on_missing='error', on_keyerr='default'):
    """Improved generator version of nx.get_node_attributes.

    ``on_missing`` controls nodes absent from the graph and ``on_keyerr``
    controls nodes missing the attribute; each is 'error', 'filter' or
    'default'.  Yields (node, value) pairs lazily.
    """
    if on_missing is None:
        on_missing = 'error'
    # Falling back to a default is impossible when none was given.
    if default is util_const.NoParam and on_keyerr == 'default':
        on_keyerr = 'error'
    if nodes is None:
        nodes = G.nodes()
    node_dict = nx_node_dict(G)
    if on_missing == 'error':
        node_data = ((n, node_dict[n]) for n in nodes)
    elif on_missing == 'filter':
        node_data = ((n, node_dict[n]) for n in nodes if n in G)
    elif on_missing == 'default':
        node_data = ((n, node_dict.get(n, {})) for n in nodes)
    else:
        raise KeyError('on_missing={} must be error, filter or default'.format(
            on_missing))
    if on_keyerr == 'error':
        node_attrs = ((n, d[key]) for n, d in node_data)
    elif on_keyerr == 'filter':
        node_attrs = ((n, d[key]) for n, d in node_data if key in d)
    elif on_keyerr == 'default':
        node_attrs = ((n, d.get(key, default)) for n, d in node_data)
    else:
        raise KeyError('on_keyerr={} must be error filter or default'.format(
            on_keyerr))
    return node_attrs
def nx_gen_edge_values(G, key, edges=None, default=util_const.NoParam,
                       on_missing='error', on_keyerr='default'):
    """Generates attribute values of specific edges.

    ``on_missing`` handles edges absent from the graph; ``on_keyerr``
    handles edges missing the attribute.  Values are yielded lazily.
    """
    if edges is None:
        edges = G.edges()
    if on_missing is None:
        on_missing = 'error'
    if on_keyerr is None:
        on_keyerr = 'default'
    # Falling back to a default is impossible when none was given.
    if default is util_const.NoParam and on_keyerr == 'default':
        on_keyerr = 'error'
    if on_missing == 'error':
        data_iter = (G.adj[u][v] for u, v in edges)
    elif on_missing == 'default':
        data_iter = (G.adj[u][v] if G.has_edge(u, v) else {}
                     for u, v in edges)
    else:
        raise KeyError('on_missing={} must be error, filter or default'.format(
            on_missing))
    if on_keyerr == 'error':
        value_iter = (d[key] for d in data_iter)
    elif on_keyerr == 'default':
        value_iter = (d.get(key, default) for d in data_iter)
    else:
        raise KeyError('on_keyerr={} must be error or default'.format(
            on_keyerr))
    return value_iter
def nx_gen_edge_attrs(G, key, edges=None, default=util_const.NoParam,
                      on_missing='error', on_keyerr='default'):
    """Improved generator version of nx.get_edge_attributes.

    Yields ((u, v), value) pairs lazily; ``on_missing``/``on_keyerr``
    select error, filter or default semantics.
    """
    if on_missing is None:
        on_missing = 'error'
    # Falling back to a default is impossible when none was given.
    if default is util_const.NoParam and on_keyerr == 'default':
        on_keyerr = 'error'
    if edges is None:
        if G.is_multigraph():
            raise NotImplementedError('')
        edges = G.edges()
    if on_missing == 'error':
        edge_data = (((u, v), G.adj[u][v]) for u, v in edges)
    elif on_missing == 'filter':
        edge_data = (((u, v), G.adj[u][v]) for u, v in edges
                     if G.has_edge(u, v))
    elif on_missing == 'default':
        edge_data = (((u, v), G.adj[u][v]) if G.has_edge(u, v)
                     else ((u, v), {}) for u, v in edges)
    else:
        raise KeyError('on_missing={}'.format(on_missing))
    if on_keyerr == 'error':
        edge_attrs = ((e, d[key]) for e, d in edge_data)
    elif on_keyerr == 'filter':
        edge_attrs = ((e, d[key]) for e, d in edge_data if key in d)
    elif on_keyerr == 'default':
        edge_attrs = ((e, d.get(key, default)) for e, d in edge_data)
    else:
        raise KeyError('on_keyerr={}'.format(on_keyerr))
    return edge_attrs
def nx_minimum_weight_component(graph, weight='weight'):
    """A minimum weight component is an MST plus all negative edges."""
    mwc = nx.minimum_spanning_tree(graph, weight=weight)
    negative_edges = (edge for edge, w in nx_gen_edge_attrs(graph, weight)
                      if w < 0)
    mwc.add_edges_from(negative_edges)
    return mwc
def nx_ensure_agraph_color(graph):
    """Converts node/edge color attributes to agraph-style hex strings.

    An ``alpha`` attribute, when present, is folded into the hex string's
    fourth channel.
    """
    from plottool import color_funcs
    import plottool as pt

    def _fix_agraph_color(data):
        try:
            orig_color = data.get('color', None)
            alpha = data.get('alpha', None)
            color = orig_color
            if color is None and alpha is not None:
                # Alpha without a color: start from black.
                color = [0, 0, 0]
            if color is not None:
                color = pt.ensure_nonhex_color(color)
                color = list(color_funcs.ensure_base255(color))
                if alpha is not None:
                    if len(color) == 3:
                        color += [int(alpha * 255)]
                    else:
                        color[3] = int(alpha * 255)
                color = tuple(color)
                if len(color) == 3:
                    data['color'] = '#%02x%02x%02x' % color
                else:
                    data['color'] = '#%02x%02x%02x%02x' % color
        except Exception as ex:
            import utool as ut
            ut.printex(ex, keys=['color', 'orig_color', 'data'])
            raise

    for node, node_data in graph.nodes(data=True):
        _fix_agraph_color(node_data)
    for u, v, edge_data in graph.edges(data=True):
        _fix_agraph_color(edge_data)
def dag_longest_path(graph, source, target):
    """Finds the longest path in a dag between two nodes.

    NOTE(review): enumerates all simple paths, which is exponential in the
    worst case -- fine for small DAGs only.
    """
    if source == target:
        return [source]
    best_path = []
    for path in nx.all_simple_paths(graph, source, target):
        if len(path) > len(best_path):
            best_path = path
    return best_path
def simplify_graph(graph):
    """Strips out everything but connectivity.

    Relabels nodes to their sorted integer indices and drops all attributes,
    preserving multigraph edge keys.
    """
    import utool as ut
    nodes = sorted(graph.nodes())
    node_lookup = ut.make_index_lookup(nodes)
    new_nodes = ut.take(node_lookup, nodes)
    if graph.is_multigraph():
        new_edges = [(node_lookup[u], node_lookup[v], k, {})
                     for u, v, k in graph.edges(keys=True)]
    else:
        new_edges = [(node_lookup[u], node_lookup[v])
                     for u, v in graph.edges()]
    # Keep the same graph class (Graph/DiGraph/Multi...).
    new_graph = graph.__class__()
    new_graph.add_nodes_from(new_nodes)
    new_graph.add_edges_from(new_edges)
    return new_graph
def subgraph_from_edges(G, edge_list, ref_back=True):
    """Creates a networkx graph that is a subgraph of G defined by
    the list of edges in edge_list.

    With ``ref_back`` the subgraph references G directly; otherwise a copy
    is returned.  NOTE(review): on networkx >= 2, ``G.subgraph`` returns a
    read-only view, so the ``ref_back=True`` edge removal may fail there --
    confirm the targeted networkx version.
    """
    sub_nodes = list({node for edge in edge_list for node in edge[0:2]})
    multi_edge_list = [edge[0:3] for edge in edge_list]
    G_sub = G.subgraph(sub_nodes)
    if not ref_back:
        G_sub = G_sub.copy()
    for edge in G_sub.edges(keys=True):
        if edge not in multi_edge_list:
            G_sub.remove_edge(*edge)
    return G_sub
def all_multi_paths(graph, source, target, data=False):
    r"""Returns specific paths along multi-edges from the source to target.

    Multipaths are identified by edge keys.
    """
    return list(nx_all_simple_edge_paths(graph, source, target,
                                         keys=True, data=data))
def bfs_conditional(G, source, reverse=False, keys=True, data=False,
                    yield_nodes=True, yield_if=None, continue_if=None,
                    visited_nodes=None, yield_source=False):
    """Breadth-first search from ``source`` that only yields nodes (or
    edges) passing ``yield_if`` and only expands past a node when
    ``continue_if`` holds.
    """
    if reverse and hasattr(G, 'reverse'):
        G = G.reverse()
    if isinstance(G, nx.Graph):
        neighbors = functools.partial(G.edges, data=data)
    else:
        # Multigraphs additionally expose edge keys.
        neighbors = functools.partial(G.edges, keys=keys, data=data)
    queue = collections.deque([])
    visited_nodes = set([] if visited_nodes is None else visited_nodes)
    if source not in visited_nodes:
        if yield_nodes and yield_source:
            yield source
        visited_nodes.add(source)
        new_edges = neighbors(source)
        if isinstance(new_edges, list):
            new_edges = iter(new_edges)
        queue.append((source, new_edges))
    while queue:
        parent, edges = queue[0]
        for edge in edges:
            child = edge[1]
            if yield_nodes:
                if child not in visited_nodes:
                    if yield_if is None or yield_if(G, child, edge):
                        yield child
            else:
                # Edge mode: an edge may be yielded even to a visited node.
                if yield_if is None or yield_if(G, child, edge):
                    yield edge
            if child not in visited_nodes:
                visited_nodes.add(child)
                if continue_if is None or continue_if(G, child, edge):
                    new_edges = neighbors(child)
                    if isinstance(new_edges, list):
                        new_edges = iter(new_edges)
                    queue.append((child, new_edges))
        queue.popleft()
def color_nodes(graph, labelattr='label', brightness=.878, outof=None,
                sat_adjust=None):
    """Colors nodes by their ``labelattr`` value (one color per label)."""
    import plottool as pt
    import utool as ut
    node_to_lbl = nx.get_node_attributes(graph, labelattr)
    unique_lbls = sorted(set(node_to_lbl.values()))
    ncolors = len(unique_lbls)
    if outof is None:
        if ncolors == 1:
            unique_colors = [pt.LIGHT_BLUE]
        elif ncolors == 2:
            # Preserve a red/blue-ish convention for binary labels.
            unique_colors = ['royalblue', 'orange']
            unique_colors = list(map(pt.color_funcs.ensure_base01,
                                     unique_colors))
        else:
            unique_colors = pt.distinct_colors(ncolors,
                                               brightness=brightness)
    else:
        unique_colors = pt.distinct_colors(outof, brightness=brightness)
    if sat_adjust:
        unique_colors = [
            pt.color_funcs.adjust_hsv_of_rgb(c, sat_adjust=sat_adjust)
            for c in unique_colors
        ]
    if outof is None:
        lbl_to_color = ut.dzip(unique_lbls, unique_colors)
    else:
        # Color 0 (gray) marks labels shifted below the offset.
        gray = pt.color_funcs.ensure_base01('lightgray')
        unique_colors = [gray] + unique_colors
        offset = max(1, min(unique_lbls)) - 1
        node_to_lbl = ut.map_vals(lambda nid: max(0, nid - offset),
                                  node_to_lbl)
        lbl_to_color = ut.dzip(range(outof + 1), unique_colors)
    node_to_color = ut.map_vals(lbl_to_color, node_to_lbl)
    nx.set_node_attributes(graph, name='color', values=node_to_color)
    ut.nx_ensure_agraph_color(graph)
def approx_min_num_components(nodes, negative_edges):
    """Find the approximate minimum number of connected components possible.

    Each negative edge means its two endpoints must be in different
    components.  Greedy heuristic: repeatedly grow a clique in the
    complement ("compatibility") graph and contract it.
    """
    import utool as ut
    num = 0
    g_neg = nx.Graph()
    g_neg.add_nodes_from(nodes)
    g_neg.add_edges_from(negative_edges)
    # Collapse all unconstrained (degree-0) nodes into one node first.
    if nx.__version__.startswith('2'):
        deg0_nodes = [n for n, d in g_neg.degree() if d == 0]
    else:
        deg0_nodes = [n for n, d in g_neg.degree_iter() if d == 0]
    for u, v in ut.itertwo(deg0_nodes):
        nx_contracted_nodes(g_neg, v, u, inplace=True)
    unused = list(g_neg.nodes())
    g_pos = nx.complement(g_neg)
    # (An unreachable `if False:` clique-removal experiment was removed here.)
    while len(unused) > 0:
        num += 1
        n1 = unused[0]
        unused.remove(n1)
        neigbs = ut.isect(list(g_pos.neighbors(n1)), unused)
        while len(neigbs) > 0:
            n2 = neigbs[0]
            unused.remove(n2)
            # Merging compatible nodes keeps the complement up to date.
            g_neg = nx.contracted_nodes(g_neg, n1, n2)
            g_pos = nx.complement(g_neg)
            neigbs = ut.isect(list(g_pos.neighbors(n1)), unused)
    print('num = %r' % (num,))
    return num
def solve(self, y, h, t_end):
    """Integrate an unforced system from t=0.0 up to t_end with step h.

    Returns (ts, ys): the time points and the state after each step.
    """
    ts, ys = [], []
    yi, ti = y, 0.0
    while ti < t_end:
        ts.append(ti)
        yi = self.step(yi, None, ti, h)
        ys.append(yi)
        ti += h
    return ts, ys
def step(self, y, u, t, h):
    """Advance one classical RK4 step of size ``h`` with control input ``u``.

    Called by ``solve`` but can also be driven manually to integrate with
    an external control force.
    """
    # Each k_i already contains the factor h, so the intermediate states
    # are y + k/2 (not y + h*k/2).  The original multiplied the stage
    # offsets by h a second time, which is not the classical RK4 scheme.
    k1 = h * self.func(t, y, u)
    k2 = h * self.func(t + .5 * h, y + .5 * k1, u)
    k3 = h * self.func(t + .5 * h, y + .5 * k2, u)
    k4 = h * self.func(t + h, y + k3, u)
    return y + (k1 + 2 * k2 + 2 * k3 + k4) / 6.0
def generate_proteins(pepfn, proteins, pepheader, scorecol, minlog,
                      higherbetter=True, protcol=False):
    """Yields each protein annotated with its best peptide score.

    With ``minlog`` the score is -log10 transformed, anchored at the
    smallest positive peptide score.
    """
    protein_peptides = {}
    if minlog:
        # -log10 scores: smaller raw values are better.
        higherbetter = False
    if not protcol:
        protcol = peptabledata.HEADER_MASTERPROTEINS
    for psm in reader.generate_tsv_psms(pepfn, pepheader):
        p_acc = psm[protcol]
        # Skip peptides shared between multiple proteins.
        if ';' in p_acc:
            continue
        protein_peptides = evaluate_peptide(protein_peptides, psm, p_acc,
                                            higherbetter, scorecol,
                                            fncol=False)
    if minlog:
        try:
            nextbestscore = min([pep['score']
                                 for pep in protein_peptides.values()
                                 if pep['score'] > 0])
        except ValueError:
            import sys
            sys.stderr.write(
                'Cannot find score of type {} which is above 0. '
                'Only scores above zero can have a -log value. '
                'Exiting.'.format(scorecol))
            sys.exit(1)
        nextbestscore = -log(nextbestscore, 10)
    for protein in proteins:
        try:
            peptide = protein_peptides[protein[prottabledata.HEADER_PROTEIN]]
        except KeyError:
            print('WARNING - protein {} not found in peptide '
                  'table'.format(protein[prottabledata.HEADER_PROTEIN]))
            peptide = {'score': 'NA'}
        if minlog and peptide['score'] != 'NA':
            peptide['score'] = log_score(peptide['score'], nextbestscore)
        protein[prottabledata.HEADER_QSCORE] = str(peptide['score'])
        yield protein
def add(self, child):
    """Adds a typed child object to the simulation spec.

    Raises ModelError for unsupported child types.
    """
    # Dispatch table preserves the original isinstance-check order.
    dispatch = (
        (Run, self.add_run),
        (Record, self.add_record),
        (EventRecord, self.add_event_record),
        (DataDisplay, self.add_data_display),
        (DataWriter, self.add_data_writer),
        (EventWriter, self.add_event_writer),
    )
    for cls, handler in dispatch:
        if isinstance(child, cls):
            handler(child)
            return
    raise ModelError('Unsupported child element')
def fetch(self, id_, return_fields=None):
    """Wrapper for fetching details of a game by ID.

    ``return_fields`` optionally restricts the returned fields; it is
    validated before being joined into the query.
    """
    game_params = {"id": id_}
    if return_fields is not None:
        self._validate_return_fields(return_fields)
        game_params["field_list"] = ",".join(return_fields)
    return self._query(game_params, direct=True)
def define_options(self, names, parser_options=None):
    """Builds option dicts for the given names, keyed by their clarg.

    Each name is looked up in ``parser_options`` first, then falls back to
    the module-level ``shared_options``.
    """
    def copy_option(options, name):
        # Shallow copy so later mutation does not touch the shared template.
        return {k: v for k, v in options[name].items()}

    if parser_options is None:
        parser_options = {}
    options = {}
    for name in names:
        try:
            option = copy_option(parser_options, name)
        except KeyError:
            option = copy_option(shared_options, name)
        try:
            options.update({option['clarg']: option})
        except TypeError:
            # clarg is a list (unhashable): key on its first entry.
            options.update({option['clarg'][0]: option})
    return options
def current_memory_usage():
    """Returns this program's current resident memory usage (RSS) in bytes."""
    import psutil
    proc = psutil.Process(os.getpid())
    # memory_info() returns a named tuple; .rss is clearer than index [0]
    # (the unused vms local was removed).
    return proc.memory_info().rss
def num_unused_cpus(thresh=10):
    """Returns the number of cpus with utilization less than ``thresh`` percent."""
    import psutil
    return sum(1 for usage in psutil.cpu_percent(percpu=True)
               if usage < thresh)
def get_protein_group_content(pgmap, master):
    """For each protein grouped under ``master``, build a content row.

    Each row is [0, master, protein, #peptides, #psms, total psm score,
    psm_id of first psm, sequence of first psm].  Master proteins are
    included in this group.
    """
    rows = []
    for protein, peptides in pgmap.items():
        all_psms = [psm for pgpsms in peptides.values() for psm in pgpsms]
        # Representative psm: first entry of the first peptide's psm list.
        first_psm = next(iter(next(iter(peptides.values()))))
        rows.append([
            0,
            master,
            protein,
            len(peptides),
            len(all_psms),
            sum(psm[1] for psm in all_psms),
            first_psm[3],
            first_psm[2],
        ])
    return rows
def get_protein_data(peptide, pdata, headerfields, accfield):
    """Combines protein lookups with coverage descriptions for a peptide.

    NOTE(review): ``accfield`` is currently unused by this function.
    """
    report = get_proteins(peptide, pdata, headerfields)
    return get_cov_descriptions(peptide, pdata, report)
def get_num_chunks(length, chunksize):
    r"""Returns the number of chunks that a list of ``length`` items will
    be split into, given ``chunksize``.
    """
    # Integer ceil-division: exact for ints and, unlike
    # int(math.ceil(length / chunksize)), also correct under Python 2's
    # integer division (where 10 / 3 == 3 would lose a chunk).
    n_chunks = -(-length // chunksize)
    return n_chunks
def ProgChunks(list_, chunksize, nInput=None, **kwargs):
    """Yields an iterator in chunks and computes progress.

    Progress version of ``ut.ichunks``.
    """
    if nInput is None:
        nInput = len(list_)
    kwargs['length'] = get_num_chunks(nInput, chunksize)
    kwargs.setdefault('freq', 1)
    chunk_iter = util_iter.ichunks(list_, chunksize)
    return ProgressIter(chunk_iter, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.