idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
6,000 | def value ( self ) : value = getattr ( self . instrument , self . probe_name ) self . buffer . append ( value ) return value | reads the value from the instrument |
6,001 | def load_and_append ( probe_dict , probes , instruments = { } ) : loaded_failed = { } updated_probes = { } updated_probes . update ( probes ) updated_instruments = { } updated_instruments . update ( instruments ) new_instruments = list ( set ( probe_dict . keys ( ) ) - set ( probes . keys ( ) ) ) if new_instruments != [ ] : updated_instruments , failed = Instrument . load_and_append ( { instrument_name : instrument_name for instrument_name in new_instruments } , instruments ) if failed != [ ] : for failed_instrument in set ( failed ) - set ( instruments . keys ( ) ) : for probe_name in probe_dict [ failed_instrument ] : loaded_failed [ probe_name ] = ValueError ( 'failed to load instrument {:s} already exists. Did not load!' . format ( failed_instrument ) ) del probe_dict [ failed_instrument ] for instrument_name , probe_names in probe_dict . items ( ) : if not instrument_name in updated_probes : updated_probes . update ( { instrument_name : { } } ) for probe_name in probe_names . split ( ',' ) : if probe_name in updated_probes [ instrument_name ] : loaded_failed [ probe_name ] = ValueError ( 'failed to load probe {:s} already exists. Did not load!' . format ( probe_name ) ) else : probe_instance = Probe ( updated_instruments [ instrument_name ] , probe_name ) updated_probes [ instrument_name ] . update ( { probe_name : probe_instance } ) return updated_probes , loaded_failed , updated_instruments | load probes from probe_dict and append to probes if additional instruments are required create them and add them to instruments |
6,002 | def get ( self , key ) : try : return self [ self . id_lookup . get ( key ) ] except TypeError : raise KeyError | Returns an address by user controlled input ID |
6,003 | def get_index ( self , key ) : try : return self [ self . index_lookup . get ( key ) ] except TypeError : raise KeyError | Returns an address by input index a value that matches the list index of the provided lookup value not necessarily the result . |
6,004 | def _igamc ( a , x ) : ax = math . exp ( a * math . log ( x ) - x - math . lgamma ( a ) ) y = 1.0 - a z = x + y + 1.0 c = 0.0 pkm2 = 1.0 qkm2 = x pkm1 = x + 1.0 qkm1 = z * x ans = pkm1 / qkm1 while True : c += 1.0 y += 1.0 z += 2.0 yc = y * c pk = pkm1 * z - pkm2 * yc qk = qkm1 * z - qkm2 * yc if qk != 0 : r = pk / qk t = abs ( ( ans - r ) / r ) ans = r else : t = 1.0 pkm2 = pkm1 pkm1 = pk qkm2 = qkm1 qkm1 = qk if abs ( pk ) > BIG : pkm2 *= BIGINV pkm1 *= BIGINV qkm2 *= BIGINV qkm1 *= BIGINV if t <= MACHEP : return ans * ax | Complemented incomplete Gamma integral . |
6,005 | def main ( ) : dem = '../tests/data/Jamaica_dem.tif' num_proc = 2 wp = '../tests/data/tmp_results/wtsd_delineation' TauDEMWorkflow . watershed_delineation ( num_proc , dem , workingdir = wp ) | The simplest usage of watershed delineation based on TauDEM . |
6,006 | def _get_line ( self , search_string , search_file , return_string = True , case_sens = True ) : if os . path . isfile ( search_file ) : if type ( search_string ) == type ( '' ) : search_string = [ search_string ] if not case_sens : search_string = [ i . lower ( ) for i in search_string ] with open ( search_file ) as fp : for line in fp : query_line = line if case_sens else line . lower ( ) if all ( [ i in query_line for i in search_string ] ) : return line if return_string else True if return_string : raise Exception ( '%s not found in %s' % ( ' & ' . join ( search_string ) , search_file ) ) else : return False else : raise Exception ( '%s file does not exist' % search_file ) | Return the first line containing a set of strings in a file . |
6,007 | def get_cutoff_energy ( self ) : return Value ( scalars = [ Scalar ( value = self . settings [ "kinetic-energy cutoff" ] ) ] , units = self . settings [ 'kinetic-energy cutoff units' ] ) | Determine the cutoff energy from the output |
6,008 | def get_pp_name ( self ) : ppnames = [ ] natomtypes = int ( self . _get_line ( 'number of atomic types' , self . outputf ) . split ( ) [ 5 ] ) with open ( self . outputf ) as fp : for line in fp : if "PseudoPot. #" in line : ppnames . append ( Scalar ( value = next ( fp ) . split ( '/' ) [ - 1 ] . rstrip ( ) ) ) if len ( ppnames ) == natomtypes : return Value ( scalars = ppnames ) raise Exception ( 'Could not find %i pseudopotential names' % natomtypes ) | Determine the pseudopotential names from the output |
6,009 | def get_U_settings ( self ) : with open ( self . outputf ) as fp : for line in fp : if "LDA+U calculation" in line : U_param = { } U_param [ 'Type' ] = line . split ( ) [ 0 ] U_param [ 'Values' ] = { } for nl in range ( 15 ) : line2 = next ( fp ) . split ( ) if len ( line2 ) > 1 and line2 [ 0 ] == "atomic" : pass elif len ( line2 ) == 6 : U_param [ 'Values' ] [ line2 [ 0 ] ] = { } U_param [ 'Values' ] [ line2 [ 0 ] ] [ 'L' ] = float ( line2 [ 1 ] ) U_param [ 'Values' ] [ line2 [ 0 ] ] [ 'U' ] = float ( line2 [ 2 ] ) U_param [ 'Values' ] [ line2 [ 0 ] ] [ 'J' ] = float ( line2 [ 4 ] ) else : break return Value ( ** U_param ) return None | Determine the DFT + U type and parameters from the output |
6,010 | def get_vdW_settings ( self ) : xc = self . get_xc_functional ( ) . scalars [ 0 ] . value if 'vdw' in xc . lower ( ) : return Value ( scalars = [ Scalar ( value = xc ) ] ) else : vdW_dict = { 'xdm' : 'Becke-Johnson XDM' , 'ts' : 'Tkatchenko-Scheffler' , 'ts-vdw' : 'Tkatchenko-Scheffler' , 'tkatchenko-scheffler' : 'Tkatchenko-Scheffler' , 'grimme-d2' : 'Grimme D2' , 'dft-d' : 'Grimme D2' } if self . _get_line ( 'vdw_corr' , self . inputf , return_string = False , case_sens = False ) : line = self . _get_line ( 'vdw_corr' , self . inputf , return_string = True , case_sens = False ) vdwkey = str ( line . split ( '=' ) [ - 1 ] . replace ( "'" , "" ) . replace ( ',' , '' ) . lower ( ) . rstrip ( ) ) return Value ( scalars = [ Scalar ( value = vdW_dict [ vdwkey ] ) ] ) return None | Determine the vdW type if using vdW xc functional or correction scheme from the input otherwise |
6,011 | def get_stresses ( self ) : if "stress" not in self . settings : return None wrapped = [ [ Scalar ( value = x ) for x in y ] for y in self . settings [ "stress" ] ] return Property ( matrices = [ wrapped ] , units = self . settings [ "stress units" ] ) | Determine the stress tensor from the output |
6,012 | def get_dos ( self ) : fildos = '' for f in self . _files : with open ( f , 'r' ) as fp : first_line = next ( fp ) if "E (eV)" in first_line and "Int dos(E)" in first_line : fildos = f ndoscol = len ( next ( fp ) . split ( ) ) - 2 fp . close ( ) break fp . close ( ) if not fildos : return None line = self . _get_line ( 'the Fermi energy is' , self . outputf ) efermi = float ( line . split ( 'is' ) [ - 1 ] . split ( ) [ 0 ] ) energy = [ ] dos = [ ] fp = open ( fildos , 'r' ) next ( fp ) for line in fp : ls = line . split ( ) energy . append ( Scalar ( value = float ( ls [ 0 ] ) - efermi ) ) dos . append ( Scalar ( value = sum ( [ float ( i ) for i in ls [ 1 : 1 + ndoscol ] ] ) ) ) return Property ( scalars = dos , units = 'number of states per unit cell' , conditions = Value ( name = 'energy' , scalars = energy , units = 'eV' ) ) | Find the total DOS shifted by the Fermi energy |
6,013 | def get_band_gap ( self ) : dosdata = self . get_dos ( ) if type ( dosdata ) == type ( None ) : return None else : energy = dosdata . conditions . scalars dos = dosdata . scalars step_size = energy [ 1 ] . value - energy [ 0 ] . value not_found = True l = 0 bot = 10 ** 3 top = - 10 ** 3 while not_found and l < len ( dos ) : e = float ( energy [ l ] . value ) dens = float ( dos [ l ] . value ) if e < 0 and dens > 1e-3 : bot = e elif e > 0 and dens > 1e-3 : top = e not_found = False l += 1 if top < bot : raise Exception ( 'Algorithm failed to find the band gap' ) elif top - bot < step_size * 2 : return Property ( scalars = [ Scalar ( value = 0 ) ] , units = 'eV' ) else : bandgap = float ( top - bot ) return Property ( scalars = [ Scalar ( value = round ( bandgap , 3 ) ) ] , units = 'eV' ) | Compute the band gap from the DOS |
6,014 | def get_category_aliases_under ( parent_alias = None ) : return [ ch . alias for ch in get_cache ( ) . get_children_for ( parent_alias , only_with_aliases = True ) ] | Returns a list of category aliases under the given parent . |
6,015 | def get_category_lists ( init_kwargs = None , additional_parents_aliases = None , obj = None ) : init_kwargs = init_kwargs or { } additional_parents_aliases = additional_parents_aliases or [ ] parent_aliases = additional_parents_aliases if obj is not None : ctype = ContentType . objects . get_for_model ( obj ) cat_ids = [ item [ 0 ] for item in get_tie_model ( ) . objects . filter ( content_type = ctype , object_id = obj . id ) . values_list ( 'category_id' ) . all ( ) ] parent_aliases = list ( get_cache ( ) . get_parents_for ( cat_ids ) . union ( additional_parents_aliases ) ) lists = [ ] aliases = get_cache ( ) . sort_aliases ( parent_aliases ) categories_cache = get_cache ( ) . get_categories ( aliases , obj ) for parent_alias in aliases : catlist = CategoryList ( parent_alias , ** init_kwargs ) if obj is not None : catlist . set_obj ( obj ) cache = [ ] try : cache = categories_cache [ parent_alias ] except KeyError : pass catlist . set_get_categories_cache ( cache ) lists . append ( catlist ) return lists | Returns a list of CategoryList objects optionally associated with a given model instance . |
6,016 | def register_lists ( self , category_lists , lists_init_kwargs = None , editor_init_kwargs = None ) : lists_init_kwargs = lists_init_kwargs or { } editor_init_kwargs = editor_init_kwargs or { } for lst in category_lists : if isinstance ( lst , string_types ) : lst = self . list_cls ( lst , ** lists_init_kwargs ) elif not isinstance ( lst , CategoryList ) : raise SitecatsConfigurationError ( '`CategoryRequestHandler.register_lists()` accepts only ' '`CategoryList` objects or category aliases.' ) if self . _obj : lst . set_obj ( self . _obj ) for name , val in lists_init_kwargs . items ( ) : setattr ( lst , name , val ) lst . enable_editor ( ** editor_init_kwargs ) self . _lists [ lst . get_id ( ) ] = lst | Registers CategoryList objects to handle their requests . |
6,017 | def action_remove ( cls , request , category_list ) : if not category_list . editor . allow_remove : raise SitecatsSecurityException ( '`action_remove()` is not supported by parent `%s`category.' % category_list . alias ) category_id = int ( request . POST . get ( 'category_id' , 0 ) ) if not category_id : raise SitecatsSecurityException ( 'Unsupported `category_id` value - `%s` - is passed to `action_remove()`.' % category_id ) category = get_cache ( ) . get_category_by_id ( category_id ) if not category : raise SitecatsSecurityException ( 'Unable to get `%s` category in `action_remove()`.' % category_id ) cat_ident = category . alias or category . id if category . is_locked : raise SitecatsSecurityException ( '`action_remove()` is not supported by `%s` category.' % cat_ident ) if category . parent_id != category_list . get_id ( ) : raise SitecatsSecurityException ( '`action_remove()` is unable to remove `%s`: ' 'not a child of parent `%s` category.' % ( cat_ident , category_list . alias ) ) min_num = category_list . editor . min_num def check_min_num ( num ) : if min_num is not None and num - 1 < min_num : subcats_str = ungettext_lazy ( 'subcategory' , 'subcategories' , min_num ) error_msg = _ ( 'Unable to remove "%(target_category)s" category from "%(parent_category)s": ' 'parent category requires at least %(num)s %(subcats_str)s.' ) % { 'target_category' : category . title , 'parent_category' : category_list . get_title ( ) , 'num' : min_num , 'subcats_str' : subcats_str } raise SitecatsValidationError ( error_msg ) child_ids = get_cache ( ) . get_child_ids ( category_list . alias ) check_min_num ( len ( child_ids ) ) if category_list . obj is None : category . delete ( ) else : check_min_num ( category_list . obj . get_ties_for_categories_qs ( child_ids ) . count ( ) ) category_list . obj . remove_from_category ( category ) return True | Handles remove action from CategoryList editor . |
6,018 | def action_add ( cls , request , category_list ) : if not category_list . editor . allow_add : raise SitecatsSecurityException ( '`action_add()` is not supported by `%s` category.' % category_list . alias ) titles = request . POST . get ( 'category_title' , '' ) . strip ( ) if not titles : raise SitecatsSecurityException ( 'Unsupported `category_title` value - `%s` - is passed to `action_add()`.' % titles ) if category_list . editor . category_separator is None : titles = [ titles ] else : titles = [ title . strip ( ) for title in titles . split ( category_list . editor . category_separator ) if title . strip ( ) ] def check_max_num ( num , max_num , category_title ) : if max_num is not None and num + 1 > max_num : subcats_str = ungettext_lazy ( 'subcategory' , 'subcategories' , max_num ) error_msg = _ ( 'Unable to add "%(target_category)s" category into "%(parent_category)s": ' 'parent category can have at most %(num)s %(subcats_str)s.' ) % { 'target_category' : category_title , 'parent_category' : category_list . get_title ( ) , 'num' : max_num , 'subcats_str' : subcats_str } raise SitecatsValidationError ( error_msg ) target_category = None for category_title in titles : exists = get_cache ( ) . find_category ( category_list . alias , category_title ) if exists and category_list . obj is None : return exists if not exists and not category_list . editor . allow_new : error_msg = _ ( 'Unable to create a new "%(new_category)s" category inside of "%(parent_category)s": ' 'parent category does not support this action.' ) % { 'new_category' : category_title , 'parent_category' : category_list . get_title ( ) } raise SitecatsNewCategoryException ( error_msg ) max_num = category_list . editor . max_num child_ids = get_cache ( ) . get_child_ids ( category_list . alias ) if not exists : if category_list . obj is None : check_max_num ( len ( child_ids ) , max_num , category_title ) target_category = get_category_model ( ) . add ( category_title , request . 
user , parent = category_list . get_category_model ( ) ) else : target_category = exists if category_list . obj is not None : check_max_num ( category_list . obj . get_ties_for_categories_qs ( child_ids ) . count ( ) , max_num , category_title ) category_list . obj . add_to_category ( target_category , request . user ) return target_category | Handles add action from CategoryList editor . |
6,019 | def shrink ( image , apikey ) : def _handle_response ( response ) : body = json . loads ( response . read ( ) ) if response . code == TinyPNGResponse . SUCCESS_CODE : body [ 'location' ] = response . headers . getheader ( "Location" ) try : body [ 'bytes' ] = urlopen ( body [ 'location' ] ) . read ( ) except : body [ 'bytes' ] = None return response . code , body auth = b64encode ( bytes ( "api:" + apikey ) ) . decode ( "ascii" ) request = Request ( TINYPNG_SHRINK_URL , image ) request . add_header ( "Authorization" , "Basic %s" % auth ) try : response = urlopen ( request ) ( code , response_dict ) = _handle_response ( response ) except HTTPError as e : ( code , response_dict ) = _handle_response ( e ) return TinyPNGResponse ( code , ** response_dict ) | To shrink a PNG image post the data to the API service . The response is a JSON message . The initial request must be authorized with HTTP Basic authorization . |
6,020 | def download_and_install_dependencies ( ) : try : import requests except ImportError : raise ValueError ( "Python 3.6+ is required." ) dependencies = { "hmm_databases" : HMM_URL } if sys . platform . startswith ( "linux" ) or "bsd" in sys . platform : dependencies [ "prodigal" ] = "{}.linux" . format ( BASE_PRODIGAL ) dependencies [ "louvain" ] = ( "https://lip6.github.io/Louvain-BinaryBuild/" "louvain_linux.tar.gz" ) elif sys . platform == "darwin" : dependencies [ "prodigal" ] = "{}.osx.10.9.5" . format ( BASE_PRODIGAL ) dependencies [ "louvain" ] = ( "https://github.com/lip6/Louvain-BinaryBuilds/raw/osx/" "louvain_osx.tar.gz" ) elif sys . platform . startswith ( "win" ) or sys . platform == "cygwin" : dependencies [ "prodigal" ] = "{}.windows.exe" dependencies [ "louvain" ] = ( "https://ci.appveyor.com/api/projects/yanntm/" "Louvain-BinaryBuild/artifacts/website/" "louvain_windows.tar.gz" ) else : raise NotImplementedError ( "Your platform is not supported: {}" . format ( sys . platform ) ) cache_dir = pathlib . Path . cwd ( ) / pathlib . Path ( "cache" ) try : print ( "Downloading dependencies..." ) cache_dir . mkdir ( ) for dependency_name , url in dependencies . items ( ) : print ( "Downloading {} at {}" . format ( dependency_name , url ) ) request = requests . get ( url ) basename = url . split ( "/" ) [ - 1 ] with open ( cache_dir / basename , "wb" ) as handle : print ( dependency_name , basename , cache_dir / basename ) handle . write ( request . content ) except FileExistsError : print ( "Using cached dependencies..." ) share_dir = pathlib . Path . cwd ( ) tools_dir = share_dir / "tools" louvain_dir = tools_dir / "louvain" louvain_dir . mkdir ( parents = True , exist_ok = True ) louvain_basename = dependencies [ "louvain" ] . split ( "/" ) [ - 1 ] louvain_path = louvain_dir / louvain_basename ( cache_dir / louvain_basename ) . replace ( louvain_path ) with tarfile . open ( louvain_path , "r:gz" ) as tar : tar . 
extractall ( ) hmm_basename = dependencies [ "hmm_databases" ] . split ( "/" ) [ - 1 ] hmm_path = share_dir / hmm_basename ( cache_dir / hmm_basename ) . replace ( hmm_path ) prodigal_basename = dependencies [ "prodigal" ] . split ( "/" ) [ - 1 ] prodigal_path = tools_dir / "prodigal" ( cache_dir / prodigal_basename ) . replace ( prodigal_path ) | Setup URLS and download dependencies for Python 3 . 6 + |
6,021 | def get ( self ) : ret_list = [ ] if hasattr ( self , "font" ) : ret_list . append ( self . font ) if hasattr ( self , "size" ) : ret_list . append ( self . size ) if hasattr ( self , "text" ) : ret_list . append ( self . text ) return ret_list | method to fetch all contents as a list |
6,022 | def extract_by_prefix_surfix ( text , prefix , surfix , minlen = None , maxlen = None , include = False ) : if minlen is None : minlen = 0 if maxlen is None : maxlen = 2 ** 30 pattern = r % ( prefix , minlen , maxlen , surfix ) if include : return [ prefix + s + surfix for s in re . findall ( pattern , text ) ] else : return re . findall ( pattern , text ) | Extract the text in between a prefix and surfix . It use non - greedy match . |
6,023 | def extract_number ( text ) : result = list ( ) chunk = list ( ) valid_char = set ( ".1234567890" ) for char in text : if char in valid_char : chunk . append ( char ) else : result . append ( "" . join ( chunk ) ) chunk = list ( ) result . append ( "" . join ( chunk ) ) result_new = list ( ) for number in result : if "." in number : try : result_new . append ( float ( number ) ) except : pass else : try : result_new . append ( int ( number ) ) except : pass return result_new | Extract digit character from text . |
6,024 | def extract_email ( text ) : result = list ( ) for tp in re . findall ( _regex_extract_email , text . lower ( ) ) : for email in tp : if re . match ( _regex_validate_email , email ) : result . append ( email ) return result | Extract email from text . |
6,025 | def sign ( self , headers : Mapping , method = None , path = None ) : required_headers = self . header_list message = generate_message ( required_headers , headers , method , path ) signature = encode_string ( self . _signer . sign ( message ) , 'base64' ) ret_headers = multidict . CIMultiDict ( headers ) ret_headers [ 'Authorization' ] = self . _signature_tpl % signature . decode ( 'ascii' ) return ret_headers | Add Signature Authorization header to case - insensitive header dict . |
6,026 | async def verify ( self , headers : Mapping , method = None , path = None ) : if not 'authorization' in headers : return False auth_type , auth_params = parse_authorization_header ( headers [ 'authorization' ] ) if auth_type . lower ( ) != 'signature' : return False for param in ( 'algorithm' , 'keyId' , 'signature' ) : if param not in auth_params : raise VerifierException ( "Unsupported HTTP signature, missing '{}'" . format ( param ) ) auth_headers = ( auth_params . get ( 'headers' ) or 'date' ) . lower ( ) . strip ( ) . split ( ) missing_reqd = set ( self . _required_headers ) - set ( auth_headers ) if missing_reqd : error_headers = ', ' . join ( missing_reqd ) raise VerifierException ( 'One or more required headers not provided: {}' . format ( error_headers ) ) key_id , algo = auth_params [ 'keyId' ] , auth_params [ 'algorithm' ] if not self . _handlers . supports ( algo ) : raise VerifierException ( "Unsupported HTTP signature algorithm '{}'" . format ( algo ) ) pubkey = await self . _key_finder . find_key ( key_id , algo ) if not pubkey : raise VerifierException ( "Cannot locate public key for '{}'" . format ( key_id ) ) LOGGER . debug ( "Got %s public key for '%s': %s" , algo , key_id , pubkey ) handler = self . _handlers . create_verifier ( algo , pubkey ) message = generate_message ( auth_headers , headers , method , path ) signature = auth_params [ 'signature' ] raw_signature = decode_string ( signature , 'base64' ) if handler . verify ( message , raw_signature ) : return { 'verified' : True , 'algorithm' : algo , 'headers' : auth_headers , 'keyId' : key_id , 'key' : pubkey , 'signature' : signature } raise VerifierException ( "Signature could not be verified for keyId '{}'" . format ( key_id ) ) | Parse Signature Authorization header and verify signature |
6,027 | def docpie ( self , argv = None ) : token = self . _prepare_token ( argv ) self . check_flag_and_handler ( token ) if token . error is not None : self . exception_handler ( token . error ) try : result , dashed = self . _match ( token ) except DocpieExit as e : self . exception_handler ( e ) value = result . get_value ( self . appeared_only , False ) self . clear ( ) self . update ( value ) if self . appeared_only : self . _drop_non_appeared ( ) logger . debug ( 'get all matched value %s' , self ) rest = list ( self . usages ) rest . remove ( result ) self . _add_rest_value ( rest ) logger . debug ( 'merged rest values, now %s' , self ) self . _add_option_value ( ) self . _dashes_value ( dashed ) return dict ( self ) | match the argv for each usages return dict . |
6,028 | def clone_exception ( error , args ) : new_error = error . __class__ ( * args ) new_error . __dict__ = error . __dict__ return new_error | return a new cloned error |
6,029 | def to_dict ( self ) : config = { 'stdopt' : self . stdopt , 'attachopt' : self . attachopt , 'attachvalue' : self . attachvalue , 'auto2dashes' : self . auto2dashes , 'case_sensitive' : self . case_sensitive , 'namedoptions' : self . namedoptions , 'appearedonly' : self . appeared_only , 'optionsfirst' : self . options_first , 'option_name' : self . option_name , 'usage_name' : self . usage_name , 'name' : self . name , 'help' : self . help , 'version' : self . version } text = { 'doc' : self . doc , 'usage_text' : self . usage_text , 'option_sections' : self . option_sections , } option = { } for title , options in self . options . items ( ) : option [ title ] = [ convert_2_dict ( x ) for x in options ] usage = [ convert_2_dict ( x ) for x in self . usages ] return { '__version__' : self . _version , '__class__' : 'Docpie' , '__config__' : config , '__text__' : text , 'option' : option , 'usage' : usage , 'option_names' : [ list ( x ) for x in self . opt_names ] , 'opt_names_required_max_args' : self . opt_names_required_max_args } | Convert Docpie into a JSONlizable dict . |
6,030 | def from_dict ( cls , dic ) : if '__version__' not in dic : raise ValueError ( 'Not support old docpie data' ) data_version = int ( dic [ '__version__' ] . replace ( '.' , '' ) ) this_version = int ( cls . _version . replace ( '.' , '' ) ) logger . debug ( 'this: %s, old: %s' , this_version , data_version ) if data_version < this_version : raise ValueError ( 'Not support old docpie data' ) assert dic [ '__class__' ] == 'Docpie' config = dic [ '__config__' ] help = config . pop ( 'help' ) version = config . pop ( 'version' ) option_name = config . pop ( 'option_name' ) usage_name = config . pop ( 'usage_name' ) self = cls ( None , ** config ) self . option_name = option_name self . usage_name = usage_name text = dic [ '__text__' ] self . doc = text [ 'doc' ] self . usage_text = text [ 'usage_text' ] self . option_sections = text [ 'option_sections' ] self . opt_names = [ set ( x ) for x in dic [ 'option_names' ] ] self . opt_names_required_max_args = dic [ 'opt_names_required_max_args' ] self . set_config ( help = help , version = version ) self . options = o = { } for title , options in dic [ 'option' ] . items ( ) : opt_ins = [ convert_2_object ( x , { } , self . namedoptions ) for x in options ] o [ title ] = opt_ins self . usages = [ convert_2_object ( x , self . options , self . namedoptions ) for x in dic [ 'usage' ] ] return self | Convert dict generated by convert_2_dict into Docpie instance |
6,031 | def set_config ( self , ** config ) : reinit = False if 'stdopt' in config : stdopt = config . pop ( 'stdopt' ) reinit = ( stdopt != self . stdopt ) self . stdopt = stdopt if 'attachopt' in config : attachopt = config . pop ( 'attachopt' ) reinit = reinit or ( attachopt != self . attachopt ) self . attachopt = attachopt if 'attachvalue' in config : attachvalue = config . pop ( 'attachvalue' ) reinit = reinit or ( attachvalue != self . attachvalue ) self . attachvalue = attachvalue if 'auto2dashes' in config : self . auto2dashes = config . pop ( 'auto2dashes' ) if 'name' in config : name = config . pop ( 'name' ) reinit = reinit or ( name != self . name ) self . name = name if 'help' in config : self . help = config . pop ( 'help' ) self . _set_or_remove_extra_handler ( self . help , ( '--help' , '-h' ) , self . help_handler ) if 'version' in config : self . version = config . pop ( 'version' ) self . _set_or_remove_extra_handler ( self . version is not None , ( '--version' , '-v' ) , self . version_handler ) if 'case_sensitive' in config : case_sensitive = config . pop ( 'case_sensitive' ) reinit = reinit or ( case_sensitive != self . case_sensitive ) self . case_sensitive = case_sensitive if 'optionsfirst' in config : self . options_first = config . pop ( 'optionsfirst' ) if 'appearedonly' in config : self . appeared_only = config . pop ( 'appearedonly' ) if 'namedoptions' in config : namedoptions = config . pop ( 'namedoptions' ) reinit = reinit or ( namedoptions != self . namedoptions ) self . namedoptions = namedoptions if 'extra' in config : self . extra . update ( self . _formal_extra ( config . pop ( 'extra' ) ) ) if config : raise ValueError ( '`%s` %s not accepted key argument%s' % ( '`, `' . join ( config ) , 'is' if len ( config ) == 1 else 'are' , '' if len ( config ) == 1 else 's' ) ) if self . doc is not None and reinit : logger . warning ( 'You changed the config that requires re-initialized' ' `Docpie` object. 
Create a new one instead' ) self . _init ( ) | Shadow all the current config . |
6,032 | def find_flag_alias ( self , flag ) : for each in self . opt_names : if flag in each : result = set ( each ) result . remove ( flag ) return result return None | Return alias set of a flag ; return None if flag is not defined in Options . |
6,033 | def set_auto_handler ( self , flag , handler ) : assert flag . startswith ( '-' ) and flag not in ( '-' , '--' ) alias = self . find_flag_alias ( flag ) or [ ] self . extra [ flag ] = handler for each in alias : self . extra [ each ] = handler | Set pre - auto - handler for a flag . |
6,034 | def preview ( self , stream = sys . stdout ) : write = stream . write write ( ( '[Quick preview of Docpie %s]' % self . _version ) . center ( 80 , '=' ) ) write ( '\n' ) write ( ' sections ' . center ( 80 , '-' ) ) write ( '\n' ) write ( self . usage_text ) write ( '\n' ) option_sections = self . option_sections if option_sections : write ( '\n' ) write ( '\n' . join ( option_sections . values ( ) ) ) write ( '\n' ) write ( ' str ' . center ( 80 , '-' ) ) write ( '\n[%s]\n' % self . usage_name ) for each in self . usages : write ( ' %s\n' % each ) write ( '\n[Options:]\n\n' ) for title , sections in self . options . items ( ) : if title : full_title = '%s %s' % ( title , self . option_name ) else : full_title = self . option_name write ( full_title ) write ( '\n' ) for each in sections : write ( ' %s\n' % each ) write ( '\n' ) write ( ' repr ' . center ( 80 , '-' ) ) write ( '\n[%s]\n' % self . usage_name ) for each in self . usages : write ( ' %r\n' % each ) write ( '\n[Options:]\n\n' ) for title , sections in self . options . items ( ) : if title : full_title = '%s %s' % ( title , self . option_name ) else : full_title = self . option_name write ( full_title ) write ( '\n' ) for each in sections : write ( ' %r\n' % each ) write ( '\n' ) write ( ' auto handlers ' . center ( 80 , '-' ) ) write ( '\n' ) for key , value in self . extra . items ( ) : write ( '%s %s\n' % ( key , value ) ) | A quick preview of docpie . Print all the parsed object |
6,035 | def refresh_core ( self ) : self . log . info ( 'Sending out mass query for all attributes' ) for key in ATTR_CORE : self . query ( key ) | Query device for all attributes that exist regardless of power state . |
6,036 | def poweron_refresh ( self ) : if self . _poweron_refresh_successful : return else : self . refresh_all ( ) self . _loop . call_later ( 2 , self . poweron_refresh ) | Keep requesting all attributes until it works . |
6,037 | def refresh_all ( self ) : self . log . info ( 'refresh_all' ) for key in LOOKUP : self . query ( key ) | Query device for all attributes that are known . |
6,038 | def connection_made ( self , transport ) : self . log . info ( 'Connection established to AVR' ) self . transport = transport limit_low , limit_high = self . transport . get_write_buffer_limits ( ) self . log . debug ( 'Write buffer limits %d to %d' , limit_low , limit_high ) self . command ( 'ECH1' ) self . refresh_core ( ) | Called when asyncio . Protocol establishes the network connection . |
6,039 | def data_received ( self , data ) : self . buffer += data . decode ( ) self . log . debug ( 'Received %d bytes from AVR: %s' , len ( self . buffer ) , self . buffer ) self . _assemble_buffer ( ) | Called when asyncio . Protocol detects received data from network . |
6,040 | def connection_lost ( self , exc ) : if exc is None : self . log . warning ( 'eof from receiver?' ) else : self . log . warning ( 'Lost connection to receiver: %s' , exc ) self . transport = None if self . _connection_lost_callback : self . _loop . call_soon ( self . _connection_lost_callback ) | Called when asyncio . Protocol loses the network connection . |
6,041 | def _assemble_buffer ( self ) : self . transport . pause_reading ( ) for message in self . buffer . split ( ';' ) : if message != '' : self . log . debug ( 'assembled message ' + message ) self . _parse_message ( message ) self . buffer = "" self . transport . resume_reading ( ) return | Split up received data from device into individual commands . |
6,042 | def _populate_inputs ( self , total ) : total = total + 1 for input_number in range ( 1 , total ) : self . query ( 'ISN' + str ( input_number ) . zfill ( 2 ) ) | Request the names for all active configured inputs on the device . |
6,043 | def formatted_command ( self , command ) : command = command command = command . encode ( ) self . log . debug ( '> %s' , command ) try : self . transport . write ( command ) time . sleep ( 0.01 ) except : self . log . warning ( 'No transport found, unable to send command' ) | Issue a raw formatted command to the device . |
def dump_rawdata(self):
    """Return the transport object's attributes as a string, for debugging.

    Implicitly returns None when no transport attribute exists yet.
    """
    if hasattr(self, 'transport'):
        attrs = vars(self.transport)
        return ', '.join("%s: %s" % item for item in attrs.items())
def add_upsert(self, value, criteria):
    """Add a tag/populator upsert to the batch, keyed by lower-cased value.

    Also tracks an approximate serialized size per value so the batch can
    later be split into parts under the upload size limit.
    """
    value = value.strip()
    v = value.lower()
    # Remember the original casing for serialization.
    self.lower_val_to_val[v] = value
    criteria_array = self.upserts.get(v)
    if criteria_array is None:
        criteria_array = []
        # 31 — assumed fixed JSON overhead for a new upsert entry; TODO confirm.
        self.upserts_size[v] = 31 + len(value)
    criteria_array.append(criteria.to_dict())
    self.upserts[v] = criteria_array
    self.upserts_size[v] += criteria.json_size()
def add_delete(self, value):
    """Register *value* for deletion; deletes are processed before upserts.

    Raises:
        ValueError: if *value* is empty after stripping.
    """
    value = value.strip()
    v = value.lower()
    # Validate before touching any instance state: the original recorded
    # the value in lower_val_to_val first, leaving a stray entry behind
    # when the ValueError was raised.
    if len(v) == 0:
        raise ValueError("Invalid value for delete. Value is empty.")
    self.lower_val_to_val[v] = value
    self.deletes.add(v)
def parts(self):
    """Split the batch into BatchPart objects under the upload size limit.

    Upserts are packed first, then deletes; a new part is started whenever
    adding the next entry would reach max_upload_size.  The final part is
    flagged via set_last_part().

    Raises:
        ValueError: if the batch is empty and replace_all is False.
    """
    parts = []
    upserts = dict()
    deletes = []
    max_upload_size = 700000
    # 118 — assumed size of the fixed JSON envelope; +1 when replace_all
    # serializes as 'false' (5 chars) instead of 'true' (4). TODO confirm.
    base_part_size = 118
    if not self.replace_all:
        base_part_size += 1
    part_size = base_part_size
    for value in self.upserts:
        if (part_size + self.upserts_size[value]) >= max_upload_size:
            parts.append(BatchPart(self.replace_all, upserts, deletes))
            upserts = dict()
            deletes = []
            part_size = base_part_size
        upserts[self.lower_val_to_val[value]] = self.upserts[value]
        part_size += self.upserts_size[value]
    for value in self.deletes:
        # +4 approximates the JSON overhead of a {'value': ...} entry.
        if (part_size + len(value) + 4) >= max_upload_size:
            parts.append(BatchPart(self.replace_all, upserts, deletes))
            upserts = dict()
            deletes = []
            part_size = base_part_size
        deletes.append({'value': self.lower_val_to_val[value]})
        part_size += len(value) + 4
    if len(upserts) + len(deletes) > 0:
        parts.append(BatchPart(self.replace_all, upserts, deletes))
    if len(parts) == 0:
        if not self.replace_all:
            raise ValueError("Batch has no data, and 'replace_all' is False")
        # An empty replace_all batch is valid: it clears everything.
        parts.append(BatchPart(self.replace_all, dict(), []))
    parts[-1].set_last_part()
    return parts
def build_json(self, guid):
    """Serialize this batch part to JSON, embedding the given batch *guid*."""
    upserts = []
    for value in self.upserts:
        upserts.append({"value": value, "criteria": self.upserts[value]})
    return json.dumps({'replace_all': self.replace_all,
                       'guid': guid,
                       'complete': self.complete,
                       'upserts': upserts,
                       'deletes': self.deletes})
def _ensure_field(self, key):
    """Account for the serialized size of a non-array field named *key*.

    Adds 2 bytes for the separator when a field already exists, then
    len(key) + 4 for the key itself.
    # NOTE(review): the +4 is assumed to cover quotes/colon around the
    # key in the JSON output — confirm against the serializer.
    """
    if self._has_field:
        self._size += 2
    self._has_field = True
    self._size += len(key) + 4
def _ensure_array(self, key, value):
    """Append *value* to the array field *key*, tracking serialized size."""
    if key not in self._json_dict:
        self._json_dict[key] = []
        self._size += 2  # the enclosing '[]'
        self._ensure_field(key)
    if len(self._json_dict[key]) > 0:
        self._size += 2  # ', ' between elements
    if isinstance(value, str):
        self._size += 2  # surrounding quotes for string values
    self._size += len(str(value))
    self._json_dict[key].append(value)
def add_tcp_flag(self, tcp_flag):
    """OR a single TCP flag bit into the existing 'tcp_flags' bitmask.

    Raises:
        ValueError: if *tcp_flag* is not a single-bit value (1..128).
    """
    if tcp_flag not in [1, 2, 4, 8, 16, 32, 64, 128]:
        raise ValueError("Invalid TCP flag. Valid: [1, 2, 4, 8, 16,32, 64, 128]")
    prev_size = 0
    if self._json_dict.get('tcp_flags') is None:
        self._json_dict['tcp_flags'] = 0
    else:
        # Previous serialized footprint: digits + key name + 3 (assumed
        # quotes/colon overhead — TODO confirm).
        prev_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3
    self._json_dict['tcp_flags'] |= tcp_flag
    new_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3
    self._size += new_size - prev_size
    if prev_size == 0 and self._has_field:
        self._size += 2  # separator before a newly added field
    self._has_field = True
def set_tcp_flags(self, tcp_flags):
    """Replace the complete 'tcp_flags' bitmask, tracking serialized size.

    Raises:
        ValueError: if *tcp_flags* is outside 0-255.
    """
    if tcp_flags < 0 or tcp_flags > 255:
        raise ValueError("Invalid tcp_flags. Valid: 0-255.")
    prev_size = 0
    if self._json_dict.get('tcp_flags') is not None:
        # Previous serialized footprint: digits + key name + 3 (assumed
        # quotes/colon overhead — TODO confirm).
        prev_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3
    self._json_dict['tcp_flags'] = tcp_flags
    new_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3
    self._size += new_size - prev_size
    if prev_size == 0 and self._has_field:
        self._size += 2  # separator before a newly added field
    self._has_field = True
def _submit_batch(self, url, batch):
    """Submit *batch* to *url* part by part.

    Each part echoes back the guid assigned by the first response so the
    server can associate subsequent parts with the same batch.

    Returns:
        The JSON dict from the last HTTP response.

    Raises:
        RuntimeError: if a response contains no guid.
        requests.HTTPError: on any non-2xx response.
    """
    batch_parts = batch.parts()
    guid = ""
    headers = {'User-Agent': 'kentik-python-api/0.1',
               'Content-Type': 'application/json',
               'X-CH-Auth-Email': self.api_email,
               'X-CH-Auth-API-Token': self.api_token}
    last_part = dict()
    for batch_part in batch_parts:
        resp = requests.post(url, headers=headers,
                             data=batch_part.build_json(guid))
        # Removed a stray debug `print(resp.text)` left in the original —
        # it dumped every raw response to stdout.
        resp.raise_for_status()
        last_part = resp.json()
        guid = last_part['guid']
        if guid is None or len(guid) == 0:
            raise RuntimeError('guid not found in batch response')
    return last_part
def submit_populator_batch(self, column_name, batch):
    """Submit a populator batch for the custom dimension *column_name*.

    Returns:
        The guid string assigned to the batch by the server.

    Raises:
        ValueError: on an invalid column name.
        RuntimeError: if the server reports an error.
    """
    if not set(column_name).issubset(_allowedCustomDimensionChars):
        raise ValueError('Invalid custom dimension name "%s": must only contain letters, digits, and underscores' % column_name)
    if len(column_name) < 3 or len(column_name) > 20:
        raise ValueError('Invalid value "%s": must be between 3-20 characters' % column_name)
    url = '%s/api/v5/batch/customdimensions/%s/populators' % (self.base_url, column_name)
    resp_json_dict = self._submit_batch(url, batch)
    if resp_json_dict.get('error') is not None:
        raise RuntimeError('Error received from server: %s' % resp_json_dict['error'])
    return resp_json_dict['guid']
def submit_tag_batch(self, batch):
    """Submit a tag batch.

    NOTE(review): unlike submit_populator_batch, the server response (and
    its guid) is discarded — confirm callers never need it.
    """
    url = '%s/api/v5/batch/tags' % self.base_url
    self._submit_batch(url, batch)
def fetch_batch_status(self, guid):
    """Fetch the processing status of a previously submitted batch.

    Args:
        guid: the batch guid returned at submission time.

    Returns:
        A BatchResponse wrapping the server's JSON status payload.

    Raises:
        requests.HTTPError: on any non-2xx response.
    """
    url = '%s/api/v5/batch/%s/status' % (self.base_url, guid)
    headers = {'User-Agent': 'kentik-python-api/0.1',
               'Content-Type': 'application/json',
               'X-CH-Auth-Email': self.api_email,
               'X-CH-Auth-API-Token': self.api_token}
    resp = requests.get(url, headers=headers)
    resp.raise_for_status()
    return BatchResponse(guid, resp.json())
def predict_files(self, files):
    """Load image files from disk, resize to 224x224, preprocess, and predict.

    Args:
        files: list (or other sized iterable) of image file paths.

    Returns:
        Model predictions for the batch of images.
    """
    imgs = [0] * len(files)
    for i, file in enumerate(files):
        img = cv2.imread(file)
        # BUG FIX: the original checked `img is None` only after calling
        # .astype() on it, which raises AttributeError first, so the
        # "failed to open" path was unreachable.  Check immediately.
        if img is None:
            print('failed to open: {}, continuing...'.format(file))
            # Preserves the original behavior of storing the failed slot
            # as-is; np.array() below will still fail on a ragged batch —
            # TODO: consider skipping failed files entirely.
            imgs[i] = img
            continue
        img = img.astype('float64')
        img = cv2.resize(img, (224, 224))
        img = preprocess_input(img)
        imgs[i] = img
    return self.model.predict(np.array(imgs))
def rename_genome(genome_in, genome_out=None):
    """Rewrite a FASTA file with slugified record headers.

    Spaces, dashes and tabs in record IDs become underscores; any other
    non-alphanumeric characters are removed.  The output path defaults to
    '<basename>_renamed.fa'.
    """
    if genome_out is None:
        genome_out = "{}_renamed.fa".format(genome_in.split(".")[0])
    with open(genome_out, "w") as output_handle:
        for record in SeqIO.parse(genome_in, "fasta"):
            new_record_id = record.id.replace(" ", "_")
            new_record_id = new_record_id.replace("-", "_")
            new_record_id = new_record_id.replace("\t", "_")
            new_record_id = re.sub("[^_A-Za-z0-9]+", "", new_record_id)
            header = ">{}\n".format(new_record_id)
            output_handle.write(header)
            output_handle.write("{}\n".format(str(record.seq)))
def filter_genome(genome_in, threshold=500, list_records=None):
    """Filter records of a FASTA file by length and (optionally) by ID.

    Args:
        genome_in: path to the input FASTA file.
        threshold: minimum sequence length for a record to be kept.
        list_records: optional path to a file with one record ID per line,
            or any container of IDs; when given, only those IDs are kept.

    Returns:
        A generator over the retained SeqRecord objects.
    """
    if list_records is None:
        # No ID list supplied: keep every record.
        def truth(*args):
            del args
            return True
        is_a_record_to_keep = truth
    else:
        try:
            with open(list_records) as records_handle:
                # BUG FIX: the original used readlines(), whose entries keep
                # their trailing '\n', so no record.id could ever match.
                records_to_keep = [line.strip() for line in records_handle]
        # BUG FIX: open() on a non-path container raises TypeError, not
        # OSError, so the container fallback was unreachable.
        except (OSError, TypeError):
            if not hasattr(list_records, "__contains__"):
                raise
            records_to_keep = list_records
        is_a_record_to_keep = records_to_keep.__contains__
    records_to_write = (
        record
        for record in SeqIO.parse(genome_in, "fasta")
        if (len(record.seq) >= threshold and is_a_record_to_keep(record.id))
    )
    return records_to_write
def rename_proteins(prot_in, prot_out=None, chunk_size=DEFAULT_CHUNK_SIZE):
    """Rename prodigal protein records to '<contig>_<chunk>__gene<id>'.

    The prodigal header format 'name # start # end # strand # attrs' is
    split on '#'; the gene's start position (integer-divided by
    chunk_size) determines which chunk it belongs to.  Output path
    defaults to '<basename>_renamed.fa'.
    """
    if prot_out is None:
        prot_out = "{}_renamed.fa".format(prot_in.split(".")[0])
    with open(prot_out, "w") as prot_out_handle:
        for record in SeqIO.parse(prot_in, "fasta"):
            header = record.description
            name, pos_start, _, _, _ = header.split("#")
            chunk_start = int(pos_start) // chunk_size
            # Prodigal names are '<contig>_<gene_number>'.
            name_split = name.split("_")
            contig_name = "_".join(name_split[:-1])
            gene_id = name_split[-1]
            new_record_id = "{}_{}__gene{}".format(contig_name, chunk_start, gene_id)
            prot_out_handle.write(">{}\n".format(new_record_id))
            prot_out_handle.write("{}\n".format(str(record.seq)))
def write_records(records, output_file, split=False):
    """Write FASTA records to disk.

    When *split* is True, each record goes to its own file named
    '<output_file><record.id>.fa'; otherwise all records go to the single
    *output_file*.
    """
    if split:
        for record in records:
            with open("{}{}.fa".format(output_file, record.id), "w") as record_handle:
                SeqIO.write(record, record_handle, "fasta")
    else:
        SeqIO.write(records, output_file, "fasta")
def add_sample(self, **data):
    """Append one sample to this series.

    Dimensions absent from *data* are appended as None so all dimension
    lists stay the same length.

    Raises:
        KeyError: if *data* contains a dimension not declared on the series.
    """
    missing_dimensions = set(data).difference(self.dimensions)
    if missing_dimensions:
        raise KeyError('Dimensions not defined in this series: %s'
                       % ', '.join(missing_dimensions))
    for dim in self.dimensions:
        getattr(self, dim).append(data.get(dim))
def samples(self):
    """Yield the series' samples as dicts keyed by dimension name."""
    names = self.series.dimensions
    # zip over the parallel per-dimension lists produces one tuple per sample.
    for values in zip(*(getattr(self.series, name) for name in names)):
        yield dict(zip(names, values))
def write_binary(filename, data):
    """Create the directory path to *filename* (if any) and write *data* as bytes.

    Args:
        filename: destination path; intermediate directories are created.
        data: bytes-like object to write.
    """
    directory = os.path.dirname(filename)
    # BUG FIX: os.makedirs('') raises when *filename* has no directory
    # component; exist_ok also closes the check-then-create race the
    # original had with os.path.exists().
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(filename, 'wb') as f:
        f.write(data)
def files_with_exts(root='.', suffix=''):
    """Lazily yield paths of files under *root* whose names end with *suffix*."""
    for dirpath, _dirnames, filenames in os.walk(root):
        for filename in filenames:
            if filename.endswith(suffix):
                yield os.path.join(dirpath, filename)
def find_apikey():
    """Locate a TinyPNG API key.

    Search order: the TINYPNG_APIKEY / TINYPNG_API_KEY environment
    variables, then ./tinypng.key, then ~/.tinypng.key.

    Returns:
        The key string, or None when nothing is found.
    """
    env_keys = ['TINYPNG_APIKEY', 'TINYPNG_API_KEY']
    paths = []
    paths.append(os.path.join(os.path.abspath("."), "tinypng.key"))
    paths.append(os.path.expanduser("~/.tinypng.key"))
    for env_key in env_keys:
        if os.environ.get(env_key):
            return os.environ.get(env_key)
    for path in paths:
        if os.path.exists(path):
            return open(path, 'rt').read().strip()
    return None
def compare_packages(rpm_str_a, rpm_str_b, arch_provided=True):
    """Compare two full RPM package strings by their EVR tuples.

    Parses each string and delegates to labelCompare.

    Returns:
        The labelCompare result (positive when A is newer, negative when
        B is newer, zero when equal).
    """
    logger.debug('resolve_versions(%s, %s)', rpm_str_a, rpm_str_b)
    evr_a = parse_package(rpm_str_a, arch_provided)['EVR']
    evr_b = parse_package(rpm_str_b, arch_provided)['EVR']
    return labelCompare(evr_a, evr_b)
def compare_evrs(evr_a, evr_b):
    """Compare two (epoch, version, release) tuples.

    Epoch dominates; version is compared next, then release.
    """
    a_epoch, a_ver, a_rel = evr_a
    b_epoch, b_ver, b_rel = evr_b
    if a_epoch != b_epoch:
        return a_newer if a_epoch > b_epoch else b_newer
    ver_comp = compare_versions(a_ver, b_ver)
    if ver_comp != a_eq_b:
        return ver_comp
    rel_comp = compare_versions(a_rel, b_rel)
    return rel_comp
def compare_versions(version_a, version_b):
    """Compare two RPM version strings segment by segment.

    Returns a_eq_b when equal, a_newer when *version_a* is newer,
    b_newer when *version_b* is newer.

    Raises:
        RpmError: if either argument cannot be decomposed into characters.
    """
    logger.debug('compare_versions(%s, %s)', version_a, version_b)
    if version_a == version_b:
        return a_eq_b
    try:
        chars_a, chars_b = list(version_a), list(version_b)
    except TypeError:
        raise RpmError('Could not compare {0} to '
                       '{1}'.format(version_a, version_b))
    while len(chars_a) != 0 and len(chars_b) != 0:
        logger.debug('starting loop comparing %s '
                     'to %s', chars_a, chars_b)
        _check_leading(chars_a, chars_b)
        if chars_a[0] == '~' and chars_b[0] == '~':
            # BUG FIX: the original did
            #     map(lambda x: x.pop(0), (chars_a, chars_b))
            # but map() is lazy in Python 3, so the pops never executed
            # and matching '~' prefixes caused an infinite loop.
            chars_a.pop(0)
            chars_b.pop(0)
        elif chars_a[0] == '~':
            # '~' sorts before everything (pre-release marker).
            return b_newer
        elif chars_b[0] == '~':
            return a_newer
        if len(chars_a) == 0 or len(chars_b) == 0:
            break
        block_res = _get_block_result(chars_a, chars_b)
        if block_res != a_eq_b:
            return block_res
    # All compared blocks were equal: the longer remaining string wins.
    if len(chars_a) == len(chars_b):
        logger.debug('versions are equal')
        return a_eq_b
    else:
        logger.debug('versions not equal')
        return a_newer if len(chars_a) > len(chars_b) else b_newer
def package(package_string, arch_included=True):
    """Parse an RPM package string into a Package object."""
    logger.debug('package(%s, %s)', package_string, arch_included)
    pkg_info = parse_package(package_string, arch_included)
    pkg = Package(pkg_info['name'],
                  pkg_info['EVR'][0],
                  pkg_info['EVR'][1],
                  pkg_info['EVR'][2],
                  pkg_info['arch'],
                  package_str=package_string)
    return pkg
def parse_package(package_string, arch_included=True):
    """Parse an RPM package string into name, (epoch, version, release), arch.

    A missing epoch defaults to '0'.

    Raises:
        RpmError: if the string does not match the expected RPM format.
    """
    logger.debug('parse_package(%s, %s)', package_string, arch_included)
    default_epoch = '0'
    arch = None
    if arch_included:
        # Strip the trailing '.arch' before matching the N-E:V-R pattern.
        char_list = list(package_string)
        arch = _pop_arch(char_list)
        package_string = ''.join(char_list)
        logger.debug('updated version_string: %s', package_string)
    try:
        name, epoch, version, release = _rpm_re.match(package_string).groups()
    except AttributeError:
        # match() returned None.
        raise RpmError('Could not parse package string: %s' % package_string)
    if epoch == '' or epoch is None:
        epoch = default_epoch
    info = {'name': name, 'EVR': (epoch, version, release), 'arch': arch}
    logger.debug('parsed information: %s', info)
    return info
def _pop_arch(char_list):
    """Pop the trailing '.arch' suffix off *char_list* and return the arch.

    Mutates *char_list* in place (the '.' separator is consumed too).

    Raises:
        RpmError: if no '.' is found before the list is exhausted.
    """
    logger.debug('_pop_arch(%s)', char_list)
    arch_list = []
    char = char_list.pop()
    while char != '.':
        arch_list.insert(0, char)
        try:
            char = char_list.pop()
        except IndexError:
            raise RpmError('Could not parse an architecture. Did you mean to '
                           'set the arch_included flag to False?')
    logger.debug('arch chars: %s', arch_list)
    return ''.join(arch_list)
def _check_leading(*char_lists):
    """Strip leading characters that are neither alphanumeric nor '~'.

    Mutates each list in place.
    """
    logger.debug('_check_leading(%s)', char_lists)
    for char_list in char_lists:
        while (len(char_list) != 0 and not char_list[0].isalnum()
               and not char_list[0] == '~'):
            char_list.pop(0)
        logger.debug('updated list: %s', char_list)
def _trim_zeros(*char_lists):
    """Strip leading '0' characters from each provided list, in place."""
    logger.debug('_trim_zeros(%s)', char_lists)
    for char_list in char_lists:
        while len(char_list) != 0 and char_list[0] == '0':
            char_list.pop(0)
        logger.debug('updated block: %s', char_list)
def _pop_digits(char_list):
    """Pop and return the run of leading digits from *char_list* (in place)."""
    logger.debug('_pop_digits(%s)', char_list)
    digits = []
    while len(char_list) != 0 and char_list[0].isdigit():
        digits.append(char_list.pop(0))
    logger.debug('got digits: %s', digits)
    logger.debug('updated char list: %s', char_list)
    return digits
def _pop_letters(char_list):
    """Pop and return the run of leading letters from *char_list* (in place)."""
    logger.debug('_pop_letters(%s)', char_list)
    letters = []
    while len(char_list) != 0 and char_list[0].isalpha():
        letters.append(char_list.pop(0))
    logger.debug('got letters: %s', letters)
    logger.debug('updated char list: %s', char_list)
    return letters
def _compare_blocks(block_a, block_b):
    """Compare two same-type character blocks.

    Digit blocks: leading zeros are trimmed and the longer remaining
    block is numerically larger.  Otherwise the comparison falls through
    to Python's lexicographic list comparison.
    """
    logger.debug('_compare_blocks(%s, %s)', block_a, block_b)
    if block_a[0].isdigit():
        _trim_zeros(block_a, block_b)
        if len(block_a) != len(block_b):
            logger.debug('block lengths are not equal')
            return a_newer if len(block_a) > len(block_b) else b_newer
    if block_a == block_b:
        logger.debug('blocks are equal')
        return a_eq_b
    else:
        logger.debug('blocks are not equal')
        return a_newer if block_a > block_b else b_newer
def _get_block_result(chars_a, chars_b):
    """Pop the leading block of each list (digit or letter run, decided
    by chars_a's first char) and compare them.

    When chars_b yields an empty block of that type, the side holding the
    block wins: digits beat letters per RPM ordering, so a leading digit
    in chars_a makes A newer, a leading letter makes B newer.
    """
    logger.debug('_get_block_result(%s, %s)', chars_a, chars_b)
    first_is_digit = chars_a[0].isdigit()
    pop_func = _pop_digits if first_is_digit else _pop_letters
    return_if_no_b = a_newer if first_is_digit else b_newer
    block_a, block_b = pop_func(chars_a), pop_func(chars_b)
    if len(block_b) == 0:
        logger.debug('blocks are equal')
        return return_if_no_b
    return _compare_blocks(block_a, block_b)
def list_(*, cursor: str = None,
          exclude_archived: bool = None,
          exclude_members: bool = None,
          limit: int = None) -> snug.Query[Page[t.List[Channel]]]:
    """List all channels as a paginated snug query.

    Yields the request parameters; the generator then receives the JSON
    response and returns a Page of channels whose next_query (when the
    response carries response_metadata.next_cursor) re-invokes list_
    with the same filters and the new cursor.
    """
    kwargs = {'exclude_archived': exclude_archived,
              'exclude_members': exclude_members,
              'limit': limit}
    response = yield {'cursor': cursor, **kwargs}
    try:
        next_cursor = response['response_metadata']['next_cursor']
    except KeyError:
        next_query = None
    else:
        next_query = list_(**kwargs, cursor=next_cursor)
    return Page(
        load_channel_list(response['channels']),
        next_query=next_query,
    )
def create(name: str, *, validate: bool = None) -> snug.Query[Channel]:
    """Create a new channel named *name*.

    Returns the request parameter mapping consumed by the query machinery.
    """
    params = {'name': name, 'validate': validate}
    return params
def tube(self, name):
    """Return the tube *name*, creating and caching it on first access.

    Raises:
        AssertionError: if *name* is empty.
        # NOTE(review): assert is stripped under `python -O`; consider
        # raising ValueError instead if validation matters in production.
    """
    if name in self._tubes:
        return self._tubes[name]
    assert name, 'Tube name must be specified'
    t = self._tube_cls(self, name)
    self._tubes[name] = t
    return t
def device_measurement(device, ts=None, part=None, result=None, code=None, **kwargs):
    """Build a JSON MeasurementPayload string ready to send through a transport.

    Args:
        device: device identifier for the payload.
        ts: sample timestamp; defaults to local_now().
        part: optional part identifier.
        result / code: measurement outcome fields.
        **kwargs: sample dimension names and their values.
    """
    if ts is None:
        ts = local_now()
    payload = MeasurementPayload(device=device, part=part)
    m = Measurement(ts, result, code, list(kwargs))
    payload.measurements.append(m)
    m.add_sample(ts, **kwargs)
    return dumps(payload)
def add_sample(self, ts, **kwargs):
    """Add one sample at timestamp *ts* to this measurement.

    The first sample fixes the measurement's base timestamp (offset 0);
    later samples store their offset from it in whole milliseconds.
    """
    if not self.series.offsets:
        self.ts = ts
        offset = 0
    else:
        dt = ts - self.ts
        # Convert the timedelta to milliseconds.
        offset = (dt.days * 24 * 60 * 60 * 1000
                  + dt.seconds * 1000
                  + dt.microseconds // 1000)
    self.series.add_sample(offset, **kwargs)
def samples(self):
    """Yield samples as dicts keyed by dimension, plus an absolute 'ts'.

    The absolute timestamp is reconstructed from the measurement's base
    timestamp and each sample's millisecond offset.
    """
    names = self.series.dimensions
    for n, offset in enumerate(self.series.offsets):
        dt = datetime.timedelta(microseconds=offset * 1000)
        d = {"ts": self.ts + dt}
        for name in names:
            d[name] = getattr(self.series, name)[n]
        yield d
def determine_format(data, extension=None):
    """Determine the audio format of *data* from its leading bytes.

    Args:
        data: filesystem path or binary file-like object.
        extension: optional filename extension used to disambiguate — an
            ID3 tag can prefix both MP3 and FLAC files.

    Returns:
        The parser class (MP3, FLAC, WAV) or None if unrecognized.
    """
    if isinstance(data, (os.PathLike, str)):
        data = open(data, 'rb')
    data_reader = DataReader(data)
    data_reader.seek(0, os.SEEK_SET)
    d = data_reader.read(4)
    if d.startswith((b'ID3', b'\xFF\xFB')):
        if extension is None or extension.endswith('.mp3'):
            return MP3
    if d.startswith((b'fLaC', b'ID3')):
        if extension is None or extension.endswith('.flac'):
            return FLAC
    if d.startswith(b'RIFF'):
        if extension is None or extension.endswith('.wav'):
            return WAV
    return None
def load(f):
    """Load audio metadata from a filepath or binary file-like object.

    Raises:
        ValueError: if *f* is not a readable file-like object.
        UnsupportedFormat: if no known format signature is found.
    """
    if isinstance(f, (os.PathLike, str)):
        fileobj = open(f, 'rb')
    else:
        try:
            # Zero-byte read: cheap probe that the object is readable.
            f.read(0)
        except AttributeError:
            raise ValueError("Not a valid file-like object.")
        except Exception:
            raise ValueError("Can't read from file-like object.")
        fileobj = f
    # NOTE(review): assumes fileobj has a .name attribute — a nameless
    # stream (e.g. BytesIO) would raise AttributeError here; confirm intended.
    parser_cls = determine_format(fileobj, os.path.splitext(fileobj.name)[1])
    if parser_cls is None:
        raise UnsupportedFormat("Supported format signature not found.")
    else:
        fileobj.seek(0, os.SEEK_SET)
        return parser_cls.load(fileobj)
def loads(b):
    """Load audio metadata from a bytes-like object.

    Raises:
        UnsupportedFormat: if no known format signature is found.
    """
    parser_cls = determine_format(b)
    if parser_cls is None:
        raise UnsupportedFormat("Supported format signature not found.")
    return parser_cls.load(b)
def Find(self, node_type, item_type):
    """Search down a child chain for notation whose class name matches
    *item_type*, returning True on the first hit.

    For DirectionNode the search starts at the last child; for
    ExpressionNode at the second-to-last.  Returns None (falsy) when
    nothing matches.

    NOTE(review): the while-condition stops as soon as an isinstance
    match is found, yet only the __name__ equality inside the loop
    returns True — an exact isinstance match therefore exits the loop
    and returns None.  This only behaves as documented when *item_type*
    is a stand-in class sharing the name of the real item class; confirm.
    """
    if node_type == OtherNodes.DirectionNode:
        child = self.GetChild(len(self.children) - 1)
        while child is not None and not isinstance(child.GetItem(), item_type):
            if child.GetItem().__class__.__name__ == item_type.__name__:
                return True
            child = child.GetChild(0)
    if node_type == OtherNodes.ExpressionNode:
        child = self.GetChild(len(self.children) - 2)
        while child is not None and not isinstance(child.GetItem(), item_type):
            if child.GetItem().__class__.__name__ == item_type.__name__:
                return True
            child = child.GetChild(0)
def count_lines(abspath):
    """Count the number of lines in a plain-text file.

    The file is read in binary mode so no decoding errors can occur.
    """
    with open(abspath, "rb") as stream:
        total = sum(1 for _ in stream)
    return total
def lines_stats(dir_path, file_filter):
    """Count files and total lines of selected files under *dir_path*.

    Args:
        dir_path: directory to walk.
        file_filter: selector passed to Path.select_file — presumably the
            pathlib_mate API, not stdlib pathlib; confirm which Path is
            imported.

    Returns:
        (n_files, n_lines) tuple.
    """
    n_files = 0
    n_lines = 0
    for p in Path(dir_path).select_file(file_filter):
        n_files += 1
        n_lines += count_lines(p.abspath)
    return n_files, n_lines
def parse_content(self, text):
    """Extract the usage section from *text*.

    Sets self.raw_content to the raw matched text and self.formal_content
    to a normalized version (title removed, leading empty lines dropped).
    Leaves both unset when the usage pattern is not found.
    """
    match = re.search(
        self.usage_re_str.format(self.usage_name),
        text,
        flags=(re.DOTALL if self.case_sensitive
               else (re.DOTALL | re.IGNORECASE)))
    if match is None:
        return
    dic = match.groupdict()
    logger.debug(dic)
    self.raw_content = dic['raw']
    if dic['sep'] in ('\n', '\r\n'):
        # Title on its own line: the section body is already clean.
        self.formal_content = dic['section']
        return
    # Title shares a line with the first content: blank the title with
    # spaces so the column alignment of following lines is preserved,
    # then drop the leading empty lines.
    reallen = len(dic['name'])
    replace = ''.ljust(reallen)
    drop_name = match.expand('%s\\g<sep>\\g<section>' % replace)
    self.formal_content = self.drop_started_empty_lines(drop_name).rstrip()
def spaceless_pdf_plot_maker(array, filename, vmax=None, dpi=DEFAULT_DPI):
    """Render *array* as a margin-free heatmap saved to *filename*.

    vmax defaults to the DEFAULT_SATURATION_THRESHOLD percentile of the
    array so outliers do not wash out the colormap.  Uses seaborn when
    available (SEABORN flag), otherwise plain matplotlib imshow.
    """
    if vmax is None:
        vmax = np.percentile(array, DEFAULT_SATURATION_THRESHOLD)
    # Strip every axis, margin and tick so only the matrix is drawn.
    plt.gca().set_axis_off()
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    plt.margins(0, 0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.figure()
    if SEABORN:
        sns.heatmap(array, vmax=vmax, cmap="Reds")
    else:
        plt.imshow(array, vmax=vmax, cmap="Reds", interpolation="none")
        plt.colorbar()
    plt.savefig(filename, bbox_inches="tight", pad_inches=0.0, dpi=dpi)
    plt.close()
def draw_sparse_matrix(array_filename, output_image,
                       vmax=DEFAULT_SATURATION_THRESHOLD,
                       max_size_matrix=DEFAULT_MAX_SIZE_MATRIX):
    """Draw a quick preview of a sparse (row, col, data) matrix file.

    The matrix is binned down to roughly max_size_matrix per side,
    symmetrized, normalized, and rendered via spaceless_pdf_plot_maker.
    """
    matrix = np.loadtxt(array_filename, dtype=np.int32, skiprows=1)
    try:
        row, col, data = matrix.T
    except ValueError:
        # Single-entry file: loadtxt returns a 1-D array.
        row, col, data = matrix
    size = max(np.amax(row), np.amax(col)) + 1
    S = sparse.coo_matrix((data, (row, col)), shape=(size, size))
    if max_size_matrix <= 0:
        binning = 1
    else:
        binning = (size // max_size_matrix) + 1
    binned_S = hcs.bin_sparse(S, subsampling_factor=binning)
    dense_S = binned_S.todense()
    # Symmetrize without double-counting the diagonal.
    dense_S = dense_S + dense_S.T - np.diag(np.diag(dense_S))
    normed_S = hcs.normalize_dense(dense_S)
    spaceless_pdf_plot_maker(normed_S, output_image, vmax=vmax)
def nth(iterable, n, default=None):
    """Return the n-th item (0-indexed) of *iterable*, or *default* if it
    has fewer than n+1 items."""
    remaining = itertools.islice(iterable, n, None)
    return next(remaining, default)
def pull(iterable, n):
    """Return the last *n* items of *iterable* as a list."""
    # A bounded deque keeps only the trailing n items while consuming.
    tail = collections.deque(iterable, maxlen=n)
    return list(tail)
def running_window(iterable, size):
    """Yield successive *size*-length windows over *iterable* as lists.

    Raises:
        ValueError: if *size* exceeds the length of *iterable*.
    """
    if size > len(iterable):
        raise ValueError("size can not be greater than length of iterable.")
    window = collections.deque(maxlen=size)
    for item in iterable:
        window.append(item)
        if len(window) == size:
            yield list(window)
def cycle_running_window(iterable, size):
    """Yield *size*-length windows over a cycled view of *iterable*.

    Exactly ``len(iterable)`` items are consumed from the cycle, so
    ``len(iterable) - size + 1`` windows are produced.

    Raises:
        ValueError: if *size* exceeds the length of *iterable*.
    """
    if size > len(iterable):
        raise ValueError("size can not be greater than length of iterable.")
    length = len(iterable)
    window = collections.deque(maxlen=size)
    for consumed, item in enumerate(itertools.cycle(iterable), start=1):
        window.append(item)
        if len(window) == size:
            yield list(window)
        if consumed == length:
            break
def shift_and_trim(array, dist):
    """Shift *array* by *dist* positions and drop the items shifted off.

    Positive *dist* shifts right (drops the tail), negative shifts left
    (drops the head).  Returns a new list; an empty list when the shift
    distance meets or exceeds the length.
    """
    size = len(array)
    if size == 0 or abs(dist) >= size:
        return []
    if dist > 0:
        return array[:-dist]
    if dist < 0:
        return array[-dist:]
    return list(array)
def shift_and_pad(array, dist, pad="__null__"):
    """Shift *array* by *dist* and pad the vacated slots.

    Positive *dist* shifts right, negative shifts left.  When *pad* is
    the default sentinel "__null__", the nearest edge value is reused as
    the padding item (first item for right shifts, last for left shifts).
    """
    size = len(array)
    if size == 0:
        return []
    if pad == "__null__":
        if dist > 0:
            filler = array[0]
        elif dist < 0:
            filler = array[-1]
        else:
            filler = None
    else:
        filler = pad
    if abs(dist) >= size:
        return [filler] * size
    if dist == 0:
        return list(array)
    if dist > 0:
        return [filler] * dist + array[:-dist]
    return array[-dist:] + [filler] * -dist
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.