idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
7,500
async def wait(self) -> None:
    """Coroutine which returns when this token (or any chained token) has been triggered."""
    if self.triggered_token is not None:
        # Already triggered: nothing to wait for.
        return

    # One future per waitable: our own event plus every chained token.
    futures = [asyncio.ensure_future(self._triggered.wait(), loop=self.loop)]
    for token in self._chain:
        futures.append(asyncio.ensure_future(token.wait(), loop=self.loop))

    def cancel_not_done(fut: 'asyncio.Future[None]') -> None:
        # Once one future finishes, tear down all the others.
        for future in futures:
            if not future.done():
                future.cancel()

    async def _wait_for_first(futures: Sequence[Awaitable[Any]]) -> None:
        # Return as soon as the earliest future completes.
        for future in asyncio.as_completed(futures):
            await cast(Awaitable[Any], future)
            return

    fut = asyncio.ensure_future(_wait_for_first(futures), loop=self.loop)
    fut.add_done_callback(cancel_not_done)
    await fut
Coroutine which returns when this token has been triggered
7,501
async def cancellable_wait(self, *awaitables: Awaitable[_R], timeout: float = None) -> _R:
    """Wait for the first awaitable to complete, unless we timeout or the token is triggered.

    Raises TimeoutError on timeout and OperationCancelled if this token fired first.
    """
    # Race the caller's awaitables against our own wait().
    futures = [asyncio.ensure_future(a, loop=self.loop) for a in awaitables + (self.wait(),)]
    try:
        done, pending = await asyncio.wait(
            futures,
            timeout=timeout,
            return_when=asyncio.FIRST_COMPLETED,
            loop=self.loop,
        )
    except asyncio.futures.CancelledError:
        # Propagate cancellation after tearing everything down.
        for future in futures:
            future.cancel()
        raise
    for task in pending:
        task.cancel()
    if not done:
        raise TimeoutError()
    if self.triggered_token is not None:
        # Retrieve exceptions so asyncio does not log "exception never retrieved".
        for task in done:
            task.exception()
        raise OperationCancelled(
            "Cancellation requested by {} token".format(self.triggered_token)
        )
    return done.pop().result()
Wait for the first awaitable to complete unless we timeout or the token is triggered .
7,502
def parent(self) -> Optional['CtsReference']:
    """Parent of the actual reference, e.g. 1.1 for 1.1.1 (None at the top level)."""
    if self.start.depth == 1 and (self.end is None or self.end.depth <= 1):
        return None
    if self.start.depth > 1 and (self.end is None or self.end.depth == 0):
        # Single-passage reference: drop the deepest level.
        return CtsReference("{0}{1}".format(
            ".".join(self.start.list[:-1]),
            self.start.subreference or ""
        ))
    if self.start.depth > 1 and self.end is not None and self.end.depth > 1:
        _start = self.start.list[0:-1]
        _end = self.end.list[0:-1]
        if _start == _end and self.start.subreference is None and self.end.subreference is None:
            # Both ends collapse to the same parent.
            return CtsReference(".".join(_start))
        return CtsReference("{0}{1}-{2}{3}".format(
            ".".join(_start),
            self.start.subreference or "",
            ".".join(_end),
            self.end.subreference or ""
        ))
Parent of the actual URN, for example 1.1 for 1.1.1
7,503
def highest(self) -> CtsSinglePassageId:
    """Return the highest (shallowest) reference level between start and end."""
    if not self.end:
        return self.start
    if len(self.start) < len(self.end) and len(self.start):
        return self.start
    if len(self.start) > len(self.end) and len(self.end):
        return self.end
    if len(self.start):
        return self.start
    # NOTE(review): falls through to an implicit None when both ends are empty.
Return highest reference level
7,504
def upTo(self, key):
    """Return the URN up to the given level, using the URN constants as *key*.

    :raises KeyError: if *key* is not a recognized level for this URN.
    """
    middle = [
        component
        for component in [self.__parsed["textgroup"], self.__parsed["work"], self.__parsed["version"]]
        if component is not None
    ]
    prefix = ["urn", self.__parsed["urn_namespace"], self.__parsed["cts_namespace"]]
    if key == URN.COMPLETE:
        return self.__str__()
    if key == URN.NAMESPACE:
        return ":".join(prefix)
    if key == URN.TEXTGROUP and self.__parsed["textgroup"]:
        return ":".join(prefix + [self.__parsed["textgroup"]])
    if key == URN.WORK and self.__parsed["work"]:
        return ":".join(prefix + [".".join([self.__parsed["textgroup"], self.__parsed["work"]])])
    if key == URN.VERSION and self.__parsed["version"]:
        return ":".join(prefix + [".".join(middle)])
    if key == URN.NO_PASSAGE and self.__parsed["work"]:
        return ":".join(prefix + [".".join(middle)])
    if key == URN.PASSAGE and self.__parsed["reference"]:
        return ":".join(prefix + [".".join(middle), str(self.reference)])
    if key == URN.PASSAGE_START and self.__parsed["reference"]:
        return ":".join(prefix + [".".join(middle), str(self.reference.start)])
    if key == URN.PASSAGE_END and self.__parsed["reference"] and self.reference.end is not None:
        return ":".join(prefix + [".".join(middle), str(self.reference.end)])
    raise KeyError("Provided key is not recognized.")
Returns the urn up to given level using URN Constants
7,505
def attribute(self):
    """Return the attribute that serves as a reference getter in refsDecl."""
    # Match "@attr='$N'" where N is the index of this citation level.
    pattern = "\@([a-zA-Z:]+)=\\\?[\'\"]\$" + str(self.refsDecl.count("$")) + "\\\?[\'\"]"
    refs = re.findall(pattern, self.refsDecl)
    return refs[-1]
Attribute that serves as a reference getter
7,506
def match(self, passageId):
    """Given a passageId, return the citation level matching its depth."""
    if not isinstance(passageId, CtsReference):
        passageId = CtsReference(passageId)
    if self.is_root():
        # Depth is 1-based; citation levels are 0-based.
        return self[passageId.depth - 1]
    return self.root.match(passageId)
Given a passageId matches a citation level
7,507
def fill(self, passage=None, xpath=None):
    """Fill the xpath with the given information (passage identifier).

    When *xpath* is True the citation's own xpath template is filled;
    otherwise the full refsDecl is used.
    """
    if xpath is True:
        xpath = self.xpath
        replacement = r"\1"
        if isinstance(passage, str):
            replacement = r"\1\2'" + passage + "'"
        return REFERENCE_REPLACER.sub(replacement, xpath)
    if isinstance(passage, CtsReference):
        passage = passage.start.list
    elif passage is None:
        # No passage: strip the placeholders out of the refsDecl.
        return REFERENCE_REPLACER.sub(r"\1", self.refsDecl)
    passage = iter(passage)
    return REFERENCE_REPLACER.sub(lambda m: _ref_replacer(m, passage), self.refsDecl)
Fill the xpath with given informations
7,508
def ingest(resource, xpath=".//tei:cRefPattern"):
    """Ingest a resource and build the chained Citation objects it declares.

    Returns the root Citation, or None when nothing can be parsed.
    """
    if len(resource) == 0 and isinstance(resource, list):
        return None
    elif isinstance(resource, list):
        resource = resource[0]
    elif not isinstance(resource, _Element):
        return None
    matches = resource.xpath(xpath, namespaces=XPATH_NAMESPACES)
    citations = []
    for node in matches:
        citations.append(Citation(
            name=node.get("n"),
            # Strip the "#xpath(" prefix and trailing ")".
            refsDecl=node.get("replacementPattern")[7:-1],
            child=_child_or_none(citations),
        ))
    if len(citations) > 1:
        for citation in citations[:-1]:
            citation.root = citations[-1]
    return citations[-1]
Ingest a resource and store data in its instance
7,509
def get_tweets_count_times(twitter, count, query=None):
    """Hit the Twitter API *count* times and grab tweets for the indicated query.

    Persists the collected tweets and the oldest/newest id window between runs.
    Returns (number of tweets fetched, remaining rate limit).
    """
    oldest_id, newest_id = _get_oldest_id(query=query)
    newest_id = newest_id or oldest_id
    all_tweets = []
    for _ in range(count):
        if oldest_id <= newest_id:
            tweets = get_tweets(query=query, max_id=oldest_id - 1,
                                count=TWEETS_PER_SEARCH, twitter=twitter)
        else:
            tweets = get_tweets(query=query, max_id=oldest_id - 1, since_id=newest_id,
                                count=TWEETS_PER_SEARCH, twitter=twitter)
        rate_limit_remaining = twitter.get_lastfunction_header('x-rate-limit-remaining')
        rate_limit_reset = twitter.get_lastfunction_header('x-rate-limit-reset')
        if not len(tweets):
            # Nothing in this window: jump the window forward and stop.
            oldest_id = oldest_id + ((newest_id or oldest_id) - oldest_id + 1) * 10000
            break
        elif isinstance(tweets, dict):
            # API error payload rather than a tweet list.
            print(tweets)
            break
        all_tweets.extend(tweets)
        tweet_ids = {t['id'] for t in tweets}
        if oldest_id:
            tweet_ids.add(oldest_id)
        oldest_id, newest_id = min(tweet_ids), max(tweet_ids)
        if rate_limit_remaining == 1:
            # Sleep out the rate-limit window before the next call.
            time.sleep(rate_limit_reset - time.time())
    save_tweets(all_tweets, query=query)
    _set_oldest_id(oldest_id, newest_id, query=query)
    if len(all_tweets) == 0:
        os.remove(make_oldest_id_path(query))
    return len(all_tweets), twitter.get_lastfunction_header('x-rate-limit-remaining')
Hits the Twitter API count times and grabs tweets for the indicated query
7,510
def parse(self, **kwargs):
    """Parse the contents of the output files retrieved in the FolderData.

    Returns an exit code on failure, None on success.
    """
    try:
        output_folder = self.retrieved
    except exceptions.NotExistent:
        return self.exit_codes.ERROR_NO_RETRIEVED_FOLDER

    filename_stdout = self.node.get_attribute('output_filename')
    filename_stderr = self.node.get_attribute('error_filename')

    # stderr first: it may already carry a fatal exit code.
    try:
        with output_folder.open(filename_stderr, 'r') as handle:
            exit_code = self.parse_stderr(handle)
    except (OSError, IOError):
        self.logger.exception('Failed to read the stderr file\n%s', traceback.format_exc())
        return self.exit_codes.ERROR_READING_ERROR_FILE
    if exit_code:
        return exit_code

    try:
        with output_folder.open(filename_stdout, 'r') as handle:
            handle.seek(0)
            exit_code = self.parse_stdout(handle)
    except (OSError, IOError):
        self.logger.exception('Failed to read the stdout file\n%s', traceback.format_exc())
        return self.exit_codes.ERROR_READING_OUTPUT_FILE
    if exit_code:
        return exit_code
Parse the contents of the output files retrieved in the FolderData .
7,511
def parse_stdout(self, filelike):
    """Parse the content written by the script to standard out into a CifData node.

    Returns an exit code on failure, None on success.
    """
    from CifFile import StarError

    if not filelike.read().strip():
        return self.exit_codes.ERROR_EMPTY_OUTPUT_FILE
    try:
        filelike.seek(0)
        cif = CifData(file=filelike)
    except StarError:
        self.logger.exception('Failed to parse a `CifData` from the stdout file\n%s',
                              traceback.format_exc())
        return self.exit_codes.ERROR_PARSING_CIF_DATA
    else:
        self.out('cif', cif)
    return
Parse the content written by the script to standard out into a CifData object .
7,512
def parse_stderr(self, filelike):
    """Parse the content written by the script to standard err.

    Collects ERROR,/WARNING, lines, optionally attaches them as an output
    node, and returns an exit code for recognized fatal errors.
    """
    marker_error = 'ERROR,'
    marker_warning = 'WARNING,'
    messages = {'errors': [], 'warnings': []}
    for line in filelike.readlines():
        if marker_error in line:
            messages['errors'].append(line.split(marker_error)[-1].strip())
        if marker_warning in line:
            messages['warnings'].append(line.split(marker_warning)[-1].strip())
    if self.node.get_option('attach_messages'):
        self.out('messages', Dict(dict=messages))
    for error in messages['errors']:
        if 'unknown option' in error:
            return self.exit_codes.ERROR_INVALID_COMMAND_LINE_OPTION
    return
Parse the content written by the script to standard err .
7,513
def reset(cls):
    """Reset the registry to the standard multihash functions."""
    cls._func_from_name = {}
    cls._func_from_hash = {}
    cls._func_hash = {}
    register = cls._do_register
    for (func, hash_name, hash_new) in cls._std_func_data:
        register(func, func.name, hash_name, hash_new)
    # Every standard function must end up registered.
    assert set(cls._func_hash) == set(Func)
Reset the registry to the standard multihash functions .
7,514
def get(cls, func_hint):
    """Return a registered hash function matching the given hint.

    The hint may be a Func value, a registered name, or a registered code.
    :raises KeyError: if nothing matches.
    """
    try:
        return Func(func_hint)
    except ValueError:
        pass
    if func_hint in cls._func_from_name:
        return cls._func_from_name[func_hint]
    if func_hint in cls._func_hash:
        return func_hint
    raise KeyError("unknown hash function", func_hint)
Return a registered hash function matching the given hint .
7,515
def _do_register ( cls , code , name , hash_name = None , hash_new = None ) : cls . _func_from_name [ name . replace ( '-' , '_' ) ] = code cls . _func_from_name [ name . replace ( '_' , '-' ) ] = code if hash_name : cls . _func_from_hash [ hash_name ] = code cls . _func_hash [ code ] = cls . _hash ( hash_name , hash_new )
Add hash function data to the registry without checks .
7,516
def register(cls, code, name, hash_name=None, hash_new=None):
    """Add an application-specific function to the registry.

    :raises ValueError: for non-app-specific codes or conflicting names.
    """
    if not _is_app_specific_func(code):
        raise ValueError("only application-specific functions can be registered")
    # Reject names already bound to a different code.
    name_checks = [
        (cls._func_from_name, name,
         "function name is already registered for a different function"),
        (cls._func_from_hash, hash_name,
         "hashlib name is already registered for a different function"),
    ]
    for mapping, candidate, errmsg in name_checks:
        existing_func = mapping.get(candidate, code)
        if existing_func != code:
            raise ValueError(errmsg, existing_func)
    if code in cls._func_hash:
        # Re-registering: drop the old entry first.
        cls.unregister(code)
    cls._do_register(code, name, hash_name, hash_new)
Add an application - specific function to the registry .
7,517
def unregister(cls, code):
    """Remove an application-specific function from the registry.

    :raises ValueError: when *code* is a standard (non-app-specific) function.
    """
    if code in Func:
        raise ValueError("only application-specific functions can be unregistered")
    # Drop every name spelling that maps to this code.
    stale_names = {n for (n, f) in cls._func_from_name.items() if f == code}
    for func_name in stale_names:
        del cls._func_from_name[func_name]
    hash = cls._func_hash.pop(code)
    if hash.name:
        del cls._func_from_hash[hash.name]
Remove an application - specific function from the registry .
7,518
def hash_from_func(cls, func):
    """Return a hashlib-compatible object for the multihash *func*, or None."""
    constructor = cls._func_hash[func].new
    return constructor() if constructor else None
Return a hashlib - compatible object for the multihash func .
7,519
def thermal_data(data, figsize=(12, 4), ms_data=50,
                 v_label='Unit-cell volume $(\mathrm{\AA}^3)$',
                 pdf_filen=None, title='P-V-T data'):
    """Plot P-V-T data before fitting: P-V and P-T panels colored by temperature.

    :param data: dict-like with 'p', 'v', 'temp' arrays (possibly with uncertainties)
    :param pdf_filen: if given, the figure is saved to this path
    """
    f, ax = plt.subplots(1, 2, figsize=figsize, sharex=True)
    if isuncertainties([data['p'], data['v'], data['temp']]):
        # Split nominal values and standard deviations for error bars.
        p = unp.nominal_values(data['p'])
        v = unp.nominal_values(data['v'])
        temp = unp.nominal_values(data['temp'])
        sp = unp.std_devs(data['p'])
        sv = unp.std_devs(data['v'])
        stemp = unp.std_devs(data['temp'])
        ax[0].errorbar(p, v, xerr=sp, yerr=sv, marker=' ', c='k', ms=0, mew=0,
                       linestyle='None', capsize=0, lw=0.5, zorder=1)
        ax[1].errorbar(p, temp, xerr=sp, yerr=stemp, marker=' ', c='k', ms=0, mew=0,
                       linestyle='None', capsize=0, lw=0.5, zorder=1)
    else:
        p = data['p']
        v = data['v']
        temp = data['temp']
    points = ax[0].scatter(p, v, marker='o', s=ms_data, c=temp, cmap=c_map,
                           vmin=300., vmax=temp.max(), zorder=2)
    points = ax[1].scatter(p, temp, marker='o', s=ms_data, c=temp, cmap=c_map,
                           vmin=300., vmax=temp.max(), zorder=2)
    ax[0].set_xlabel('Pressure (GPa)')
    ax[1].set_xlabel('Pressure (GPa)')
    ax[0].set_ylabel(v_label)
    ax[1].set_ylabel('Temperature (K)')
    f.suptitle(title)
    # Shared colorbar along the right edge.
    position = f.add_axes([0.92, 0.11, .01, 0.75])
    f.colorbar(points, orientation="vertical", cax=position)
    if pdf_filen is not None:
        f.savefig(pdf_filen)
plot P - V - T data before fitting
7,520
def _do_digest(data, func):
    """Return the binary digest of *data* with the given *func*.

    :raises ValueError: when no hashlib implementation is available.
    """
    resolved = FuncReg.get(func)
    hasher = FuncReg.hash_from_func(resolved)
    if not hasher:
        raise ValueError("no available hash function for hash", resolved)
    hasher.update(data)
    return bytes(hasher.digest())
Return the binary digest of data with the given func .
7,521
def digest(data, func):
    """Hash the given data into a new Multihash."""
    return Multihash(func, _do_digest(data, func))
Hash the given data into a new Multihash .
7,522
def decode(mhash, encoding=None):
    r"""Decode a multihash-encoded digest into a Multihash.

    :raises ValueError: when the input is too short or the length field lies.
    """
    mhash = bytes(mhash)
    if encoding:
        mhash = CodecReg.get_decoder(encoding)(mhash)
    try:
        # Layout: <func byte><length byte><digest bytes>.
        func = mhash[0]
        length = mhash[1]
        digest = mhash[2:]
    except IndexError as ie:
        raise ValueError("multihash is too short") from ie
    if length != len(digest):
        raise ValueError("multihash length field does not match digest field length")
    return Multihash(func, digest)
Decode a multihash-encoded digest into a Multihash.
7,523
def from_hash(self, hash):
    """Create a Multihash from a hashlib-compatible hash object.

    :raises ValueError: when the hash's algorithm has no multihash mapping.
    """
    try:
        func = FuncReg.func_from_hash(hash)
    except KeyError as ke:
        raise ValueError("no matching multihash function", hash.name) from ke
    return Multihash(func, hash.digest())
Create a Multihash from a hashlib - compatible hash object .
7,524
def encode(self, encoding=None):
    r"""Encode into a multihash-encoded digest, optionally in the given codec."""
    try:
        # Func enum members carry the code in .value; raw ints are used as-is.
        code = self.func.value
    except AttributeError:
        code = self.func
    encoded = bytes([code, len(self.digest)]) + self.digest
    if encoding:
        encoded = CodecReg.get_encoder(encoding)(encoded)
    return encoded
Encode into a multihash-encoded digest.
7,525
def verify(self, data):
    r"""Return whether *data* hashes to the (possibly truncated) digest held here."""
    full_digest = _do_digest(data, self.func)
    return full_digest[:len(self.digest)] == self.digest
Does the given data hash to the digest in this Multihash?
7,526
def truncate(self, length):
    """Return a new Multihash with the digest cut down to *length* bytes.

    :raises ValueError: when *length* exceeds the current digest length.
    """
    if length > len(self.digest):
        raise ValueError("cannot enlarge the original digest by %d bytes"
                         % (length - len(self.digest)))
    return self.__class__(self.func, self.digest[:length])
Return a new Multihash with a shorter digest length .
7,527
def set(self, key: URIRef, value: Union[Literal, BNode, URIRef, str, int],
        lang: Optional[str] = None):
    """Set the value for *key* predicate in the metadata graph (replacing any prior one)."""
    if not isinstance(value, Literal) and lang is not None:
        value = Literal(value, lang=lang)
    elif not isinstance(value, (BNode, URIRef)):
        # Coerce plain Python values to the matching RDF literal type.
        value, _type = term._castPythonToLiteral(value)
        if _type is None:
            value = Literal(value)
        else:
            value = Literal(value, datatype=_type)
    self.graph.set((self.asNode(), key, value))
Set the VALUE for KEY predicate in the Metadata Graph
7,528
def add(self, key, value, lang=None):
    """Add a triple (this node, key, value) to the graph."""
    if not isinstance(value, Literal) and lang is not None:
        value = Literal(value, lang=lang)
    elif not isinstance(value, (BNode, URIRef)):
        # Coerce plain Python values to the matching RDF literal type.
        value, _type = term._castPythonToLiteral(value)
        if _type is None:
            value = Literal(value)
        else:
            value = Literal(value, datatype=_type)
    self.graph.add((self.asNode(), key, value))
Add a triple to the graph related to this node
7,529
def get(self, key, lang=None):
    """Yield objects of triples for this node and *key*, optionally filtered by language."""
    if lang is not None:
        for obj in self.graph.objects(self.asNode(), key):
            if obj.language == lang:
                yield obj
    else:
        for obj in self.graph.objects(self.asNode(), key):
            yield obj
Returns triple related to this node . Can filter on lang
7,530
def get_single(self, key, lang=None):
    """Return a single object for this node and *key*.

    With *lang*, prefers a matching-language literal but falls back to the
    last one seen; without, returns the first object found.
    """
    if not isinstance(key, URIRef):
        key = URIRef(key)
    if lang is not None:
        fallback = None
        for obj in self.graph.objects(self.asNode(), key):
            fallback = obj
            if obj.language == lang:
                return obj
        return fallback
    for obj in self.graph.objects(self.asNode(), key):
        return obj
Returns a single triple related to this node .
7,531
def remove(self, predicate=None, obj=None):
    """Remove triples with this node as subject, matching predicate and/or object."""
    self.graph.remove((self.asNode(), predicate, obj))
Remove triple matching the predicate or the object
7,532
def unlink(self, subj=None, predicate=None):
    """Remove triples where this metadata node is the object."""
    self.graph.remove((subj, predicate, self.asNode()))
Remove triple where Metadata is the object
7,533
def getOr(subject, predicate, *args, **kwargs):
    """Retrieve an existing metadata node for (subject, predicate) or build a new one."""
    if (subject, predicate, None) in get_graph():
        return Metadata(node=next(get_graph().objects(subject, predicate)))
    return Metadata(*args, **kwargs)
Retrieve a metadata node or generate a new one
7,534
def forwards_func(apps, schema_editor):
    """Create a config file for every existing BackupRun (migration 0004)."""
    print("\n")
    create_count = 0
    BackupRun = apps.get_model("backup_app", "BackupRun")
    for backup_run in BackupRun.objects.all():
        temp = OriginBackupRun(name=backup_run.name,
                               backup_datetime=backup_run.backup_datetime)
        try:
            temp.write_config()
        except OSError as err:
            print("ERROR creating config file: %s" % err)
        else:
            create_count += 1
    print("%i config files created.\n" % create_count)
manage migrate backup_app 0004_BackupRun_ini_file_20160203_1415
7,535
def reverse_func(apps, schema_editor):
    """Remove the config file of every existing BackupRun (back to migration 0003)."""
    print("\n")
    remove_count = 0
    BackupRun = apps.get_model("backup_app", "BackupRun")
    for backup_run in BackupRun.objects.all():
        temp = OriginBackupRun(name=backup_run.name,
                               backup_datetime=backup_run.backup_datetime)
        config_path = temp.get_config_path()
        try:
            config_path.unlink()
        except OSError as err:
            print("ERROR removing config file: %s" % err)
        else:
            remove_count += 1
    print("%i config files removed.\n" % remove_count)
manage migrate backup_app 0003_auto_20160127_2002
7,536
def speziale_grun(v, v0, gamma0, q0, q1):
    """Calculate the Gruneisen parameter for the Speziale equation.

    Uses uncertainty-aware exp when any input carries uncertainties.
    """
    exp = unp.exp if isuncertainties([v, v0, gamma0, q0, q1]) else np.exp
    return gamma0 * exp(q0 / q1 * ((v / v0) ** q1 - 1.))
calculate Gruneisen parameter for the Speziale equation
7,537
def speziale_debyetemp(v, v0, gamma0, q0, q1, theta0):
    """Calculate the Debye temperature for the Speziale equation."""
    if isuncertainties([v, v0, gamma0, q0, q1, theta0]):
        # Wrap the scalar integral so it propagates uncertainties, vectorized over v.
        integrand = np.vectorize(uct.wrap(integrate_gamma), excluded=[1, 2, 3, 4, 5, 6])
        integ = integrand(v, v0, gamma0, q0, q1, theta0)
        theta = unp.exp(unp.log(theta0) - integ)
    else:
        integrand = np.vectorize(integrate_gamma, excluded=[1, 2, 3, 4, 5, 6])
        integ = integrand(v, v0, gamma0, q0, q1, theta0)
        theta = np.exp(np.log(theta0) - integ)
    return theta
calculate Debye temperature for the Speziale equation
7,538
def integrate_gamma(v, v0, gamma0, q0, q1, theta0):
    """Integrate gamma/v from v0 to v for the Debye temperature calculation.

    `theta0` is unused here but kept for signature compatibility with callers
    that pass the full parameter set.
    """
    def gamma_over_v(x):
        return gamma0 * np.exp(q0 / q1 * ((x / v0) ** q1 - 1.)) / x
    return quad(gamma_over_v, v0, v)[0]
internal function to calculate Debye temperature
7,539
def speziale_pth(v, temp, v0, gamma0, q0, q1, theta0, n, z,
                 t_ref=300., three_r=3. * constants.R):
    """Calculate thermal pressure (GPa) for the Speziale equation."""
    v_mol = vol_uc2mol(v, z)
    gamma = speziale_grun(v, v0, gamma0, q0, q1)
    theta = speziale_debyetemp(v, v0, gamma0, q0, q1, theta0)
    debye = debye_E(theta / temp)
    if t_ref == 0.:
        # No reference thermal energy at absolute zero.
        debye0 = 0.
    else:
        debye0 = debye_E(theta / t_ref)
    e_th0 = three_r * n * t_ref * debye0
    e_th = three_r * n * temp * debye
    # Pa -> GPa via 1.e-9.
    return (gamma / v_mol * (e_th - e_th0)) * 1.e-9
calculate thermal pressure for the Speziale equation
7,540
def text(self) -> str:
    """Plain-text representation of the text."""
    return self.export(output=Mimetypes.PLAINTEXT, exclude=self.default_exclude)
String representation of the text
7,541
def set_creator(self, value: Union[Literal, Identifier, str], lang: str = None):
    """Add a DC Creator literal value to the metadata."""
    self.metadata.add(key=DC.creator, value=value, lang=lang)
Set the DC Creator literal value
7,542
def set_title(self, value: Union[Literal, Identifier, str], lang: str = None):
    """Add a DC Title literal value to the metadata."""
    return self.metadata.add(key=DC.title, value=value, lang=lang)
Set the DC Title literal value
7,543
def get_description(self, lang: str = None) -> Literal:
    """Get the DC Description of the object, optionally for a given language."""
    return self.metadata.get_single(key=DC.description, lang=lang)
Get the description of the object
7,544
def set_description(self, value: Union[Literal, Identifier, str], lang: str = None):
    """Add a DC Description literal value to the metadata."""
    return self.metadata.add(key=DC.description, value=value, lang=lang)
Set the DC Description literal value
7,545
def set_subject(self, value: Union[Literal, Identifier, str], lang: str = None):
    """Add a DC Subject literal value to the metadata."""
    return self.metadata.add(key=DC.subject, value=value, lang=lang)
Set the DC Subject literal value
7,546
def childIds(self) -> BaseReferenceSet:
    """Identifiers of children, lazily fetched and cached on first access."""
    if self._childIds is None:
        self._childIds = self.getReffs()
    return self._childIds
Identifiers of children
7,547
def firstId(self) -> BaseReference:
    """First child's id of the current TextualNode (None when there are no children).

    :raises NotImplementedError: when child ids are unavailable.
    """
    if self.childIds is None:
        raise NotImplementedError
    if len(self.childIds) > 0:
        return self.childIds[0]
    return None
First child's id of the current TextualNode
7,548
def lastId(self) -> BaseReference:
    """Last child's id of the current TextualNode (None when there are no children).

    :raises NotImplementedError: when child ids are unavailable.
    """
    if self.childIds is None:
        raise NotImplementedError
    if len(self.childIds) > 0:
        return self.childIds[-1]
    return None
Last child's id of the current TextualNode
7,549
def compile_vocab(docs, limit=1e6, verbose=0,
                  tokenizer=Tokenizer(stem=None, lower=None, strip=None)):
    """Build a gensim Dictionary mapping every word in *docs* to an integer id.

    Accepts Django-queryset-like objects (with .count()/.iterator()) or plain
    iterables of strings/records.
    """
    tokenizer = make_tokenizer(tokenizer)
    vocab = Dictionary()
    try:
        limit = min(limit, docs.count())
        docs = docs.iterator()
    except (AttributeError, TypeError):
        # Plain iterables lack the queryset API.
        pass
    for i, doc in enumerate(docs):
        try:
            doc = doc.values()
        except AttributeError:
            if not isinstance(doc, str):
                doc = ' '.join([str(v) for v in doc])
            else:
                doc = str(doc)
        if i >= limit:
            break
        vocab.add_documents([list(tokenizer(doc))])
        if verbose and not i % 100:
            log.info('{}: {}'.format(i, repr(vocab)[:120]))
    return vocab
Get the set of words used anywhere in a sequence of documents and assign an integer id
7,550
def gen_file_lines(path, mode='r', strip_eol=True, ascii=True, eol='\n'):
    """Generate a sequence of lines from a file path or an open file object.

    :param path: file path (str) or an already-open file object
    :param mode: mode used when *path* is a str (default 'r'; previously the
        invalid 'rUb', which Python 3 rejects — 'U' cannot combine with 'b')
    :param strip_eol: strip the trailing *eol* characters from each line
    :param ascii: decode bytes lines to str (previously used `str(line)`,
        which yields the "b'...'" repr for bytes)
    :param eol: line-ending characters stripped when *strip_eol* is set
    :yields: one line at a time
    """
    if isinstance(path, str):
        path = open(path, mode)
    with path:
        for line in path:
            if ascii and isinstance(line, bytes):
                # Binary mode: decode instead of taking the bytes repr.
                line = line.decode('ascii', errors='replace')
            if strip_eol:
                line = line.rstrip(eol)
            yield line
Generate a sequence of documents from the lines in a file
7,551
def inventory(self, inventory_name):
    """Decorator registering a filter function for the given inventory.

    Equivalent to calling self.add(func=f, inventory_name=inventory_name).
    """
    def decorator(f):
        self.add(func=f, inventory_name=inventory_name)
        return f
    return decorator
Decorator to register filters for given inventory . For a function abc it has the same effect
7,552
def dispatch(self, collection, **kwargs):
    """Dispatch a collection using internal filters (most recent filter first).

    :raises UndispatchedTextError: when no filter accepts the collection.
    """
    for inventory, method in self.methods[::-1]:
        if method(collection, **kwargs) is True:
            collection.parent = self.collection.children[inventory]
            return
    raise UndispatchedTextError("CapitainsCtsText not dispatched %s" % collection.id)
Dispatch a collection using internal filters
7,553
def generate_tokens(doc, regex=CRE_TOKEN, strip=True, nonwords=False):
    r"""Yield words/tokens from *doc* by iterating regex matches through the string.

    :param strip: strip punctuation-like characters from each token's edges
    :param nonwords: also yield tokens matching the non-word pattern
    """
    if isinstance(regex, basestring):
        regex = re.compile(regex)
    for match in regex.finditer(doc):
        if not match:
            continue
        token = match.group()
        if strip:
            token = token.strip(r'-_*`()}{' + r"'")
        if token and (nonwords or not re.match(r'^' + RE_NONWORD + '$', token)):
            yield token
Return a sequence of words or tokens by matching a regex iteratively through the string
7,554
def financial_float(s, scale_factor=1, typ=float, ignore=FINANCIAL_WHITESPACE,
                    percent_str=PERCENT_SYMBOLS, replace=FINANCIAL_MAPPING,
                    normalize_case=str.lower):
    """Strip dollar signs, commas, etc. from a financial numerical string.

    Returns the original value unchanged when it cannot be parsed as a number.
    """
    percent_scale_factor = 1
    if isinstance(s, basestring):
        s = normalize_case(s).strip()
        for token in ignore:
            s = s.replace(normalize_case(token), '')
        s = s.strip()
        for old, new in replace:
            s = s.replace(old, new)
        for suffix in percent_str:
            if s.endswith(suffix):
                percent_scale_factor *= 0.01
                s = s[:-len(suffix)]
    try:
        # scale_factor < 1 overrides the percent scaling.
        return (scale_factor if scale_factor < 1 else percent_scale_factor) * typ(float(s))
    except (ValueError, TypeError):
        return s
Strip dollar signs and commas from financial numerical string
7,555
def is_invalid_date(d):
    """Return True for an out-of-range date, None for a valid date, False for non-dates."""
    if not isinstance(d, DATE_TYPES):
        return False
    if d.year < 1970 or d.year >= 2100:
        return True
Return a value indicating whether the date is invalid: True if invalid, None if valid, False if not a date
7,556
def vocab_freq(docs, limit=1e6, verbose=1, tokenizer=generate_tokens):
    """Count occurrences of every token used anywhere in a sequence of documents.

    Accepts Django-queryset-like objects (with .count()/.iterator()) or plain
    iterables of strings/records. Returns a collections.Counter.

    Fixes: the previous bare ``except:`` silently swallowed all errors
    (now matches compile_vocab's targeted handling), and ``c.keys()[:3]``
    sliced a dict view, which raises TypeError on Python 3.
    """
    total = Counter()
    try:
        limit = min(limit, docs.count())
        docs = docs.iterator()
    except (AttributeError, TypeError):
        # Plain iterables lack the queryset API.
        pass
    for i, doc in enumerate(docs):
        try:
            doc = doc.values()
        except AttributeError:
            if not isinstance(doc, basestring):
                doc = ' '.join([stringify(v) for v in doc])
            else:
                doc = stringify(doc)
        if i >= limit:
            break
        c = Counter(tokenizer(doc, strip=True, nonwords=False))
        if verbose and (verbose < 1e-3 or not i % int(limit * verbose)):
            keys = list(c.keys())  # dict views are not sliceable on Python 3
            print('{}: {} ... {}'.format(i, keys[:3], keys[-3:] if len(keys) > 6 else ''))
        total += c
    return total
Get the set of words used anywhere in a sequence of documents and count occurrences
7,557
def make_filename(s, allow_whitespace=False, allow_underscore=False,
                  allow_hyphen=False, limit=255, lower=False):
    r"""Make sure the provided string is a valid filename; optionally strip
    whitespace, underscores and hyphens, lowercase it, and cap its length.

    Returns 'empty' (possibly truncated) when nothing survives the cleanup.
    """
    cleaned = stringify(s)
    cleaned = CRE_BAD_FILENAME.sub('', cleaned)
    if not allow_whitespace:
        cleaned = CRE_WHITESPACE.sub('', cleaned)
    if lower:
        cleaned = str.lower(cleaned)
    if not allow_hyphen:
        cleaned = cleaned.replace('-', '')
    if not allow_underscore:
        cleaned = cleaned.replace('_', '')
    if limit is not None:
        cleaned = cleaned[:limit]
    return cleaned or 'empty'[:limit]
r Make sure the provided string is a valid filename and optionally remove whitespace
7,558
def stem(self, s):
    """Stem *s*, falling back to lemmatize/passthrough.

    Written with getattr chains (rather than bound methods) so the Stemmer
    stays picklable and unpicklable.
    """
    if self._stemmer is None:
        return passthrough(s)
    try:
        # May raise TypeError when the stemmer has no .stem (getattr -> None).
        return getattr(getattr(self, '_stemmer', None), 'stem', None)(s)
    except (AttributeError, TypeError):
        return getattr(getattr(self, '_stemmer', self), 'lemmatize', passthrough)(s)
This should make the Stemmer picklable and unpicklable by not using bound methods
7,559
def assoc(self, index, value):
    """Return a new vector with *value* associated at *index*; self is unmodified."""
    updated = ImmutableVector()
    updated.tree = self.tree.assoc(index, value)
    # Extend the length when assigning past the current end.
    updated._length = index + 1 if index >= self._length else self._length
    return updated
Return a new vector with value associated at index . The implicit parameter is not modified .
7,560
def concat(self, tailvec):
    """Return the result of concatenating *tailvec* onto this vector."""
    combined = ImmutableVector()
    # Shift tailvec's indices past the end of this vector.
    pairs = [(self._length + i, tailvec[i]) for i in range(0, tailvec._length)]
    combined.tree = self.tree.multi_assoc(pairs)
    combined._length = self._length + tailvec._length
    return combined
Returns the result of concatenating tailvec to the implicit parameter
7,561
def pop(self):
    """Return a new ImmutableVector with the last item removed.

    :raises IndexError: when the vector is empty.
    """
    if self._length == 0:
        raise IndexError()
    shortened = ImmutableVector()
    shortened.tree = self.tree.remove(self._length - 1)
    shortened._length = self._length - 1
    return shortened
Return a new ImmutableVector with the last item removed .
7,562
def read(self, identifier, path):
    """Retrieve and parse a text from *path*, keyed by *identifier*."""
    with open(path) as f:
        return self.classes["text"](urn=identifier, resource=self.xmlparse(f))
Retrieve and parse a text given an identifier
7,563
def _parse_textgroup(self, cts_file):
    """Parse a textgroup from a __cts__.xml file; return (textgroup, file path)."""
    with io.open(cts_file) as xml_file:
        return self.classes["textgroup"].parse(resource=xml_file), cts_file
Parses a textgroup from a cts file
7,564
def _parse_work(self, cts_file, textgroup):
    """Parse a work from a __cts__.xml file; return (work, texts, containing directory)."""
    with io.open(cts_file) as xml_file:
        work, texts = self.classes["work"].parse(
            resource=xml_file, parent=textgroup, _with_children=True)
    return work, texts, os.path.dirname(cts_file)
Parses a work from a cts file
7,565
def _parse_text(self, text, directory):
    """Complete the text metadata with its citation scheme by parsing the source file.

    Returns True on success, False when the file is missing, unparsable, or
    declares no passages.
    """
    text_id, text_metadata = text.id, text
    text_metadata.path = "{directory}/{textgroup}.{work}.{version}.xml".format(
        directory=directory,
        textgroup=text_metadata.urn.textgroup,
        work=text_metadata.urn.work,
        version=text_metadata.urn.version
    )
    if not os.path.isfile(text_metadata.path):
        self.logger.error("%s is not present", text_metadata.path)
        return False
    try:
        parsed = self.read(text_id, path=text_metadata.path)
        # Rebuild the citation chain from deepest to shallowest.
        cites = list()
        for cite in [c for c in parsed.citation][::-1]:
            if len(cites) >= 1:
                cites.append(self.classes["citation"](
                    xpath=cite.xpath.replace("'", '"'),
                    scope=cite.scope.replace("'", '"'),
                    name=cite.name,
                    child=cites[-1]
                ))
            else:
                cites.append(self.classes["citation"](
                    xpath=cite.xpath.replace("'", '"'),
                    scope=cite.scope.replace("'", '"'),
                    name=cite.name
                ))
        del parsed
        text_metadata.citation = cites[-1]
        self.logger.info("%s has been parsed ", text_metadata.path)
        if not text_metadata.citation.is_set():
            self.logger.error("%s has no passages", text_metadata.path)
            return False
        return True
    except Exception:
        self.logger.error(
            "%s does not accept parsing at some level (most probably citation) ",
            text_metadata.path)
        return False
Complete the TextMetadata object with its citation scheme by parsing the original text
7,566
def _dispatch(self, textgroup, directory):
    """Run the dispatcher over a textgroup, merging into known collections.

    :param textgroup: Textgroup object to dispatch
    :param directory: Path from which the textgroup was read
    """
    collection = self.dispatcher.collection
    if textgroup.id not in collection:
        # First time this textgroup is seen: hand it to the dispatcher
        self.dispatcher.dispatch(textgroup, path=directory)
    else:
        # Already known: merge the new data into the existing record
        collection[textgroup.id].update(textgroup)
    known_works = collection[textgroup.id].works
    for work_urn, work in textgroup.works.items():
        if work_urn in known_works:
            collection[work_urn].update(work)
Run the dispatcher over a textgroup .
7,567
def parse(self, resource):
    """Parse a list of directories and read them into a collection.

    :param resource: Iterable of base folders to scan for ``__cts__.xml`` files
    :return: The resulting inventory (the dispatcher's collection)
    """
    textgroups = []
    texts = []
    invalids = []
    # Pass 1: collect every textgroup metadata file
    for folder in resource:
        cts_files = glob("{base_folder}/data/*/__cts__.xml".format(base_folder=folder))
        for cts_file in cts_files:
            textgroup, cts_file = self._parse_textgroup(cts_file)
            textgroups.append((textgroup, cts_file))
    # Pass 2: collect works (and their texts) under each textgroup
    for textgroup, cts_textgroup_file in textgroups:
        cts_work_files = glob("{parent}/*/__cts__.xml".format(
            parent=os.path.dirname(cts_textgroup_file)))
        for cts_work_file in cts_work_files:
            _, parsed_texts, directory = self._parse_work(cts_work_file, textgroup)
            texts.extend([(text, directory) for text in parsed_texts])
    # Pass 3: parse each text; remember the ones that failed
    for text, directory in texts:
        if not self._parse_text(text, directory):
            invalids.append(text)
    # Pass 4: dispatch every textgroup container, then drop invalid texts
    for textgroup, textgroup_path in textgroups:
        self._dispatch_container(textgroup, textgroup_path)
    self._clean_invalids(invalids)
    self.inventory = self.dispatcher.collection
    return self.inventory
Parse a list of directories and read them into a collection
7,568
def velocities_to_moduli(rho, v_phi, v_s):
    """Convert velocities to elastic moduli, mainly to support Burnman operations.

    :param rho: density
    :param v_phi: bulk sound velocity
    :param v_s: shear velocity
    :return: tuple of (bulk modulus, shear modulus)
    """
    bulk_modulus = v_phi * v_phi * rho
    shear_modulus = v_s * v_s * rho
    return bulk_modulus, shear_modulus
convert velocities to moduli mainly to support Burnman operations
7,569
def moduli_to_velocities(rho, K_s, G):
    """Convert elastic moduli to velocities, mainly to support Burnman operations.

    :param rho: density
    :param K_s: adiabatic bulk modulus
    :param G: shear modulus
    :return: tuple of (bulk sound velocity, shear velocity)
    """
    v_phi = np.sqrt(K_s / rho)
    v_s = np.sqrt(G / rho)
    return v_phi, v_s
convert moduli to velocities mainly to support Burnman operations
7,570
def jamieson_pst(v, v0, c0, s, gamma0, q, theta0, n, z, mass, c_v,
                 three_r=3. * constants.R, t_ref=300.):
    """Calculate static pressure at 300 K from Hugoniot data using the
    constq formulation.

    The thermal pressure along the Hugoniot is subtracted from the total
    Hugoniot pressure.

    :param v: unit-cell volume
    :param v0: unit-cell volume at reference conditions
    :param c0: velocity intercept of the linear Us-Up fit
    :param s: slope of the linear Us-Up fit
    :param gamma0: Grueneisen parameter at reference conditions
    :param q: volume dependence exponent of the Grueneisen parameter
    :param theta0: Debye temperature at reference conditions
    :param n: number of atoms per formula unit
    :param z: number of formula units per unit cell
    :param mass: molar mass
    :param c_v: heat capacity
    :param three_r: 3R term (gas constant based)
    :param t_ref: reference temperature
    :return: static pressure
    """
    # 1.e-6 factor converts the molar volume based value into the density
    # unit used by hugoniot_p — presumably g/cm^3; TODO confirm
    density = mass / vol_uc2mol(v, z) * 1.e-6
    density0 = mass / vol_uc2mol(v0, z) * 1.e-6
    total_p = hugoniot_p(density, density0, c0, s)
    thermal_p = jamieson_pth(v, v0, c0, s, gamma0, q, theta0, n, z, mass,
                             c_v, three_r=three_r, t_ref=t_ref)
    return total_p - thermal_p
calculate static pressure at 300 K from Hugoniot data using the constq formulation
7,571
def jamieson_pth(v, v0, c0, s, gamma0, q, theta0, n, z, mass, c_v,
                 three_r=3. * constants.R, t_ref=300.):
    """Calculate thermal pressure from Hugoniot data using the constq
    formulation.

    The temperature along the Hugoniot is computed first and then fed into
    the constant-q thermal pressure expression.

    :param v: unit-cell volume
    :param v0: unit-cell volume at reference conditions
    :param c0: velocity intercept of the linear Us-Up fit
    :param s: slope of the linear Us-Up fit
    :param gamma0: Grueneisen parameter at reference conditions
    :param q: volume dependence exponent of the Grueneisen parameter
    :param theta0: Debye temperature at reference conditions
    :param n: number of atoms per formula unit
    :param z: number of formula units per unit cell
    :param mass: molar mass
    :param c_v: heat capacity
    :param three_r: 3R term (gas constant based)
    :param t_ref: reference temperature
    :return: thermal pressure
    """
    # Same density conversion as jamieson_pst — presumably g/cm^3; TODO confirm
    density = mass / vol_uc2mol(v, z) * 1.e-6
    density0 = mass / vol_uc2mol(v0, z) * 1.e-6
    hugoniot_temp = hugoniot_t(density, density0, c0, s, gamma0, q, theta0,
                               n, mass, three_r=three_r, t_ref=t_ref, c_v=c_v)
    return constq_pth(v, hugoniot_temp, v0, gamma0, q, theta0, n, z,
                      t_ref=t_ref, three_r=three_r)
calculate thermal pressure from Hugoniot data using the constq formulation
7,572
def hugoniot_p_nlin(rho, rho0, a, b, c):
    """Calculate pressure along a Hugoniot through the nonlinear equations
    presented in Jamieson 1982.

    :param rho: density of the compressed state (array-like)
    :param rho0: density at reference conditions
    :param a: constant term of the quadratic Us(Up) fit
    :param b: linear coefficient of the quadratic Us(Up) fit
    :param c: quadratic coefficient of the quadratic Us(Up) fit
    :return: pressure along the Hugoniot
    """
    # Compression measure; eta == 0 corresponds to the uncompressed state
    eta = 1. - (rho0 / rho)
    Up = np.zeros_like(eta)
    # Solve eta = Up / Us with Us = a + b*Up + c*Up**2 for the particle
    # velocity Up via the quadratic formula.  The uncertainty-aware sqrt
    # (unp) is used when any input carries uncertainties.
    # NOTE(review): the right-hand side is evaluated with the full eta
    # array, so the masked assignment assumes every element has eta != 0 —
    # confirm against callers.
    if isuncertainties([rho, rho0, a, b, c]):
        Up[eta != 0.] = ((b * eta - 1.) +
                         unp.sqrt(np.power((1. - b * eta), 2.) -
                                  4. * np.power(eta, 2.) * a * c)) / (-2. * eta * c)
    else:
        Up[eta != 0.] = ((b * eta - 1.) +
                         np.sqrt(np.power((1. - b * eta), 2.) -
                                 4. * np.power(eta, 2.) * a * c)) / (-2. * eta * c)
    # Shock velocity from the quadratic fit, then the Rankine-Hugoniot
    # momentum relation P = rho0 * Up * Us
    Us = a + Up * b + Up * Up * c
    Ph = rho0 * Up * Us
    return Ph
calculate pressure along a Hugoniot through nonlinear equations presented in Jamieson 1982
7,573
def generate_address_label(self):
    """Construct the address-label list and return the joined label.

    Builds ``self.address_label`` from organisation/PO-box fields and the
    premises/thoroughfare/locality elements, pads the list to seven
    entries, places the post town in the sixth slot, and joins the
    non-empty entries with commas.

    :return: comma-separated address label string
    """
    if self.organisation_name:
        self.address_label.append(self.organisation_name)
    if self.department_name:
        self.address_label.append(self.department_name)
    if self.po_box_number:
        self.address_label.append('PO Box ' + self.po_box_number)
    # Premises and locality elements, in label order
    elements = [
        self.sub_building_name,
        self.building_name,
        self.building_number,
        self.dependent_thoroughfare,
        self.thoroughfare,
        self.double_dependent_locality,
        self.dependent_locality,
    ]
    for element in elements:
        if element:
            # _append_to_label may merge the element into the previous entry
            # (exception rules) instead of appending a new one
            self._append_to_label(element)
    # Pad to seven entries so the post town can occupy a fixed slot
    if len(self.address_label) < 7:
        for i in range(7 - len(self.address_label)):
            self.address_label.append('')
    # NOTE(review): index 5 overwrites whatever is there when more than six
    # elements were collected — confirm this is the intended behavior
    self.address_label[5] = self.post_town
    return ", ".join([f for f in self.address_label if f])
Construct a list for address label .
7,574
def _is_exception_rule ( self , element ) : if element [ 0 ] . isdigit ( ) and element [ - 1 ] . isdigit ( ) : return True if len ( element ) > 1 and element [ 0 ] . isdigit ( ) and element [ - 2 ] . isdigit ( ) and element [ - 1 ] . isalpha ( ) : return True if len ( element ) == 1 and element . isalpha ( ) : return True return False
Check for exception rule .
7,575
def _append_to_label(self, element):
    """Append an address element to the label.

    When the previous label entry matches an exception rule, the new
    element is merged into it (space-separated) instead of being appended
    as a new entry.

    :param element: address element string to add
    """
    label = self.address_label
    if label and self._is_exception_rule(label[-1]):
        label[-1] = label[-1] + ' ' + element
    else:
        label.append(element)
Append address element to the label .
7,576
def load_template_source(template_name, template_dirs=None):
    """Template loader that loads templates from a ZIP file.

    Archives are taken from ``settings.TEMPLATE_ZIP_FILES`` and searched in
    order; the first archive containing *template_name* wins.

    :param template_name: Name of the template inside the archive
    :param template_dirs: Unused; kept for loader-interface compatibility
    :return: Tuple of (template source, "zip_path:template_name")
    :raises TemplateDoesNotExist: when no configured archive has the template
    """
    template_zipfiles = getattr(settings, "TEMPLATE_ZIP_FILES", [])
    for fname in template_zipfiles:
        # Skip archives that cannot be opened (IOError) or that do not
        # contain the requested member (KeyError).
        try:
            # Bug fix: the original leaked the ZipFile handle when z.read()
            # raised KeyError; the context manager closes it on every path.
            with zipfile.ZipFile(fname) as z:
                source = z.read(template_name)
        except (IOError, KeyError):
            continue
        template_path = "%s:%s" % (fname, template_name)
        return (source, template_path)
    raise TemplateDoesNotExist(template_name)
Template loader that loads templates from a ZIP file .
7,577
def sanitize_capabilities(caps):
    """Sanitize the capabilities we pass to Selenic so that they can be
    consumed by Browserstack.

    The ``platform`` value is rewritten to Browserstack's vocabulary; for
    Windows 10 the Selenium-style keys are converted to Browserstack's
    ``os``/``os_version``/``browser_version`` keys.

    :param caps: capabilities dictionary, modified in place
    :return: the same dictionary, for chaining
    """
    upper_platform = caps["platform"].upper()
    if upper_platform.startswith("WINDOWS 8"):
        caps["platform"] = "WIN8"
    elif upper_platform.startswith("OS X "):
        caps["platform"] = "MAC"
    elif upper_platform == "WINDOWS 10":
        # Browserstack wants os/os_version instead of a platform string
        del caps["platform"]
        caps["os"] = "Windows"
        caps["os_version"] = "10"
        if caps["browserName"].upper() == "MICROSOFTEDGE":
            # Edge versions must be of the form "<major>.0"
            caps["version"] = caps["version"].split(".", 1)[0] + ".0"
        caps["browser_version"] = caps["version"]
        del caps["version"]
    return caps
Sanitize the capabilities we pass to Selenic so that they can be consumed by Browserstack .
7,578
def my_func(version):
    """Enclosing function.

    Defines a class whose property setter comes from a version-dependent
    support module chosen at class-creation time.

    :param version: major Python version selecting the support module
    """
    class MyClass(object):
        # Class-body conditional import: the chosen module's _set_value is
        # used as the property setter below
        if version == 2:
            import docs.support.python2_module as pm
        else:
            import docs.support.python3_module as pm

        def __init__(self, value):
            self._value = value

        def _get_value(self):
            return self._value

        value = property(_get_value, pm._set_value, None, "Value property")
Enclosing function .
7,579
def get_subscriptions(self, publication_id=None, owner_id=None,
                      since_when=None, limit_to=200, max_calls=None,
                      start_record=0, verbose=False):
    """Fetch all subscriptions from MemberSuite, optionally filtered.

    :param publication_id: restrict to subscriptions of this publication
    :param owner_id: restrict to subscriptions owned by this owner
    :param since_when: only subscriptions modified within the last N days
    :param limit_to: page size for the long-running query
    :param max_calls: maximum number of paged calls to issue
    :param start_record: record offset to start from
    :param verbose: pass-through verbosity flag for get_long_query
    :return: list of subscription objects
    """
    query = "SELECT Objects() FROM Subscription"
    where_params = []
    # NOTE(review): filter values are interpolated directly into the query
    # string — assumes trusted ids; confirm MemberSuite-side escaping.
    if owner_id:
        where_params.append(('owner', '=', "'%s'" % owner_id))
    if publication_id:
        where_params.append(('publication', '=', "'%s'" % publication_id))
    if since_when:
        # Midnight N days ago, in the API's expected timestamp format
        d = datetime.date.today() - datetime.timedelta(days=since_when)
        where_params.append(('LastModifiedDate', ">", "'%s 00:00:00'" % d))
    if where_params:
        query += " WHERE "
        query += " AND ".join(
            ["%s %s %s" % (p[0], p[1], p[2]) for p in where_params])
    subscription_list = self.get_long_query(
        query, limit_to=limit_to, max_calls=max_calls,
        start_record=start_record, verbose=verbose)
    return subscription_list
Fetches all subscriptions from Membersuite of a particular publication_id if set .
7,580
def get_prep_value(self, value):
    """Serialize JSON wrapper objects for the database.

    The psycopg adaptor returns Python objects, so conversion back to a
    JSON string has to be handled here as well.

    :param value: value to prepare; JSON wrapper types are serialized,
        anything else is passed through unchanged
    :return: a JSON string for wrapper types, otherwise the value itself
    """
    handlers = (
        (JSON.JsonDict, lambda v: json.dumps(v, cls=JSON.Encoder)),
        (JSON.JsonList, lambda v: v.json_string),
        (JSON.JsonString, lambda v: json.dumps(v)),
    )
    for json_type, serialize in handlers:
        if isinstance(value, json_type):
            return serialize(value)
    return value
The psycopg adaptor returns Python objects but we also have to handle conversion ourselves
7,581
def registry(attr, base=type):
    """Generate a metaclass that indexes subclasses by the value of *attr*.

    Classes whose *attr* is ``NotImplemented`` (typically abstract bases)
    are left unregistered; all others are stored in a shared
    ``__registry__`` dict and can be retrieved with ``__dispatch__``.

    :param attr: name of the class attribute used as the registry key
    :param base: metaclass to derive from (defaults to ``type``)
    :return: the generated metaclass
    """
    class Registry(base):

        def __init__(cls, name, bases, attrs):
            super(Registry, cls).__init__(name, bases, attrs)
            # The first class created under this metaclass owns the
            # registry dict; subclasses inherit (and share) it
            if not hasattr(cls, '__registry__'):
                cls.__registry__ = {}
            key = getattr(cls, attr)
            if key is not NotImplemented:
                # Duplicate keys indicate a programming error
                assert key not in cls.__registry__
                cls.__registry__[key] = cls

        def __dispatch__(cls, key):
            # Resolve a key to the registered subclass
            try:
                return cls.__registry__[key]
            except KeyError:
                raise ValueError('Unknown %s: %s' % (attr, key))

    return Registry
Generates a meta class to index sub classes by their keys .
7,582
def debug_generate(self, debug_generator, *gen_args, **gen_kwargs):
    """Efficient debug logging: build the message lazily.

    *debug_generator* is only called when DEBUG records are enabled for
    this logger, so expensive message construction is skipped otherwise.
    A ``None`` result suppresses the log call entirely.

    :param debug_generator: callable producing the message (or None)
    :param gen_args: positional arguments for the generator
    :param gen_kwargs: keyword arguments for the generator
    :return: the result of ``self.debug`` when a message was emitted
    """
    if not self.isEnabledFor(logging.DEBUG):
        return
    message = debug_generator(*gen_args, **gen_kwargs)
    if message is not None:
        return self.debug(message)
Used for efficient debug logging where the actual message isn t evaluated unless it will actually be accepted by the logger .
7,583
def verify_token(token, public_key_or_address, signing_algorithm="ES256K"):
    """Validate an individual signed token.

    Checks that the payload carries a subject, an issuer (each with a
    public key) and a claim, that *public_key_or_address* matches the
    issuer key in any of its four representations (compressed or
    uncompressed public key, or either derived address), and that the
    token signature verifies against the issuer key.

    :param token: the serialized token to verify
    :param public_key_or_address: public key (hex) or address that must
        correspond to the issuer public key
    :param signing_algorithm: signature scheme identifier (default ES256K)
    :return: the decoded token
    :raises ValueError: if any structural check, key match, or signature
        verification fails
    """
    decoded_token = decode_token(token)
    decoded_token_payload = decoded_token["payload"]
    # Structural checks on the payload
    if "subject" not in decoded_token_payload:
        raise ValueError("Token doesn't have a subject")
    if "publicKey" not in decoded_token_payload["subject"]:
        raise ValueError("Token doesn't have a subject public key")
    if "issuer" not in decoded_token_payload:
        raise ValueError("Token doesn't have an issuer")
    if "publicKey" not in decoded_token_payload["issuer"]:
        raise ValueError("Token doesn't have an issuer public key")
    if "claim" not in decoded_token_payload:
        raise ValueError("Token doesn't have a claim")
    issuer_public_key = str(decoded_token_payload["issuer"]["publicKey"])
    public_key_object = ECPublicKey(issuer_public_key)
    compressed_public_key = compress(issuer_public_key)
    decompressed_public_key = decompress(issuer_public_key)
    # Derive both address forms from the issuer key, whatever encoding it
    # arrived in
    if public_key_object._type == PubkeyType.compressed:
        compressed_address = public_key_object.address()
        uncompressed_address = bin_hash160_to_address(
            bin_hash160(decompress(public_key_object.to_bin())))
    elif public_key_object._type == PubkeyType.uncompressed:
        compressed_address = bin_hash160_to_address(
            bin_hash160(compress(public_key_object.to_bin())))
        uncompressed_address = public_key_object.address()
    else:
        raise ValueError("Invalid issuer public key format")
    # Accept any of the four equivalent representations of the issuer key
    if public_key_or_address == compressed_public_key:
        pass
    elif public_key_or_address == decompressed_public_key:
        pass
    elif public_key_or_address == compressed_address:
        pass
    elif public_key_or_address == uncompressed_address:
        pass
    else:
        raise ValueError("Token public key doesn't match the verifying value")
    token_verifier = TokenVerifier()
    if not token_verifier.verify(token, public_key_object.to_pem()):
        raise ValueError("Token was not signed by the issuer public key")
    return decoded_token
A function for validating an individual token .
7,584
def verify_token_record(token_record, public_key_or_address,
                        signing_algorithm="ES256K"):
    """Validate an individual token record and extract the decoded token.

    :param token_record: mapping that must contain a ``"token"`` entry and
        may contain a ``"parentPublicKey"`` entry
    :param public_key_or_address: value to verify the token against
    :param signing_algorithm: signature scheme identifier (default ES256K)
    :return: the decoded token
    :raises ValueError: when the record has no token, the token fails
        verification, or it was signed with an unsupported keychain
    """
    if "token" not in token_record:
        raise ValueError("Token record must have a token inside it")
    decoded_token = verify_token(
        token_record["token"], public_key_or_address,
        signing_algorithm=signing_algorithm)
    issuer_public_key = decoded_token["payload"]["issuer"]["publicKey"]
    if "parentPublicKey" in token_record:
        # Only self-signed records are supported: the parent key must be
        # the issuer key itself
        if issuer_public_key != token_record["parentPublicKey"]:
            raise ValueError(
                "Verification of tokens signed with keychains is not yet"
                " supported")
    return decoded_token
A function for validating an individual token record and extracting the decoded token .
7,585
def get_profile_from_tokens(token_records, public_key_or_address,
                            hierarchical_keys=False):
    """Extract a profile by merging the claims of verifiable token records.

    Records that fail verification are skipped silently.

    :param token_records: iterable of token record mappings
    :param public_key_or_address: value each token is verified against
    :param hierarchical_keys: unsupported; must be False
    :return: merged profile dictionary
    :raises NotImplementedError: when hierarchical_keys is requested
    """
    if hierarchical_keys:
        raise NotImplementedError("Hierarchical key support not implemented")
    profile = {}
    for record in token_records:
        try:
            decoded = verify_token_record(record, public_key_or_address)
        except ValueError:
            # Unverifiable record: skip it rather than failing the profile
            continue
        if "payload" in decoded and "claim" in decoded["payload"]:
            profile.update(decoded["payload"]["claim"])
    return profile
A function for extracting a profile from a list of tokens .
7,586
def resolve_zone_file_to_profile(zone_file, address_or_public_key):
    """Resolve a zone file to a profile and verify that its tokens are
    signed with a key corresponding to the given address or public key.

    :param zone_file: zone file data (legacy-format profiles are returned
        unchanged)
    :param address_or_public_key: value the token issuer key must match
    :return: the extracted profile dictionary
    :raises Exception: with a stage-specific message when any step fails
    """
    if is_profile_in_legacy_format(zone_file):
        # Legacy zone files embed the profile directly
        return zone_file
    try:
        token_file_url = get_token_file_url_from_zone_file(zone_file)
    except Exception as e:
        # NOTE(review): the original error is swallowed here (and below) —
        # consider chaining it for easier debugging
        raise Exception("Token file URL could not be extracted from zone file")
    try:
        r = requests.get(token_file_url)
    except Exception as e:
        raise Exception("Token could not be acquired from token file URL")
    try:
        profile_token_records = json.loads(r.text)
    except ValueError:
        raise Exception("Token records could not be extracted from token file")
    try:
        profile = get_profile_from_tokens(profile_token_records,
                                          address_or_public_key)
    except Exception as e:
        raise Exception("Profile could not be extracted from token records")
    return profile
Resolves a zone file to a profile and checks to make sure the tokens are signed with a key that corresponds to the address or public key provided.
7,587
def __dog_started(self):
    """Prepare the watchdog for starting a scheduled task.

    :raises RuntimeError: if a task is already assigned, or the record's
        task is not a WScheduleTask
    """
    if self.__task is not None:
        raise RuntimeError(
            'Unable to start task. In order to start a new task - at first stop it')
    task = self.record().task()
    self.__task = task
    if not isinstance(task, WScheduleTask):
        raise RuntimeError(
            'Unable to start unknown type of task: %s' % task.__class__.__qualname__)
Prepare watchdog for scheduled task starting
7,588
def __thread_started(self):
    """Start the scheduled task and wait (up to the startup timeout) for
    it to signal that it has started.

    :raises RuntimeError: if no task was assigned via "start" beforehand
    """
    if self.__task is None:
        raise RuntimeError('Unable to start thread without "start" method call')
    task = self.__task
    task.start()
    task.start_event().wait(self.__scheduled_task_startup_timeout__)
Start a scheduled task
7,589
def _polling_iteration(self):
    """Single poll for scheduled task stop events.

    Marks this watchdog ready when there is no task, or when the task's
    events have fired (also notifying the registry of the finish).
    """
    task = self.__task
    if task is None:
        self.ready_event().set()
    elif task.check_events() is True:
        self.ready_event().set()
        self.registry().task_finished(self)
Poll for scheduled task stop events
7,590
def thread_stopped(self):
    """Stop the scheduled task (if any) because the watchdog is stopping."""
    task = self.__task
    if task is not None:
        # Only request a stop when the task has not already stopped itself
        if not task.stop_event().is_set():
            task.stop()
        self.__task = None
Stop scheduled task beacuse of watchdog stop
7,591
def stop_running_tasks(self):
    """Terminate all the running tasks and empty the registry."""
    for running_task in self.__running_registry:
        running_task.stop()
    self.__running_registry.clear()
Terminate all the running tasks
7,592
def add_source(self, task_source):
    """Add a new task source and fold it into the cached next-start state.

    :param task_source: source object providing next_start()/has_records()
    """
    self.__sources[task_source] = task_source.next_start()
    self.__update(task_source)
Add new tasks source
7,593
def __update_all(self):
    """Recompute the next start time across every registered source."""
    self.__next_start = None
    self.__next_sources = []
    for task_source in self.__sources:
        self.__update(task_source)
Recheck next start of records from all the sources
7,594
def __update(self, task_source):
    """Fold a single source's next start time into the cached minimum.

    :param task_source: source whose next_start() is rechecked
    :raises ValueError: if the source returns a naive or non-UTC datetime
    """
    next_start = task_source.next_start()
    if next_start is None:
        return
    # Only timezone-aware UTC datetimes are comparable here
    if next_start.tzinfo is None or next_start.tzinfo != timezone.utc:
        raise ValueError('Invalid timezone information')
    if self.__next_start is None or next_start < self.__next_start:
        # Strictly earlier: this source alone defines the next start
        self.__next_start = next_start
        self.__next_sources = [task_source]
    elif next_start == self.__next_start:
        # Tie: several sources fire at the same moment
        self.__next_sources.append(task_source)
Recheck next start of tasks from the given one only
7,595
def check(self):
    """Check whether any records are ready to start.

    :return: tuple of ready records, or None when nothing is due
    """
    if self.__next_start is None:
        return
    if utc_datetime() < self.__next_start:
        # Not due yet
        return
    ready = []
    for task_source in self.__next_sources:
        records = task_source.has_records()
        if records is not None:
            ready.extend(records)
    # Sources were consumed; recompute the next start across all of them
    self.__update_all()
    if len(ready) > 0:
        return tuple(ready)
Check if there are records that are ready to start and return them if there are any
7,596
def thread_started(self):
    """Start the running-record registry, wait for it, then start polling."""
    record_registry = self.__running_record_registry
    record_registry.start()
    record_registry.start_event().wait()
    WPollingThreadTask.thread_started(self)
Start required registries and start this scheduler
7,597
def dir_contains(dirname, path, exists=True):
    """Check whether *path* is contained in directory *dirname*.

    :param dirname: the containing directory
    :param path: file or directory to test
    :param exists: when True, resolve both paths on disk and compare the
        real files; when False, fall back to a pure string-prefix check
    :return: True if *dirname* contains *path*
    """
    if exists:
        dirname = osp.abspath(dirname)
        path = osp.abspath(path)
        if six.PY2 or six.PY34:
            # os.path.commonpath is unavailable before Python 3.5
            return osp.exists(path) and osp.samefile(
                osp.commonprefix([dirname, path]), dirname)
        return osp.samefile(osp.commonpath([dirname, path]), dirname)
    return dirname in osp.commonprefix([dirname, path])
Check if a file of directory is contained in another .
7,598
def get_next_name(old, fmt='%i'):
    """Return the next name that numerically follows *old*.

    The last run of digits in *old* is incremented by one; everything else
    is left untouched.

    :param old: name containing at least one run of digits
    :param fmt: unused; kept for interface compatibility
    :return: the incremented name
    :raises ValueError: if *old* contains no digits
    """
    numbers = re.findall(r'\d+', old)
    if not numbers:
        raise ValueError("Could not get the next name because the old name "
                         "has no numbers in it")
    current = numbers[-1]
    incremented = str(int(current) + 1)
    # Work on the reversed string so that a single replace() hits the
    # *last* occurrence of the digit run
    flipped = old[::-1].replace(current[::-1], incremented[::-1], 1)
    return flipped[::-1]
Return the next name that numerically follows old
7,599
def go_through_dict(key, d, setdefault=None):
    """Split *key* on (unescaped) dots and walk the nested dictionary *d*.

    :param key: dot-separated key; a dot escaped as ``\\.`` is not a separator
    :param d: base (possibly nested) dictionary
    :param setdefault: optional factory; when given, missing intermediate
        levels are created with ``setdefault()`` instead of raising
    :return: tuple of (final key component, dictionary that holds it)
    """
    # Split on dots that are not preceded by a backslash
    dot_pattern = re.compile(r'(?<!\\)\.')
    parts = dot_pattern.split(key)
    current = d
    # Descend through every component except the last
    for part in parts[:-1]:
        if setdefault is not None:
            current = current.setdefault(part, setdefault())
        else:
            current = current[part]
    return parts[-1], current
Split up the key by . and get the value from the base dictionary d