idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
55,600
def _addProteins ( self , proteinIds , containerNames ) : proteinIds = AUX . toList ( proteinIds ) for containerName in containerNames : proteinContainer = getattr ( self , containerName ) proteinContainer . update ( proteinIds )
Add one or multiple proteinIds to the respective container .
55,601
def satisfies ( self , other ) : if other . isnocare : return True if self . isnocare : return False if self . arbitrary : return True if self . constant and not other . arbitrary : return True if self . value is other . value and not other . arbitrary and not other . constant : return True return False
Check if the capabilities of a primitive are enough to satisfy a requirement .
55,602
def _list ( self , foldername = "INBOX" , reverse = False , since = None ) : folder = self . folder if foldername == "INBOX" else self . _getfolder ( foldername ) def sortcmp ( d ) : try : return d [ 1 ] . date except : return - 1 lst = folder . items ( ) if not since else folder . items_since ( since ) sorted_lst = sorted ( lst , key = sortcmp , reverse = 1 if reverse else 0 ) itemlist = [ ( folder , key , msg ) for key , msg in sorted_lst ] return itemlist
Do structured list output .
55,603
def ls ( self , foldername = "INBOX" , reverse = False , since = None , grep = None , field = None , stream = sys . stdout ) : if foldername == "" : foldername = "INBOX" msg_list = self . _list ( foldername , reverse , since ) for folder , mk , m in msg_list : try : output_items = ( "%s%s%s" % ( folder . folder or foldername or "INBOX" , SEPERATOR , mk ) , m . date , m . get_from ( ) [ 0 : 50 ] if m . get_from ( ) else "" , m . get_flags ( ) , re . sub ( "\n" , "" , m . get_subject ( ) or "" ) ) output_string = "% -20s % 20s % 50s [%s] %s" % output_items if not grep or ( grep and grep in output_string ) : if field : print ( output_items [ int ( field ) ] , file = stream ) else : print ( output_string , file = stream ) except IOError as e : if e . errno == errno . EPIPE : return self . logger . exception ( "whoops!" ) except Exception as e : self . logger . exception ( "whoops!" )
Do standard text list of the folder to the stream .
55,604
def lisp ( self , foldername = "INBOX" , reverse = False , since = None , stream = sys . stdout ) : def fromval ( hdr ) : if hdr : return parseaddr ( hdr ) for folder , mk , m in self . _list ( foldername , reverse , since ) : try : print ( json . dumps ( { 'folder' : folder . folder or foldername or "INBOX" , 'key' : "%s%s%s" % ( folder . folder or foldername or "INBOX" , SEPERATOR , mk ) , 'date' : str ( m . date ) , "flags" : m . get_flags ( ) , 'from' : fromval ( m . get_from ( ) ) , 'subject' : re . sub ( "\n|\'|\"" , _escape , m . get_subject ( ) or "" ) } ) , file = stream ) except IOError as e : if e . errno == errno . EPIPE : return self . logger . exception ( "whoops!" ) except Exception as e : self . logger . exception ( "whoops!" )
Do JSON list of the folder to the stream .
55,605
def lsfolders ( self , stream = sys . stdout ) : for f in self . folder . folders ( ) : print ( f . folder . strip ( "." ) , file = stream )
List the subfolders
55,606
def _get ( self , msgid ) : foldername , msgkey = msgid . split ( SEPERATOR ) folder = self . folder if foldername == "INBOX" else self . _getfolder ( foldername ) msg = folder [ msgkey ] msg . is_seen = True hdr = list ( msg . items ( ) ) for p in msg . walk ( ) : yield hdr , p return
Yields the message header against each part from the message .
55,607
def gettext ( self , msgid , stream = sys . stdout , splitter = "--text follows this line--\n" ) : for hdr , part in self . _get ( msgid ) : if part . get_content_type ( ) == "text/plain" : for name , val in hdr : if name . lower ( ) == "content-type" : val = part [ "content-type" ] val = " " . join ( [ l . strip ( ) for l in val . split ( "\n" ) ] ) print ( "%s: %s" % ( name , val ) , file = stream ) print ( splitter , file = stream ) payload = part . get_payload ( decode = True ) chartype = part . get_charset ( ) or _get_charset ( part . get ( "Content-Type" , "" ) ) or "us-ascii" print ( payload . decode ( chartype ) , file = stream ) break
Get the first text part we can find and print it as a message .
55,608
def getrawpart ( self , msgid , stream = sys . stdout ) : for hdr , part in self . _get ( msgid ) : pl = part . get_payload ( decode = True ) if pl != None : print ( pl , file = stream ) break
Get the first part from the message and print it raw .
55,609
def getrawpartid ( self , msgid , partid , stream = sys . stdout ) : parts = [ part for hdr , part in self . _get ( msgid ) ] part = parts [ int ( partid ) ] pl = part . get_payload ( decode = True ) if pl != None : print ( pl , file = stream )
Get a specific part from the message and print it raw .
55,610
def getraw ( self , msgid , stream = sys . stdout ) : foldername , msgkey = msgid . split ( SEPERATOR ) folder = self . folder if foldername == "INBOX" else self . _getfolder ( foldername ) msg = folder [ msgkey ] print ( msg . content )
Get the whole message and print it .
55,611
def getstruct ( self , msgid , as_json = False , stream = sys . stdout ) : parts = [ part . get_content_type ( ) for hdr , part in self . _get ( msgid ) ] if as_json : print ( json . dumps ( parts ) , file = stream ) else : for c in parts : print ( c , file = stream )
Get and print the whole message .
55,612
def _extract_alphabet ( self , grammar ) : alphabet = set ( [ ] ) for terminal in grammar . Terminals : alphabet |= set ( [ x for x in terminal ] ) self . alphabet = list ( alphabet )
Extract an alphabet from the given grammar .
55,613
def action ( self , column = None , value = None , ** kwargs ) : return self . _resolve_call ( 'GIC_ACTION' , column , value , ** kwargs )
The underlying GICS table provides codes and descriptions identifying the current status or disposition of a grant project .
55,614
def applicant ( self , column = None , value = None , ** kwargs ) : return self . _resolve_call ( 'GIC_APPLICANT' , column , value , ** kwargs )
Find the applicant information for a grant .
55,615
def authority ( self , column = None , value = None , ** kwargs ) : return self . _resolve_call ( 'GIC_AUTHORITY' , column , value , ** kwargs )
Provides codes and associated authorizing statutes .
55,616
def construction ( self , column = None , value = None , ** kwargs ) : return self . _resolve_call ( 'GIC_CONSTRUCTION' , column , value , ** kwargs )
Identifies monetary descriptive and milestone information for Wastewater Treatment construction grants .
55,617
def eligible_cost ( self , column = None , value = None , ** kwargs ) : return self . _resolve_call ( 'GIC_ELIGIBLE_COST' , column , value , ** kwargs )
The assistance dollar amounts by eligible cost category .
55,618
def grant ( self , column = None , value = None , ** kwargs ) : return self . _resolve_call ( 'GIC_GRANT' , column , value , ** kwargs )
Provides various award project and grant personnel information .
55,619
def grant_assistance ( self , column = None , value = None , ** kwargs ) : return self . _resolve_call ( 'GIC_GRANT_ASST_PGM' , column , value , ** kwargs )
Many - to - many table connecting grants and assistance .
55,620
def grant_authority ( self , column = None , value = None , ** kwargs ) : return self . _resolve_call ( 'GIC_GRANT_AUTH' , column , value , ** kwargs )
Many - to - many table connecting grants and authority .
55,621
def lab_office ( self , column = None , value = None , ** kwargs ) : return self . _resolve_call ( 'GIC_LAB_OFFICE' , column , value , ** kwargs )
Abbreviations names and locations of laboratories and offices .
55,622
def milestone ( self , column = None , value = None , ** kwargs ) : return self . _resolve_call ( 'GIC_MILESTONE' , column , value , ** kwargs )
Status codes and related dates of certain grants
55,623
def record_type ( self , column = None , value = None , ** kwargs ) : return self . _resolve_call ( 'GIC_RECORD_TYPE' , column , value , ** kwargs )
Codes and descriptions indicating whether an award is for a new project or for the continuation of a currently funded one .
55,624
def srf_cap ( self , column = None , value = None , ** kwargs ) : return self . _resolve_call ( 'GIC_SRF_CAP' , column , value , ** kwargs )
Fiscal dollar amounts for State Revolving Fund Capitalization Grants .
55,625
def status ( self , column = None , value = None , ** kwargs ) : return self . _resolve_call ( 'GIC_STATUS' , column , value , ** kwargs )
Provides codes and descriptions of project milestones .
55,626
def recRemoveTreeFormating ( element ) : children = element . getchildren ( ) if len ( children ) > 0 : for child in children : recRemoveTreeFormating ( child ) if element . text is not None : if len ( element . text . strip ( ) ) == 0 : element . text = None else : element . text = element . text . strip ( ) if element . tail is not None : if len ( element . tail . strip ( ) ) == 0 : element . tail = None else : element . tail = element . tail . strip ( )
Removes whitespace characters which are leftovers from previous xml formatting .
55,627
def recCopyElement ( oldelement ) : newelement = ETREE . Element ( oldelement . tag , oldelement . attrib ) if len ( oldelement . getchildren ( ) ) > 0 : for childelement in oldelement . getchildren ( ) : newelement . append ( recCopyElement ( childelement ) ) return newelement
Generates a copy of an xml element and recursively of all child elements .
55,628
def getParam ( xmlelement ) : elementTag = clearTag ( xmlelement . tag ) if elementTag in [ 'userParam' , 'cvParam' , 'referenceableParamGroupRef' ] : if elementTag == 'cvParam' : param = cvParamFromDict ( xmlelement . attrib ) elif elementTag == 'userParam' : param = userParamFromDict ( xmlelement . attrib ) else : param = refParamGroupFromDict ( xmlelement . attrib ) else : param = False return param
Converts an mzML xml element to a param tuple .
55,629
def xmlAddParams ( parentelement , params ) : if not params : return None for param in params : if len ( param ) == 3 : cvAttrib = { 'cvRef' : param [ 0 ] . split ( ':' ) [ 0 ] , 'accession' : param [ 0 ] , 'name' : oboTranslator . getNameWithId ( param [ 0 ] ) } if param [ 1 ] : cvAttrib . update ( { 'value' : param [ 1 ] } ) else : cvAttrib . update ( { 'value' : '' } ) if param [ 2 ] : unitName = oboTranslator . getNameWithId ( param [ 2 ] ) cvAttrib . update ( { 'unitAccession' : param [ 2 ] , 'unitCvRef' : param [ 2 ] . split ( ':' ) [ 0 ] , 'unitName' : unitName } ) paramElement = ETREE . Element ( 'cvParam' , ** cvAttrib ) elif len ( param ) == 4 : userAttrib = { 'name' : param [ 0 ] } if param [ 1 ] : userAttrib . update ( { 'value' : param [ 1 ] } ) else : userAttrib . update ( { 'value' : '' } ) if param [ 2 ] : userAttrib . update ( { 'unitAccession' : param [ 2 ] , 'unitCvRef' : param [ 2 ] . split ( ':' ) [ 0 ] } ) if param [ 3 ] : userAttrib . update ( { 'type' : param [ 3 ] } ) paramElement = ETREE . Element ( 'userParam' , ** userAttrib ) elif param [ 0 ] == 'ref' : refAttrib = { 'ref' : param [ 1 ] } paramElement = ETREE . Element ( 'referenceableParamGroupRef' , ** refAttrib ) parentelement . append ( paramElement )
Generates new mzML parameter xml elements and adds them to the parentelement as xml children elements .
55,630
def interpretBitEncoding ( bitEncoding ) : if bitEncoding == '64' : floattype = 'd' numpyType = numpy . float64 elif bitEncoding == '32' : floattype = 'f' numpyType = numpy . float32 else : errorText = '' . join ( [ 'bitEncoding \'' , bitEncoding , '\' not defined. ' , 'Must be \'64\' or \'32\'' ] ) raise TypeError ( errorText ) return ( floattype , numpyType )
Returns a floattype string and a numpy array type .
55,631
def calc_partition_function ( mass , omega_array , temperature_array ) : Kappa_t = mass * omega_array ** 2 return _np . sqrt ( 4 * _np . pi ** 2 * _scipy . constants . Boltzmann ** 2 * temperature_array ** 2 / ( mass * Kappa_t ) )
Calculates the partition function of your system at each point in time .
55,632
def calc_mean_and_variance_of_variances ( self , NumberOfOscillations ) : SplittedArraySize = int ( self . SampleFreq / self . FTrap . n ) * NumberOfOscillations VoltageArraySize = len ( self . voltage ) SnippetsVariances = _np . var ( self . voltage [ : VoltageArraySize - _np . mod ( VoltageArraySize , SplittedArraySize ) ] . reshape ( - 1 , SplittedArraySize ) , axis = 1 ) return _np . mean ( SnippetsVariances ) , _np . var ( SnippetsVariances )
Calculates the mean and variance of a set of variances . This set is obtained by splitting the timetrace into chunks of points with a length of NumberOfOscillations oscillations .
55,633
def register_template_directory ( kb_app : kb , sphinx_app : Sphinx , sphinx_env : BuildEnvironment , docnames = List [ str ] , ) : template_bridge = sphinx_app . builder . templates actions = ResourceAction . get_callbacks ( kb_app ) for action in actions : f = os . path . dirname ( inspect . getfile ( action ) ) template_bridge . loaders . append ( SphinxFileSystemLoader ( f ) )
Add this resource s templates dir to template paths
55,634
def add_directives ( kb_app : kb , sphinx_app : Sphinx , sphinx_env : BuildEnvironment , docnames = List [ str ] , ) : for k , v in list ( kb_app . config . resources . items ( ) ) : sphinx_app . add_directive ( k , ResourceDirective )
For each resource type register a new Sphinx directive
55,635
def stamp_title ( kb_app : kb , sphinx_app : Sphinx , doctree : doctree ) : resources = sphinx_app . env . resources confdir = sphinx_app . confdir source = PurePath ( doctree . attributes [ 'source' ] ) docname = str ( source . relative_to ( confdir ) ) . split ( '.rst' ) [ 0 ] resource = resources . get ( docname ) if resource : title = get_rst_title ( doctree ) resource . title = title
Walk the tree and extract RST title into resource . title
55,636
def handle_exception ( error ) : response = jsonify ( error . to_dict ( ) ) response . status_code = error . status_code return response
Simple method for handling exceptions raised by PyBankID .
55,637
def create_from_pybankid_exception ( cls , exception ) : return cls ( "{0}: {1}" . format ( exception . __class__ . __name__ , str ( exception ) ) , _exception_class_to_status_code . get ( exception . __class__ ) , )
Class method for initiating from a PyBankID exception .
55,638
def to_dict ( self ) : rv = dict ( self . payload or ( ) ) rv [ "message" ] = self . message return rv
Create a dict representation of this exception .
55,639
def integers ( num , minimum , maximum , base = 10 ) : function = 'integers' num , minimum , maximum = list ( map ( int , [ num , minimum , maximum ] ) ) if ( 1 <= num <= 10 ** 4 ) is False : print ( 'ERROR: %s is out of range' % num ) return if ( - 10 ** 9 <= minimum <= 10 ** 9 ) is False : print ( 'ERROR: %s is out of range' % minimum ) return if ( - 10 ** 9 <= maximum <= 10 ** 9 ) is False : print ( 'ERROR: %s is out of range' % maximum ) return if maximum < minimum : print ( 'ERROR: %s is less than %s' % ( maximum , minimum ) ) return base = int ( base ) if base not in [ 2 , 8 , 10 , 16 ] : raise Exception ( 'Base not in range!' ) opts = { 'num' : num , 'min' : minimum , 'max' : maximum , 'col' : 1 , 'base' : base , 'format' : 'plain' , 'rnd' : 'new' } integers = get_http ( RANDOM_URL , function , opts ) integers_arr = str_to_arr ( integers ) return integers_arr
Random integers within specified interval .
55,640
def sequence ( minimum , maximum ) : function = 'sequences' opts = { 'min' : minimum , 'max' : maximum , 'col' : 1 , 'format' : 'plain' , 'rnd' : 'new' } deal = get_http ( RANDOM_URL , function , opts ) deal_arr = str_to_arr ( deal ) return deal_arr
Randomize a sequence of integers .
55,641
def string ( num , length , digits = False , upper = True , lower = True , unique = False ) : function = 'strings' digits = convert ( digits ) upper = convert ( upper ) lower = convert ( lower ) unique = convert ( unique ) opts = { 'num' : num , 'len' : length , 'digits' : digits , 'upperalpha' : upper , 'loweralpha' : lower , 'format' : 'plain' , 'rnd' : 'new' } seq = get_http ( RANDOM_URL , function , opts ) seq = seq . strip ( ) . split ( '\n' ) return seq
Random strings .
55,642
def quota ( ip = None ) : url = 'http://www.random.org/quota/?format=plain' data = urlopen ( url ) credit = int ( data . read ( ) . strip ( ) ) if data . code == 200 : return credit else : return "ERROR: Server responded with code %s" % data . code
Check your quota .
55,643
def get_http ( base_url , function , opts ) : url = ( os . path . join ( base_url , function ) + '/?' + urlencode ( opts ) ) data = urlopen ( url ) if data . code != 200 : raise ValueError ( "Random.rg returned server code: " + str ( data . code ) ) return data . read ( )
HTTP request generator .
55,644
def read ( * p ) : with open ( os . path . join ( * p ) , 'r' ) as fi : return fi . read ( )
Build a file path from paths and return the contents .
55,645
def execute ( self , processProtocol , command , env = { } , path = None , uid = None , gid = None , usePTY = 0 , childFDs = None ) : raise NotImplementedError ( )
Form a command and start a process in the desired environment .
55,646
def run ( self , command , env = { } , path = None , uid = None , gid = None , usePTY = 0 , childFDs = None ) : deferred = defer . Deferred ( ) processProtocol = _SummaryProcessProtocol ( deferred ) d = defer . maybeDeferred ( self . execute , processProtocol , command , env , path , uid , gid , usePTY , childFDs ) d . addErrback ( deferred . errback ) return deferred
Execute a command and return the results of the completed run .
55,647
def getOutput ( self , command , env = { } , path = None , uid = None , gid = None , usePTY = 0 , childFDs = None ) : deferred = defer . Deferred ( ) processProtocol = _SummaryProcessProtocol ( deferred ) self . execute ( processProtocol , command , env , path , uid , gid , usePTY , childFDs ) @ deferred . addCallback def getStdOut ( tuple_ ) : stdout , _stderr , _returnCode = tuple_ return stdout return deferred
Execute a command and get the output of the finished process .
55,648
def getExitCode ( self , command , env = { } , path = None , uid = None , gid = None , usePTY = 0 , childFDs = None ) : deferred = defer . Deferred ( ) processProtocol = _SummaryProcessProtocol ( deferred ) self . execute ( processProtocol , command , env , path , uid , gid , usePTY , childFDs ) @ deferred . addCallback def getStdOut ( tuple_ ) : _stdout , _stderr , exitCode = tuple_ return exitCode return deferred
Execute a command and get the return code of the finished process .
55,649
def validate_task ( original_task ) : task = original_task . _asdict ( ) if 'inputs' not in task or task [ 'inputs' ] is None : task [ 'inputs' ] = [ '*' ] if ( 'outputs' not in task or task [ 'outputs' ] is None or len ( task [ 'outputs' ] ) == 0 ) : task [ 'outputs' ] = [ '*' ] if not hasattr ( task [ 'inputs' ] , '__iter__' ) or isinstance ( task [ 'inputs' ] , str ) : task [ 'inputs' ] = ( task [ 'inputs' ] , ) else : task [ 'inputs' ] = tuple ( task [ 'inputs' ] ) if not hasattr ( task [ 'outputs' ] , '__iter__' ) or isinstance ( task [ 'outputs' ] , str ) : task [ 'outputs' ] = ( task [ 'outputs' ] , ) else : task [ 'outputs' ] = tuple ( task [ 'outputs' ] ) if not callable ( task [ 'fn' ] ) : raise TypeError ( 'Task function must be a callable object' ) if ( len ( task [ 'outputs' ] ) > 1 and not inspect . isgeneratorfunction ( task [ 'fn' ] ) ) : raise TypeError ( 'Multiple outputs are only supported with \ generator functions' ) if inspect . isgeneratorfunction ( task [ 'fn' ] ) : if task [ 'outputs' ] [ 0 ] == '*' : raise TypeError ( 'Generator functions cannot be used for tasks with \ output specification "*"' ) return Task ( ** task )
Validates task and adds default values for missing options using the following steps .
55,650
def run_task ( task , workspace ) : data = copy . copy ( workspace ) task = validate_task ( task ) inputs = [ input_parser ( key , data ) for key in task . inputs ] if inspect . isgeneratorfunction ( task . fn ) : data . update ( zip ( task . outputs , task . fn ( * inputs ) ) ) else : results = task . fn ( * inputs ) if task . outputs [ 0 ] != '*' : results = { task . outputs [ 0 ] : results } elif not isinstance ( results , dict ) : raise TypeError ( 'Result should be a dict for output type *' ) data . update ( results ) return data
Runs the task and updates the workspace with results .
55,651
def run_hook ( name , workspace , hooks ) : data = copy . copy ( workspace ) for hook_listener in hooks . get ( name , [ ] ) : hook_listener ( data ) return data
Runs all hooks added under the give name .
55,652
def add_task ( self , fn , inputs = None , outputs = None ) : self . tasks . append ( Task ( fn , inputs , outputs ) ) return self
Adds a task to the workflow .
55,653
def add_hook ( self , name , function ) : if not callable ( function ) : return ValueError ( 'Hook function should be callable' ) if name not in self . hooks : self . hooks [ name ] = [ ] self . hooks [ name ] . append ( function ) return self
Adds a function to be called for hook of a given name .
55,654
def dns ( self ) : dns = { 'elb' : self . dns_elb ( ) , 'elb_region' : self . dns_elb_region ( ) , 'global' : self . dns_global ( ) , 'region' : self . dns_region ( ) , 'instance' : self . dns_instance ( ) , } return dns
DNS details .
55,655
def s3_app_bucket ( self , include_region = False ) : if include_region : s3_app_bucket = self . format [ 's3_app_region_bucket' ] . format ( ** self . data ) else : s3_app_bucket = self . format [ 's3_app_bucket' ] . format ( ** self . data ) return s3_app_bucket
Generate s3 application bucket name .
55,656
def shared_s3_app_bucket ( self , include_region = False ) : if include_region : shared_s3_app_bucket = self . format [ 'shared_s3_app_region_bucket' ] . format ( ** self . data ) else : shared_s3_app_bucket = self . format [ 'shared_s3_app_bucket' ] . format ( ** self . data ) return shared_s3_app_bucket
Generate shared s3 application bucket name .
55,657
def iam ( self ) : iam = { 'group' : self . format [ 'iam_group' ] . format ( ** self . data ) , 'lambda_role' : self . format [ 'iam_lambda_role' ] . format ( ** self . data ) , 'policy' : self . format [ 'iam_policy' ] . format ( ** self . data ) , 'profile' : self . format [ 'iam_profile' ] . format ( ** self . data ) , 'role' : self . format [ 'iam_role' ] . format ( ** self . data ) , 'user' : self . format [ 'iam_user' ] . format ( ** self . data ) , 'base' : self . format [ 'iam_base' ] . format ( ** self . data ) , } return iam
Generate iam details .
55,658
def archaius ( self ) : bucket = self . format [ 's3_bucket' ] . format ( ** self . data ) path = self . format [ 's3_bucket_path' ] . format ( ** self . data ) archaius_name = self . format [ 's3_archaius_name' ] . format ( ** self . data ) archaius = { 's3' : archaius_name , 'bucket' : bucket , 'path' : path } return archaius
Generate archaius bucket path .
55,659
def jenkins ( self ) : job_name = self . format [ 'jenkins_job_name' ] . format ( ** self . data ) job = { 'name' : job_name } return job
Generate jenkins job details .
55,660
def gitlab ( self ) : main_name = self . format [ 'git_repo' ] . format ( ** self . data ) qe_name = self . format [ 'git_repo_qe' ] . format ( ** self . data ) config_name = self . format [ 'git_repo_configs' ] . format ( ** self . data ) git = { 'config' : config_name , 'main' : main_name , 'qe' : qe_name , } return git
Generate gitlab details .
55,661
def get_value_matched_by_regex ( field_name , regex_matches , string ) : try : value = regex_matches . group ( field_name ) if value is not None : return value except IndexError : pass raise MissingFieldError ( string , field_name )
Ensure value stored in regex group exists .
55,662
def positive_int ( val ) : if isinstance ( val , float ) : raise ValueError ( '"{}" must not be a float' . format ( val ) ) val = int ( val ) if val >= 0 : return val raise ValueError ( '"{}" must be positive' . format ( val ) )
Parse val into a positive integer .
55,663
def strictly_positive_int_or_none ( val ) : val = positive_int_or_none ( val ) if val is None or val > 0 : return val raise ValueError ( '"{}" must be strictly positive' . format ( val ) )
Parse val into either None or a strictly positive integer .
55,664
def _attributeLinesToDict ( attributeLines ) : attributes = dict ( ) for line in attributeLines : attributeId , attributeValue = line . split ( ':' , 1 ) attributes [ attributeId . strip ( ) ] = attributeValue . strip ( ) return attributes
Converts a list of obo Term lines to a dictionary .
55,665
def _termIsObsolete ( oboTerm ) : isObsolete = False if u'is_obsolete' in oboTerm : if oboTerm [ u'is_obsolete' ] . lower ( ) == u'true' : isObsolete = True return isObsolete
Determine whether an obo Term entry is marked as obsolete .
55,666
def discover_handler_classes ( handlers_package ) : if handlers_package is None : return sys . path . insert ( 0 , os . getcwd ( ) ) package = import_module ( handlers_package ) if hasattr ( package , '__path__' ) : for _ , modname , _ in pkgutil . iter_modules ( package . __path__ ) : import_module ( '{package}.{module}' . format ( package = package . __name__ , module = modname ) ) return registered_handlers
Looks for handler classes within handler path module .
55,667
def get_multi_word_keywords ( features ) : keys = { 'is not' : Token ( TokenTypes . NOT_EQUAL , 'is not' ) , } return OrderedDict ( sorted ( list ( keys . items ( ) ) , key = lambda t : len ( t [ 0 ] ) , reverse = True ) )
This returns an OrderedDict containing the multi word keywords in order of length . This is so the tokenizer will match the longer matches before the shorter matches
55,668
def inside_try ( func , options = { } ) : if six . PY2 : name = func . func_name else : name = func . __name__ @ wraps ( func ) def silenceit ( * args , ** kwargs ) : excpt = None try : return func ( * args , ** kwargs ) except Exception as excpt : if 'ctx' in kwargs : ctx = kwargs [ 'ctx' ] else : ctx = get_try_option ( None , 'ctx' ) if not ctx : ctx = Bubble ( 'Inside Try' ) head = name + ': silenced function inside_try:Error:' if get_try_option ( ctx , 'count_it' ) : ctx . gbc . cry ( head + 'counting' ) if get_try_option ( ctx , 'print_it' ) : ctx . gbc . cry ( head + 'printing:' + str ( excpt ) ) if get_try_option ( ctx , 'print_args' ) : ctx . gbc . cry ( head + 'printing ak:' + str ( excpt ) ) ctx . gbc . cry ( 'args' , stuff = args ) ctx . gbc . cry ( 'kwargs' , stuff = kwargs ) if get_try_option ( ctx , 'inspect_it' ) : ctx . gbc . cry ( head + 'inspecting:' , stuff = excpt ) for s in inspect . stack ( ) : ctx . gbc . cry ( head + ':stack:' , stuff = s ) if get_try_option ( ctx , 'log_it' ) : ctx . gbc . cry ( head + 'logging' ) for s in inspect . stack ( ) : ctx . gbc . cry ( head + ':stack:' , stuff = s ) if get_try_option ( ctx , 'reraise_it' ) : ctx . gbc . cry ( head + 'reraising' ) raise excpt return { 'error' : str ( excpt ) , 'silenced' : name , 'args' : args , 'kwargs' : kwargs } return silenceit
decorator to silence exceptions for logging we want a safe fail of the functions
55,669
def start ( self ) : Server ( ) . start ( self . options , self . handler_function , self . __class__ . component_type )
Start the server and run forever .
55,670
def sort_entries ( self ) : return sorted ( self . data , key = self . sort_func , reverse = self . get_reverse ( ) )
Get whether reverse is True or False . Return the sorted data .
55,671
def visible_fields ( self ) : form_visible_fields = self . form . visible_fields ( ) if self . render_fields : fields = self . render_fields else : fields = [ field . name for field in form_visible_fields ] filtered_fields = [ field for field in fields if field not in self . exclude_fields ] return [ field for field in form_visible_fields if field . name in filtered_fields ]
Returns the reduced set of visible fields to output from the form .
55,672
def get_fieldsets ( self , fieldsets = None ) : fieldsets = fieldsets or self . fieldsets if not fieldsets : raise StopIteration has_primary = any ( fieldset . get ( 'primary' ) for fieldset in fieldsets ) for fieldset_kwargs in fieldsets : fieldset_kwargs = copy . deepcopy ( fieldset_kwargs ) fieldset_kwargs [ 'form' ] = self if not has_primary : fieldset_kwargs [ 'primary' ] = True has_primary = True yield self . get_fieldset ( ** fieldset_kwargs )
This method returns a generator which yields fieldset instances .
55,673
def get_chunks ( Array , Chunksize ) : for i in range ( 0 , len ( Array ) , Chunksize ) : yield Array [ i : i + Chunksize ]
Generator that yields chunks of size ChunkSize
55,674
def read_data_from_bin_file ( fileName ) : with open ( fileName , mode = 'rb' ) as file : fileContent = file . read ( ) ( ChannelData , LenOf1Channel , NumOfChannels , SampleTime ) = read_data_from_bytes ( fileContent ) return ChannelData , LenOf1Channel , NumOfChannels , SampleTime
Loads the binary data stored in the a binary file and extracts the data for each channel that was saved along with the sample rate and length of the data array .
55,675
def read_data_from_bytes ( fileContent ) : TotalDataLen = struct . unpack ( 'Q' , fileContent [ : 8 ] ) [ 0 ] NumOfChannels = struct . unpack ( 'I' , fileContent [ 8 : 12 ] ) [ 0 ] SampleTime = struct . unpack ( 'd' , fileContent [ 12 : 20 ] ) [ 0 ] AllChannelData = struct . unpack ( "f" * ( ( len ( fileContent ) - 20 ) // 4 ) , fileContent [ 20 : ] ) LenOf1Channel = int ( TotalDataLen / NumOfChannels ) ChannelData = list ( get_chunks ( AllChannelData , LenOf1Channel ) ) return ChannelData , LenOf1Channel , NumOfChannels , SampleTime
Takes the binary data stored in the binary string provided and extracts the data for each channel that was saved along with the sample rate and length of the data array .
55,676
def get_coord_box ( centre_x , centre_y , distance ) : return { 'top_left' : ( centre_x - distance , centre_y + distance ) , 'top_right' : ( centre_x + distance , centre_y + distance ) , 'bottom_left' : ( centre_x - distance , centre_y - distance ) , 'bottom_right' : ( centre_x + distance , centre_y - distance ) , }
Get the square boundary coordinates for a given centre and distance
55,677
def fleet_ttb ( unit_type , quantity , factories , is_techno = False , is_dict = False , stasis_enabled = False ) : unit_weights = { UNIT_SCOUT : 1 , UNIT_DESTROYER : 13 , UNIT_BOMBER : 10 , UNIT_CRUISER : 85 , UNIT_STARBASE : 1 , } govt_weight = 80 if is_dict else 100 prod_weight = 85 if is_techno else 100 weighted_qty = unit_weights [ unit_type ] * quantity ttb = ( weighted_qty * govt_weight * prod_weight ) * ( 2 * factories ) return ttb + ( ttb * 0.66 ) if stasis_enabled else ttb
Calculate the time taken to construct a given fleet
55,678
def parse_fasta ( data ) : name , seq = None , [ ] for line in data : line = line . rstrip ( ) if line . startswith ( '>' ) : if name : yield Sequence ( name , '' . join ( seq ) ) name , seq = line , [ ] else : seq . append ( line ) if name : yield Sequence ( name , '' . join ( seq ) )
Load sequences in Fasta format .
55,679
def _reset ( self ) : self . records = list ( ) self . featsbyid = dict ( ) self . featsbyparent = dict ( ) self . countsbytype = dict ( )
Clear internal data structure .
55,680
def get_by_label ( self , label ) : return next ( ( x for x in self if x . label == label ) , None )
Return the first item with a specific label or None .
55,681
def getGenericAnswers ( self , name , instruction , prompts ) : responses = [ ] for prompt , _echo in prompts : password = self . getPassword ( prompt ) responses . append ( password ) return defer . succeed ( responses )
Called when the server requests keyboard interactive authentication
55,682
def pairwise ( iterable ) : iterator = iter ( iterable ) try : first = next ( iterator ) except StopIteration : return for element in iterator : yield first , element first = element
Generate consecutive pairs of elements from the given iterable .
55,683
def https_policy_from_config(config):
    """Create an IPolicyForHTTPS which can authenticate a Kubernetes API server.

    The cluster's certificate-authority certificate is parsed, validated,
    and registered as the trust root for the API server's network location.

    :raise ValueError: if no CA certificate is present or it cannot be
        loaded as a PEM certificate.
    """
    base_url = URL.fromText(native_string_to_unicode(config.cluster["server"]))
    ca_certs = pem.parse(config.cluster["certificate-authority"].bytes())
    if not ca_certs:
        raise ValueError("No certificate authority certificate found.")
    ca_cert = ca_certs[0]
    # Sanity-check that the PEM blob really is a loadable certificate.
    try:
        ssl.Certificate.load(ca_cert.as_bytes(), FILETYPE_PEM)
    except OpenSSLError as e:
        raise ValueError(
            "Invalid certificate authority certificate found.",
            str(e),
        )
    netloc = NetLocation(host=base_url.host, port=base_url.port)
    return ClientCertificatePolicyForHTTPS(
        credentials={},
        trust_roots={netloc: ca_cert},
    )
Create an IPolicyForHTTPS which can authenticate a Kubernetes API server .
55,684
def authenticate_with_certificate_chain(reactor, base_url, client_chain, client_key, ca_cert):
    """Create an IAgent which can issue authenticated requests to a
    particular Kubernetes server using a client certificate chain.

    :raise ValueError: if *base_url* is not an HTTPS URL.
    """
    if base_url.scheme != u"https":
        raise ValueError(
            "authenticate_with_certificate() makes sense for HTTPS, not {!r}".format(base_url.scheme),
        )
    netloc = NetLocation(host=base_url.host, port=base_url.port)
    credentials = TLSCredentials(
        chain=Chain(certificates=Certificates(client_chain)),
        key=client_key,
    )
    policy = ClientCertificatePolicyForHTTPS(
        credentials={netloc: credentials},
        trust_roots={netloc: ca_cert},
    )
    return Agent(reactor, contextFactory=policy)
Create an IAgent which can issue authenticated requests to a particular Kubernetes server using a client certificate .
55,685
def authenticate_with_certificate(reactor, base_url, client_cert, client_key, ca_cert):
    """See authenticate_with_certificate_chain.

    Convenience wrapper for the common single-certificate case.
    """
    chain = [client_cert]
    return authenticate_with_certificate_chain(reactor, base_url, chain, client_key, ca_cert)
See authenticate_with_certificate_chain .
55,686
def authenticate_with_serviceaccount(reactor, **kw):
    """Create an IAgent which can issue authenticated requests to a
    particular Kubernetes server using a service account token.

    Keyword arguments are forwarded to ``KubeConfig.from_service_account``.
    """
    config = KubeConfig.from_service_account(**kw)
    token = config.user["token"]
    # Every request carries the bearer token; TLS trust comes from the config.
    headers = Headers({u"authorization": [u"Bearer {}".format(token)]})
    return HeaderInjectingAgent(
        _to_inject=headers,
        _agent=Agent(reactor, contextFactory=https_policy_from_config(config)),
    )
Create an IAgent which can issue authenticated requests to a particular Kubernetes server using a service account token .
55,687
def first_time_setup(self):
    """First time running Open Sesame?  Create the keyring and an
    auto-unlock key in the default keyring.

    The secret is first stored in the default keyring (so the session can
    auto-unlock it later), then read back and used as the password of the
    newly created application keyring.
    """
    # Only run the setup when no auto-unlock entry exists yet.
    if not self._auto_unlock_key_position():
        # Generate a random password to act as the keyring's unlock secret.
        pw = password.create_passwords()[0]
        attrs = {'application': self.keyring}
        # Store the secret in the default keyring for auto-unlock.
        gkr.item_create_sync(self.default_keyring, gkr.ITEM_GENERIC_SECRET, self.keyring, attrs, pw, True)
        # Read the secret back by position and create the keyring with it.
        found_pos = self._auto_unlock_key_position()
        item_info = gkr.item_get_info_sync(self.default_keyring, found_pos)
        gkr.create_sync(self.keyring, item_info.get_secret())
First time running Open Sesame? Create a keyring and an auto-unlock key in the default keyring. Make sure these things don't already exist.
55,688
def _auto_unlock_key_position(self):
    """Find the open-sesame password in the default keyring.

    :return: the item position of the auto-unlock entry, or None when no
        such entry exists.
    """
    found_pos = None
    default_keyring_ids = gkr.list_item_ids_sync(self.default_keyring)
    for pos in default_keyring_ids:
        item_attrs = gkr.item_get_attributes_sync(self.default_keyring, pos)
        # dict.get() replaces dict.has_key(), which was removed in Python 3;
        # the combined has_key-and-compare collapses to a single comparison.
        # NOTE(review): this compares against the literal "opensesame" while
        # first_time_setup stores self.keyring -- confirm they always match.
        if item_attrs.get('application') == "opensesame":
            found_pos = pos
            break
    return found_pos
Find the open sesame password in the default keyring
55,689
def get_position_searchable(self):
    """Return a dict mapping each item position in the keyring to its
    corresponding 'searchable' attribute string.
    """
    return {
        pos: gkr.item_get_attributes_sync(self.keyring, pos)['searchable']
        for pos in gkr.list_item_ids_sync(self.keyring)
    }
Return dict of the position and corrasponding searchable str
55,690
def _match_exists(self, searchable):
    """Check whether the searchable description already exists.

    :return: the position of the matching item, or False when there is
        no match.
    """
    # .items() works on both Python 2 and 3; dict.iteritems() is
    # Python-2-only and breaks under Python 3.
    for pos, val in self.get_position_searchable().items():
        if val == searchable:
            return pos
    return False
Make sure the searchable description doesn t already exist
55,691
def save_password(self, password, **attrs):
    """Save the new password; an existing entry with the same searchable
    description is re-saved with the current epoch timestamp prepended.

    :param password: secret to store.
    :param attrs: item attributes; must include 'searchable'.
    :return: the position of the newly created keyring item.
    """
    pos_of_match = self._match_exists(attrs['searchable'])
    # NOTE(review): a falsy position (e.g. 0) would be treated as "no
    # match" here -- confirm gnome-keyring item ids can never be 0.
    if pos_of_match:
        # Archive the existing secret under "<epoch>_<searchable>".
        old_password = self.get_password(pos_of_match).get_secret()
        gkr.item_delete_sync(self.keyring, pos_of_match)
        desc = str(int(time.time())) + "_" + attrs['searchable']
        gkr.item_create_sync(self.keyring, gkr.ITEM_GENERIC_SECRET, desc, {}, old_password, True)
    # Create the new entry under the plain searchable description.
    desc = attrs['searchable']
    pos = gkr.item_create_sync(self.keyring, gkr.ITEM_GENERIC_SECRET, desc, attrs, password, True)
    return pos
Save the new password save the old password with the date prepended
55,692
def get_descriptor_for_idcode(idcode):
    """Find the BSDL description for a device by JTAG idcode.

    A local JSON cache is consulted first; on a miss the descriptor is
    fetched (get_sid/get_details/decode_bsdl), cross-checked, written to
    the cache directory, and returned.

    :param idcode: 32-bit device idcode; only the low 28 bits are used.
    :return: a JTAGDeviceDescription instance.
    :raise Exception: when the two descriptor sources disagree.
    """
    # The top 4 version bits are masked off and treated as don't-care.
    idcode = idcode & 0x0fffffff
    id_str = "XXXX" + bin(idcode)[2:].zfill(28)
    descr_file_path = _check_cache_for_idcode(id_str)
    if descr_file_path:
        with open(descr_file_path, 'r') as f:
            dat = json.load(f)
            # Only trust cache entries written by the current format version.
            if dat.get("_file_version", -1) == JTAGDeviceDescription.version:
                return JTAGDeviceDescription(dat.get('idcode'), dat.get('name'), dat.get('ir_length'), dat.get('instruction_opcodes'), dat.get('registers'), dat.get('instruction_register_map'))
    print(" Device detected (" + id_str + "). Fetching missing descriptor...")
    sid = get_sid(id_str)
    details = get_details(sid)
    attribs = decode_bsdl(sid)
    # Reconcile INSTRUCTION_LENGTH between the two sources; conflicting
    # non-equal values are an error, a single source wins otherwise.
    instruction_length = 0
    if attribs.get('INSTRUCTION_LENGTH') == details.get('INSTRUCTION_LENGTH'):
        instruction_length = attribs.get('INSTRUCTION_LENGTH')
    elif attribs.get('INSTRUCTION_LENGTH') and details.get('INSTRUCTION_LENGTH'):
        raise Exception("INSTRUCTION_LENGTH can not be determined")
    elif attribs.get('INSTRUCTION_LENGTH'):
        instruction_length = attribs.get('INSTRUCTION_LENGTH')
    else:
        instruction_length = details.get('INSTRUCTION_LENGTH')
    # Every instruction listed in the details must have an opcode in the BSDL.
    for instruction_name in details.get('instructions'):
        if instruction_name not in attribs.get('INSTRUCTION_OPCODE', []):
            raise Exception("INSTRUCTION_OPCODE sources do not match")
    descr = JTAGDeviceDescription(attribs['IDCODE_REGISTER'].upper(), details['name'], instruction_length, attribs['INSTRUCTION_OPCODE'], attribs['REGISTERS'], attribs['INSTRUCTION_TO_REGISTER'])
    # Persist the freshly built descriptor so later lookups hit the cache.
    if not os.path.isdir(base_descr_dir):
        os.makedirs(base_descr_dir)
    descr_file_path = os.path.join(base_descr_dir, attribs['IDCODE_REGISTER'].upper() + '.json')
    with open(descr_file_path, 'w') as f:
        json.dump(descr._dump(), f)
    return descr
Use this method to find bsdl descriptions for devices. The caching on this method drastically lowers the execution time when there are a lot of bsdl files and more than one device. It may be moved into a metaclass to make it more transparent.
55,693
def _fetch_dimensions(self, dataset):
    """Yield the dimensions available for *dataset*."""
    yield Dimension(u"school")
    yield Dimension(u"year", datatype="year")
    yield Dimension(u"semester", datatype="academic_term", dialect="swedish")
    # NOTE(review): datatype="year" on the municipality dimension looks like
    # a copy-paste slip -- confirm whether a region/municipality datatype
    # was intended.
    yield Dimension(u"municipality", datatype="year", domain="sweden/municipalities")
Iterate through semesters, counties, and municipalities.
55,694
def _merge_configs(configs):
    """Merge one or more KubeConfig objects into a single KubeConfig.

    List-valued sections are concatenated in input order; the first
    config that defines ``current-context`` wins.
    """
    merged = {
        u"contexts": [],
        u"users": [],
        u"clusters": [],
        u"current-context": None,
    }
    for config in configs:
        for section in (u"contexts", u"users", u"clusters"):
            try:
                merged[section].extend(config.doc[section])
            except KeyError:
                # Missing sections are simply skipped.
                pass
        if merged[u"current-context"] is None:
            try:
                merged[u"current-context"] = config.doc[u"current-context"]
            except KeyError:
                pass
    return KubeConfig(merged)
Merge one or more KubeConfig objects .
55,695
def _merge_configs_from_env(kubeconfigs):
    """Merge configuration files named by a KUBECONFIG-style value.

    *kubeconfigs* is a path-separator-joined list of file paths; empty
    entries are ignored.
    """
    paths = [FilePath(p) for p in kubeconfigs.split(pathsep) if p]
    return _merge_configs([KubeConfig.from_file(p.path) for p in paths])
Merge configuration files from a KUBECONFIG environment variable .
55,696
def network_kubernetes_from_context(
    reactor, context=None, path=None, environ=None,
    default_config_path=FilePath(expanduser(u"~/.kube/config")),
):
    """Create a new IKubernetes provider based on a kube config file.

    :param reactor: reactor passed to the client-certificate agent.
    :param context: name of the config context to use; defaults to the
        config's ``current-context`` entry.
    :param path: explicit FilePath to a config file; when None, the
        KUBECONFIG environment variable (or the default path) is consulted.
    :param environ: mapping used to look up KUBECONFIG; defaults to
        ``os.environ``.
    :param default_config_path: fallback config location (~/.kube/config).
    """
    # Resolve the configuration source: explicit path > $KUBECONFIG > default.
    if path is None:
        if environ is None:
            # Imported lazily so a caller-supplied mapping takes precedence.
            from os import environ
        try:
            kubeconfigs = environ[u"KUBECONFIG"]
        except KeyError:
            config = KubeConfig.from_file(default_config_path.path)
        else:
            config = _merge_configs_from_env(kubeconfigs)
    else:
        config = KubeConfig.from_file(path.path)
    if context is None:
        context = config.doc[u"current-context"]
    context = config.contexts[context]
    cluster = config.clusters[context[u"cluster"]]
    user = config.users[context[u"user"]]
    # The server URL may be bytes or text; normalise to text for URL parsing.
    if isinstance(cluster[u"server"], bytes):
        base_url = URL.fromText(cluster[u"server"].decode("ascii"))
    else:
        base_url = URL.fromText(cluster[u"server"])
    # Exactly one CA cert and one client key are expected; the client
    # certificate file may contain a whole chain, so keep all of it.
    [ca_cert] = parse(cluster[u"certificate-authority"].bytes())
    client_chain = parse(user[u"client-certificate"].bytes())
    [client_key] = parse(user[u"client-key"].bytes())
    agent = authenticate_with_certificate_chain(
        reactor, base_url, client_chain, client_key, ca_cert,
    )
    return network_kubernetes(
        base_url=base_url,
        agent=agent,
    )
Create a new IKubernetes provider based on a kube config file .
55,697
def collection_location(obj):
    """Get the URL path segments for the collection of objects like *obj*.

    Namespaced objects get a ``namespaces/<ns>/`` segment pair inserted
    between the API-version prefix and the collection name.
    """
    prefix = version_to_segments[obj.apiVersion]
    # Collection names are the lowercased kind, pluralised with a plain "s".
    collection = obj.kind.lower() + u"s"
    if IObject.providedBy(obj):
        namespace = obj.metadata.namespace
    else:
        namespace = None
    if namespace is None:
        return prefix + (collection,)
    return prefix + (u"namespaces", namespace, collection)
Get the URL for the collection of objects like obj .
55,698
async def execute(ctx):
    """Execute the story part at the current context and make one step
    further.

    Calls the current story part (awaiting it when it is a coroutine),
    interprets its result to decide what the story is now waiting for,
    and rewrites the stack frame at the tail depth accordingly.

    :param ctx: current story context.
    :return: a cloned, mutated story context.
    """
    tail_depth = len(ctx.stack()) - 1
    story_part = ctx.get_current_story_part()
    logger.debug('# going to call: {}'.format(story_part.__name__))
    waiting_for = story_part(ctx.message)
    # Coroutine story parts must be awaited to obtain their result.
    if inspect.iscoroutinefunction(story_part):
        waiting_for = await waiting_for
    logger.debug('# got result {}'.format(waiting_for))
    if isinstance(waiting_for, story_context.StoryContext):
        # The part handed back a whole context: wait for its return.
        ctx = waiting_for.clone()
        ctx.waiting_for = callable.WaitForReturn()
    else:
        ctx = ctx.clone()
        ctx.waiting_for = waiting_for
    # Snapshot the tail frame; it is rewritten below based on waiting_for.
    tail_data = ctx.message['session']['stack'][tail_depth]['data']
    tail_step = ctx.message['session']['stack'][tail_depth]['step']
    if ctx.is_waiting_for_input():
        if isinstance(ctx.waiting_for, callable.EndOfStory):
            # End of story: merge (or replace) the session user data.
            if isinstance(ctx.waiting_for.data, dict):
                new_data = {**ctx.get_user_data(), **ctx.waiting_for.data}
            else:
                new_data = ctx.waiting_for.data
            ctx.message = {
                **ctx.message,
                'session': {
                    **ctx.message['session'],
                    'data': new_data,
                },
            }
            tail_step += 1
        elif isinstance(ctx.waiting_for, loop.ScopeMatcher):
            # Stay on the same step; store the matcher for the scope.
            tail_data = matchers.serialize(ctx.waiting_for)
        elif isinstance(ctx.waiting_for, loop.BreakLoop):
            tail_step += 1
        else:
            # Ordinary wait: store a validator and advance one step.
            tail_data = matchers.serialize(matchers.get_validator(ctx.waiting_for))
            tail_step += 1
    # Replace only the frame at tail_depth, keeping frames above and below.
    ctx.message = modify_stack_in_message(
        ctx.message,
        lambda stack: stack[:tail_depth] + [{
            'data': tail_data,
            'step': tail_step,
            'topic': stack[tail_depth]['topic'],
        }] + stack[tail_depth + 1:],
    )
    logger.debug('# mutated ctx after execute')
    logger.debug(ctx)
    return ctx
execute story part at the current context and make one step further
55,699
def iterate_storyline(ctx):
    """Iterate the last storyline from the last visited story part.

    A generator: yields a context positioned at each remaining step of the
    compiled storyline, and expects the caller to send back the context to
    continue from (``ctx = yield ctx``).
    """
    logger.debug('# start iterate')
    compiled_story = ctx.compiled_story()
    if not compiled_story:
        return
    # Resume from the current step and walk to the end of the storyline.
    for step in range(ctx.current_step(), len(compiled_story.story_line)):
        ctx = ctx.clone()
        tail = ctx.stack_tail()
        # Rewrite only the tail frame's step counter, keeping data/topic.
        ctx.message = modify_stack_in_message(
            ctx.message,
            lambda stack: stack[:-1] + [{
                'data': tail['data'],
                'step': step,
                'topic': tail['topic'],
            }],
        )
        logger.debug('# [{}] iterate'.format(step))
        logger.debug(ctx)
        # Hand the positioned context to the caller; continue from what
        # the caller sends back.
        ctx = yield ctx
iterate the last storyline from the last visited story part