idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
56,300
def register ( self , name , namespace ) : if name in self . _NAMESPACES : raise ValueError ( "Namespace {0} already exists." . format ( name ) ) if not isinstance ( namespace , ns . Namespace ) : raise TypeError ( "Namespaces must be of type Namespace." ) self . _NAMESPACES [ name ] = namespace
Register a new namespace with the Configuration object .
56,301
def napi_compare ( left , ops , comparators , ** kwargs ) : values = [ ] for op , right in zip ( ops , comparators ) : value = COMPARE [ op ] ( left , right ) values . append ( value ) left = right result = napi_and ( values , ** kwargs ) if isinstance ( result , ndarray ) : return result else : return bool ( result )
Make pairwise comparisons of comparators .
56,302
def calc_transition_to_state ( self , newstate ) : cached_val = JTAGStateMachine . _lookup_cache . get ( ( self . state , newstate ) ) if cached_val : return cached_val if newstate not in self . states : raise ValueError ( "%s is not a valid state for this state " "machine" % newstate ) path = self . _find_shortest_path ( self . _statestr , newstate ) if not path : raise ValueError ( "No path to the requested state." ) res = self . _get_steps_from_nodes_path ( path ) res . reverse ( ) JTAGStateMachine . _lookup_cache [ ( self . state , newstate ) ] = res return res
Given a target state generate the sequence of transitions that would move this state machine instance to that target state .
56,303
def setup ( ) : global _displayhooks , _excepthooks if _displayhooks is not None : return _displayhooks = [ ] _excepthooks = [ ] if sys . displayhook != sys . __displayhook__ : _displayhooks . append ( weakref . ref ( sys . displayhook ) ) if sys . excepthook != sys . __excepthook__ : _excepthooks . append ( weakref . ref ( sys . excepthook ) ) sys . displayhook = displayhook sys . excepthook = excepthook
Initializes the hook queues for the sys module . This method will automatically be called on the first registration for a hook to the system by either the registerDisplay or registerExcept functions .
56,304
def _parse_iso8601 ( text ) : if isinstance ( text , unicode ) : try : return parse_iso8601 ( text ) except ValueError : raise CheckedValueTypeError ( None , ( datetime , ) , unicode , text , ) return text
Maybe parse an ISO8601 datetime string into a datetime .
56,305
def from_path ( cls , spec_path ) : with spec_path . open ( ) as spec_file : return cls . from_document ( load ( spec_file ) )
Load a specification from a path .
56,306
def to_document ( self ) : return dict ( info = thaw ( self . info ) , paths = thaw ( self . paths ) , definitions = thaw ( self . definitions ) , securityDefinitions = thaw ( self . securityDefinitions ) , security = thaw ( self . security ) , swagger = thaw ( self . swagger ) , )
Serialize this specification to a JSON - compatible object representing a Swagger specification .
56,307
def pclass_for_definition ( self , name ) : while True : try : cls = self . _pclasses [ name ] except KeyError : try : original_definition = self . definitions [ name ] except KeyError : raise NoSuchDefinition ( name ) if "$ref" in original_definition : name = original_definition [ u"$ref" ] assert name . startswith ( u"#/definitions/" ) name = name [ len ( u"#/definitions/" ) : ] continue definition = self . transform_definition ( name , original_definition ) kind = self . _identify_kind ( definition ) if kind is None : raise NotClassLike ( name , definition ) generator = getattr ( self , "_model_for_{}" . format ( kind ) ) model = generator ( name , definition ) bases = tuple ( self . _behaviors . get ( name , [ ] ) ) cls = model . pclass ( bases ) self . _pclasses [ name ] = cls return cls
Get a pyrsistent . PClass subclass representing the Swagger definition in this specification which corresponds to the given name .
56,308
def _model_for_CLASS ( self , name , definition ) : return _ClassModel . from_swagger ( self . pclass_for_definition , name , definition , )
Model a Swagger definition that is like a Python class .
56,309
def from_swagger ( cls , pclass_for_definition , name , definition ) : return cls ( name = name , doc = definition . get ( u"description" , name ) , attributes = cls . _attributes_for_definition ( pclass_for_definition , definition , ) , )
Create a new _ClassModel from a single Swagger definition .
56,310
def pclass ( self , bases ) : def discard_constant_fields ( cls , ** kwargs ) : def ctor ( ) : return super ( huh , cls ) . __new__ ( cls , ** kwargs ) try : return ctor ( ) except AttributeError : if u"kind" in kwargs or u"apiVersion" in kwargs : kwargs . pop ( "kind" , None ) kwargs . pop ( "apiVersion" , None ) return ctor ( ) raise def lt_pclass ( self , other ) : if isinstance ( other , self . __class__ ) : return sorted ( self . serialize ( ) . items ( ) ) < sorted ( other . serialize ( ) . items ( ) ) return NotImplemented def eq_pclass ( self , other ) : if isinstance ( other , self . __class__ ) : return sorted ( self . serialize ( ) . items ( ) ) == sorted ( other . serialize ( ) . items ( ) ) return NotImplemented content = { attr . name : attr . pclass_field_for_attribute ( ) for attr in self . attributes } content [ "__doc__" ] = nativeString ( self . doc ) content [ "serialize" ] = _serialize_with_omit content [ "__new__" ] = discard_constant_fields content [ "__lt__" ] = lt_pclass content [ "__eq__" ] = eq_pclass content [ "__hash__" ] = PClass . __hash__ content = total_ordering ( content ) huh = type ( nativeString ( self . name ) , bases + ( PClass , ) , content ) return huh
Create a pyrsistent . PClass subclass representing this class .
56,311
def dumps_bytes ( obj ) : b = dumps ( obj ) if isinstance ( b , unicode ) : b = b . encode ( "ascii" ) return b
Serialize obj to JSON formatted bytes .
56,312
def native_string_to_bytes ( s , encoding = "ascii" , errors = "strict" ) : if not isinstance ( s , str ) : raise TypeError ( "{} must be type str, not {}" . format ( s , type ( s ) ) ) if str is bytes : return s else : return s . encode ( encoding = encoding , errors = errors )
Ensure that the native string s is converted to bytes .
56,313
def native_string_to_unicode ( s , encoding = "ascii" , errors = "strict" ) : if not isinstance ( s , str ) : raise TypeError ( "{} must be type str, not {}" . format ( s , type ( s ) ) ) if str is unicode : return s else : return s . decode ( encoding = encoding , errors = errors )
Ensure that the native string s is converted to unicode .
56,314
def datetime_handler ( x ) : if isinstance ( x , datetime . datetime ) or isinstance ( x , datetime . date ) : return x . isoformat ( ) raise TypeError ( "Unknown type" )
Allow serializing datetime objects to JSON
56,315
def parsed ( self ) : if not self . _parsed : self . _parsed = compile ( self . content , self . path , 'exec' ) return self . _parsed
Get the code object which represents the compiled Python file .
56,316
def _chunk ( iterable , size ) : args = ( iter ( iterable ) , ) * size return ( itertools . takewhile ( lambda x : x is not None , group ) for group in itertools . zip_longest ( * args ) )
Split an iterable into chunks of a fixed size .
56,317
def _matrix_add_column ( matrix , column , default = 0 ) : height_difference = len ( column ) - len ( matrix ) width = max ( len ( row ) for row in matrix ) if matrix else 0 offset = 0 if height_difference > 0 : for _ in range ( height_difference ) : matrix . insert ( 0 , [ default ] * width ) if height_difference < 0 : offset = - height_difference for index , value in enumerate ( column ) : row_index = index + offset row = matrix [ row_index ] width_difference = width - len ( row ) row . extend ( [ default ] * width_difference ) row . append ( value )
Given a matrix as a list of lists add a column to the right filling in with a default value if necessary .
56,318
def vertical_graph ( * args , sep = '\n' ) : r lines = [ ] if len ( args ) == 1 : bars = args [ 0 ] else : bars = args if sep is None : sep = '\n' for bar_group in _chunk ( bars , 4 ) : line = [ ] for braille_row , bar_value in enumerate ( bar_group ) : full_blocks_needed = bar_value // 2 blocks_needed = full_blocks_needed + ( bar_value % 2 ) extra_blocks_needed = blocks_needed - len ( line ) if extra_blocks_needed > 0 : line . extend ( [ _BRAILLE_EMPTY_BLOCK ] * extra_blocks_needed ) for block_index in range ( full_blocks_needed ) : line [ block_index ] += _BRAILLE_FULL_ROW [ braille_row ] if bar_value % 2 : line [ full_blocks_needed ] += _BRAILLE_HALF_ROW [ braille_row ] lines . append ( '' . join ( chr ( code ) for code in line ) ) return sep . join ( lines )
r Consume an iterable of integers and produce a vertical bar graph using braille characters .
56,319
def horizontal_graph ( * args ) : r lines = [ ] if len ( args ) == 1 : bars = args [ 0 ] else : bars = args for bar_group in _chunk ( bars , 2 ) : column = [ ] for braille_col , bar_value in enumerate ( bar_group ) : full_blocks_needed = bar_value // 4 blocks_needed = full_blocks_needed + ( 1 if bar_value % 4 else 0 ) extra_blocks_needed = blocks_needed - len ( column ) column = ( [ _BRAILLE_EMPTY_BLOCK ] * extra_blocks_needed ) + column for block_index in range ( - full_blocks_needed , 0 , 1 ) : column [ block_index ] += _BRAILLE_FULL_COL [ braille_col ] if bar_value % 4 : partial_index = ( bar_value % 4 ) - 1 column [ - blocks_needed ] += ( _BRAILLE_PARTIAL_COL [ braille_col ] [ partial_index ] ) _matrix_add_column ( lines , column , default = _BRAILLE_EMPTY_BLOCK ) return '\n' . join ( '' . join ( chr ( code ) for code in line ) for line in lines )
r Consume an iterable of integers and produce a horizontal bar graph using braille characters .
56,320
def generate_example ( config , ext = 'json' ) : template_name = 'example.{0}' . format ( ext . lower ( ) ) template = ENV . get_template ( template_name ) return template . render ( config = config )
Generate an example file based on the given Configuration object .
56,321
def _removeHeaderTag ( header , tag ) : if header . startswith ( tag ) : tagPresent = True header = header [ len ( tag ) : ] else : tagPresent = False return header , tagPresent
Removes a tag from the beginning of a header string .
56,322
def _idFromHeaderInfo ( headerInfo , isDecoy , decoyTag ) : proteinId = headerInfo [ 'id' ] if isDecoy : proteinId = '' . join ( ( decoyTag , proteinId ) ) return proteinId
Generates a protein id from headerInfo . If isDecoy is True the decoyTag is added to beginning of the generated protein id .
56,323
def _nameFromHeaderInfo ( headerInfo , isDecoy , decoyTag ) : if 'name' in headerInfo : proteinName = headerInfo [ 'name' ] else : proteinName = headerInfo [ 'id' ] if isDecoy : proteinName = '' . join ( ( decoyTag , proteinName ) ) return proteinName
Generates a protein name from headerInfo . If isDecoy is True the decoyTag is added to beginning of the generated protein name .
56,324
def _addPeptide ( self , sequence , proteinId , digestInfo ) : stdSequence = self . getStdSequence ( sequence ) if stdSequence not in self . peptides : self . peptides [ stdSequence ] = PeptideEntry ( stdSequence , mc = digestInfo [ 'missedCleavage' ] ) if sequence not in self . peptides : self . peptides [ sequence ] = self . peptides [ stdSequence ] if proteinId not in self . peptides [ stdSequence ] . proteins : self . peptides [ stdSequence ] . proteins . add ( proteinId ) self . peptides [ stdSequence ] . proteinPositions [ proteinId ] = ( digestInfo [ 'startPos' ] , digestInfo [ 'endPos' ] ) self . proteins [ proteinId ] . peptides . add ( sequence )
Add a peptide to the protein database .
56,325
def configure ( self , options , conf ) : super ( LeakDetectorPlugin , self ) . configure ( options , conf ) if options . leak_detector_level : self . reporting_level = int ( options . leak_detector_level ) self . report_delta = options . leak_detector_report_delta self . patch_mock = options . leak_detector_patch_mock self . ignore_patterns = options . leak_detector_ignore_patterns self . save_traceback = options . leak_detector_save_traceback self . multiprocessing_enabled = bool ( getattr ( options , 'multiprocess_workers' , False ) )
Configure plugin .
56,326
def bind ( self , instance , auto = False ) : methods = [ ( m , cls . __dict__ [ m ] ) for cls in inspect . getmro ( type ( instance ) ) for m in cls . __dict__ if inspect . isfunction ( cls . __dict__ [ m ] ) ] try : deps_of_endpoints = [ ( method_ptr , self . entrypoint_deps ( method_ptr ) ) for ( method_name , method_ptr ) in methods ] for ( method_ptr , method_deps ) in deps_of_endpoints : if len ( method_deps ) > 0 : method_ptr ( instance , ** method_deps ) except KeyError : pass if auto and instance not in self . current_scope . get_auto_bind_list ( ) : self . current_scope . auto_bind ( instance ) return instance
Bind deps to instance
56,327
def as_dict ( self ) : odict = OrderedDict ( ) for name in self . _order : attr_value = getattr ( self , name ) if isinstance ( attr_value , List ) : _list = [ ] for item in attr_value : _list . append ( ( item . as_dict ( ) if isinstance ( item , Entity ) else item ) ) odict [ name ] = _list elif isinstance ( attr_value , Entity ) : odict [ name ] = attr_value . as_dict ( ) else : odict [ name ] = getattr ( self , name ) return odict
create a dict based on class attributes
56,328
def map ( cls , dict_entity ) : for key , value in dict_entity . items ( ) : if hasattr ( cls , key ) : if isinstance ( value , list ) : _list = getattr ( cls , key ) if isinstance ( _list . expected_type , list ) : for _dict in value : _list . append ( cls . map ( _list . typeof ( ) , _dict ) ) elif isinstance ( value , dict ) : attr = getattr ( cls , key ) instance = attr . expected_type ( ) Entity . map ( instance , value ) setattr ( cls , key , instance ) else : setattr ( cls , key , value ) else : setattr ( cls , key , value )
staticmethod which will be used in recursive mode in order to map dict to instance
56,329
def generateParams ( rawfilepath , outputpath , isolationWindow , coElute ) : output = str ( ) output = '\n' . join ( [ output , ' = ' . join ( [ 'datapath' , rawfilepath ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'logfilepath' , outputpath ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'outputpath' , outputpath ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'co-elute' , str ( coElute ) ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'input_format' , 'raw' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'isolation_width' , str ( isolationWindow ) ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'mars_threshold' , '-0.5' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'ipv_file' , '.\IPV.txt' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'trainingset' , 'EmptyPath' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'output_mars_y' , '0' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'delete_msn' , '0' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'output_mgf' , '1' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'output_pf' , '0' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'debug_mode' , '0' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'check_activationcenter' , '1' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'output_all_mars_y' , '0' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'rewrite_files' , '0' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'export_unchecked_mono' , '0' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'cut_similiar_mono' , '1' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'mars_model' , '4' ] ) ] ) output = '\n' . join ( [ output , ' = ' . join ( [ 'output_trainingdata' , '0' ] ) ] ) return output
Generates a string containing the parameters for a pParse parameter file but doesn t write any file yet .
56,330
def writeParams ( rawfilepath , outputpath , isolationWindow , coElute = 0 ) : paramText = generateParams ( rawfilepath , outputpath , isolationWindow , coElute ) filename , fileext = os . path . splitext ( os . path . basename ( rawfilepath ) ) paramPath = aux . joinpath ( outputpath , filename + '.pparse.para' ) with open ( paramPath , 'wb' ) as openfile : openfile . write ( paramText ) return paramPath
Generate and write a pParse parameter file .
56,331
def execute ( paramPath , executable = 'pParse.exe' ) : procArgs = [ executable , paramPath ] proc = subprocess . Popen ( procArgs , stderr = subprocess . PIPE ) while True : out = proc . stderr . read ( 1 ) if out == '' and proc . poll ( ) != None : break if out != '' : sys . stdout . write ( out ) sys . stdout . flush ( ) return proc . returncode
Execute pParse with the specified parameter file .
56,332
def cleanUpPparse ( outputpath , rawfilename , mgf = False ) : extensions = [ 'csv' , 'ms1' , 'ms2' , 'xtract' ] filename , fileext = os . path . splitext ( os . path . basename ( rawfilename ) ) additionalFiles = [ aux . joinpath ( outputpath , 'pParsePlusLog.txt' ) , aux . joinpath ( outputpath , filename + '.pparse.para' ) , ] for ext in extensions : filepath = aux . joinpath ( outputpath , '.' . join ( [ filename , ext ] ) ) if os . path . isfile ( filepath ) : print ( 'Removing file: ' , filepath ) os . remove ( filepath ) for filepath in additionalFiles : if os . path . isfile ( filepath ) : print ( 'Removing file: ' , filepath ) os . remove ( filepath ) if mgf : for _filename in os . listdir ( outputpath ) : _basename , _fileext = os . path . splitext ( _filename ) if _fileext . lower ( ) != '.mgf' : continue if _basename . find ( basename ) != - 1 and _basename != basename : filepath = aux . joinpath ( outputpath , _filename ) print ( 'Removing file: ' , filepath ) os . remove ( filepath )
Delete temporary files generated by pparse including the filetypes . csv . ms1 . ms2 . xtract the files pParsePlusLog . txt and pParse . para and optionally also the . mgf file generated by pParse .
56,333
def create_handler ( cls , message_handler , buffer_size , logger ) : cls . BUFFER_SIZE = buffer_size cls . message_handler = message_handler cls . logger = logger cls . message_handler . logger = logging . getLogger ( message_handler . __class__ . __name__ ) cls . message_handler . logger . setLevel ( logger . level ) return cls
Class variables used here since the framework creates an instance for each connection
56,334
def handle ( self ) : logger = StreamHandler . logger logger . debug ( "handling requests with message handler %s " % StreamHandler . message_handler . __class__ . __name__ ) message_handler = StreamHandler . message_handler try : while True : logger . debug ( 'waiting for more data' ) if not message_handler . handle ( self . request , StreamHandler . BUFFER_SIZE ) : break logger . warning ( "connection closed from %s" % ( self . client_address [ 0 ] ) ) self . request . close ( ) except : logger . exception ( "connection closed from %s" % ( self . client_address [ 0 ] ) ) finally : self . request . close ( )
The required handle method .
56,335
def receiveError ( self , reasonCode , description ) : error = disconnectErrors . get ( reasonCode , DisconnectError ) self . connectionClosed ( error ( reasonCode , description ) ) SSHClientTransport . receiveError ( self , reasonCode , description )
Called when we receive a disconnect error message from the other side .
56,336
def handle ( self , event ) : callback = getattr ( self , 'on_{event}' . format ( event = event . event ) , None ) callback ( event )
Entry point to handle user events .
56,337
def _emplace_pmrna ( mrnas , parent , strict = False ) : mrnas . sort ( key = lambda m : ( m . cdslen , m . get_attribute ( 'ID' ) ) ) pmrna = mrnas . pop ( ) if strict : parent . children = [ pmrna ] else : parent . children = [ c for c in parent . children if c not in mrnas ]
Retrieve the primary mRNA and discard all others .
56,338
def _emplace_transcript ( transcripts , parent ) : transcripts . sort ( key = lambda t : ( len ( t ) , t . get_attribute ( 'ID' ) ) ) pt = transcripts . pop ( ) parent . children = [ pt ]
Retrieve the primary transcript and discard all others .
56,339
def primary_mrna ( entrystream , parenttype = 'gene' ) : for entry in entrystream : if not isinstance ( entry , tag . Feature ) : yield entry continue for parent in tag . select . features ( entry , parenttype , traverse = True ) : mrnas = [ f for f in parent . children if f . type == 'mRNA' ] if len ( mrnas ) == 0 : continue _emplace_pmrna ( mrnas , parent ) yield entry
Select a single mRNA as a representative for each protein - coding gene .
56,340
def _get_primary_type ( ttypes , parent , logstream = stderr ) : if len ( ttypes ) > 1 : if logstream : message = '[tag::transcript::primary_transcript]' message += ' WARNING: feature {:s}' . format ( parent . slug ) message += ' has multiple associated transcript types' message += ' {}' . format ( ttypes ) print ( message , file = logstream ) if 'mRNA' not in ttypes : message = ( 'cannot resolve multiple transcript types if "mRNA" is' ' not one of those types {}' . format ( ttypes ) ) raise Exception ( message ) ttypes = [ 'mRNA' ] return ttypes [ 0 ]
Check for multiple transcript types and if possible select one .
56,341
def primary_transcript ( entrystream , parenttype = 'gene' , logstream = stderr ) : for entry in entrystream : if not isinstance ( entry , tag . Feature ) : yield entry continue for parent in tag . select . features ( entry , parenttype , traverse = True ) : if parent . num_children == 0 : continue transcripts = defaultdict ( list ) for child in parent . children : if child . type in type_terms : transcripts [ child . type ] . append ( child ) if len ( transcripts ) == 0 : continue ttypes = list ( transcripts . keys ( ) ) ttype = _get_primary_type ( ttypes , parent ) transcript_list = transcripts [ ttype ] if ttype == 'mRNA' : _emplace_pmrna ( transcript_list , parent , strict = True ) else : _emplace_transcript ( transcript_list , parent ) yield entry
Select a single transcript as a representative for each gene .
56,342
def parse_parent ( docname ) : lineage = docname . split ( '/' ) lineage_count = len ( lineage ) if docname == 'index' : parent = None elif lineage_count == 1 : parent = 'index' elif lineage_count == 2 and lineage [ - 1 ] == 'index' : parent = 'index' elif lineage_count == 2 : parent = lineage [ 0 ] + '/index' elif lineage [ - 1 ] == 'index' : parent = '/' . join ( lineage [ : - 2 ] ) + '/index' else : parent = '/' . join ( lineage [ : - 1 ] ) + '/index' return parent
Given a docname path pick apart and return name of parent
56,343
def parents ( self , resources ) : if self . docname == 'index' : return [ ] parents = [ ] parent = resources . get ( self . parent ) while parent is not None : parents . append ( parent ) parent = resources . get ( parent . parent ) return parents
Split the path in name and get parents
56,344
def acquire ( self , resources , prop_name ) : custom_prop = getattr ( self . props , prop_name , None ) if custom_prop : return custom_prop for parent in self . parents ( resources ) : acquireds = parent . props . acquireds if acquireds : rtype_acquireds = acquireds . get ( self . rtype ) if rtype_acquireds : prop_acquired = rtype_acquireds . get ( prop_name ) if prop_acquired : return prop_acquired all_acquireds = acquireds . get ( 'all' ) if all_acquireds : prop_acquired = all_acquireds . get ( prop_name ) if prop_acquired : return prop_acquired return
Starting with self walk until you find prop or None
56,345
def find_prop_item ( self , prop_name , prop_key , prop_value ) : prop = getattr ( self . props , prop_name , None ) if prop : return next ( ( p for p in prop if getattr ( p , prop_key ) == prop_value ) , None ) return None
Look for a list prop with an item where key == value
56,346
def detect ( self , code ) : keywords = KeywordFetcher . fetch ( code ) probabilities = { } for keyword in keywords : if keyword not in self . trained_set [ 'keywords' ] : continue data = self . trained_set [ 'keywords' ] [ keyword ] p_avg = sum ( data . values ( ) ) / len ( data ) for language , probability in data . items ( ) : p = probability / p_avg probabilities [ language ] = probabilities . get ( language , 0 ) + math . log ( 1 + p ) for pattern , data in self . trained_set [ 'patterns' ] . items ( ) : matcher = PatternMatcher ( pattern ) p0 = matcher . getratio ( code ) for language , p_avg in data . items ( ) : if language not in probabilities : continue p = 1 - abs ( p_avg - p0 ) probabilities [ language ] *= p sum_val = 0 for language , p in probabilities . items ( ) : sum_val += math . pow ( math . e / 2 , p ) for language , p in probabilities . items ( ) : probabilities [ language ] = math . pow ( math . e / 2 , p ) / sum_val * 100 return sorted ( probabilities . items ( ) , key = lambda a : a [ 1 ] , reverse = True )
Detect language with code
56,347
def fetch ( code ) : ret = { } code = KeywordFetcher . _remove_strings ( code ) result = KeywordFetcher . prog . findall ( code ) for keyword in result : if len ( keyword ) <= 1 : continue if keyword . isdigit ( ) : continue if keyword [ 0 ] == '-' or keyword [ 0 ] == '*' : keyword = keyword [ 1 : ] if keyword [ - 1 ] == '-' or keyword [ - 1 ] == '*' : keyword = keyword [ 0 : - 1 ] if len ( keyword ) <= 1 : continue ret [ keyword ] = ret . get ( keyword , 0 ) + 1 return ret
Fetch keywords by Code
56,348
def _remove_strings ( code ) : removed_string = "" is_string_now = None for i in range ( 0 , len ( code ) - 1 ) : append_this_turn = False if code [ i ] == "'" and ( i == 0 or code [ i - 1 ] != '\\' ) : if is_string_now == "'" : is_string_now = None elif is_string_now == None : is_string_now = "'" append_this_turn = True elif code [ i ] == '"' and ( i == 0 or code [ i - 1 ] != '\\' ) : if is_string_now == '"' : is_string_now = None elif is_string_now == None : is_string_now = '"' append_this_turn = True if is_string_now == None or append_this_turn == True : removed_string += code [ i ] return removed_string
Remove strings in code
56,349
def getratio ( self , code ) : if len ( code ) == 0 : return 0 code_replaced = self . prog . sub ( '' , code ) return ( len ( code ) - len ( code_replaced ) ) / len ( code )
Get ratio of code and pattern matched
56,350
def loadXmlProperty ( self , xprop ) : if xprop . tag == 'property' : value = self . dataInterface ( ) . fromXml ( xprop [ 0 ] ) self . _xmlData [ xprop . get ( 'name' , '' ) ] = value
Loads an XML property that is a child of the root data being loaded .
56,351
def toXml ( self , xparent = None ) : if xparent is None : xml = ElementTree . Element ( 'object' ) else : xml = ElementTree . SubElement ( xparent , 'object' ) xml . set ( 'class' , self . __class__ . __name__ ) for name , value in self . _xmlData . items ( ) : xprop = ElementTree . SubElement ( xml , 'property' ) xprop . set ( 'name' , name ) XmlDataIO . toXml ( value , xprop ) return xml
Converts this object to XML .
56,352
def fromXml ( cls , xml ) : clsname = xml . get ( 'class' ) if clsname : subcls = XmlObject . byName ( clsname ) if subcls is None : inst = MissingXmlObject ( clsname ) else : inst = subcls ( ) else : inst = cls ( ) inst . loadXml ( xml ) return inst
Restores an object from XML .
56,353
def template_substitute ( text , ** kwargs ) : for name , value in kwargs . items ( ) : placeholder_pattern = "{%s}" % name if placeholder_pattern in text : text = text . replace ( placeholder_pattern , value ) return text
Replace placeholders in text by using the data mapping . Other placeholders that is not represented by data is left untouched .
56,354
def _wva ( values , weights ) : assert len ( values ) == len ( weights ) and len ( weights ) > 0 return sum ( [ mul ( * x ) for x in zip ( values , weights ) ] ) / sum ( weights )
Calculates a weighted average
56,355
def execute_one_to_many_job ( parent_class = None , get_unfinished_kwargs = None , get_unfinished_limit = None , parser_func = None , parser_func_kwargs = None , build_url_func_kwargs = None , downloader_func = None , downloader_func_kwargs = None , post_process_response_func = None , post_process_response_func_kwargs = None , process_item_func_kwargs = None , logger = None , sleep_time = None ) : get_unfinished_kwargs = prepare_kwargs ( get_unfinished_kwargs ) parser_func_kwargs = prepare_kwargs ( parser_func_kwargs ) build_url_func_kwargs = prepare_kwargs ( build_url_func_kwargs ) downloader_func_kwargs = prepare_kwargs ( downloader_func_kwargs ) post_process_response_func_kwargs = prepare_kwargs ( post_process_response_func_kwargs ) process_item_func_kwargs = prepare_kwargs ( process_item_func_kwargs ) if post_process_response_func is None : def post_process_response_func ( response , ** kwargs ) : pass if not isinstance ( logger , SpiderLogger ) : raise TypeError if sleep_time is None : sleep_time = 0 query_set = parent_class . get_all_unfinished ( ** get_unfinished_kwargs ) if get_unfinished_limit is not None : query_set = query_set . limit ( get_unfinished_limit ) todo = list ( query_set ) logger . log_todo_volumn ( todo ) for parent_instance in todo : url = parent_instance . build_url ( ** build_url_func_kwargs ) logger . log_to_crawl_url ( url ) logger . log_sleeper ( sleep_time ) time . sleep ( sleep_time ) try : response_or_html = downloader_func ( url , ** downloader_func_kwargs ) if isinstance ( response_or_html , string_types ) : parser_func_kwargs [ "html" ] = response_or_html else : parser_func_kwargs [ "response" ] = response_or_html post_process_response_func ( response_or_html , ** post_process_response_func_kwargs ) except Exception as e : logger . log_error ( e ) continue try : parse_result = parser_func ( parent = parent_instance , ** parser_func_kwargs ) parse_result . process_item ( ** process_item_func_kwargs ) logger . 
log_status ( parse_result ) except Exception as e : logger . log_error ( e ) continue
A standard one - to - many crawling workflow .
56,356
def create_db_schema ( cls , cur , schema_name ) : create_schema_script = "CREATE SCHEMA {0} ;\n" . format ( schema_name ) cur . execute ( create_schema_script )
Create Postgres schema script and execute it on cursor
56,357
def revoke_all ( cls , cur , schema_name , roles ) : cur . execute ( 'REVOKE ALL ON SCHEMA {0} FROM {1};' 'REVOKE ALL ON ALL TABLES IN SCHEMA {0} FROM {1};' 'REVOKE ALL ON ALL SEQUENCES IN SCHEMA {0} FROM {1};' 'REVOKE ALL ON ALL FUNCTIONS IN SCHEMA {0} FROM {1};' . format ( schema_name , roles ) )
Revoke all privileges from schema tables sequences and functions for a specific role
56,358
def schema_exists ( cls , cur , schema_name ) : cur . execute ( "SELECT EXISTS (SELECT schema_name FROM information_schema.schemata WHERE schema_name = '{0}');" . format ( schema_name ) ) return cur . fetchone ( ) [ 0 ]
Check if schema exists
56,359
def pandas ( self ) : if self . _pandas is None : self . _pandas = pd . DataFrame ( ) . from_records ( self . list_of_dicts ) return self . _pandas
Return a Pandas dataframe .
56,360
def translate ( self , dialect ) : new_resultset = copy ( self ) new_resultset . dialect = dialect for result in new_resultset : for dimensionvalue in result . dimensionvalues : dimensionvalue . value = dimensionvalue . translate ( dialect ) return new_resultset
Return a copy of this ResultSet in a different dialect .
56,361
def append ( self , val ) : val . resultset = self val . dataset = self . dataset if val . dataset : dataset_dimensions = self . dataset . dimensions for k , v in val . raw_dimensions . items ( ) : if k not in dataset_dimensions : d = Dimension ( k ) else : d = dataset_dimensions [ k ] normalized_value = unicode ( v ) if d . dialect and d . datatype : if d . dialect in d . datatype . dialects : for av in d . allowed_values : if unicode ( v ) in av . dialects . get ( d . dialect , [ ] ) : normalized_value = av . value break if isinstance ( v , DimensionValue ) : dim = v v . value = normalized_value else : if k in dataset_dimensions : dim = DimensionValue ( normalized_value , d ) else : dim = DimensionValue ( normalized_value , Dimension ( ) ) val . dimensionvalues . append ( dim ) self . dimensionvalues = val . dimensionvalues super ( ResultSet , self ) . append ( val )
Connect any new results to the resultset .
56,362
def allowed_values ( self ) : if self . _allowed_values is None : self . _allowed_values = ValueList ( ) for val in self . scraper . _fetch_allowed_values ( self ) : if isinstance ( val , DimensionValue ) : self . _allowed_values . append ( val ) else : self . _allowed_values . append ( DimensionValue ( val , Dimension ( ) ) ) return self . _allowed_values
Return a list of allowed values .
56,363
def append ( self , val ) : val . scraper = self . scraper val . _collection_path = copy ( self . collection . _collection_path ) val . _collection_path . append ( val ) super ( ItemList , self ) . append ( val )
Connect any new items to the scraper .
56,364
def _move_here ( self ) : cu = self . scraper . current_item if self is cu : return if cu . items and self in cu . items : self . scraper . move_to ( self ) return if self is cu . parent : self . scraper . move_up ( ) if self . parent and self in self . parent . items : self . scraper . move_up ( ) self . scraper . move_to ( self ) return self . scraper . move_to_top ( ) for step in self . path : self . scraper . move_to ( step )
Move the cursor to this item .
56,365
def items ( self ) : if self . scraper . current_item is not self : self . _move_here ( ) if self . _items is None : self . _items = ItemList ( ) self . _items . scraper = self . scraper self . _items . collection = self for i in self . scraper . _fetch_itemslist ( self ) : i . parent = self if i . type == TYPE_DATASET and i . dialect is None : i . dialect = self . scraper . dialect self . _items . append ( i ) return self . _items
ItemList of children .
56,366
def _hash ( self ) : dump = dumps ( self . query , sort_keys = True ) if isinstance ( dump , str ) : dump = dump . encode ( 'utf-8' ) return md5 ( dump ) . hexdigest ( )
Return a hash for the current query .
56,367
def dimensions ( self ) : if self . scraper . current_item is not self : self . _move_here ( ) if self . _dimensions is None : self . _dimensions = DimensionList ( ) for d in self . scraper . _fetch_dimensions ( self ) : d . dataset = self d . scraper = self . scraper self . _dimensions . append ( d ) return self . _dimensions
Available dimensions if defined .
56,368
def on ( cls , hook ) : def decorator ( function_ ) : cls . _hooks [ hook ] . append ( function_ ) return function_ return decorator
Hook decorator .
56,369
def move_to_top ( self ) : self . current_item = self . root for f in self . _hooks [ "top" ] : f ( self ) return self
Move to root item .
56,370
def move_up ( self ) : if self . current_item . parent is not None : self . current_item = self . current_item . parent for f in self . _hooks [ "up" ] : f ( self ) if self . current_item is self . root : for f in self . _hooks [ "top" ] : f ( self ) return self
Move up one level in the hierarchy unless already on top .
56,371
def descendants ( self ) : for i in self . current_item . items : self . move_to ( i ) if i . type == TYPE_COLLECTION : for c in self . children : yield c else : yield i self . move_up ( )
Recursively return every dataset below current item .
56,372
def children ( self ) : from warnings import warn warn ( "Deprecated. Use Scraper.descendants." , DeprecationWarning ) for descendant in self . descendants : yield descendant
Former misleading name for descendants .
56,373
def make_python_name ( s , default = None , number_prefix = 'N' , encoding = "utf-8" ) : if s in ( '' , None ) : s = default s = str ( s ) s = re . sub ( "[^a-zA-Z0-9_]" , "_" , s ) if not re . match ( '\d' , s ) is None : s = number_prefix + s return unicode ( s , encoding )
Returns a unicode string that can be used as a legal python identifier .
56,374
def data_type ( self , data_type ) : allowed_values = [ "string" , "number" , "date" , "color" ] if data_type is not None and data_type not in allowed_values : raise ValueError ( "Invalid value for `data_type` ({0}), must be one of {1}" . format ( data_type , allowed_values ) ) self . _data_type = data_type
Sets the data_type of this Option .
56,375
def get_callable_signature_as_string ( the_callable ) : args , varargs , varkw , defaults = inspect . getargspec ( the_callable ) tmp_args = list ( args ) args_dict = { } if defaults : defaults = list ( defaults ) else : defaults = [ ] while defaults : args_dict [ tmp_args . pop ( ) ] = defaults . pop ( ) while tmp_args : args_dict [ tmp_args . pop ( ) ] = None args_list = [ ] for arg in args : if args_dict [ arg ] is not None : args_list . append ( "%s=%s" % ( arg , repr ( args_dict [ arg ] ) ) ) else : args_list . append ( arg ) if varargs : args_list . append ( "*%s" % varargs ) if varkw : args_list . append ( "**%s" % varkw ) args_string = ', ' . join ( args_list ) return "def %s(%s)" % ( the_callable . __name__ , args_string )
Return a string representing a callable .
56,376
def get_callable_documentation ( the_callable ) : return wrap_text_in_a_box ( title = get_callable_signature_as_string ( the_callable ) , body = ( getattr ( the_callable , '__doc__' ) or 'No documentation' ) . replace ( '\n' , '\n\n' ) , style = 'ascii_double' )
Return a string with the callable signature and its docstring .
56,377
def register_extension_class ( ext , base , * args , ** kwargs ) : ext_instance = ext . plugin ( base , * args , ** kwargs ) setattr ( base , ext . name . lstrip ( '_' ) , ext_instance )
Instantiate the given extension class and register as a public attribute of the given base .
56,378
def register_extension_method ( ext , base , * args , ** kwargs ) : bound_method = create_bound_method ( ext . plugin , base ) setattr ( base , ext . name . lstrip ( '_' ) , bound_method )
Register the given extension method as a public attribute of the given base .
56,379
def token_auto_auth ( func ) : @ functools . wraps ( func ) def wrapper ( self , * args , ** kwargs ) : try : response = func ( self , * args , ** kwargs ) except errors . AuthFailure : self . _client . auth . _refresh ( ) response = func ( self , * args , ** kwargs ) return response return wrapper
Wrap class methods with automatic token re - authentication .
56,380
def get_theme_dir ( ) : return os . path . abspath ( os . path . join ( os . path . dirname ( __file__ ) , "theme" ) )
Returns path to directory containing this package s theme . This is designed to be used when setting the html_theme_path option within Sphinx s conf . py file .
56,381
def seconds2str ( seconds ) : if seconds < 0 : return "{0:.3g}s" . format ( seconds ) elif math . isnan ( seconds ) : return "NaN" elif math . isinf ( seconds ) : return "Inf" m , s = divmod ( seconds , 60 ) h , m = divmod ( m , 60 ) if h >= 1 : return "{0:g}h {1:02g}m {2:.3g}s" . format ( h , m , s ) elif m >= 1 : return "{0:02g}m {1:.3g}s" . format ( m , s ) else : return "{0:.3g}s" . format ( s )
Returns string such as 1h 05m 55s .
56,382
def eval_fieldnames ( string_ , varname = "fieldnames" ) : ff = eval ( string_ ) if not isinstance ( ff , list ) : raise RuntimeError ( "{0!s} must be a list" . format ( varname ) ) if not all ( [ isinstance ( x , str ) for x in ff ] ) : raise RuntimeError ( "{0!s} must be a list of strings" . format ( varname ) ) ff = [ x . upper ( ) for x in ff ] return ff
Evaluate ``string_``, which must evaluate to a list of strings; the field names are also converted to uppercase.
56,383
def strip_accents ( s ) : nfkd = unicodedata . normalize ( 'NFKD' , unicode ( s ) ) return u'' . join ( ch for ch in nfkd if not unicodedata . combining ( ch ) )
Strip accents to prepare for slugification .
56,384
def slugify ( s ) : s = strip_accents ( s . replace ( "'" , '' ) . lower ( ) ) return re . sub ( '[^a-z0-9]+' , ' ' , s ) . strip ( ) . replace ( ' ' , '-' )
Converts the given string to a URL slug .
56,385
def _legacy_status ( stat ) : if stat [ : 2 ] == '30' or stat [ : 2 ] == '47' : ooo = stat [ 4 : 5 ] if ooo == '0' : return 0 if ooo == '8' : return 100 if stat == '7e' : return 0 if stat == '7f' : return 100 if len ( stat ) == 6 : try : val = int ( stat [ 4 : ] , 16 ) except ValueError : val = 0 hwt = stat [ : 2 ] if hwt == '01' : return round ( ( ( 125 - val ) / 125 ) * 100 ) if hwt == '02' : return 100 if val == 127 else 0 if hwt == '28' : if stat [ 2 : 4 ] == '01' : if stat [ 4 : ] == '78' : return 0 return round ( ( ( 120 - val ) / 120 ) * 100 ) if stat . upper ( ) . find ( 'ON' ) >= 0 : return 100 if ( not stat ) or stat . upper ( ) . find ( 'OFF' ) >= 0 : return 0 if stat . endswith ( '%' ) : if stat [ : - 1 ] . isdigit : return int ( stat [ : - 1 ] ) _LOGGER . debug ( "val='%s' used a -1 fallback in legacy_status" , stat ) return - 1
Legacy status method from the qsmobile . js library .
56,386
def decode_door ( packet , channel = 1 ) : val = str ( packet . get ( QSDATA , '' ) ) if len ( val ) == 6 and val . startswith ( '46' ) and channel == 1 : return val [ - 1 ] == '0' return None
Decode a door sensor .
56,387
def decode_imod ( packet , channel = 1 ) : val = str ( packet . get ( QSDATA , '' ) ) if len ( val ) == 8 and val . startswith ( '4e' ) : try : _map = ( ( 5 , 1 ) , ( 5 , 2 ) , ( 5 , 4 ) , ( 4 , 1 ) , ( 5 , 1 ) , ( 5 , 2 ) ) [ channel - 1 ] return ( int ( val [ _map [ 0 ] ] , 16 ) & _map [ 1 ] ) == 0 except IndexError : return None return None
Decode a 4-channel imod (may support 6 channels).
56,388
def decode_pir ( packet , channel = 1 ) : val = str ( packet . get ( QSDATA , '' ) ) if len ( val ) == 8 and val . startswith ( '0f' ) and channel == 1 : return int ( val [ - 4 : ] , 16 ) > 0 return None
Decode a PIR .
56,389
def decode_temperature ( packet , channel = 1 ) : val = str ( packet . get ( QSDATA , '' ) ) if len ( val ) == 12 and val . startswith ( '34' ) and channel == 1 : temperature = int ( val [ - 4 : ] , 16 ) return round ( float ( ( - 46.85 + ( 175.72 * ( temperature / pow ( 2 , 16 ) ) ) ) ) ) return None
Decode the temperature .
56,390
def decode_humidity ( packet , channel = 1 ) : val = str ( packet . get ( QSDATA , '' ) ) if len ( val ) == 12 and val . startswith ( '34' ) and channel == 1 : humidity = int ( val [ 4 : - 4 ] , 16 ) return round ( float ( - 6 + ( 125 * ( humidity / pow ( 2 , 16 ) ) ) ) ) return None
Decode the humidity .
56,391
def update_devices ( self , devices ) : for qspacket in devices : try : qsid = qspacket [ QS_ID ] except KeyError : _LOGGER . debug ( "Device without ID: %s" , qspacket ) continue if qsid not in self : self [ qsid ] = QSDev ( data = qspacket ) dev = self [ qsid ] dev . data = qspacket newqs = _legacy_status ( qspacket [ QS_VALUE ] ) if dev . is_dimmer : newqs = min ( round ( math . pow ( newqs , self . dim_adj ) ) , 100 ) newin = round ( newqs * _MAX / 100 ) if abs ( dev . value - newin ) > 1 : _LOGGER . debug ( "%s qs=%s , qsid , newqs , newin ) dev . value = newin self . _cb_value_changed ( self , qsid , newin )
Update device values from the URL_DEVICES response, firing the value-changed callback for devices whose value changed.
56,392
def geist_replay ( wrapped , instance , args , kwargs ) : path_parts = [ ] file_parts = [ ] if hasattr ( wrapped , '__module__' ) : module = wrapped . __module__ module_file = sys . modules [ module ] . __file__ root , _file = os . path . split ( module_file ) path_parts . append ( root ) _file , _ = os . path . splitext ( _file ) file_parts . append ( _file ) if hasattr ( wrapped , '__objclass__' ) : file_parts . append ( wrapped . __objclass__ . __name__ ) elif hasattr ( wrapped , '__self__' ) : file_parts . append ( wrapped . __self__ . __class__ . __name__ ) file_parts . append ( wrapped . __name__ + '.log' ) path_parts . append ( '_' . join ( file_parts ) ) filename = os . path . join ( * path_parts ) if is_in_record_mode ( ) : platform_backend = get_platform_backend ( ) backend = RecordingBackend ( source_backend = platform_backend , recording_filename = filename ) else : backend = PlaybackBackend ( recording_filename = filename ) gui = GUI ( backend ) return wrapped ( gui , * args , ** kwargs )
Wraps a test of other function and injects a Geist GUI which will enable replay ( set environment variable GEIST_REPLAY_MODE to record to active record mode .
56,393
def _nbinom_ztrunc_p ( mu , k_agg ) : p_eq = lambda p , mu , k_agg : ( k_agg * p ) / ( 1 - ( 1 + p ) ** - k_agg ) - mu p = optim . brentq ( p_eq , 1e-10 , 1e10 , args = ( mu , k_agg ) ) return p
Calculates p parameter for truncated negative binomial
56,394
def _ln_choose ( n , k_agg ) : gammaln = special . gammaln return gammaln ( n + 1 ) - ( gammaln ( k_agg + 1 ) + gammaln ( n - k_agg + 1 ) )
Log binomial coefficient computed with gamma-function factorials. n and k_agg may be ints or arrays; if both are arrays they must have the same length.
56,395
def _solve_k_from_mu ( data , k_array , nll , * args ) : nll_array = np . zeros ( len ( k_array ) ) for i in range ( len ( k_array ) ) : nll_array [ i ] = nll ( data , k_array [ i ] , * args ) min_nll_idx = np . argmin ( nll_array ) return k_array [ min_nll_idx ]
For given args return k_agg from searching some k_range .
56,396
def _expon_solve_lam_from_mu ( mu , b ) : def lam_eq ( lam , mu , b ) : lam , mu , b = Decimal ( lam ) , Decimal ( mu ) , Decimal ( b ) return ( ( 1 - ( lam * b + 1 ) * np . exp ( - lam * b ) ) / ( lam - lam * np . exp ( - lam * b ) + Decimal ( 1e-32 ) ) - mu ) return optim . brentq ( lam_eq , - 100 , 100 , args = ( mu , b ) , disp = True )
For the expon_uptrunc given mu and b return lam . Similar to geom_uptrunc
56,397
def _make_rank ( dist_obj , n , mu , sigma , crit = 0.5 , upper = 10000 , xtol = 1 ) : qs = ( np . arange ( 1 , n + 1 ) - 0.5 ) / n rank = np . empty ( len ( qs ) ) brute_ppf = lambda val , prob : prob - dist_obj . cdf ( val , mu , sigma ) qs_less = qs <= crit ind = np . sum ( qs_less ) rank [ qs_less ] = dist_obj . ppf ( qs [ qs_less ] , mu , sigma ) for i , tq in enumerate ( qs [ ~ qs_less ] ) : j = ind + i try : rank [ j ] = np . abs ( np . ceil ( optim . brentq ( brute_ppf , - 1 , upper , args = ( tq , ) , xtol = xtol ) ) ) except ValueError : rank [ j : ] = np . repeat ( rank [ j - 1 ] , len ( rank [ j : ] ) ) break return rank
Make rank distribution using both ppf and brute force .
56,398
def _mean_var ( vals , pmf ) : mean = np . sum ( vals * pmf ) var = np . sum ( vals ** 2 * pmf ) - mean ** 2 return mean , var
Calculates the mean and variance from vals and pmf
56,399
def _pdf_w_mean ( self , x , mean , sigma ) : mu , sigma = self . translate_args ( mean , sigma ) return self . logpdf ( x , mu , sigma )
Calculates the pdf of a lognormal distribution with parameters mean and sigma