idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
7,400
def iter_filtered_dir_entry ( dir_entries , match_patterns , on_skip ) :
    """Filter DirEntryPath instances with the given patterns.

    Yields a DirEntryPath for entries that match no pattern, and None
    for entries that were skipped (so the caller can count skips).
    """
    def match ( dir_entry_path , match_patterns , on_skip ) :
        # Return True (after notifying on_skip) when any pattern matches.
        for match_pattern in match_patterns :
            if dir_entry_path . path_instance . match ( match_pattern ) :
                on_skip ( dir_entry_path , match_pattern )
                return True
        return False
    for entry in dir_entries :
        try :
            dir_entry_path = DirEntryPath ( entry )
        except FileNotFoundError as err :
            # Entry vanished between listing and stat; log and move on.
            log . error ( "Can't make DirEntryPath() instance: %s" % err )
            continue
        if match ( dir_entry_path , match_patterns , on_skip ) :
            yield None
        else :
            yield dir_entry_path
Filter a list of DirEntryPath instances with the given pattern
7,401
def parse_pagination ( headers ) :
    """Parse HTTP response headers to create a pagination object.

    Reads the ``Link`` header, maps each link's rel to its "page" query
    values, and builds a _Navigation of previous/next/last/current/first.
    """
    # NOTE(review): parse_qs is applied to the whole href, so "page" is only
    # found when the href is effectively a query string — confirm upstream.
    links = { link . rel : parse_qs ( link . href ) . get ( "page" , None ) for link in link_header . parse ( headers . get ( "Link" , "" ) ) . links }
    return _Navigation (
        links . get ( "previous" , [ None ] ) [ 0 ] ,
        links . get ( "next" , [ None ] ) [ 0 ] ,
        links . get ( "last" , [ None ] ) [ 0 ] ,
        links . get ( "current" , [ None ] ) [ 0 ] ,
        links . get ( "first" , [ None ] ) [ 0 ]
    )
Parses headers to create a pagination objects
7,402
def parse_uri ( uri , endpoint_uri ) :
    """Parse a URI into a _Route namedtuple of (absolute path, query dict)."""
    parsed = urlparse ( uri )
    absolute_path = urljoin ( endpoint_uri , parsed . path )
    return _Route ( absolute_path , parse_qs ( parsed . query ) )
Parse a URI into a Route namedtuple
7,403
def _cryptodome_cipher ( key , iv ) :
    """Build a Pycryptodome AES cipher in CFB mode with a 128-bit segment."""
    return AES . new ( key , AES . MODE_CFB , iv , segment_size = 128 )
Build a Pycryptodome AES Cipher object .
7,404
def _cryptography_cipher ( key , iv ) :
    """Build a ``cryptography`` AES Cipher object in CFB mode."""
    return Cipher ( algorithm = algorithms . AES ( key ) , mode = modes . CFB ( iv ) , backend = default_backend ( ) )
Build a cryptography AES Cipher object .
7,405
def make_xml_node ( graph , name , close = False , attributes = None , text = "" , complete = False , innerXML = "" ) :
    """Create an XML node string.

    :param graph: RDF graph whose namespace manager qualifies *name*
    :param close: when True (and not *complete*), emit a closing tag
    :param attributes: optional dict of attribute name/value pairs
    :param text: escaped text content (used only with *complete*)
    :param complete: when True, emit a full <tag>text innerXML</tag> element
    :param innerXML: raw XML appended after the escaped text
    :return: the serialized tag or element as a string
    """
    name = graph . namespace_manager . qname ( name )
    if complete :
        if attributes is not None :
            return "<{0} {1}>{2}{3}</{0}>" . format ( name , " " . join ( [ "{}=\"{}\"" . format ( attr_name , attr_value ) for attr_name , attr_value in attributes . items ( ) ] ) , escape ( text ) , innerXML )
        return "<{0}>{1}{2}</{0}>" . format ( name , escape ( text ) , innerXML )
    elif close is True :
        return "</{}>" . format ( name )
    elif attributes is not None :
        # NOTE(review): attribute values are not escaped here — confirm
        # callers never pass values containing quotes.
        return "<{} {}>" . format ( name , " " . join ( [ "{}=\"{}\"" . format ( attr_name , attr_value ) for attr_name , attr_value in attributes . items ( ) ] ) )
    return "<{}>" . format ( name )
Create an XML Node
7,406
def performXpath ( parent , xpath ) :
    """Perform an XPath on an element.

    Returns ``(first result, loop)`` where *loop* is True when the query
    had to fall back to a descendant search (".//") rewritten as a child
    predicate, meaning the caller must loop to find the target.
    """
    loop = False
    if xpath . startswith ( ".//" ) :
        # Try the direct-child form first; fall back to a predicate search.
        result = parent . xpath ( xpath . replace ( ".//" , "./" , 1 ) , namespaces = XPATH_NAMESPACES )
        if len ( result ) == 0 :
            result = parent . xpath ( "*[{}]" . format ( xpath ) , namespaces = XPATH_NAMESPACES )
            loop = True
    else :
        result = parent . xpath ( xpath , namespaces = XPATH_NAMESPACES )
    return result [ 0 ] , loop
Perform an XPath on an element and indicate if we need to loop over it to find something
7,407
def copyNode ( node , children = False , parent = False ) :
    """Copy an XML node (TEI namespace), optionally with its children.

    When *parent* is given the copy is attached to it as a SubElement;
    otherwise a detached Element is returned.
    """
    if parent is not False :
        element = SubElement ( parent , node . tag , attrib = node . attrib , nsmap = { None : "http://www.tei-c.org/ns/1.0" } )
    else :
        element = Element ( node . tag , attrib = node . attrib , nsmap = { None : "http://www.tei-c.org/ns/1.0" } )
    if children :
        if node . text :
            element . _setText ( node . text )
        # Deep-copy every child (and trailing text) into the new element.
        for child in xmliter ( node ) :
            element . append ( copy ( child ) )
    return element
Copy an XML Node
7,408
def normalizeXpath ( xpath ) :
    """Normalize an XPath that has been split around slashes.

    Empty components (produced by "//" in the original string) are folded
    into the following component as a leading slash.
    """
    normalized = [ ]
    previous = None
    for component in xpath :
        if previous == "" :
            normalized . append ( "/" + component )
        elif component :
            normalized . append ( component )
        previous = component
    return normalized
Normalize XPATH split around slashes
7,409
def passageLoop ( parent , new_tree , xpath1 , xpath2 = None , preceding_siblings = False , following_siblings = False ) :
    """Loop over passages to construct and increment a new tree.

    Recursively copies the nodes between the elements matched by *xpath1*
    and *xpath2* (inclusive) from *parent* into *new_tree*.  With only
    *xpath1*, the sibling flags select everything before/after the match.
    """
    current_1 , queue_1 = __formatXpath__ ( xpath1 )
    if xpath2 is None :
        # Single-boundary mode: copy the match and, optionally, siblings.
        result_1 , loop = performXpath ( parent , current_1 )
        if loop is True :
            # Fallback search: keep the whole xpath for the recursion.
            queue_1 = xpath1
        central = None
        has_no_queue = len ( queue_1 ) == 0
        if preceding_siblings or following_siblings :
            for sibling in xmliter ( parent ) :
                if sibling == result_1 :
                    central = True
                    child = copyNode ( result_1 , children = has_no_queue , parent = new_tree )
                    if not has_no_queue :
                        passageLoop ( result_1 , child , queue_1 , None , preceding_siblings = preceding_siblings , following_siblings = following_siblings )
                    if preceding_siblings :
                        break
                elif not central and preceding_siblings :
                    copyNode ( sibling , parent = new_tree , children = True )
                elif central and following_siblings :
                    copyNode ( sibling , parent = new_tree , children = True )
    else :
        # Range mode: copy from the xpath1 match to the xpath2 match.
        result_1 , loop = performXpath ( parent , current_1 )
        if loop is True :
            queue_1 = xpath1
            if xpath2 == xpath1 :
                current_2 , queue_2 = current_1 , queue_1
            else :
                current_2 , queue_2 = __formatXpath__ ( xpath2 )
        else :
            current_2 , queue_2 = __formatXpath__ ( xpath2 )
        if xpath1 != xpath2 :
            result_2 , loop = performXpath ( parent , current_2 )
            if loop is True :
                queue_2 = xpath2
        else :
            result_2 = result_1
        if result_1 == result_2 :
            # Both boundaries land on the same node: recurse inside it.
            has_no_queue = len ( queue_1 ) == 0
            child = copyNode ( result_1 , children = has_no_queue , parent = new_tree )
            if not has_no_queue :
                passageLoop ( result_1 , child , queue_1 , queue_2 )
        else :
            # Copy start node (right side), everything between, then the
            # end node (left side).
            start = False
            for sibling in xmliter ( parent ) :
                if start :
                    if sibling == result_2 :
                        break
                    else :
                        copyNode ( sibling , parent = new_tree , children = True )
                elif sibling == result_1 :
                    start = True
                    has_no_queue_1 = len ( queue_1 ) == 0
                    node = copyNode ( sibling , children = has_no_queue_1 , parent = new_tree )
                    if not has_no_queue_1 :
                        passageLoop ( sibling , node , queue_1 , None , following_siblings = True )
            continue_loop = len ( queue_2 ) == 0
            node = copyNode ( result_2 , children = continue_loop , parent = new_tree )
            if not continue_loop :
                passageLoop ( result_2 , node , queue_2 , None , preceding_siblings = True )
    return new_tree
Loop over passages to construct and increment new tree given a parent and XPaths
7,410
def get_label ( self , lang = None ) :
    """Return the label for the given language, or any default.

    With ``lang=None`` the first label found is returned.  Otherwise the
    first label matching *lang* is returned; failing that, the last label
    seen (or None when the graph holds no labels).
    """
    x = None
    if lang is None :
        for obj in self . graph . objects ( self . asNode ( ) , RDFS . label ) :
            return obj
    for obj in self . graph . objects ( self . asNode ( ) , RDFS . label ) :
        x = obj
        if x . language == lang :
            return x
    return x
Return label for given lang or any default
7,411
def parents ( self ) -> List [ "Collection" ] :
    """Return the parents of the current collection, closest first."""
    lineage = [ ]
    ancestor = self . parent
    while ancestor is not None :
        lineage . append ( ancestor )
        ancestor = ancestor . parent
    return lineage
Iterator to find parents of current collection from closest to furthest
7,412
def _add_member ( self , member ) : if member . id in self . children : return None else : self . children [ member . id ] = member
Does not add member if it already knows it .
7,413
def export_base_dts ( cls , graph , obj , nsm ) :
    """Export the base DTS information in a simple reusable dict.

    Builds the JSON-LD skeleton (@id, @type, hydra title/totalItems) and
    attaches the hydra description found in *graph*.
    """
    o = { "@id" : str ( obj . asNode ( ) ) , "@type" : nsm . qname ( obj . type ) , nsm . qname ( RDF_NAMESPACES . HYDRA . title ) : str ( obj . get_label ( ) ) , nsm . qname ( RDF_NAMESPACES . HYDRA . totalItems ) : obj . size }
    # Only the last description survives when several are present.
    for desc in graph . objects ( obj . asNode ( ) , RDF_NAMESPACES . HYDRA . description ) :
        o [ nsm . qname ( RDF_NAMESPACES . HYDRA . description ) ] = str ( desc )
    return o
Export the base DTS information in a simple reusable way
7,414
def get_subject ( self , lang = None ) :
    """Return the DC subject of the object, optionally for a given language."""
    return self . metadata . get_single ( key = DC . subject , lang = lang )
Get the subject of the object
7,415
def get_context ( self ) :
    """Return the template rendering context.

    The context contains the form's cleaned data plus the current request.

    :raises ValueError: when the form does not validate
    """
    if not self . is_valid ( ) :
        raise ValueError ( "Cannot generate Context when form is invalid." )
    cleaned = self . cleaned_data
    return dict ( request = self . request , ** cleaned )
Context sent to templates for rendering includes the form's cleaned data and also the current Request object .
7,416
def zharkov_panh ( v , temp , v0 , a0 , m , n , z , t_ref = 300. , three_r = 3. * constants . R ) :
    """Calculate pressure from anharmonicity for the Zharkov equation.

    Implements the anharmonic thermal-pressure term from Dorogokupets 2015;
    the result is the difference between the contribution at *temp* and at
    the reference temperature *t_ref*, in GPa.
    """
    v_mol = vol_uc2mol ( v , z )
    a = a0 * np . power ( v / v0 , m )

    def pressure_term ( t ) :
        # 1.e-9 converts Pa to GPa.
        return three_r * n / 2. * a * m / v_mol * np . power ( t , 2. ) * 1.e-9

    return pressure_term ( temp ) - pressure_term ( t_ref )
calculate pressure from anharmonicity for Zharkov equation the equation is from Dorogokupets 2015
7,417
def split_words ( line ) :
    """Return the list of normalized words contained in *line*."""
    # Presumably inserts a space between the two groups matched by
    # _NORM_REGEX (e.g. camelCase boundaries) — confirm against the regex.
    line = _NORM_REGEX . sub ( r'\1 \2' , line )
    return [ normalize ( w ) for w in _WORD_REGEX . split ( line ) ]
Return the list of words contained in a line .
7,418
def add ( self , files ) :
    """Add files to check.

    Accepts either a single filename (str) or an iterable of filenames.
    """
    # isinstance() is the idiomatic type check and, unlike comparing
    # __class__.__name__, also accepts str subclasses.
    if isinstance ( files , str ) :
        self . _files . append ( files )
    else :
        self . _files . extend ( files )
Adds files to check .
7,419
def check ( self ) :
    """Check the files for misspellings.

    :return: ``(errors, results)`` where *errors* are I/O error messages and
        *results* are ``[filename, line_number, word]`` hits
    """
    errors = [ ]
    results = [ ]
    for fn in self . _files :
        if not os . path . isdir ( fn ) :
            try :
                with open ( fn , 'r' ) as f :
                    line_ct = 1
                    for line in f :
                        for word in split_words ( line ) :
                            if ( word in self . _misspelling_dict or word . lower ( ) in self . _misspelling_dict ) :
                                results . append ( [ fn , line_ct , word ] )
                        line_ct += 1
            except UnicodeDecodeError :
                # Binary or mis-encoded file: skip silently.
                pass
            except IOError :
                errors . append ( '%s' % sys . exc_info ( ) [ 1 ] )
    return errors , results
Checks the files for misspellings .
7,420
def suggestions ( self , word ) :
    """Return a sorted list of suggestions for a misspelled *word*.

    Suggestions are collected for both the word as given and its
    lower-cased form, then re-cased to match the original.
    """
    candidates = set ( self . _misspelling_dict . get ( word , [ ] ) )
    candidates |= set ( self . _misspelling_dict . get ( word . lower ( ) , [ ] ) )
    return sorted ( same_case ( source = word , destination = w ) for w in candidates )
Returns a list of suggestions for a misspelled word .
7,421
def dump_misspelling_list ( self ) :
    """Return a list of [misspelled_word, correction] pairs, sorted by word."""
    return [
        [ bad_word , correction ]
        for bad_word in sorted ( self . _misspelling_dict )
        for correction in self . _misspelling_dict [ bad_word ]
    ]
Returns a list of misspelled words and corrections .
7,422
def status ( self ) :
    """Get the status of the Alerting Service.

    Renames the service's header-style keys to snake_case before wrapping
    the dict in a Status object.
    """
    orig_dict = self . _get ( self . _service_url ( 'status' ) )
    orig_dict [ 'implementation_version' ] = orig_dict . pop ( 'Implementation-Version' )
    orig_dict [ 'built_from_git_sha1' ] = orig_dict . pop ( 'Built-From-Git-SHA1' )
    return Status ( orig_dict )
Get the status of Alerting Service
7,423
def cli ( ctx , report , semantic , rcfile ) :
    """Query or manipulate smother reports.

    Stores the shared CLI options on the click context for subcommands.
    """
    ctx . obj = { 'report' : report , 'semantic' : semantic , 'rcfile' : rcfile , }
Query or manipulate smother reports
7,424
def lookup ( ctx , path ) :
    """Determine which tests intersect a source interval."""
    regions = parse_intervals ( path , as_context = ctx . obj [ 'semantic' ] )
    _report_from_regions ( regions , ctx . obj )
Determine which tests intersect a source interval .
7,425
def diff ( ctx , branch ) :
    """Determine which tests intersect a git diff against *branch*."""
    diff = GitDiffReporter ( branch )
    regions = diff . changed_intervals ( )
    # old_file lets the report resolve pre-change file contents.
    _report_from_regions ( regions , ctx . obj , file_factory = diff . old_file )
Determine which tests intersect a git diff .
7,426
def combine ( ctx , src , dst ) :
    """Combine several smother reports into one via in-place union."""
    c = coverage . Coverage ( config_file = ctx . obj [ 'rcfile' ] )
    result = Smother ( c )
    for infile in src :
        result |= Smother . load ( infile )
    result . write ( dst )
Combine several smother reports .
7,427
def convert_to_relative_paths ( src , dst ) :
    """Convert all file paths in a smother report to paths relative to cwd."""
    result = Smother . convert_to_relative_paths ( Smother . load ( src ) )
    result . write ( dst )
Converts all file paths in a smother report to relative paths relative to the current directory .
7,428
def csv ( ctx , dst ) :
    """Flatten a coverage file into a CSV of source_context, test_context rows."""
    sm = Smother . load ( ctx . obj [ 'report' ] )
    semantic = ctx . obj [ 'semantic' ]
    writer = _csv . writer ( dst , lineterminator = '\n' )
    # Header written directly so it is never quoted by the csv writer.
    dst . write ( "source_context, test_context\n" )
    writer . writerows ( sm . iter_records ( semantic = semantic ) )
Flatten a coverage file into a CSV of source_context , test_context rows
7,429
def erase ( ctx ) :
    """Erase the existing smother report, if one exists."""
    report_path = ctx . obj [ 'report' ]
    if os . path . exists ( report_path ) :
        os . remove ( report_path )
Erase the existing smother report .
7,430
def to_coverage ( ctx ) :
    """Produce a .coverage file from a smother file."""
    sm = Smother . load ( ctx . obj [ 'report' ] )
    sm . coverage = coverage . coverage ( )
    sm . write_coverage ( )
Produce a . coverage file from a smother file
7,431
def fill_missing_fields ( self , data , columns ) :
    """Add any of *columns* missing from *data*, filled with zeros.

    :param data: pandas DataFrame to (in-place) extend
    :param columns: iterable of required column names
    :return: the same DataFrame, with every column present
    """
    for column in columns :
        if column not in data . columns :
            # scipy.zeros was a deprecated alias of numpy.zeros and has been
            # removed from modern scipy; a scalar 0.0 broadcasts to the same
            # float64 column of zeros.
            data [ column ] = 0.0
    return data
This method fills missing fields with 0s
7,432
def update_field_names ( self , data , matching ) :
    """Rename the columns of *data* according to the *matching* mapping.

    :param data: pandas DataFrame
    :param matching: dict mapping old column names to new ones
    :return: a DataFrame with the matched columns renamed
    """
    for key in matching . keys ( ) :
        if key in data . columns :
            # BUG FIX: DataFrame.rename returns a new frame; the original
            # code discarded the result, so no rename ever took effect.
            data = data . rename ( columns = { key : matching [ key ] } )
    return data
This method updates the names of the fields according to matching
7,433
def format_dates ( self , data , columns ) :
    """Convert the given columns of *data* into datetime values.

    Columns absent from the DataFrame are ignored.
    """
    present = [ c for c in columns if c in data . columns ]
    for column in present :
        data [ column ] = pandas . to_datetime ( data [ column ] )
    return data
This method translates columns values into datetime objects
7,434
def remove_columns ( self , data , columns ) :
    """Drop each of *columns* from *data* when present; return the result."""
    for column in columns :
        if column in data . columns :
            data = data . drop ( columns = [ column ] )
    return data
This method removes columns in data
7,435
def tange_grun ( v , v0 , gamma0 , a , b ) :
    """Calculate the Gruneisen parameter for the Tange equation.

    gamma = gamma0 * (1 + a * ((v/v0)**b - 1))
    """
    compression = v / v0
    return gamma0 * ( 1. + a * ( np . power ( compression , b ) - 1. ) )
calculate Gruneisen parameter for the Tange equation
7,436
def tange_debyetemp ( v , v0 , gamma0 , a , b , theta0 ) :
    """Calculate the Debye temperature for the Tange equation.

    Uses ``unp.exp`` when any input carries uncertainties so error
    propagation is preserved; plain numpy otherwise.
    """
    x = v / v0
    gamma = tange_grun ( v , v0 , gamma0 , a , b )
    if isuncertainties ( [ v , v0 , gamma0 , a , b , theta0 ] ) :
        theta = theta0 * np . power ( x , ( - 1. * ( 1. - a ) * gamma0 ) ) * unp . exp ( ( gamma0 - gamma ) / b )
    else :
        theta = theta0 * np . power ( x , ( - 1. * ( 1. - a ) * gamma0 ) ) * np . exp ( ( gamma0 - gamma ) / b )
    return theta
calculate Debye temperature for the Tange equation
7,437
def tange_pth ( v , temp , v0 , gamma0 , a , b , theta0 , n , z , t_ref = 300. , three_r = 3. * constants . R ) :
    """Calculate the thermal pressure (GPa) for the Tange equation.

    Thermal pressure is (gamma / V_mol) * (E_th(T) - E_th(T_ref)) with the
    Debye thermal energies; 1.e-9 converts Pa to GPa.
    """
    v_mol = vol_uc2mol ( v , z )
    gamma = tange_grun ( v , v0 , gamma0 , a , b )
    theta = tange_debyetemp ( v , v0 , gamma0 , a , b , theta0 )
    xx = theta / temp
    debye = debye_E ( xx )
    if t_ref == 0. :
        # Avoid division by zero at an absolute-zero reference.
        debye0 = 0.
    else :
        xx0 = theta / t_ref
        debye0 = debye_E ( xx0 )
    Eth0 = three_r * n * t_ref * debye0
    Eth = three_r * n * temp * debye
    delEth = Eth - Eth0
    p_th = ( gamma / v_mol * delEth ) * 1.e-9
    return p_th
calculate thermal pressure for the Tange equation
7,438
def _make_passage_kwargs ( urn , reference ) :
    """Little helper used by CapitainsCtsPassage to build parent kwargs.

    Returns a dict with a "urn" key only when *urn* is known, scoped to
    *reference* when one is given.
    """
    kwargs = { }
    if urn is not None :
        if reference is not None :
            kwargs [ "urn" ] = URN ( "{}:{}" . format ( urn . upTo ( URN . VERSION ) , reference ) )
        else :
            kwargs [ "urn" ] = urn
    return kwargs
Little helper used by CapitainsCtsPassage here to comply with parents args
7,439
def getTextualNode ( self , subreference = None , simple = False ) :
    """Find a passage in the current text.

    :param subreference: CtsReference (or str/list coerced into one);
        None retrieves the full simple passage
    :param simple: when True, return a lightweight _SimplePassage
    :raises CitationDepthError: when the reference is deeper than the
        citation scheme
    :return: a CapitainsCtsPassage (or _SimplePassage)
    """
    if subreference is None :
        return self . _getSimplePassage ( )
    # Coerce plain strings and component lists into a CtsReference.
    if not isinstance ( subreference , CtsReference ) :
        if isinstance ( subreference , str ) :
            subreference = CtsReference ( subreference )
        elif isinstance ( subreference , list ) :
            subreference = CtsReference ( "." . join ( subreference ) )
    if len ( subreference . start ) > self . citation . root . depth :
        raise CitationDepthError ( "URN is deeper than citation scheme" )
    if simple is True :
        return self . _getSimplePassage ( subreference )
    if not subreference . is_range ( ) :
        start = end = subreference . start . list
    else :
        start , end = subreference . start . list , subreference . end . list
    citation_start = self . citation . root [ len ( start ) - 1 ]
    citation_end = self . citation . root [ len ( end ) - 1 ]
    # Turn the references into normalized XPath component lists.
    start , end = citation_start . fill ( passage = start ) , citation_end . fill ( passage = end )
    start , end = normalizeXpath ( start . split ( "/" ) [ 2 : ] ) , normalizeXpath ( end . split ( "/" ) [ 2 : ] )
    xml = self . textObject . xml
    if isinstance ( xml , etree . _Element ) :
        root = copyNode ( xml )
    else :
        root = copyNode ( xml . getroot ( ) )
    root = passageLoop ( xml , root , start , end )
    if self . urn :
        urn = URN ( "{}:{}" . format ( self . urn , subreference ) )
    else :
        urn = None
    return CapitainsCtsPassage ( urn = urn , resource = root , text = self , citation = citation_start , reference = subreference )
Finds a passage in the current text
7,440
def _getSimplePassage ( self , reference = None ) :
    """Retrieve a single node representing the passage.

    :raises InvalidURN: when the reference does not resolve to exactly
        one node in the resource
    """
    if reference is None :
        return _SimplePassage ( resource = self . resource , reference = None , urn = self . urn , citation = self . citation . root , text = self )
    subcitation = self . citation . root [ reference . depth - 1 ]
    resource = self . resource . xpath ( subcitation . fill ( reference ) , namespaces = XPATH_NAMESPACES )
    if len ( resource ) != 1 :
        raise InvalidURN
    return _SimplePassage ( resource [ 0 ] , reference = reference , urn = self . urn , citation = subcitation , text = self . textObject )
Retrieve a single node representing the passage .
7,441
def getReffs ( self , level : int = 1 , subreference : CtsReference = None ) -> CtsReferenceSet :
    """Return the references available at a given level.

    Defaults to this object's own reference when no subreference is given.
    """
    if not subreference and hasattr ( self , "reference" ) :
        subreference = self . reference
    elif subreference and not isinstance ( subreference , CtsReference ) :
        subreference = CtsReference ( subreference )
    return self . getValidReff ( level = level , reference = subreference )
CtsReference available at a given level
7,442
def xpath ( self , * args , ** kwargs ) :
    """Run an XPath query on the passage XML.

    ``smart_strings`` defaults to False to avoid lxml keeping the whole
    tree alive through returned strings; callers may override it.
    """
    kwargs . setdefault ( "smart_strings" , False )
    return self . resource . xpath ( * args , ** kwargs )
Perform XPath on the passage XML
7,443
def tostring ( self , * args , ** kwargs ) :
    """Serialize the CapitainsCtsPassage resource to an XML string."""
    return etree . tostring ( self . resource , * args , ** kwargs )
Transform the CapitainsCtsPassage in XML string
7,444
def childIds ( self ) :
    """Return (and cache) the identifiers of this passage's children.

    Returns an empty list when the passage sits at the deepest citation
    level; otherwise the cached result of getReffs().
    """
    if self . depth >= len ( self . citation . root ) :
        return [ ]
    if self . _children is None :
        self . _children = self . getReffs ( )
    return self . _children
Children of the passage
7,445
def location ( hexgrid_type , coord ) :
    """Return a formatted string representing *coord*.

    The format depends on the coordinate type: tiles print their id;
    nodes and edges print "(tile_id direction)".  Unsupported types log a
    warning and return None.
    """
    if hexgrid_type == TILE :
        return str ( coord )
    elif hexgrid_type == NODE :
        tile_id = nearest_tile_to_node ( coord )
        dirn = tile_node_offset_to_direction ( coord - tile_id_to_coord ( tile_id ) )
        return '({} {})' . format ( tile_id , dirn )
    elif hexgrid_type == EDGE :
        tile_id = nearest_tile_to_edge ( coord )
        dirn = tile_edge_offset_to_direction ( coord - tile_id_to_coord ( tile_id ) )
        return '({} {})' . format ( tile_id , dirn )
    else :
        logging . warning ( 'unsupported hexgrid_type={}' . format ( hexgrid_type ) )
        return None
Returns a formatted string representing the coordinate . The format depends on the coordinate type .
7,446
def coastal_edges ( tile_id ) :
    """Return the list of coastal edge coordinates of *tile_id*.

    An edge is coastal when no tile lies across it in that direction.
    """
    edges = list ( )
    tile_coord = tile_id_to_coord ( tile_id )
    for edge_coord in edges_touching_tile ( tile_id ) :
        dirn = tile_edge_offset_to_direction ( edge_coord - tile_coord )
        if tile_id_in_direction ( tile_id , dirn ) is None :
            edges . append ( edge_coord )
    return edges
Returns a list of coastal edge coordinate .
7,447
def tile_id_in_direction ( from_tile_id , direction ) :
    """Variant on direction_to_tile: returns None when there's no tile there."""
    coord_from = tile_id_to_coord ( from_tile_id )
    for offset , dirn in _tile_tile_offsets . items ( ) :
        if dirn == direction :
            coord_to = coord_from + offset
            if coord_to in legal_tile_coords ( ) :
                return tile_id_from_coord ( coord_to )
    return None
Variant on direction_to_tile . Returns None if there's no tile there .
7,448
def direction_to_tile ( from_tile_id , to_tile_id ) :
    """Return the direction of the offset between two adjacent tiles.

    Convenience wrapper around tile_tile_offset_to_direction; the tiles
    must be adjacent.
    """
    offset = tile_id_to_coord ( to_tile_id ) - tile_id_to_coord ( from_tile_id )
    return tile_tile_offset_to_direction ( offset )
Convenience method wrapping tile_tile_offset_to_direction . Used to get the direction of the offset between two tiles . The tiles must be adjacent .
7,449
def edge_coord_in_direction ( tile_id , direction ) :
    """Return the edge coordinate in *direction* at tile *tile_id*.

    :raises ValueError: when no touching edge lies in that direction
    """
    tile_coord = tile_id_to_coord ( tile_id )
    for edge_coord in edges_touching_tile ( tile_id ) :
        if tile_edge_offset_to_direction ( edge_coord - tile_coord ) == direction :
            return edge_coord
    raise ValueError ( 'No edge found in direction={} at tile_id={}' . format ( direction , tile_id ) )
Returns the edge coordinate in the given direction at the given tile identifier .
7,450
def node_coord_in_direction ( tile_id , direction ) :
    """Return the node coordinate in *direction* at tile *tile_id*.

    :raises ValueError: when no touching node lies in that direction
    """
    tile_coord = tile_id_to_coord ( tile_id )
    for node_coord in nodes_touching_tile ( tile_id ) :
        if tile_node_offset_to_direction ( node_coord - tile_coord ) == direction :
            return node_coord
    raise ValueError ( 'No node found in direction={} at tile_id={}' . format ( direction , tile_id ) )
Returns the node coordinate in the given direction at the given tile identifier .
7,451
def tile_id_from_coord ( coord ) :
    """Convert a tile coordinate to its corresponding tile identifier.

    :raises Exception: when the coordinate is not in the map
    """
    # Linear reverse lookup of the id->coord mapping.
    for i , c in _tile_id_to_coord . items ( ) :
        if c == coord :
            return i
    raise Exception ( 'Tile id lookup failed, coord={} not found in map' . format ( hex ( coord ) ) )
Convert a tile coordinate to its corresponding tile identifier .
7,452
def nearest_tile_to_edge_using_tiles ( tile_ids , edge_coord ) :
    """Return the first tile in *tile_ids* adjacent to the given edge.

    Logs a critical message and implicitly returns None when no tile
    touches the edge.
    """
    for tile_id in tile_ids :
        if edge_coord - tile_id_to_coord ( tile_id ) in _tile_edge_offsets . keys ( ) :
            return tile_id
    logging . critical ( 'Did not find a tile touching edge={}' . format ( edge_coord ) )
Get the first tile found adjacent to the given edge . Returns a tile identifier .
7,453
def nearest_tile_to_node_using_tiles ( tile_ids , node_coord ) :
    """Return the first tile in *tile_ids* adjacent to the given node.

    Logs a critical message and implicitly returns None when no tile
    touches the node.
    """
    for tile_id in tile_ids :
        if node_coord - tile_id_to_coord ( tile_id ) in _tile_node_offsets . keys ( ) :
            return tile_id
    logging . critical ( 'Did not find a tile touching node={}' . format ( node_coord ) )
Get the first tile found adjacent to the given node . Returns a tile identifier .
7,454
def edges_touching_tile ( tile_id ) :
    """Return the list of edge coordinates touching the given tile."""
    coord = tile_id_to_coord ( tile_id )
    return [ coord + offset for offset in _tile_edge_offsets ]
Get a list of edge coordinates touching the given tile .
7,455
def nodes_touching_tile ( tile_id ) :
    """Return the list of node coordinates touching the given tile."""
    coord = tile_id_to_coord ( tile_id )
    return [ coord + offset for offset in _tile_node_offsets ]
Get a list of node coordinates touching the given tile .
7,456
def nodes_touching_edge ( edge_coord ) :
    """Return the two node coordinates lying on the given edge coordinate."""
    a , b = hex_digit ( edge_coord , 1 ) , hex_digit ( edge_coord , 2 )
    # Edge orientation is encoded in the parity of the two hex digits.
    if a % 2 == 0 and b % 2 == 0 :
        return [ coord_from_hex_digits ( a , b + 1 ) , coord_from_hex_digits ( a + 1 , b ) ]
    else :
        return [ coord_from_hex_digits ( a , b ) , coord_from_hex_digits ( a + 1 , b + 1 ) ]
Returns the two node coordinates which are on the given edge coordinate .
7,457
def legal_edge_coords ( ) :
    """Return the set of all legal edge coordinates on the grid."""
    edges = {
        edge
        for tile_id in legal_tile_ids ( )
        for edge in edges_touching_tile ( tile_id )
    }
    logging . debug ( 'Legal edge coords({})={}' . format ( len ( edges ) , edges ) )
    return edges
Return all legal edge coordinates on the grid .
7,458
def legal_node_coords ( ) :
    """Return the set of all legal node coordinates on the grid."""
    nodes = {
        node
        for tile_id in legal_tile_ids ( )
        for node in nodes_touching_tile ( tile_id )
    }
    logging . debug ( 'Legal node coords({})={}' . format ( len ( nodes ) , nodes ) )
    return nodes
Return all legal node coordinates on the grid
7,459
def make ( parser ) :
    """Provision Manila with HA: wire the CLI subcommands onto *parser*."""
    s = parser . add_subparsers ( title = 'commands' , metavar = 'COMMAND' , help = 'description' , )
    # Each closure forwards the parsed args to its implementation.
    def create_manila_db_f ( args ) :
        create_manila_db ( args )
    create_manila_db_parser = create_manila_db_subparser ( s )
    create_manila_db_parser . set_defaults ( func = create_manila_db_f )
    def create_service_credentials_f ( args ) :
        create_service_credentials ( args )
    create_service_credentials_parser = create_service_credentials_subparser ( s )
    create_service_credentials_parser . set_defaults ( func = create_service_credentials_f )
    def install_f ( args ) :
        install ( args )
    install_parser = install_subparser ( s )
    install_parser . set_defaults ( func = install_f )
provison Manila with HA
7,460
def assoc ( self , index , value ) :
    """Return a new tree with *value* associated at *index*.

    The receiver is left untouched (persistent-structure update).
    """
    newnode = LookupTreeNode ( index , value )
    newtree = LookupTree ( )
    newtree . root = _assoc_down ( self . root , newnode , 0 )
    return newtree
Return a new tree with value associated at index .
7,461
def remove ( self , index ) :
    """Return a new tree with *index* removed (receiver left untouched)."""
    newtree = LookupTree ( )
    newtree . root = _remove_down ( self . root , index , 0 )
    return newtree
Return new tree with index removed .
7,462
def insert ( self , index , value ) :
    """Insert a node in-place.

    It is highly suggested that you do not use this method — it mutates
    the tree; use :meth:`assoc` instead.  Walks the trie by index bits,
    splitting a leaf into a branch on collision.
    """
    newnode = LookupTreeNode ( index , value )
    level = 0
    node = self . root
    while True :
        ind = _getbits ( newnode . index , level )
        level += 1
        child = node . children [ ind ]
        if child is None or child . index == newnode . index :
            if child :
                # Re-inserting an existing index must carry the same value.
                assert child . value == newnode . value
            node . children [ ind ] = newnode
            break
        elif child . index == _root_index :
            # Internal branch node: descend.
            node = child
        else :
            # Leaf collision: split into a branch holding both nodes.
            branch = LookupTreeNode ( )
            nind = _getbits ( newnode . index , level )
            cind = _getbits ( child . index , level )
            node . children [ ind ] = branch
            if nind == cind :
                # Still colliding at the next level; keep descending.
                branch . children [ cind ] = child
                node = branch
            else :
                branch . children [ nind ] = newnode
                branch . children [ cind ] = child
                break
Insert a node in - place . It is highly suggested that you do not use this method . Use assoc instead
7,463
def reset ( cls ) :
    """Reset the registry to the standard codecs."""
    cls . _codecs = { }
    c = cls . _codec
    for ( name , encode , decode ) in cls . _common_codec_data :
        cls . _codecs [ name ] = c ( encode , decode )
Reset the registry to the standard codecs .
7,464
def register ( cls , name , encode , decode ) :
    """Add a codec (encode/decode pair) to the registry under *name*."""
    cls . _codecs [ name ] = cls . _codec ( encode , decode )
Add a codec to the registry .
7,465
def default_formatter ( handler , item , value ) :
    """Default formatter: convert *value* to an escaped string."""
    # Prefer a legacy __unicode__ representation when the object has one.
    if hasattr ( value , '__unicode__' ) :
        value = value . __unicode__ ( )
    return escape ( str ( value ) )
Default formatter . Convert value to string .
7,466
def list_formatter ( handler , item , value ) :
    """Format a list as a comma-separated string."""
    return u', ' . join ( map ( str , value ) )
Format list .
7,467
def format_value ( handler , item , column ) :
    """Format the value of *item*'s attribute *column*.

    Dispatches on the value's type via the FORMATTERS table, falling back
    to default_formatter.
    """
    value = getattr ( item , column , None )
    formatter = FORMATTERS . get ( type ( value ) , default_formatter )
    return formatter ( handler , item , value )
Format value .
7,468
def make_regex ( string ) :
    """Return a regex string for an optionally signed binary or privative feature.

    "+name" matches an optionally '+'-prefixed name, "-name" matches the
    '-'-prefixed name, and a bare name matches itself.

    :raises ValueError: for empty names or names containing '+'/'-'
    """
    signed = bool ( string ) and string [ 0 ] in '+-'
    name = string [ 1 : ] if signed else string
    if not name or '+' in name or '-' in name :
        raise ValueError ( 'inappropriate feature name: %r' % string )
    if not signed :
        return r'(%s)' % name
    if string [ 0 ] == '+' :
        return r'([+]?%s)' % name
    return r'(-%s)' % name
Regex string for optionally signed binary or privative feature .
7,469
def substring_names ( features ) :
    """Yield all (shorter, longer) feature-name pairs in substring relation.

    Signs are stripped and duplicates removed before comparison.
    """
    names = tools . uniqued ( map ( remove_sign , features ) )
    for l , r in permutations ( names , 2 ) :
        if l in r :
            yield ( l , r )
Yield all feature name pairs in substring relation .
7,470
def join ( self , featuresets ) :
    """Return the nearest featureset that subsumes all given ones."""
    concepts = ( f . concept for f in featuresets )
    join = self . lattice . join ( concepts )
    return self . _featuresets [ join . index ]
Return the nearest featureset that subsumes all given ones .
7,471
def meet ( self , featuresets ) :
    """Return the nearest featureset that implies all given ones."""
    concepts = ( f . concept for f in featuresets )
    meet = self . lattice . meet ( concepts )
    return self . _featuresets [ meet . index ]
Return the nearest featureset that implies all given ones .
7,472
def upset_union ( self , featuresets ) :
    """Yield all featuresets that subsume any of the given ones."""
    concepts = ( f . concept for f in featuresets )
    indexes = ( c . index for c in self . lattice . upset_union ( concepts ) )
    return map ( self . _featuresets . __getitem__ , indexes )
Yield all featuresets that subsume any of the given ones .
7,473
def graphviz ( self , highlight = None , maximal_label = None , topdown = None , filename = None , directory = None , render = False , view = False ) :
    """Return the system lattice visualization as graphviz source."""
    return visualize . featuresystem ( self , highlight , maximal_label , topdown , filename , directory , render , view )
Return the system lattice visualization as graphviz source .
7,474
def soap_action ( self , service , action , payloadbody ) :
    """Do a SOAP request.

    :return: the parsed XML reply on success, or False on any transport,
        HTTP, or XML-parsing failure (details left on last_exception /
        last_response)
    """
    payload = self . soapenvelope . format ( body = payloadbody ) . encode ( 'utf-8' )
    headers = { "Host" : self . url , "Content-Type" : "text/xml; charset=UTF-8" , "Cache-Control" : "no-cache" , "Content-Length" : str ( len ( payload ) ) , "SOAPAction" : action }
    try :
        self . last_exception = None
        response = requests . post ( url = self . url + service , headers = headers , data = payload , cookies = self . cookies )
    except requests . exceptions . RequestException as exp :
        self . last_exception = exp
        return False
    if response . status_code != 200 :
        self . last_response = response
        return False
    # Persist session cookies for subsequent requests.
    self . cookies = response . cookies
    try :
        xdoc = xml . etree . ElementTree . fromstring ( response . text )
    except xml . etree . ElementTree . ParseError as exp :
        self . last_exception = exp
        self . last_response = response
        return False
    return xdoc
Do a soap request .
7,475
def getValidReff ( self , level = 1 , reference = None ) :
    """Compute the valid references of the text at *level*.

    ``level=-1`` means the deepest level of the citation scheme.
    Returns the reference part (after the last colon) of each URN.
    """
    if reference :
        urn = "{0}:{1}" . format ( self . urn , reference )
    else :
        urn = str ( self . urn )
    if level == - 1 :
        level = len ( self . citation )
    xml = self . retriever . getValidReff ( level = level , urn = urn )
    xml = xmlparser ( xml )
    self . _parse_request ( xml . xpath ( "//ti:request" , namespaces = XPATH_NAMESPACES ) [ 0 ] )
    return [ ref . split ( ":" ) [ - 1 ] for ref in xml . xpath ( "//ti:reply//ti:urn/text()" , namespaces = XPATH_NAMESPACES ) ]
Given a resource CtsText will compute valid reffs
7,476
def getTextualNode ( self , subreference = None ) :
    """Retrieve a passage from the retriever and wrap it in a CtsPassage.

    *subreference* may be a URN, a CtsReference, a string (full URN when
    it contains ':'), a list of components, or None for the whole text.
    """
    if isinstance ( subreference , URN ) :
        urn = str ( subreference )
    elif isinstance ( subreference , CtsReference ) :
        urn = "{0}:{1}" . format ( self . urn , str ( subreference ) )
    elif isinstance ( subreference , str ) :
        if ":" in subreference :
            urn = subreference
        else :
            urn = "{0}:{1}" . format ( self . urn . upTo ( URN . NO_PASSAGE ) , subreference )
    elif isinstance ( subreference , list ) :
        urn = "{0}:{1}" . format ( self . urn , "." . join ( subreference ) )
    else :
        urn = str ( self . urn )
    response = xmlparser ( self . retriever . getPassage ( urn = urn ) )
    self . _parse_request ( response . xpath ( "//ti:request" , namespaces = XPATH_NAMESPACES ) [ 0 ] )
    return CtsPassage ( urn = urn , resource = response , retriever = self . retriever )
Retrieve a passage and store it in the object
7,477
def getPassagePlus ( self , reference = None ) :
    """Retrieve a passage plus its surrounding metadata.

    Also updates this text's citation scheme from the reply label.
    """
    if reference :
        urn = "{0}:{1}" . format ( self . urn , reference )
    else :
        urn = str ( self . urn )
    response = xmlparser ( self . retriever . getPassagePlus ( urn = urn ) )
    passage = CtsPassage ( urn = urn , resource = response , retriever = self . retriever )
    passage . _parse_request ( response . xpath ( "//ti:reply/ti:label" , namespaces = XPATH_NAMESPACES ) [ 0 ] )
    self . citation = passage . citation
    return passage
Retrieve a passage and informations around it and store it in the object
7,478
def _parse_request ( self , xml ) :
    """Parse a CTS request reply and populate this text's metadata.

    Extracts groupname, title, label, and description in every language
    present, and ingests the citation scheme if none is set yet.
    """
    for node in xml . xpath ( ".//ti:groupname" , namespaces = XPATH_NAMESPACES ) :
        lang = node . get ( "xml:lang" ) or CtsText . DEFAULT_LANG
        self . metadata . add ( RDF_NAMESPACES . CTS . groupname , lang = lang , value = node . text )
        self . set_creator ( node . text , lang )
    for node in xml . xpath ( ".//ti:title" , namespaces = XPATH_NAMESPACES ) :
        lang = node . get ( "xml:lang" ) or CtsText . DEFAULT_LANG
        self . metadata . add ( RDF_NAMESPACES . CTS . title , lang = lang , value = node . text )
        self . set_title ( node . text , lang )
    for node in xml . xpath ( ".//ti:label" , namespaces = XPATH_NAMESPACES ) :
        lang = node . get ( "xml:lang" ) or CtsText . DEFAULT_LANG
        self . metadata . add ( RDF_NAMESPACES . CTS . label , lang = lang , value = node . text )
        self . set_subject ( node . text , lang )
    for node in xml . xpath ( ".//ti:description" , namespaces = XPATH_NAMESPACES ) :
        lang = node . get ( "xml:lang" ) or CtsText . DEFAULT_LANG
        self . metadata . add ( RDF_NAMESPACES . CTS . description , lang = lang , value = node . text )
        self . set_description ( node . text , lang )
    # Only ingest the top-level citation (not nested ones), and only once.
    if not self . citation . is_set ( ) and xml . xpath ( "//ti:citation" , namespaces = XPATH_NAMESPACES ) :
        self . citation = CtsCollection . XmlCtsCitation . ingest ( xml , xpath = ".//ti:citation[not(ancestor::ti:citation)]" )
Parse a request with metadata information
7,479
def getLabel ( self ) :
    """Retrieve metadata about the text and return the metadata object."""
    response = xmlparser ( self . retriever . getLabel ( urn = str ( self . urn ) ) )
    self . _parse_request ( response . xpath ( "//ti:reply/ti:label" , namespaces = XPATH_NAMESPACES ) [ 0 ] )
    return self . metadata
Retrieve metadata about the text
7,480
def getPrevNextUrn(self, reference):
    """Get the previous and next URNs around a reference of the text.

    :param reference: Passage reference within this text
    :return: Tuple of (previous, next) passage references
    """
    # Strip any passage component from the text URN before appending the reference.
    base = str(URN(str(self.urn)).upTo(URN.NO_PASSAGE))
    reply = self.retriever.getPrevNextUrn(urn="{}:{}".format(base, str(reference)))
    previous, following = _SharedMethod.prevnext(reply)
    return previous, following
Get the previous and next URNs of a reference of the text
7,481
def getFirstUrn(self, reference=None):
    """Get the first child URN for a given resource.

    :param reference: Either a full URN (contains ":") or a passage
        reference relative to this text; defaults to the text itself
    :return: First child passage reference
    """
    if reference is None:
        target = str(self.urn)
    elif ":" in reference:
        # A colon means the caller already supplied a complete URN.
        target = reference
    else:
        base = str(URN(str(self.urn)).upTo(URN.NO_PASSAGE))
        target = "{}:{}".format(base, str(reference))
    return _SharedMethod.firstUrn(self.retriever.getFirstUrn(target))
Get the first children URN for a given resource
7,482
def firstUrn(resource):
    """Parse a GetFirstUrn reply and extract the first passage reference.

    :param resource: Raw XML reply from the GetFirstUrn API call
    :return: The reference part of the first URN, or None when the reply
        contains no URN

    Bug fix: the original fell through to ``urn.split(":")`` even when the
    xpath matched nothing, raising AttributeError on a list; an empty reply
    now yields None instead.
    """
    resource = xmlparser(resource)
    urn = resource.xpath(
        "//ti:reply/ti:urn/text()", namespaces=XPATH_NAMESPACES, magic_string=True
    )
    if len(urn) > 0:
        return str(urn[0]).split(":")[-1]
    return None
Parse a resource to get the first URN
7,483
def prevnext(resource):
    """Parse a GetPrevNextUrn reply for the previous and next references.

    :param resource: Raw XML reply from the GetPrevNextUrn API call
    :return: Tuple (previous, next); both False when no ti:prevnext node
        is present, None when the node is present but a side is empty
    """
    previous, following = False, False
    parsed = xmlparser(resource)
    containers = parsed.xpath("//ti:prevnext", namespaces=XPATH_NAMESPACES)
    if len(containers) > 0:
        # A prevnext node exists: default each side to None until found.
        previous, following = None, None
        container = containers[0]
        next_texts = container.xpath(
            "ti:next/ti:urn/text()", namespaces=XPATH_NAMESPACES, smart_strings=False
        )
        prev_texts = container.xpath(
            "ti:prev/ti:urn/text()", namespaces=XPATH_NAMESPACES, smart_strings=False
        )
        if len(next_texts):
            following = next_texts[0].split(":")[-1]
        if len(prev_texts):
            previous = prev_texts[0].split(":")[-1]
    return previous, following
Parse a resource to get the prev and next urn
7,484
def prevId(self):
    """Previous passage identifier, fetched lazily on first access.

    :return: Reference of the preceding passage
    """
    # False marks "never fetched"; None is a valid "no previous passage".
    if self._prev_id is False:
        self._prev_id, self._next_id = self.getPrevNextUrn(reference=self.urn.reference)
    return self._prev_id
Previous passage Identifier
7,485
def nextId(self):
    """Following passage identifier, fetched lazily on first access.

    :return: Reference of the following passage
    """
    # False marks "never fetched"; None is a valid "no next passage".
    if self._next_id is False:
        self._prev_id, self._next_id = self.getPrevNextUrn(reference=self.urn.reference)
    return self._next_id
Shortcut for getting the following passage identifier
7,486
def siblingsId(self):
    """Previous and next passage identifiers, fetched together lazily.

    :return: Tuple (previous, next) of passage references
    """
    # Refresh whenever either side has never been fetched (False sentinel).
    if self._next_id is False or self._prev_id is False:
        self._prev_id, self._next_id = self.getPrevNextUrn(reference=self.urn.reference)
    return self._prev_id, self._next_id
Shortcut for getting the previous and next passage identifier
7,487
def _parse(self):
    """Given self.resource, split information from the CTS API reply."""
    # Keep the full reply around, then narrow resource to the TEI payload.
    self.response = self.resource
    self.resource = self.resource.xpath(
        "//ti:passage/tei:TEI", namespaces=XPATH_NAMESPACES
    )[0]
    self._prev_id, self._next_id = _SharedMethod.prevnext(self.response)
    # Citation is searched in the TEI payload but ingested from the full reply.
    if not self.citation.is_set() and len(
        self.resource.xpath("//ti:citation", namespaces=XPATH_NAMESPACES)
    ):
        self.citation = CtsCollection.XmlCtsCitation.ingest(
            self.response, xpath=".//ti:citation[not(ancestor::ti:citation)]"
        )
Given self . resource split information from the CTS API
7,488
def get_user_token(self):
    """Gets an authorization token for session reuse.

    :return: Token string on HTTP 200
    :raises requests.HTTPError: on any error status
    """
    request_headers = {
        'User-Agent': self.user_agent(),
        'Host': self.domain(),
        'Accept': '*/*',
    }
    request_headers.update(self.headers())
    response = requests.get(
        self.portals_url() + '/users/_this/token',
        headers=request_headers,
        auth=self.auth(),
    )
    if HTTP_STATUS.OK == response.status_code:
        return response.text
    print("get_user_token: Something went wrong: <{0}>: {1}".format(
        response.status_code, response.reason))
    response.raise_for_status()
Gets an authorization token for session reuse .
7,489
def add_device(self, model, serial):
    """Returns device object of newly created device.

    :param model: Device model name
    :param serial: Device serial number
    :raises requests.HTTPError: when the device is not created
    """
    payload = {
        'model': model,
        'vendor': self.vendor(),
        'sn': serial,
        'type': 'vendor',
    }
    request_headers = {'User-Agent': self.user_agent()}
    request_headers.update(self.headers())
    response = requests.post(
        self.portals_url() + '/portals/' + self.portal_id() + '/devices',
        data=json.dumps(payload),
        headers=request_headers,
        auth=self.auth(),
    )
    if HTTP_STATUS.ADDED == response.status_code:
        return dictify_device_meta(response.json())
    print("add_device: Something went wrong: <{0}>: {1}".format(
        response.status_code, response.reason))
    response.raise_for_status()
Returns device object of newly created device .
7,490
def update_portal(self, portal_obj):
    """Implements the update-portal Portals API call.

    :param portal_obj: Portal object to PUT back to the API
    :return: The updated portal object as returned by the API
    :raises requests.HTTPError: on any error status
    """
    request_headers = {'User-Agent': self.user_agent()}
    request_headers.update(self.headers())
    response = requests.put(
        self.portals_url() + '/portals/' + self.portal_id(),
        data=json.dumps(portal_obj),
        headers=request_headers,
        auth=self.auth(),
    )
    if HTTP_STATUS.OK == response.status_code:
        return response.json()
    print("update_portal: Something went wrong: <{0}>: {1}".format(
        response.status_code, response.reason))
    response.raise_for_status()
Implements the Update device Portals API .
7,491
def get_device(self, rid):
    """Retrieve the device object for a given RID.

    :param rid: Resource identifier of the device
    :raises requests.HTTPError: on any error status
    """
    request_headers = {
        'User-Agent': self.user_agent(),
        'Content-Type': self.content_type(),
    }
    request_headers.update(self.headers())
    response = requests.get(
        self.portals_url() + '/devices/' + rid,
        headers=request_headers,
        auth=self.auth(),
    )
    if HTTP_STATUS.OK == response.status_code:
        return response.json()
    print("get_device: Something went wrong: <{0}>: {1}".format(
        response.status_code, response.reason))
    response.raise_for_status()
Retrieve the device object for a given RID .
7,492
def get_multiple_devices(self, rids):
    """Implements the Get Multiple Devices API.

    :param rids: Iterable of device RIDs; serialized into the URL with
        quotes and spaces stripped from its repr
    :raises requests.HTTPError: on any error status
    """
    request_headers = {
        'User-Agent': self.user_agent(),
        'Content-Type': self.content_type(),
    }
    request_headers.update(self.headers())
    # e.g. ['a', 'b'] -> [a,b] as expected by the endpoint.
    rid_list = str(rids).replace("'", "").replace(' ', '')
    response = requests.get(
        self.portals_url() + '/users/_this/devices/' + rid_list,
        headers=request_headers,
        auth=self.auth(),
    )
    if HTTP_STATUS.OK == response.status_code:
        return response.json()
    print("get_multiple_devices: Something went wrong: <{0}>: {1}".format(
        response.status_code, response.reason))
    response.raise_for_status()
Implements the Get Multiple Devices API .
7,493
def dorogokupets2015_pth(v, temp, v0, gamma0, gamma_inf, beta, theta01, m1,
                         theta02, m2, n, z, t_ref=300.,
                         three_r=3. * constants.R):
    """Calculate thermal pressure for the Dorogokupets 2015 EOS.

    :param v: unit-cell volume
    :param temp: temperature
    :param v0: reference unit-cell volume
    :param gamma0: Grueneisen parameter at reference volume
    :param gamma_inf: Grueneisen parameter at infinite compression
    :param beta: Altshuler volume-dependence parameter
    :param theta01: first characteristic temperature at reference volume
    :param m1: weight of the first oscillator term
    :param theta02: second characteristic temperature at reference volume
    :param m2: weight of the second oscillator term
    :param n: number of atoms per formula unit
    :param z: number of formula units per unit cell
    :param t_ref: reference temperature (default 300.)
    :param three_r: 3R constant (default 3 * gas constant)
    :return: thermal pressure relative to t_ref (1.e-9 scaling as in the
        original, presumably Pa -> GPa -- TODO confirm units upstream)

    Refactored: the original spelled out four near-identical oscillator
    expressions twice (uncertainty-aware and plain numpy branches); they
    are now produced by one helper with the exp function chosen once.
    Numerical results are unchanged.
    """
    v_mol = vol_uc2mol(v, z)
    gamma = altshuler_grun(v, v0, gamma0, gamma_inf, beta)
    theta1 = altshuler_debyetemp(v, v0, gamma0, gamma_inf, beta, theta01)
    theta2 = altshuler_debyetemp(v, v0, gamma0, gamma_inf, beta, theta02)
    # Use uncertainty-aware exp only when any input carries uncertainties.
    if isuncertainties([v, temp, v0, gamma0, gamma_inf, beta,
                        theta01, m1, theta02, m2]):
        exp = unp.exp
    else:
        exp = np.exp

    def oscillator_term(m, theta, t):
        # Weighted oscillator contribution at temperature t.
        return m / (m1 + m2) * three_r * n * gamma / v_mol * \
            (theta / (exp(theta / t) - 1.))

    p_th = (oscillator_term(m1, theta1, temp) * 1.e-9
            + oscillator_term(m2, theta2, temp) * 1.e-9)
    p_th_ref = (oscillator_term(m1, theta1, t_ref) * 1.e-9
                + oscillator_term(m2, theta2, t_ref) * 1.e-9)
    return p_th - p_th_ref
calculate thermal pressure for Dorogokupets 2015 EOS
7,494
def routes(self):
    """Retrieves the main routes of the DTS Collection endpoint.

    The result is cached on the instance after the first request.

    :return: Mapping of route name ("collections", "documents",
        "navigation") to parsed route
    """
    if self._routes:
        return self._routes
    response = requests.get(self.endpoint)
    response.raise_for_status()
    payload = response.json()
    self._routes = {
        key: parse_uri(payload[key], self.endpoint)
        for key in ("collections", "documents", "navigation")
    }
    return self._routes
Retrieves the main routes of the DTS Collection
7,495
def get_collection(self, collection_id=None, nav="children", page=None):
    """Makes a call on the Collection API.

    :param collection_id: Identifier of the collection to retrieve
    :param nav: Navigation direction passed to the endpoint
    :param page: Page of the result set to fetch
    :return: Result of the underlying endpoint call
    """
    query = {"id": collection_id, "nav": nav, "page": page}
    fallback = {"id": None, "nav": "children", "page": 1}
    return self.call("collections", query, defaults=fallback)
Makes a call on the Collection API
7,496
def _create_glance_db(self, root_db_pass, glance_db_pass):
    """Create the glance database and grant the glance user privileges.

    Runs three mysql commands over sudo: create the database, then grant
    all privileges on it to 'glance'@'localhost' and 'glance'@'%'.

    NOTE(review): both passwords are interpolated straight into the mysql
    command line; they appear in the remote process list and must not
    contain shell metacharacters -- confirm this is acceptable here.

    :param root_db_pass: MySQL root password
    :param glance_db_pass: password assigned to the 'glance' DB user
    """
    # Python 2 print statement (this module predates print-as-function).
    print red(env.host_string + ' | Create glance database')
    sudo("mysql -uroot -p{0} -e \"CREATE DATABASE glance;\"".format(root_db_pass), shell=False)
    sudo("mysql -uroot -p{0} -e \"GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '{1}';\"".format(root_db_pass, glance_db_pass), shell=False)
    sudo("mysql -uroot -p{0} -e \"GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '{1}';\"".format(root_db_pass, glance_db_pass), shell=False)
Create the glance database
7,497
def chain(self, token: 'CancelToken') -> 'CancelToken':
    """Return a new CancelToken chaining this and the given token.

    The combined token is considered triggered when either source token
    triggers.

    :raises EventLoopMismatch: when the two tokens live on different loops
    """
    if self.loop != token._loop:
        raise EventLoopMismatch(
            "Chained CancelToken objects must be on the same event loop")
    combined = CancelToken(":".join([self.name, token.name]), loop=self.loop)
    combined._chain.extend([self, token])
    return combined
Return a new CancelToken chaining this and the given token .
7,498
def triggered_token(self) -> 'CancelToken':
    """Return the token which was triggered.

    :return: This token when triggered directly, the innermost triggered
        token of the chain otherwise, or None when nothing has triggered.
    """
    if self._triggered.is_set():
        return self
    for chained in self._chain:
        if chained.triggered:
            # Recurse so the innermost triggered token is reported.
            return chained.triggered_token
    return None
Return the token which was triggered .
7,499
def triggered(self) -> bool:
    """Return whether this token, or any token in its chain, has triggered."""
    return self._triggered.is_set() or any(t.triggered for t in self._chain)
Return True or False whether this token has been triggered .