idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
50,700 | def _write_config ( self , memory ) : memory . seek ( 0 ) memory . write ( struct . pack ( "<II" , self . _simulator . length , self . output . routing_key ) ) memory . write ( bitarray ( self . stimulus . ljust ( self . _simulator . length , "0" ) , endian = "little" ) . tobytes ( ) ) | Write the configuration for this stimulus to memory . |
50,701 | def execute_notebook ( nb_path , pkg_dir , dataframes , write_notebook = False , env = None ) : import nbformat from metapack . jupyter . preprocessors import AddEpilog , AddProlog from metapack . jupyter . exporters import ExecutePreprocessor , Config from os . path import dirname , join , splitext , basename from nbconvert . preprocessors . execute import CellExecutionError with open ( nb_path , encoding = 'utf8' ) as f : nb = nbformat . read ( f , as_version = 4 ) root , ext = splitext ( basename ( nb_path ) ) c = Config ( ) nb , resources = AddProlog ( config = c , env = env or { } ) . preprocess ( nb , { } ) nb , resources = AddEpilog ( config = c , pkg_dir = pkg_dir , dataframes = dataframes , ) . preprocess ( nb , { } ) def _write_notebook ( nb_path , root , ext , write_notebook ) : if write_notebook : if write_notebook is True : exec_nb_path = join ( dirname ( nb_path ) , root + '-executed' + ext ) else : exec_nb_path = write_notebook with open ( exec_nb_path , 'w' , encoding = 'utf8' ) as f : nbformat . write ( nb , f ) _write_notebook ( nb_path , root , ext , write_notebook ) try : ep = ExecutePreprocessor ( config = c ) ep . timeout = 5 * 60 nb , _ = ep . preprocess ( nb , { 'metadata' : { 'path' : dirname ( nb_path ) } } ) except ( CellExecutionError , TimeoutError ) as e : err_nb_path = join ( dirname ( nb_path ) , root + '-errors' + ext ) with open ( err_nb_path , 'w' , encoding = 'utf8' ) as f : nbformat . write ( nb , f ) raise CellExecutionError ( "Errors executing noteboook. See notebook at {} for details.\n{}" . format ( err_nb_path , '' ) ) except ImportError as e : raise NotebookError ( "Failed to import a library required for notebook execution: {}" . format ( str ( e ) ) ) _write_notebook ( nb_path , root , ext , write_notebook ) return nb | Execute a notebook after adding the prolog and epilog . Can also add %mt_materialize magics to write dataframes to files |
50,702 | def convert_documentation ( nb_path ) : with open ( nb_path ) as f : nb = nbformat . reads ( f . read ( ) , as_version = 4 ) doc = ExtractInlineMetatabDoc ( package_url = "metapack+file:" + dirname ( nb_path ) ) . run ( nb ) package_name = doc . as_version ( None ) output_dir = join ( getcwd ( ) , package_name ) de = DocumentationExporter ( config = Config ( ) , log = logger , metadata = doc_metadata ( doc ) ) prt ( 'Converting documentation' ) output , resources = de . from_filename ( nb_path ) fw = FilesWriter ( ) fw . build_directory = join ( output_dir , 'docs' ) fw . write ( output , resources , notebook_name = 'notebook' ) prt ( "Wrote documentation to {}" . format ( fw . build_directory ) ) | Run only the document conversion portion of the notebook conversion |
50,703 | def doc_metadata ( doc ) : r = doc [ 'Root' ] . as_dict ( ) r . update ( doc [ 'Contacts' ] . as_dict ( ) ) r [ 'author' ] = r . get ( 'author' , r . get ( 'creator' , r . get ( 'wrangler' ) ) ) return r | Create a metadata dict from a MetatabDoc for Document conversion |
50,704 | def extract_notebook_metatab ( nb_path : Path ) : from metatab . rowgenerators import TextRowGenerator import nbformat with nb_path . open ( ) as f : nb = nbformat . read ( f , as_version = 4 ) lines = '\n' . join ( [ 'Declare: metatab-latest' ] + [ get_cell_source ( nb , tag ) for tag in [ 'metadata' , 'resources' , 'schema' ] ] ) doc = MetapackDoc ( TextRowGenerator ( lines ) ) doc [ 'Root' ] . get_or_new_term ( 'Root.Title' ) . value = get_cell_source ( nb , 'Title' ) . strip ( '#' ) . strip ( ) doc [ 'Root' ] . get_or_new_term ( 'Root.Description' ) . value = get_cell_source ( nb , 'Description' ) doc [ 'Documentation' ] . get_or_new_term ( 'Root.Readme' ) . value = get_cell_source ( nb , 'readme' ) return doc | Extract the metatab lines from a notebook and return a Metapack doc |
50,705 | def add_resource ( self , ref , ** properties ) : raise NotImplementedError ( "Still uses decompose_url" ) du = Bunch ( decompose_url ( ref ) ) added = [ ] if du . proto == 'file' and isdir ( ref ) : for f in self . find_files ( ref , [ 'csv' ] ) : if f . endswith ( DEFAULT_METATAB_FILE ) : continue if self . _doc . find_first ( 'Root.Datafile' , value = f ) : self . prt ( "Datafile exists for '{}', ignoring" . format ( f ) ) else : added . extend ( self . add_resource ( f , ** properties ) ) else : self . prt ( "Enumerating '{}'" . format ( ref ) ) for c in enumerate_contents ( ref , self . _cache ) : added . append ( self . add_single_resource ( c . rebuild_url ( ) , ** properties ) ) return added | Add one or more resources entities from a url and property values possibly adding multiple entries for an excel spreadsheet or ZIP file |
50,706 | def _clean_doc ( self , doc = None ) : if doc is None : doc = self . doc resources = doc [ 'Resources' ] for arg in [ 'startline' , 'headerlines' , 'encoding' ] : for e in list ( resources . args ) : if e . lower ( ) == arg : resources . args . remove ( e ) for term in resources : term [ 'startline' ] = None term [ 'headerlines' ] = None term [ 'encoding' ] = None schema = doc [ 'Schema' ] for arg in [ 'altname' , 'transform' ] : for e in list ( schema . args ) : if e . lower ( ) == arg : schema . args . remove ( e ) for table in self . doc . find ( 'Root.Table' ) : for col in table . find ( 'Column' ) : try : col . value = col [ 'altname' ] . value except : pass col [ 'altname' ] = None col [ 'transform' ] = None return doc | Clean the doc before writing it removing unnecessary properties and doing other operations . |
50,707 | def _load_resources ( self , abs_path = False ) : from metapack . doc import MetapackDoc assert type ( self . doc ) == MetapackDoc for r in self . datafiles : if r . term_is ( 'root.sql' ) : if not r . value : self . warn ( "No value for SQL URL for {} " . format ( r . term ) ) continue try : self . _load_resource ( r , abs_path ) except Exception as e : if r . props . get ( 'ignoreerrors' ) : self . warn ( f"Ignoring errors for {r.name}: {str(e)}" ) pass else : raise e else : if not r . url : self . warn ( "No value for URL for {} " . format ( r . term ) ) continue try : if self . _resource . exists ( r ) : self . prt ( "Resource '{}' exists, skipping" . format ( r . name ) ) continue except AttributeError : pass self . prt ( "Reading resource {} from {} " . format ( r . name , r . resolved_url ) ) try : if not r . headers : raise PackageError ( "Resource {} does not have header. Have schemas been generated?" . format ( r . name ) ) except AttributeError : raise PackageError ( "Resource '{}' of type {} does not have a headers property" . format ( r . url , type ( r ) ) ) try : self . _load_resource ( r , abs_path ) except Exception as e : if r . props . get ( 'ignoreerrors' ) : self . warn ( f"Ignoring errors for {r.name}: {str(e)}" ) pass else : raise e | Copy all of the Datafile entries into the package |
50,708 | def _load_documentation_files ( self ) : for t in self . doc . find ( [ 'Root.Documentation' , 'Root.Image' , 'Root.Notebook' ] ) : resource = self . _get_ref_contents ( t ) if not resource : continue if t . term_is ( 'Root.Documentation' ) : real_name_base , ext = splitext ( resource . resource_file ) name = t . get_value ( 'name' ) if t . get_value ( 'name' ) else real_name_base real_name = slugify ( name ) + ext self . _load_documentation ( t , resource . read ( ) , resource . resource_file ) t = self . doc . find_first ( 'Root.Readme' ) if t and ( t . value or '' ) . strip ( ) : t [ 'title' ] = 'Readme' readme = '# ' + ( self . doc . get_value ( 'Root.Title' ) or '' ) . strip ( ) if self . doc . description : readme += '\n\n' + ( self . doc . description or '' ) . strip ( ) if ( t . value or '' ) . strip ( ) : readme += '\n\n' + ( t . value or '' ) . strip ( ) self . _load_documentation ( t , readme . encode ( 'utf8' ) , 'README.md' ) | Copy all of the Datafile |
50,709 | def _load_files ( self ) : def copy_dir ( path ) : for ( dr , _ , files ) in walk ( path ) : for fn in files : if '__pycache__' in fn : continue relpath = dr . replace ( self . source_dir , '' ) . strip ( '/' ) src = parse_app_url ( join ( dr , fn ) ) dest = join ( relpath , fn ) resource = src . get_resource ( ) self . _load_file ( dest , resource . read ( ) ) for term in self . resources ( term = 'Root.Pythonlib' ) : uv = parse_app_url ( term . value ) ur = parse_app_url ( self . source_dir ) if ur . proto == 'file' and uv . proto == 'file' : path = join ( self . source_dir , uv . path ) if isdir ( path ) : copy_dir ( path ) else : f = self . _get_ref_contents ( term ) try : self . _load_file ( term . value , f . read ( ) ) except Exception as e : raise PackageError ( "Failed to load file for '{}': {} " . format ( term . value , e ) ) nb_dir = join ( self . source_dir , 'notebooks' ) if exists ( nb_dir ) and isdir ( nb_dir ) : copy_dir ( nb_dir ) | Load other files |
50,710 | def row_generator ( resource , doc , env , * args , ** kwargs ) : yield 'a b c' . split ( ) for i in range ( 10 ) : yield [ i , i * 2 , i * 3 ] | An example row generator function . |
50,711 | def example_transform ( v , row , row_n , i_s , i_d , header_s , header_d , scratch , errors , accumulator ) : return str ( v ) + '-foo' | An example column transform . |
50,712 | def search_index_file ( ) : from metapack import Downloader from os import environ return environ . get ( 'METAPACK_SEARCH_INDEX' , Downloader . get_instance ( ) . cache . getsyspath ( 'index.json' ) ) | Return the default local index file from the download cache |
50,713 | def write ( self ) : index_file = self . path new_index_file = index_file + '.new' bak_index_file = index_file + '.bak' if not self . _db : return with open ( new_index_file , 'w' ) as f : json . dump ( self . _db , f , indent = 4 ) if exists ( index_file ) : copy ( index_file , bak_index_file ) rename ( new_index_file , index_file ) | Safely write the index data to the index file |
50,714 | def update ( self , o ) : self . open ( ) try : self . _db . update ( o . _db ) except AttributeError : self . _db . update ( o ) | Update from another index or index dict |
50,715 | def bounds ( ctx , tile ) : click . echo ( '%s %s %s %s' % TilePyramid ( ctx . obj [ 'grid' ] , tile_size = ctx . obj [ 'tile_size' ] , metatiling = ctx . obj [ 'metatiling' ] ) . tile ( * tile ) . bounds ( pixelbuffer = ctx . obj [ 'pixelbuffer' ] ) ) | Print Tile bounds . |
50,716 | def bbox ( ctx , tile ) : geom = TilePyramid ( ctx . obj [ 'grid' ] , tile_size = ctx . obj [ 'tile_size' ] , metatiling = ctx . obj [ 'metatiling' ] ) . tile ( * tile ) . bbox ( pixelbuffer = ctx . obj [ 'pixelbuffer' ] ) if ctx . obj [ 'output_format' ] in [ 'WKT' , 'Tile' ] : click . echo ( geom ) elif ctx . obj [ 'output_format' ] == 'GeoJSON' : click . echo ( geojson . dumps ( geom ) ) | Print Tile bounding box as geometry . |
50,717 | def tile ( ctx , point , zoom ) : tile = TilePyramid ( ctx . obj [ 'grid' ] , tile_size = ctx . obj [ 'tile_size' ] , metatiling = ctx . obj [ 'metatiling' ] ) . tile_from_xy ( * point , zoom = zoom ) if ctx . obj [ 'output_format' ] == 'Tile' : click . echo ( '%s %s %s' % tile . id ) elif ctx . obj [ 'output_format' ] == 'WKT' : click . echo ( tile . bbox ( pixelbuffer = ctx . obj [ 'pixelbuffer' ] ) ) elif ctx . obj [ 'output_format' ] == 'GeoJSON' : click . echo ( geojson . dumps ( geojson . FeatureCollection ( [ geojson . Feature ( geometry = tile . bbox ( pixelbuffer = ctx . obj [ 'pixelbuffer' ] ) , properties = dict ( zoom = tile . zoom , row = tile . row , col = tile . col ) ) ] ) ) ) | Print Tile containing POINT .. |
50,718 | def tiles ( ctx , bounds , zoom ) : tiles = TilePyramid ( ctx . obj [ 'grid' ] , tile_size = ctx . obj [ 'tile_size' ] , metatiling = ctx . obj [ 'metatiling' ] ) . tiles_from_bounds ( bounds , zoom = zoom ) if ctx . obj [ 'output_format' ] == 'Tile' : for tile in tiles : click . echo ( '%s %s %s' % tile . id ) elif ctx . obj [ 'output_format' ] == 'WKT' : for tile in tiles : click . echo ( tile . bbox ( pixelbuffer = ctx . obj [ 'pixelbuffer' ] ) ) elif ctx . obj [ 'output_format' ] == 'GeoJSON' : click . echo ( '{\n' ' "type": "FeatureCollection",\n' ' "features": [' ) try : tile = next ( tiles ) while True : gj = ' %s' % geojson . Feature ( geometry = tile . bbox ( pixelbuffer = ctx . obj [ 'pixelbuffer' ] ) , properties = dict ( zoom = tile . zoom , row = tile . row , col = tile . col ) ) try : tile = next ( tiles ) click . echo ( gj + ',' ) except StopIteration : click . echo ( gj ) raise except StopIteration : pass click . echo ( ' ]\n' '}' ) | Print Tiles from bounds . |
50,719 | def snap_bbox ( ctx , bounds , zoom ) : click . echo ( box ( * tilematrix . snap_bounds ( bounds = bounds , tile_pyramid = TilePyramid ( ctx . obj [ 'grid' ] , tile_size = ctx . obj [ 'tile_size' ] , metatiling = ctx . obj [ 'metatiling' ] ) , zoom = zoom , pixelbuffer = ctx . obj [ 'pixelbuffer' ] ) ) ) | Snap bbox to tile grid . |
50,720 | def place ( vertices_resources , nets , machine , constraints , random = default_random ) : machine = machine . copy ( ) placements = { } vertices_resources , nets , constraints , substitutions = apply_same_chip_constraints ( vertices_resources , nets , constraints ) for constraint in constraints : if isinstance ( constraint , LocationConstraint ) : location = constraint . location if location not in machine : raise InvalidConstraintError ( "Chip requested by {} unavailable" . format ( machine ) ) vertex = constraint . vertex placements [ vertex ] = location resources = vertices_resources [ vertex ] machine [ location ] = subtract_resources ( machine [ location ] , resources ) if overallocated ( machine [ location ] ) : raise InsufficientResourceError ( "Cannot meet {}" . format ( constraint ) ) elif isinstance ( constraint , ReserveResourceConstraint ) : apply_reserve_resource_constraint ( machine , constraint ) movable_vertices = [ v for v in vertices_resources if v not in placements ] locations = set ( machine ) for vertex in movable_vertices : while True : if len ( locations ) == 0 : raise InsufficientResourceError ( "Ran out of chips while attempting to place vertex " "{}" . format ( vertex ) ) location = random . sample ( locations , 1 ) [ 0 ] resources_if_placed = subtract_resources ( machine [ location ] , vertices_resources [ vertex ] ) if overallocated ( resources_if_placed ) : locations . remove ( location ) else : placements [ vertex ] = location machine [ location ] = resources_if_placed break finalise_same_chip_constraints ( substitutions , placements ) return placements | A random placer . |
50,721 | def _initial_placement ( movable_vertices , vertices_resources , machine , random ) : locations = list ( machine ) random . shuffle ( locations ) location_iter = iter ( locations ) movable_vertices = list ( v for v in vertices_resources if v in movable_vertices ) random . shuffle ( movable_vertices ) vertex_iter = iter ( movable_vertices ) placement = { } try : location = next ( location_iter ) except StopIteration : raise InsufficientResourceError ( "No working chips in system." ) while True : try : vertex = next ( vertex_iter ) except StopIteration : break while True : resources_if_placed = subtract_resources ( machine [ location ] , vertices_resources [ vertex ] ) if overallocated ( resources_if_placed ) : try : location = next ( location_iter ) continue except StopIteration : raise InsufficientResourceError ( "Ran out of chips while attempting to place vertex " "{}" . format ( vertex ) ) else : placement [ vertex ] = location machine [ location ] = resources_if_placed break return placement | For internal use . Produces a random sequential initial placement updating the resource availabilities of every core in the supplied machine . |
50,722 | def _bibliography ( doc , terms , converters = [ ] , format = 'html' ) : output_backend = 'latex' if format == 'latex' else MetatabHtmlBackend def mk_cite ( v ) : for c in converters : r = c ( v ) if r is not False : return r return make_citation_dict ( v ) if isinstance ( doc , MetatabDoc ) : d = [ mk_cite ( t ) for t in terms ] cd = { e [ 'name_link' ] : e for e in d } else : cd = { k : mk_cite ( v , i ) for i , ( k , v ) in enumerate ( doc . items ( ) ) } return PybtexEngine ( ) . format_from_string ( safe_dump ( { 'entries' : cd } ) , style = MetatabStyle , output_backend = output_backend , bib_format = 'yaml' ) | Render citations from a document or a doct of dicts |
50,723 | def markdown ( doc , title = True , template = 'short_documentation.md' ) : from jinja2 import Environment , PackageLoader , select_autoescape env = Environment ( loader = PackageLoader ( 'metapack' , 'support/templates' ) ) context = display_context ( doc ) return env . get_template ( template ) . render ( ** context ) | Markdown specifically for the Notes field in a CKAN dataset |
50,724 | def breadth_first_vertex_order ( vertices_resources , nets ) : if len ( vertices_resources ) == 0 : return vertex_neighbours = defaultdict ( set ) for net in nets : vertex_neighbours [ net . source ] . update ( net ) for sink in net . sinks : vertex_neighbours [ sink ] . update ( net ) unplaced_vertices = set ( vertices_resources ) vertex_queue = deque ( ) while vertex_queue or unplaced_vertices : if not vertex_queue : vertex_queue . append ( unplaced_vertices . pop ( ) ) vertex = vertex_queue . popleft ( ) yield vertex vertex_queue . extend ( v for v in vertex_neighbours [ vertex ] if v in unplaced_vertices ) unplaced_vertices . difference_update ( vertex_neighbours [ vertex ] ) | A generator which iterates over a set of vertices in a breadth - first order in terms of connectivity . |
50,725 | def place ( vertices_resources , nets , machine , constraints , chip_order = None ) : return sequential_place ( vertices_resources , nets , machine , constraints , breadth_first_vertex_order ( vertices_resources , nets ) , chip_order ) | Places vertices in breadth - first order onto chips in the machine . |
50,726 | def download ( self , output = "" , outputFile = "" , silent = True ) : image = urllib . urlopen ( self . imageLink ) . read ( ) if output != "" : output = os . path . abspath ( os . path . expanduser ( output ) ) if output == "" or not os . path . exists ( output ) : output = os . path . expanduser ( os . path . join ( "~" , "Downloads" ) ) if not os . path . exists ( output ) : os . mkdir ( output ) if outputFile == "" : outputFile = "xkcd-" + str ( self . number ) + "-" + self . imageName output = os . path . join ( output , outputFile ) try : download = open ( output , 'wb' ) except : if not silent : print ( "Unable to make file " + output ) return "" download . write ( image ) download . close ( ) return output | Downloads the image of the comic onto your computer . |
50,727 | def replace_local_hyperlinks ( text , base_url = "https://github.com/project-rig/rig/blob/master/" ) : def get_new_url ( url ) : return base_url + url [ 2 : ] for match in re . finditer ( r"^__ (?P<url>\./.*)" , text , re . MULTILINE ) : orig_url = match . groupdict ( ) [ "url" ] url = get_new_url ( orig_url ) text = re . sub ( "^__ {}" . format ( orig_url ) , "__ {}" . format ( url ) , text , flags = re . MULTILINE ) for match in re . finditer ( r"^\.\. _(?P<identifier>[^:]*): (?P<url>\./.*)" , text , re . MULTILINE ) : identifier = match . groupdict ( ) [ "identifier" ] orig_url = match . groupdict ( ) [ "url" ] url = get_new_url ( orig_url ) text = re . sub ( "^\.\. _{}: {}" . format ( identifier , orig_url ) , ".. _{}: {}" . format ( identifier , url ) , text , flags = re . MULTILINE ) for match in re . finditer ( r"^\.\. image:: (?P<url>\./.*)" , text , re . MULTILINE ) : orig_url = match . groupdict ( ) [ "url" ] url = get_new_url ( orig_url ) text = text . replace ( ".. image:: {}" . format ( orig_url ) , ".. image:: {}" . format ( url ) ) return text | Replace local hyperlinks in RST with absolute addresses using the given base URL . |
50,728 | def dump_index ( args , idx ) : import csv import sys from metatab import MetatabDoc doc = MetatabDoc ( ) pack_section = doc . new_section ( 'Packages' , [ 'Identifier' , 'Name' , 'Nvname' , 'Version' , 'Format' ] ) r = doc [ 'Root' ] r . new_term ( 'Root.Title' , 'Package Index' ) for p in idx . list ( ) : pack_section . new_term ( 'Package' , p [ 'url' ] , identifier = p [ 'ident' ] , name = p [ 'name' ] , nvname = p [ 'nvname' ] , version = p [ 'version' ] , format = p [ 'format' ] ) doc . write_csv ( args . dump ) | Create a metatab file for the index |
50,729 | def _process_added_port_event ( self , port_name ) : LOG . info ( "Hyper-V VM vNIC added: %s" , port_name ) self . _added_ports . add ( port_name ) | Callback for added ports . |
50,730 | def _load_physical_network_mappings ( self , phys_net_vswitch_mappings ) : for mapping in phys_net_vswitch_mappings : parts = mapping . split ( ':' ) if len ( parts ) != 2 : LOG . debug ( 'Invalid physical network mapping: %s' , mapping ) else : pattern = re . escape ( parts [ 0 ] . strip ( ) ) . replace ( '\\*' , '.*' ) pattern = pattern + '$' vswitch = parts [ 1 ] . strip ( ) self . _physical_network_mappings [ pattern ] = vswitch | Load all the information regarding the physical network . |
50,731 | def _get_vswitch_name ( self , network_type , physical_network ) : if network_type != constants . TYPE_LOCAL : vswitch_name = self . _get_vswitch_for_physical_network ( physical_network ) else : vswitch_name = self . _local_network_vswitch if vswitch_name : return vswitch_name err_msg = _ ( "No vSwitch configured for physical network " "'%(physical_network)s'. Neutron network type: " "'%(network_type)s'." ) raise exception . NetworkingHyperVException ( err_msg % dict ( physical_network = physical_network , network_type = network_type ) ) | Get the vswitch name for the received network information . |
50,732 | def _get_vswitch_for_physical_network ( self , phys_network_name ) : for pattern in self . _physical_network_mappings : if phys_network_name is None : phys_network_name = '' if re . match ( pattern , phys_network_name ) : return self . _physical_network_mappings [ pattern ] | Get the vswitch name for the received network name . |
50,733 | def _get_network_vswitch_map_by_port_id ( self , port_id ) : for network_id , vswitch in six . iteritems ( self . _network_vswitch_map ) : if port_id in vswitch [ 'ports' ] : return ( network_id , vswitch ) return ( None , None ) | Get the vswitch name for the received port id . |
50,734 | def _update_port_status_cache ( self , device , device_bound = True ) : with self . _cache_lock : if device_bound : self . _bound_ports . add ( device ) self . _unbound_ports . discard ( device ) else : self . _bound_ports . discard ( device ) self . _unbound_ports . add ( device ) | Update the ports status cache . |
50,735 | def _create_event_listeners ( self ) : LOG . debug ( "Create the event listeners." ) for event_type , callback in self . _event_callback_pairs : LOG . debug ( "Create listener for %r event" , event_type ) listener = self . _utils . get_vnic_event_listener ( event_type ) eventlet . spawn_n ( listener , callback ) | Create and bind the event listeners . |
50,736 | def process_added_port ( self , device_details ) : device = device_details [ 'device' ] port_id = device_details [ 'port_id' ] reprocess = True try : self . _process_added_port ( device_details ) LOG . debug ( "Updating cached port %s status as UP." , port_id ) self . _update_port_status_cache ( device , device_bound = True ) LOG . info ( "Port %s processed." , port_id ) except os_win_exc . HyperVvNicNotFound : LOG . debug ( 'vNIC %s not found. This can happen if the VM was ' 'destroyed.' , port_id ) reprocess = False except os_win_exc . HyperVPortNotFoundException : LOG . debug ( 'vSwitch port %s not found. This can happen if the VM ' 'was destroyed.' , port_id ) except Exception as ex : LOG . exception ( "Exception encountered while processing " "port %(port_id)s. Exception: %(ex)s" , dict ( port_id = port_id , ex = ex ) ) else : reprocess = False if reprocess : self . _added_ports . add ( device ) self . _refresh_cache = True return False return True | Process the new ports . |
50,737 | def _treat_devices_added ( self ) : try : devices_details_list = self . _plugin_rpc . get_devices_details_list ( self . _context , self . _added_ports , self . _agent_id ) except Exception as exc : LOG . debug ( "Unable to get ports details for " "devices %(devices)s: %(exc)s" , { 'devices' : self . _added_ports , 'exc' : exc } ) return for device_details in devices_details_list : device = device_details [ 'device' ] LOG . info ( "Adding port %s" , device ) if 'port_id' in device_details : LOG . info ( "Port %(device)s updated. " "Details: %(device_details)s" , { 'device' : device , 'device_details' : device_details } ) eventlet . spawn_n ( self . process_added_port , device_details ) else : LOG . debug ( "Missing port_id from device details: " "%(device)s. Details: %(device_details)s" , { 'device' : device , 'device_details' : device_details } ) LOG . debug ( "Remove the port from added ports set, so it " "doesn't get reprocessed." ) self . _added_ports . discard ( device ) | Process the new devices . |
50,738 | def _process_removed_port ( self , device ) : LOG . debug ( "Trying to remove the port %r" , device ) self . _update_port_status_cache ( device , device_bound = False ) self . _port_unbound ( device , vnic_deleted = True ) LOG . debug ( "The port was successfully removed." ) self . _removed_ports . discard ( device ) | Process the removed ports . |
50,739 | def _treat_devices_removed ( self ) : for device in self . _removed_ports . copy ( ) : eventlet . spawn_n ( self . _process_removed_port , device ) | Process the removed devices . |
50,740 | def build ( subparsers ) : parser = subparsers . add_parser ( 'build' , help = 'Build derived packages' , description = build . __doc__ , formatter_class = argparse . RawDescriptionHelpFormatter , epilog = '' ) parser . set_defaults ( run_command = run_metapack ) parser . add_argument ( 'metatabfile' , nargs = '?' , help = "Path or URL to a metatab file. If not provided, defaults to 'metadata.csv'. " ) parser . add_argument ( '-p' , '--profile' , help = "Name of a BOTO or AWS credentails profile" , required = False ) parser . add_argument ( '-D' , '--package-directory' , help = "Write Zip, Excel and CSV packages to an alternate directory" , required = False ) parser . add_argument ( '-F' , '--force' , action = 'store_true' , default = False , help = 'Force some operations, like updating the name and building packages' ) parser . add_argument ( '-R' , '--reuse-resources' , action = 'store_true' , default = False , help = 'When building Filesystem package, try to reuse resources built in prior build' ) group = parser . add_mutually_exclusive_group ( ) group . add_argument ( '-n' , '--nonversion-name' , action = 'store_true' , default = False , help = 'Write file packages with non-versioned names' ) group . add_argument ( '-N' , '--nonversion-link' , action = 'store_true' , default = False , help = 'Create links with nonversioned names to file packages' ) parser . set_defaults ( handler = None ) derived_group = parser . add_argument_group ( 'Derived Packages' , 'Generate other types of packages' ) derived_group . add_argument ( '-e' , '--excel' , action = 'store_true' , default = False , help = 'Create an excel archive from a metatab file' ) derived_group . add_argument ( '-z' , '--zip' , action = 'store_true' , default = False , help = 'Create a zip archive from a metatab file' ) derived_group . add_argument ( '-f' , '--filesystem' , action = 'store_true' , default = False , help = 'Create a filesystem archive from a metatab file' ) derived_group . add_argument ( '-c' , '--csv' , action = 'store_true' , default = False , help = 'Create a CSV archive from a metatab file' ) admin_group = parser . add_argument_group ( 'Administration' , 'Information and administration' ) admin_group . add_argument ( '--clean-cache' , default = False , action = 'store_true' , help = "Clean the download cache" ) admin_group . add_argument ( '-C' , '--clean' , default = False , action = 'store_true' , help = "For some operations, like updating schemas, clear the section of existing terms first" ) | Build source packages . |
50,741 | def metatab_derived_handler ( m ) : from metapack . exc import PackageError from metapack . util import get_materialized_data_cache from shutil import rmtree create_list = [ ] url = None doc = MetapackDoc ( m . mt_file ) env = get_lib_module_dict ( doc ) package_dir = m . package_root if m . args . package_directory : package_dir = parse_app_url ( m . args . package_directory ) update_name ( m . mt_file , fail_on_missing = False , report_unchanged = False ) process_schemas ( m . mt_file , cache = m . cache , clean = m . args . clean , report_found = False ) nv_name = m . args . nonversion_name nv_link = m . args . nonversion_link rmtree ( get_materialized_data_cache ( doc ) , ignore_errors = True ) reuse_resources = m . args . reuse_resources try : _ , url , created = make_filesystem_package ( m . mt_file , m . package_root , m . cache , env , m . args . force , False , nv_link , reuse_resources = reuse_resources ) create_list . append ( ( 'fs' , url , created ) ) lb_path = Path ( m . package_root . fspath , 'last_build' ) if created or not lb_path . exists ( ) : Path ( m . package_root . fspath , 'last_build' ) . touch ( ) m . mt_file = url env = { } if m . args . excel is not False : _ , url , created = make_excel_package ( m . mt_file , package_dir , m . cache , env , m . args . force , nv_name , nv_link ) create_list . append ( ( 'xlsx' , url , created ) ) if m . args . zip is not False : _ , url , created = make_zip_package ( m . mt_file , package_dir , m . cache , env , m . args . force , nv_name , nv_link ) create_list . append ( ( 'zip' , url , created ) ) if m . args . csv is not False : _ , url , created = make_csv_package ( m . mt_file , package_dir , m . cache , env , m . args . force , nv_name , nv_link ) create_list . append ( ( 'csv' , url , created ) ) except PackageError as e : err ( "Failed to generate package: {}" . format ( e ) ) index_packages ( m ) return create_list | Create local Zip Excel and Filesystem packages |
50,742 | def init ( ) : from metapack . appurl import SearchUrl import metapack as mp from os import environ SearchUrl . initialize ( ) mp . Downloader . context . update ( environ ) | Initialize features that are normally initialized in the CLI |
50,743 | def read_struct_file ( struct_data ) : structs = dict ( ) name = None for i , l in enumerate ( struct_data . splitlines ( ) ) : tokens = re_comment . sub ( b"" , l ) . strip ( ) . split ( ) if len ( tokens ) == 0 : continue elif len ( tokens ) == 3 : ( key , _ , value ) = tokens if key == b"name" : if name is not None : if structs [ name ] . size is None : raise ValueError ( "size value missing for struct '{}'" . format ( name ) ) if structs [ name ] . base is None : raise ValueError ( "base value missing for struct '{}'" . format ( name ) ) name = value structs [ name ] = Struct ( name ) elif key == b"size" : structs [ name ] . size = num ( value ) elif key == b"base" : structs [ name ] . base = num ( value ) else : raise ValueError ( key ) elif len ( tokens ) == 5 : ( field , pack , offset , printf , default ) = tokens num_pack = re_numbered_pack . match ( pack ) if num_pack is not None : pack = ( num_pack . group ( "num" ) + perl_to_python_packs [ num_pack . group ( "char" ) ] ) else : pack = perl_to_python_packs [ pack ] length = 1 field_exp = re_array_field . match ( field ) if field_exp is not None : field = field_exp . group ( "field" ) length = num ( field_exp . group ( "length" ) ) structs [ name ] [ field ] = StructField ( pack , num ( offset ) , printf , num ( default ) , length ) else : raise ValueError ( "line {}: Invalid syntax in struct file" . format ( i ) ) if structs [ name ] . size is None : raise ValueError ( "size value missing for struct '{}'" . format ( name ) ) if structs [ name ] . base is None : raise ValueError ( "base value missing for struct '{}'" . format ( name ) ) return structs | Interpret a struct file defining the location of variables in memory . |
50,744 | def num ( value ) : if re_hex_num . match ( value ) : return int ( value , base = 16 ) else : return int ( value ) | Convert a value from one of several bases to an int . |
50,745 | def update_default_values ( self , ** updates ) : for ( field , value ) in six . iteritems ( updates ) : fname = six . b ( field ) self [ fname ] = self [ fname ] . _replace ( default = value ) | Replace the default values of specified fields . |
50,746 | def next_future_job_delta ( self ) -> Optional [ float ] : job = self . _get_next_future_job ( ) if not job : return None return ( job . at - datetime . now ( timezone . utc ) ) . total_seconds ( ) | Give the amount of seconds before the next future job is due . |
50,747 | def sdram_alloc_for_vertices ( controller , placements , allocations , core_as_tag = True , sdram_resource = SDRAM , cores_resource = Cores , clear = False ) : vertex_memory = dict ( ) for vertex , allocs in six . iteritems ( allocations ) : if sdram_resource in allocs : sdram_slice = allocs [ sdram_resource ] assert sdram_slice . step is None size = sdram_slice . stop - sdram_slice . start x , y = placements [ vertex ] if core_as_tag : tag = allocs [ cores_resource ] . start else : tag = 0 vertex_memory [ vertex ] = controller . sdram_alloc_as_filelike ( size , tag , x = x , y = y , clear = clear ) return vertex_memory | Allocate and return a file - like view of a region of SDRAM for each vertex which uses SDRAM as a resource . |
50,748 | def advance_job_status ( namespace : str , job : Job , duration : float , err : Optional [ Exception ] ) : duration = human_duration ( duration ) if not err : job . status = JobStatus . SUCCEEDED logger . info ( 'Finished execution of %s in %s' , job , duration ) return if job . should_retry : job . status = JobStatus . NOT_SET job . retries += 1 if isinstance ( err , RetryException ) and err . at is not None : job . at = err . at else : job . at = ( datetime . now ( timezone . utc ) + exponential_backoff ( job . retries ) ) signals . job_schedule_retry . send ( namespace , job = job , err = err ) log_args = ( job . retries , job . max_retries + 1 , job , duration , human_duration ( ( job . at - datetime . now ( tz = timezone . utc ) ) . total_seconds ( ) ) ) if isinstance ( err , RetryException ) : logger . info ( 'Retry requested during execution %d/%d of %s ' 'after %s, retry in %s' , * log_args ) else : logger . warning ( 'Error during execution %d/%d of %s after %s, ' 'retry in %s' , * log_args ) return job . status = JobStatus . FAILED signals . job_failed . send ( namespace , job = job , err = err ) logger . error ( 'Error during execution %d/%d of %s after %s' , job . max_retries + 1 , job . max_retries + 1 , job , duration , exc_info = err ) | Advance the status of a job depending on its execution . |
50,749 | def tile ( self , zoom , row , col ) : return Tile ( self , zoom , row , col ) | Return Tile object of this TilePyramid . |
50,750 | def tile_x_size ( self , zoom ) : warnings . warn ( DeprecationWarning ( "tile_x_size is deprecated" ) ) validate_zoom ( zoom ) return round ( self . x_size / self . matrix_width ( zoom ) , ROUND ) | Width of a tile in SRID units at zoom level . |
50,751 | def tile_y_size ( self , zoom ) : warnings . warn ( DeprecationWarning ( "tile_y_size is deprecated" ) ) validate_zoom ( zoom ) return round ( self . y_size / self . matrix_height ( zoom ) , ROUND ) | Height of a tile in SRID units at zoom level . |
50,752 | def tile_width ( self , zoom ) : warnings . warn ( DeprecationWarning ( "tile_width is deprecated" ) ) validate_zoom ( zoom ) matrix_pixel = 2 ** ( zoom ) * self . tile_size * self . grid . shape . width tile_pixel = self . tile_size * self . metatiling return matrix_pixel if tile_pixel > matrix_pixel else tile_pixel | Tile width in pixel . |
50,753 | def tile_height ( self , zoom ) : warnings . warn ( DeprecationWarning ( "tile_height is deprecated" ) ) validate_zoom ( zoom ) matrix_pixel = 2 ** ( zoom ) * self . tile_size * self . grid . shape . height tile_pixel = self . tile_size * self . metatiling return matrix_pixel if tile_pixel > matrix_pixel else tile_pixel | Tile height in pixel . |
50,754 | def pixel_x_size ( self , zoom ) : validate_zoom ( zoom ) return round ( ( self . grid . right - self . grid . left ) / ( self . grid . shape . width * 2 ** zoom * self . tile_size ) , ROUND ) | Width of a pixel in SRID units at zoom level . |
50,755 | def pixel_y_size ( self , zoom ) : validate_zoom ( zoom ) return round ( ( self . grid . top - self . grid . bottom ) / ( self . grid . shape . height * 2 ** zoom * self . tile_size ) , ROUND ) | Height of a pixel in SRID units at zoom level . |
50,756 | def tiles_from_bbox ( self , geometry , zoom ) : validate_zoom ( zoom ) return self . tiles_from_bounds ( geometry . bounds , zoom ) | All metatiles intersecting with given bounding box . |
50,757 | def tiles_from_geom ( self , geometry , zoom ) : validate_zoom ( zoom ) if geometry . is_empty : return if not geometry . is_valid : raise ValueError ( "no valid geometry: %s" % geometry . type ) if geometry . geom_type == "Point" : yield self . tile_from_xy ( geometry . x , geometry . y , zoom ) elif geometry . geom_type == "MultiPoint" : for point in geometry : yield self . tile_from_xy ( point . x , point . y , zoom ) elif geometry . geom_type in ( "LineString" , "MultiLineString" , "Polygon" , "MultiPolygon" , "GeometryCollection" ) : prepared_geometry = prep ( clip_geometry_to_srs_bounds ( geometry , self ) ) for tile in self . tiles_from_bbox ( geometry , zoom ) : if prepared_geometry . intersects ( tile . bbox ( ) ) : yield tile | Return all tiles intersecting with input geometry . |
50,758 | def tile_from_xy ( self , x , y , zoom , on_edge_use = "rb" ) : validate_zoom ( zoom ) if x < self . left or x > self . right or y < self . bottom or y > self . top : raise ValueError ( "x or y are outside of grid bounds" ) if on_edge_use not in [ "lb" , "rb" , "rt" , "lt" ] : raise ValueError ( "on_edge_use must be one of lb, rb, rt or lt" ) return _tile_from_xy ( self , x , y , zoom , on_edge_use = on_edge_use ) | Return tile covering a point defined by x and y values . |
50,759 | def get_ladder_metadata ( session , url ) : parsed = make_scrape_request ( session , url ) tag = parsed . find ( 'a' , href = re . compile ( LADDER_ID_REGEX ) ) return { 'id' : int ( tag [ 'href' ] . split ( '/' ) [ - 1 ] ) , 'slug' : url . split ( '/' ) [ - 1 ] , 'url' : url } | Get ladder metadata . |
50,760 | def get_ladders_metadata ( session , parsed ) : ladders = { } for ladder in parsed . find_all ( 'a' , href = re . compile ( LADDER_URL_REGEX ) ) : ladders [ ladder . text ] = get_ladder_metadata ( session , ladder [ 'href' ] ) return ladders | Get metadata for all ladders . |
50,761 | def sample_counters ( mc , system_info ) : return { ( x , y ) : mc . get_router_diagnostics ( x , y ) for ( x , y ) in system_info } | Sample every router counter in the machine . |
50,762 | def monitor_counters ( mc , output , counters , detailed , f ) : output . write ( "time,{}{}\n" . format ( "x,y," if detailed else "" , "," . join ( counters ) ) ) system_info = mc . get_system_info ( ) last_counter_values = sample_counters ( mc , system_info ) start_time = time . time ( ) for _ in f ( ) : counter_values = sample_counters ( mc , system_info ) delta = deltas ( last_counter_values , counter_values ) last_counter_values = counter_values now = time . time ( ) - start_time if detailed : for x , y in sorted ( system_info ) : output . write ( "{:0.1f},{},{},{}\n" . format ( now , x , y , "," . join ( str ( getattr ( delta [ ( x , y ) ] , c ) ) for c in counters ) ) ) else : totals = [ 0 for _ in counters ] for xy in sorted ( system_info ) : for i , counter in enumerate ( counters ) : totals [ i ] += getattr ( delta [ xy ] , counter ) output . write ( "{:0.1f},{}\n" . format ( now , "," . join ( map ( str , totals ) ) ) ) | Monitor the counters on a specified machine taking a snap - shot every time the generator f yields . |
50,763 | def press_enter ( multiple = False , silent = False ) : def f ( ) : try : while True : if silent : yield input ( ) else : sys . stderr . write ( "<press enter> " ) sys . stderr . flush ( ) yield input ( ) if not multiple : break except ( EOFError , KeyboardInterrupt ) : if not silent : sys . stderr . write ( "\n" ) sys . stderr . flush ( ) return return f | Return a generator function which yields every time the user presses return . |
50,764 | def listen ( timeout = 6.0 , port = BOOT_PORT ) : s = socket . socket ( socket . AF_INET , socket . SOCK_DGRAM ) s . setsockopt ( socket . SOL_SOCKET , socket . SO_REUSEADDR , 1 ) s . bind ( ( '0.0.0.0' , port ) ) s . settimeout ( timeout ) try : message , ( ipaddr , port ) = s . recvfrom ( 512 ) return ipaddr except socket . timeout : return None | Listen for a ping broadcast message from an unbooted SpiNNaker board . |
50,765 | def clear_cache ( m , files_processed ) : for what , reason , url , path in files_processed : cp = m . doc . downloader . cache_path ( url ) if m . cache . exists ( cp ) : m . cache . remove ( cp ) | Remove any files we may have uploaded from the cache . |
50,766 | def fill_categorical_na ( df , nan_cat = 'NA' ) : for col in df . columns [ df . isna ( ) . any ( ) ] . tolist ( ) : if df [ col ] . dtype . name != 'category' : df [ col ] = df [ col ] . fillna ( '' ) else : try : df [ col ] . cat . add_categories ( [ nan_cat ] , inplace = True ) except ValueError : pass df [ col ] = df [ col ] . fillna ( nan_cat ) return df | Fill categoricals with NA possibly creating a new category and fill other NaNa with blanks |
50,767 | def geo ( self ) : import geopandas as gpd from shapely . geometry . polygon import BaseGeometry from shapely . wkt import loads gdf = gpd . GeoDataFrame ( self ) first = next ( gdf . iterrows ( ) ) [ 1 ] . geometry if isinstance ( first , str ) : shapes = [ loads ( row [ 'geometry' ] ) for i , row in gdf . iterrows ( ) ] elif not isinstance ( first , BaseGeometry ) : shapes = [ row [ 'geometry' ] . shape for i , row in gdf . iterrows ( ) ] else : shapes = gdf [ 'geometry' ] gdf [ 'geometry' ] = gpd . GeoSeries ( shapes ) gdf . set_geometry ( 'geometry' ) return gdf | Return a geopandas dataframe |
50,768 | def rows ( self ) : yield [ self . index . name ] + list ( self . columns ) for t in self . itertuples ( ) : yield list ( t ) | Yield rows like a partition does with a header first then rows . |
50,769 | def _matches_node_set_props ( type_info , node_set_props ) : matches = None for key in node_set_props : ns_val = node_set_props [ key ] if key in type_info : if ns_val == type_info [ key ] : if matches : matches = matches and True else : matches = True else : matches = False return matches | Check whether the node_set properties match the given model type definition |
50,770 | def generate_lems_file ( self , nml_file_name , nml_doc ) : if 'output' in self . simulation_config : gen_spike_saves_for_all_somas = True target = nml_doc . networks [ 0 ] . id sim_id = 'Sim_%s' % target duration = self . simulation_config [ 'run' ] [ 'tstop' ] dt = self . simulation_config [ 'run' ] [ 'dt' ] lems_file_name = 'LEMS_%s.xml' % sim_id target_dir = "./" gen_saves_for_quantities = { } gen_plots_for_quantities = { } if 'reports' in self . simulation_config : if 'membrane_potential' in self . simulation_config [ 'reports' ] : mp = self . simulation_config [ 'reports' ] [ 'membrane_potential' ] node_set = self . node_set_mappings [ mp [ 'cells' ] ] for nml_pop in node_set : comp = self . nml_pop_vs_comps [ nml_pop ] ids = node_set [ nml_pop ] display = 'Voltages_%s' % nml_pop file_name = '%s.v.dat' % nml_pop for id in ids : quantity = '%s/%i/%s/%s' % ( nml_pop , id , comp , 'v' ) if not nml_pop in self . nml_pops_having_locations : quantity = '%s[%i]/%s' % ( nml_pop , id , 'v' ) if not display in gen_plots_for_quantities : gen_plots_for_quantities [ display ] = [ ] gen_plots_for_quantities [ display ] . append ( quantity ) if not file_name in gen_saves_for_quantities : gen_saves_for_quantities [ file_name ] = [ ] gen_saves_for_quantities [ file_name ] . append ( quantity ) generate_lems_file_for_neuroml ( sim_id , nml_file_name , target , duration , dt , lems_file_name , target_dir , include_extra_files = self . nml_includes , gen_plots_for_all_v = False , plot_all_segments = False , gen_plots_for_quantities = gen_plots_for_quantities , gen_saves_for_all_v = False , save_all_segments = False , gen_saves_for_quantities = gen_saves_for_quantities , gen_spike_saves_for_all_somas = gen_spike_saves_for_all_somas , report_file_name = REPORT_FILE , copy_neuroml = True , verbose = True ) return lems_file_name | Generate a LEMS file to use in simulations of the NeuroML file |
50,771 | def clip_geometry_to_srs_bounds ( geometry , pyramid , multipart = False ) : if not geometry . is_valid : raise ValueError ( "invalid geometry given" ) pyramid_bbox = box ( * pyramid . bounds ) if pyramid . is_global and not geometry . within ( pyramid_bbox ) : inside_geom = geometry . intersection ( pyramid_bbox ) outside_geom = geometry . difference ( pyramid_bbox ) if isinstance ( outside_geom , Polygon ) : outside_geom = [ outside_geom ] all_geoms = [ inside_geom ] for geom in outside_geom : geom_bounds = Bounds ( * geom . bounds ) if geom_bounds . left < pyramid . left : geom = translate ( geom , xoff = 2 * pyramid . right ) elif geom_bounds . right > pyramid . right : geom = translate ( geom , xoff = - 2 * pyramid . right ) all_geoms . append ( geom ) if multipart : return all_geoms else : return GeometryCollection ( all_geoms ) else : if multipart : return [ geometry ] else : return geometry | Clip input geometry to SRS bounds of given TilePyramid . |
50,772 | def snap_bounds ( bounds = None , tile_pyramid = None , zoom = None , pixelbuffer = 0 ) : bounds = Bounds ( * bounds ) validate_zoom ( zoom ) lb = _tile_from_xy ( tile_pyramid , bounds . left , bounds . bottom , zoom , on_edge_use = "rt" ) rt = _tile_from_xy ( tile_pyramid , bounds . right , bounds . top , zoom , on_edge_use = "lb" ) left , bottom , _ , _ = lb . bounds ( pixelbuffer ) _ , _ , right , top = rt . bounds ( pixelbuffer ) return Bounds ( left , bottom , right , top ) | Extend bounds to be aligned with union of tile bboxes . |
50,773 | def _verify_shape_bounds ( shape , bounds ) : if not isinstance ( shape , ( tuple , list ) ) or len ( shape ) != 2 : raise TypeError ( "shape must be a tuple or list with two elements: %s" % str ( shape ) ) if not isinstance ( bounds , ( tuple , list ) ) or len ( bounds ) != 4 : raise TypeError ( "bounds must be a tuple or list with four elements: %s" % str ( bounds ) ) shape = Shape ( * shape ) bounds = Bounds ( * bounds ) shape_ratio = shape . width / shape . height bounds_ratio = ( bounds . right - bounds . left ) / ( bounds . top - bounds . bottom ) if abs ( shape_ratio - bounds_ratio ) > DELTA : min_length = min ( [ ( bounds . right - bounds . left ) / shape . width , ( bounds . top - bounds . bottom ) / shape . height ] ) proposed_bounds = Bounds ( bounds . left , bounds . bottom , bounds . left + shape . width * min_length , bounds . bottom + shape . height * min_length ) raise ValueError ( "shape ratio (%s) must equal bounds ratio (%s); try %s" % ( shape_ratio , bounds_ratio , proposed_bounds ) ) | Verify that shape corresponds to bounds apect ratio . |
50,774 | def _tile_intersecting_tilepyramid ( tile , tp ) : if tile . tp . grid != tp . grid : raise ValueError ( "Tile and TilePyramid source grids must be the same." ) tile_metatiling = tile . tile_pyramid . metatiling pyramid_metatiling = tp . metatiling multiplier = tile_metatiling / pyramid_metatiling if tile_metatiling > pyramid_metatiling : return [ tp . tile ( tile . zoom , int ( multiplier ) * tile . row + row_offset , int ( multiplier ) * tile . col + col_offset ) for row_offset , col_offset in product ( range ( int ( multiplier ) ) , range ( int ( multiplier ) ) ) ] elif tile_metatiling < pyramid_metatiling : return [ tp . tile ( tile . zoom , int ( multiplier * tile . row ) , int ( multiplier * tile . col ) ) ] else : return [ tp . tile ( * tile . id ) ] | Return all tiles from tilepyramid intersecting with tile . |
50,775 | def _global_tiles_from_bounds ( tp , bounds , zoom ) : seen = set ( ) left , right = bounds . left , bounds . right top = tp . top if bounds . top > tp . top else bounds . top bottom = tp . bottom if bounds . bottom < tp . bottom else bounds . bottom if left >= tp . left and right <= tp . right : for tile in _tiles_from_cleaned_bounds ( tp , bounds , zoom ) : yield tile if left < tp . left : for tile in chain ( _tiles_from_cleaned_bounds ( tp , Bounds ( left + ( tp . right - tp . left ) , bottom , tp . right , top ) , zoom ) , _tiles_from_cleaned_bounds ( tp , Bounds ( tp . left , bottom , right , top ) , zoom ) ) : if tile . id not in seen : seen . add ( tile . id ) yield tile if right > tp . right : for tile in chain ( _tiles_from_cleaned_bounds ( tp , Bounds ( left , bottom , tp . right , top ) , zoom ) , _tiles_from_cleaned_bounds ( tp , Bounds ( tp . left , bottom , right - ( tp . right - tp . left ) , top ) , zoom ) ) : if tile . id not in seen : seen . add ( tile . id ) yield tile | Return also Tiles if bounds cross the antimeridian . |
50,776 | def open_args ( subparsers ) : parser = subparsers . add_parser ( 'open' , help = 'open a CSV resoruce with a system application' , description = open_args . __doc__ , formatter_class = argparse . RawDescriptionHelpFormatter , ) parser . set_defaults ( run_command = open_cmd ) parser . add_argument ( 'metatabfile' , nargs = '?' , help = "Path or URL to a metatab file. If not provided, defaults to 'metadata.csv' " ) return parser | The mp open command will open a resource with the system application such as Excel or OpenOffice |
50,777 | def ner_net ( source , destinations , width , height , wrap_around = False , radius = 10 ) : route = { source : RoutingTree ( source ) } for destination in sorted ( destinations , key = ( lambda destination : shortest_mesh_path_length ( to_xyz ( source ) , to_xyz ( destination ) ) if not wrap_around else shortest_torus_path_length ( to_xyz ( source ) , to_xyz ( destination ) , width , height ) ) ) : neighbour = None concentric_hexagons = memoized_concentric_hexagons ( radius ) if len ( concentric_hexagons ) < len ( route ) / 3 : for x , y in concentric_hexagons : x += destination [ 0 ] y += destination [ 1 ] if wrap_around : x %= width y %= height if ( x , y ) in route : neighbour = ( x , y ) break else : neighbour = None neighbour_distance = None for candidate_neighbour in route : if wrap_around : distance = shortest_torus_path_length ( to_xyz ( candidate_neighbour ) , to_xyz ( destination ) , width , height ) else : distance = shortest_mesh_path_length ( to_xyz ( candidate_neighbour ) , to_xyz ( destination ) ) if distance <= radius and ( neighbour is None or distance < neighbour_distance ) : neighbour = candidate_neighbour neighbour_distance = distance if neighbour is None : neighbour = source if wrap_around : vector = shortest_torus_path ( to_xyz ( neighbour ) , to_xyz ( destination ) , width , height ) else : vector = shortest_mesh_path ( to_xyz ( neighbour ) , to_xyz ( destination ) ) ldf = longest_dimension_first ( vector , neighbour , width , height ) i = len ( ldf ) for direction , ( x , y ) in reversed ( ldf ) : i -= 1 if ( x , y ) in route : neighbour = ( x , y ) ldf = ldf [ i + 1 : ] break last_node = route [ neighbour ] for direction , ( x , y ) in ldf : this_node = RoutingTree ( ( x , y ) ) route [ ( x , y ) ] = this_node last_node . children . append ( ( Routes ( direction ) , this_node ) ) last_node = this_node return ( route [ source ] , route ) | Produce a shortest path tree for a given net using NER . |
50,778 | def route_has_dead_links ( root , machine ) : for direction , ( x , y ) , routes in root . traverse ( ) : for route in routes : if ( x , y , route ) not in machine : return True return False | Quickly determine if a route uses any dead links . |
50,779 | def avoid_dead_links ( root , machine , wrap_around = False ) : root , lookup , broken_links = copy_and_disconnect_tree ( root , machine ) for parent , child in broken_links : child_chips = set ( c . chip for c in lookup [ child ] ) path = a_star ( child , parent , set ( lookup ) . difference ( child_chips ) , machine , wrap_around ) last_node = lookup [ path [ 0 ] [ 1 ] ] last_direction = path [ 0 ] [ 0 ] for direction , ( x , y ) in path [ 1 : ] : if ( x , y ) not in child_chips : new_node = RoutingTree ( ( x , y ) ) assert ( x , y ) not in lookup , "Cycle created." lookup [ ( x , y ) ] = new_node else : new_node = lookup [ ( x , y ) ] for node in lookup [ child ] : dn = [ ( d , n ) for d , n in node . children if n == new_node ] assert len ( dn ) <= 1 if dn : node . children . remove ( dn [ 0 ] ) break last_node . children . append ( ( Routes ( last_direction ) , new_node ) ) last_node = new_node last_direction = direction last_node . children . append ( ( last_direction , lookup [ child ] ) ) return ( root , lookup ) | Modify a RoutingTree to route - around dead links in a Machine . |
50,780 | def match ( string , patterns ) : if patterns is None : return True else : return any ( re . match ( pattern , string ) for pattern in patterns ) | Given a string return true if it matches the supplied list of patterns . |
50,781 | def get_process_list ( mc , x_ = None , y_ = None , p_ = None , app_ids = None , applications = None , states = None ) : system_info = mc . get_system_info ( ) for ( x , y ) , chip_info in sorted ( iteritems ( system_info ) ) : if x_ is not None and x_ != x : continue if y_ is not None and y_ != y : continue for p in range ( chip_info . num_cores ) : if p_ is not None and p_ != p : continue try : status = mc . get_processor_status ( x = x , y = y , p = p ) keep = ( match ( str ( status . app_id ) , app_ids ) and match ( status . app_name , applications ) and match ( status . cpu_state . name , states ) ) if keep : yield ( x , y , p , status . cpu_state , status . rt_code , status . app_name , status . app_id ) except SCPError as e : class DeadStatus ( object ) : name = "{}: {}" . format ( e . __class__ . __name__ , str ( e ) ) yield ( x , y , p , DeadStatus ( ) , None , "" , - 1 ) | Scan a SpiNNaker system s cores filtering by the specified features . |
50,782 | def build_application_map ( vertices_applications , placements , allocations , core_resource = Cores ) : application_map = defaultdict ( lambda : defaultdict ( set ) ) for vertex , application in iteritems ( vertices_applications ) : chip_cores = application_map [ application ] [ placements [ vertex ] ] core_slice = allocations [ vertex ] . get ( core_resource , slice ( 0 , 0 ) ) chip_cores . update ( range ( core_slice . start , core_slice . stop ) ) return application_map | Build a mapping from application to a list of cores where the application is used . |
50,783 | def register_sentry ( raven_client , namespace : Optional [ str ] = None , send_retries : bool = False ) : @ signals . job_started . connect_via ( namespace ) def job_started ( namespace , job , ** kwargs ) : raven_client . context . activate ( ) raven_client . transaction . push ( job . task_name ) @ signals . job_finished . connect_via ( namespace ) def job_finished ( namespace , job , ** kwargs ) : raven_client . transaction . pop ( job . task_name ) raven_client . context . clear ( ) @ signals . job_failed . connect_via ( namespace ) def job_failed ( namespace , job , ** kwargs ) : raven_client . captureException ( extra = { attr : getattr ( job , attr ) for attr in job . __slots__ } ) if send_retries : @ signals . job_schedule_retry . connect_via ( namespace ) def job_schedule_retry ( namespace , job , ** kwargs ) : raven_client . captureException ( extra = { attr : getattr ( job , attr ) for attr in job . __slots__ } ) | Register the Sentry integration . |
50,784 | def add_resource ( mt_file , ref , cache ) : if isinstance ( mt_file , MetapackDoc ) : doc = mt_file else : doc = MetapackDoc ( mt_file ) if not 'Resources' in doc : doc . new_section ( 'Resources' ) doc [ 'Resources' ] . args = [ e for e in set ( doc [ 'Resources' ] . args + [ 'Name' , 'StartLine' , 'HeaderLines' , 'Encoding' ] ) if e ] seen_names = set ( ) u = parse_app_url ( ref ) if u . proto == 'file' : entries = u . list ( ) else : entries = [ ssu for su in u . list ( ) for ssu in su . list ( ) ] errors = [ ] for e in entries : if not add_single_resource ( doc , e , cache = cache , seen_names = seen_names ) : errors . append ( e ) if errors : prt ( ) warn ( "Found, but failed to add these urls:" ) for e in errors : print ( ' ' , e ) write_doc ( doc , mt_file ) | Add a resources entry downloading the intuiting the file replacing entries with the same reference |
50,785 | def bounds ( self , pixelbuffer = 0 ) : left = self . _left bottom = self . _bottom right = self . _right top = self . _top if pixelbuffer : offset = self . pixel_x_size * float ( pixelbuffer ) left -= offset bottom -= offset right += offset top += offset if self . tp . grid . is_global : top = min ( [ top , self . tile_pyramid . top ] ) bottom = max ( [ bottom , self . tile_pyramid . bottom ] ) return Bounds ( left , bottom , right , top ) | Return Tile boundaries . |
50,786 | def affine ( self , pixelbuffer = 0 ) : return Affine ( self . pixel_x_size , 0 , self . bounds ( pixelbuffer ) . left , 0 , - self . pixel_y_size , self . bounds ( pixelbuffer ) . top ) | Return an Affine object of tile . |
50,787 | def shape ( self , pixelbuffer = 0 ) : height = self . _base_shape . height + 2 * pixelbuffer width = self . _base_shape . width + 2 * pixelbuffer if pixelbuffer and self . tp . grid . is_global : matrix_height = self . tile_pyramid . matrix_height ( self . zoom ) if matrix_height == 1 : height = self . _base_shape . height elif self . row in [ 0 , matrix_height - 1 ] : height = self . _base_shape . height + pixelbuffer return Shape ( height = height , width = width ) | Return a tuple of tile height and width . |
50,788 | def is_valid ( self ) : if not all ( [ isinstance ( self . zoom , int ) , self . zoom >= 0 , isinstance ( self . row , int ) , self . row >= 0 , isinstance ( self . col , int ) , self . col >= 0 ] ) : raise TypeError ( "zoom, col and row must be integers >= 0" ) cols = self . tile_pyramid . matrix_width ( self . zoom ) rows = self . tile_pyramid . matrix_height ( self . zoom ) if self . col >= cols : raise ValueError ( "col (%s) exceeds matrix width (%s)" % ( self . col , cols ) ) if self . row >= rows : raise ValueError ( "row (%s) exceeds matrix height (%s)" % ( self . row , rows ) ) return True | Return True if tile is available in tile pyramid . |
50,789 | def get_parent ( self ) : return None if self . zoom == 0 else self . tile_pyramid . tile ( self . zoom - 1 , self . row // 2 , self . col // 2 ) | Return tile from previous zoom level . |
50,790 | def get_children ( self ) : next_zoom = self . zoom + 1 return [ self . tile_pyramid . tile ( next_zoom , self . row * 2 + row_offset , self . col * 2 + col_offset ) for row_offset , col_offset in [ ( 0 , 0 ) , ( 0 , 1 ) , ( 1 , 1 ) , ( 1 , 0 ) , ] if all ( [ self . row * 2 + row_offset < self . tp . matrix_height ( next_zoom ) , self . col * 2 + col_offset < self . tp . matrix_width ( next_zoom ) ] ) ] | Return tiles from next zoom level . |
50,791 | def get_neighbors ( self , connectedness = 8 ) : if connectedness not in [ 4 , 8 ] : raise ValueError ( "only connectedness values 8 or 4 are allowed" ) unique_neighbors = { } matrix_offsets = [ ( - 1 , 0 ) , ( 0 , 1 ) , ( 1 , 0 ) , ( 0 , - 1 ) ] if connectedness == 8 : matrix_offsets . extend ( [ ( - 1 , 1 ) , ( 1 , 1 ) , ( 1 , - 1 ) , ( - 1 , - 1 ) ] ) for row_offset , col_offset in matrix_offsets : new_row = self . row + row_offset new_col = self . col + col_offset if new_row < 0 or new_row >= self . tp . matrix_height ( self . zoom ) : continue if new_col < 0 : if not self . tp . is_global : continue new_col = self . tp . matrix_width ( self . zoom ) + new_col elif new_col >= self . tp . matrix_width ( self . zoom ) : if not self . tp . is_global : continue new_col -= self . tp . matrix_width ( self . zoom ) if new_row == self . row and new_col == self . col : continue unique_neighbors [ ( new_row , new_col ) ] = self . tp . tile ( self . zoom , new_row , new_col ) return unique_neighbors . values ( ) | Return tile neighbors . |
50,792 | def add_field ( self , identifier , length = None , start_at = None , tags = None ) : if length is not None and length <= 0 : raise ValueError ( "Fields must be at least one bit in length." ) if ( start_at is not None and ( 0 <= start_at >= self . length or start_at + ( length or 1 ) > self . length ) ) : raise ValueError ( "Field doesn't fit within {}-bit bit field." . format ( self . length ) ) if start_at is not None : end_at = start_at + ( length or 1 ) for other_identifier , other_field in self . fields . potential_fields ( self . field_values ) : if other_field . start_at is not None : other_start_at = other_field . start_at other_end_at = other_start_at + ( other_field . length or 1 ) if end_at > other_start_at and other_end_at > start_at : raise ValueError ( "Field '{}' (range {}-{}) " "overlaps field '{}' (range {}-{})" . format ( identifier , start_at , end_at , other_identifier , other_start_at , other_end_at ) ) if type ( tags ) is str : tags = set ( tags . split ( ) ) elif tags is None : tags = set ( ) else : tags = set ( tags ) field = type ( self ) . _Field ( length , start_at , tags ) self . fields . add_field ( field , identifier , self . field_values ) for parent_identifier in self . fields . get_field_requirements ( identifier , self . field_values ) : parent = self . fields . get_field ( parent_identifier , self . field_values ) parent . tags . update ( tags ) | Add a new field to the BitField . |
50,793 | def get_value ( self , tag = None , field = None ) : assert not ( tag is not None and field is not None ) , "Cannot filter by tag and field simultaneously." selected_fields = self . _select_by_field_or_tag ( tag , field ) missing_fields_idents = set ( selected_fields ) - set ( self . field_values ) if missing_fields_idents : raise ValueError ( "Cannot generate value with undefined fields {}." . format ( ", " . join ( "'{}'" . format ( f ) for f in missing_fields_idents ) ) ) value = 0 for identifier , field in iteritems ( selected_fields ) : if field . length is None or field . start_at is None : raise ValueError ( "Field '{}' does not have a fixed size/position." . format ( identifier ) ) value |= ( self . field_values [ identifier ] << field . start_at ) return value | Generate an integer whose bits are set according to the values of fields in this bit field . All other bits are set to zero . |
50,794 | def get_mask ( self , tag = None , field = None ) : if tag is not None and field is not None : raise TypeError ( "get_mask() takes exactly one keyword argument, " "either 'field' or 'tag' (both given)" ) selected_fields = self . _select_by_field_or_tag ( tag , field ) mask = 0 for identifier , field in iteritems ( selected_fields ) : if field . length is None or field . start_at is None : raise ValueError ( "Field '{}' does not have a fixed size/position." . format ( identifier ) ) mask |= ( ( 1 << field . length ) - 1 ) << field . start_at return mask | Get the mask for all fields which exist in the current bit field . |
50,795 | def get_tags ( self , field ) : return self . fields . get_field ( field , self . field_values ) . tags . copy ( ) | Get the set of tags for a given field . |
50,796 | def get_location_and_length ( self , field ) : field_obj = self . fields . get_field ( field , self . field_values ) if field_obj . length is None or field_obj . start_at is None : raise ValueError ( "Field '{}' does not have a fixed size/position." . format ( field ) ) return ( field_obj . start_at , field_obj . length ) | Get the location and length of a field within the bitfield . |
50,797 | def assign_fields ( self ) : queue = [ ( self . fields , { } ) ] while queue : node , field_values = queue . pop ( 0 ) self . _assign_fields ( node . fields , field_values , assign_positions = False ) for requirements , child in iteritems ( node . children ) : requirements = dict ( requirements ) requirements . update ( field_values ) queue . append ( ( child , requirements ) ) def recurse_assign_fields ( node = self . fields , field_values = { } ) : for requirements , child in iteritems ( node . children ) : child_field_values = dict ( requirements ) child_field_values . update ( field_values ) recurse_assign_fields ( child , child_field_values ) self . _assign_fields ( node . fields , field_values , assign_positions = True ) recurse_assign_fields ( ) | Assign a position & length to any fields which do not have one . |
50,798 | def _assign_fields ( self , identifiers , field_values , assign_positions , assigned_bits = 0 ) : for i , f in self . fields . potential_fields ( field_values ) : if f . length is not None and f . start_at is not None : assigned_bits |= ( ( 1 << f . length ) - 1 ) << f . start_at for identifier in identifiers : field = self . fields . get_field ( identifier , field_values ) if field . length is not None and field . start_at is not None : pass elif assign_positions or field . start_at is not None : assigned_bits |= self . _assign_field ( assigned_bits , identifier , field_values ) return assigned_bits | For internal use only . Assign lengths & positions to a subset of all potential fields with the supplied field_values . |
50,799 | def _assign_field ( self , assigned_bits , identifier , field_values ) : field = self . fields . get_field ( identifier , field_values ) length = field . length if length is None : length = int ( log ( field . max_value , 2 ) ) + 1 start_at = field . start_at if start_at is None : start_at = self . length for bit in range ( 0 , self . length - length ) : field_bits = ( ( 1 << length ) - 1 ) << bit if not ( assigned_bits & field_bits ) : start_at = bit assigned_bits |= field_bits break else : field_bits = ( ( 1 << length ) - 1 ) << start_at if assigned_bits & field_bits : raise ValueError ( "{}-bit field {} with fixed position does not fit in " "{}." . format ( field . length , self . fields . get_field_human_readable ( identifier , field_values ) , type ( self ) . __name__ ) ) assigned_bits |= field_bits if start_at + length <= self . length : field . length = length field . start_at = start_at else : raise ValueError ( "{}-bit field {} does not fit in {}." . format ( field . length , self . fields . get_field_human_readable ( identifier , field_values ) , type ( self ) . __name__ ) ) return assigned_bits | For internal use only . Assign a length and position to a field which may have either one of these values missing . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.