idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
50,500
def spinn5_chip_coord ( x , y , root_x = 0 , root_y = 0 ) : dx , dy = SPINN5_ETH_OFFSET [ ( y - root_y ) % 12 ] [ ( x - root_x ) % 12 ] return ( - int ( dx ) , - int ( dy ) )
Get the coordinates of a chip on its board .
50,501
def spinn5_fpga_link ( x , y , link , root_x = 0 , root_y = 0 ) : x , y = spinn5_chip_coord ( x , y , root_x , root_y ) return SPINN5_FPGA_LINKS . get ( ( x , y , link ) )
Get the identity of the FPGA link which corresponds with the supplied link .
50,502
def _load_resource ( self , source_r , abs_path = False ) : r = self . doc . resource ( source_r . name ) r . url = self . resource_root . join ( r . url ) . inner
The CSV package has no resources so we just need to resolve the URLs to them . Usually the CSV package is built from a file system package on a publicly accessible server .
50,503
def attach_tasks ( self , tasks : Tasks ) : if tasks . _spin is not None and tasks . _spin is not self : logger . warning ( 'Tasks already attached to a different Engine' ) self . _tasks . update ( tasks ) tasks . _spin = self
Attach a set of tasks .
50,504
def schedule_batch ( self , batch : Batch ) : jobs = list ( ) for task , at , args , kwargs in batch . jobs_to_create : task = self . _tasks . get ( task ) jobs . append ( Job ( task . name , task . queue , at , task . max_retries , task_args = args , task_kwargs = kwargs ) ) return self . _broker . enqueue_jobs ( jobs )
Schedule many jobs at once .
50,505
def start_workers ( self , number : int = DEFAULT_WORKER_NUMBER , queue = DEFAULT_QUEUE , block = True , stop_when_queue_empty = False ) : if self . _arbiter or self . _workers : raise RuntimeError ( 'Workers are already running' ) self . _working_queue = queue tasks_names = '\n' . join ( [ ' - ' + task . name for task in self . _tasks . tasks . values ( ) if task . queue == self . _working_queue ] ) logger . info ( 'Starting %d workers on queue "%s" with tasks:\n%s' , number , self . _working_queue , tasks_names ) self . _broker . start ( ) self . _workers = Workers ( num_workers = number , namespace = self . namespace , ) self . _result_notifier = threading . Thread ( target = run_forever , args = ( self . _result_notifier_func , self . _must_stop , logger ) , name = '{}-result-notifier' . format ( self . namespace ) ) self . _result_notifier . start ( ) self . _arbiter = threading . Thread ( target = run_forever , args = ( self . _arbiter_func , self . _must_stop , logger , stop_when_queue_empty ) , name = '{}-arbiter' . format ( self . namespace ) ) self . _arbiter . start ( ) if block : with handle_sigterm ( ) : try : self . _arbiter . join ( ) except KeyboardInterrupt : self . stop_workers ( ) except AttributeError : pass
Start the worker threads .
50,506
def stop_workers ( self , _join_arbiter = True ) : self . _must_stop . set ( ) self . _workers . stop ( ) self . _result_notifier . join ( ) self . _broker . stop ( ) if _join_arbiter : self . _arbiter . join ( ) self . _reset ( )
Stop the workers and wait for them to terminate .
50,507
def handle_trunks ( self , trunks , event_type ) : LOG . debug ( "Trunks event received: %(event_type)s. Trunks: %(trunks)s" , { 'event_type' : event_type , 'trunks' : trunks } ) if event_type == events . DELETED : for trunk in trunks : self . _trunks . pop ( trunk . id , None ) else : for trunk in trunks : self . _trunks [ trunk . id ] = trunk self . _setup_trunk ( trunk )
Trunk data model change from the server .
50,508
def handle_subports ( self , subports , event_type ) : LOG . debug ( "Subports event received: %(event_type)s. " "Subports: %(subports)s" , { 'event_type' : event_type , 'subports' : subports } ) if event_type == events . CREATED : for subport in subports : trunk = self . _trunks . get ( subport [ 'trunk_id' ] ) if trunk : trunk . sub_ports . append ( subport ) elif event_type == events . DELETED : for subport in subports : trunk = self . _trunks . get ( subport [ 'trunk_id' ] ) if trunk and subport in trunk . sub_ports : trunk . sub_ports . remove ( subport ) affected_trunk_ids = set ( [ s [ 'trunk_id' ] for s in subports ] ) for trunk_id in affected_trunk_ids : trunk = self . _trunks . get ( trunk_id ) if trunk : self . _setup_trunk ( trunk )
Subport data model change from the server .
50,509
def _setup_trunk ( self , trunk , vlan_id = None ) : LOG . info ( 'Binding trunk port: %s.' , trunk ) try : self . _trunk_rpc . update_subport_bindings ( self . _context , trunk . sub_ports ) vlan_trunk = [ s . segmentation_id for s in trunk . sub_ports ] self . _set_port_vlan ( trunk . port_id , vlan_id , vlan_trunk ) self . _trunk_rpc . update_trunk_status ( self . _context , trunk . id , t_const . ACTIVE_STATUS ) except Exception : LOG . exception ( "Failure setting up subports for %s" , trunk . port_id ) self . _trunk_rpc . update_trunk_status ( self . _context , trunk . id , t_const . DEGRADED_STATUS )
Sets up VLAN trunk and updates the trunk status .
50,510
def main ( ) : register_config_opts ( ) common_config . init ( sys . argv [ 1 : ] ) neutron_config . setup_logging ( ) proxy = MetadataProxy ( ) proxy . run ( )
The entry point for neutron - hnv - metadata - proxy .
50,511
def _get_port_profile_id ( self , request ) : port_profile_id = request . path . split ( "/" ) [ - 1 ] . strip ( ) if uuidutils . is_uuid_like ( port_profile_id ) : LOG . debug ( "The instance id was found in request path." ) return port_profile_id LOG . debug ( "Failed to get the instance id from the request." ) return None
Get the port profile ID from the request path .
50,512
def _work ( self ) : server = wsgi . Server ( name = self . _AGENT_BINARY , num_threads = CONF . AGENT . worker_count ) server . start ( application = _MetadataProxyHandler ( ) , port = CONF . bind_port , host = CONF . bind_host ) server . wait ( )
Start the neutron - hnv - metadata - proxy agent .
50,513
def links_between ( a , b , machine ) : ax , ay = a bx , by = b return set ( link for link , ( dx , dy ) in ( ( l , l . to_vector ( ) ) for l in Links ) if ( ax + dx ) % machine . width == bx and ( ay + dy ) % machine . height == by and ( ax , ay , link ) in machine )
Get the set of working links connecting chips a and b .
50,514
def set_s3_profile ( profile_name ) : import os session = boto3 . Session ( profile_name = profile_name ) os . environ [ 'AWS_ACCESS_KEY_ID' ] = session . get_credentials ( ) . access_key os . environ [ 'AWS_SECRET_ACCESS_KEY' ] = session . get_credentials ( ) . secret_key
Load the credentials for an s3 profile into environmental variables
50,515
def send ( self , * sender , ** kwargs ) : if len ( sender ) == 0 : sender = None elif len ( sender ) > 1 : raise TypeError ( 'send() accepts only one positional argument, ' '%s given' % len ( sender ) ) else : sender = sender [ 0 ] if not self . receivers : return [ ] rv = list ( ) for receiver in self . receivers_for ( sender ) : try : rv . append ( ( receiver , receiver ( sender , ** kwargs ) ) ) except Exception : logger . exception ( 'Error while dispatching signal "{}" ' 'to receiver' . format ( self . name ) ) return rv
Emit this signal on behalf of sender passing on kwargs .
50,516
def update_metatab ( self , doc , resources ) : if not 'Documentation' in doc : doc . new_section ( "Documentation" ) ds = doc [ 'Documentation' ] if not 'Name' in ds . args : ds . add_arg ( 'Name' , prepend = True ) ds . new_term ( 'Root.Documentation' , 'docs/notebook.html' , name = "notebook.html" , title = 'Jupyter Notebook (HTML)' ) for name , data in resources . get ( 'outputs' , { } ) . items ( ) : if name == 'documentation.html' : ds . new_term ( 'Root.Documentation' , 'docs/' + name , name = name , title = 'Primary Documentation (HTML)' ) elif name == 'html_basic_body.html' : pass elif name . endswith ( '.html' ) : ds . new_term ( 'Root.Documentation' , 'docs/' + name , name = name , title = 'Documentation (HTML)' ) elif name . endswith ( '.md' ) : ds . new_term ( 'Root.Documentation' , 'docs/' + name , name = name , title = 'Documentation (Markdown)' ) elif name . endswith ( '.pdf' ) : ds . new_term ( 'Root.Documentation' , 'docs/' + name , name = name , title = 'Documentation (PDF)' ) elif name . endswith ( '.png' ) : ds . new_term ( 'Root.Image' , 'docs/' + name , name = name , title = 'Image for HTML Documentation' ) else : pass
Add documentation entries for resources
50,517
def get_package_dir_name ( self , nb ) : package_dir = self . package_dir if not package_dir : package_dir = getcwd ( ) package_name = self . package_name if not package_name : doc = ExtractInlineMetatabDoc ( package_url = "metapack+file:" + package_dir ) . run ( nb ) if not doc : raise NotebookError ( "Notebook does not have an inline metatab doc" ) t = doc . find_first ( 'Root.Name' , section = 'Root' ) if not t : raise NotebookError ( "Inline Metatab doc doesnt have a Root.Name term" ) package_name = doc . as_version ( None ) return package_dir , package_name
This is the name of the package we will be creating .
50,518
def get_output_dir ( self , nb ) : self . package_dir , self . package_name = self . get_package_dir_name ( nb ) return join ( self . package_dir , self . package_name )
Open a notebook and determine the output directory from the name
50,519
def extract_terms ( self , nb ) : emt = ExtractMetatabTerms ( ) emt . preprocess ( nb , { } ) return emt . terms
Extract some term values usually set with tags or metadata
50,520
def from_notebook_node ( self , nb , resources = None , ** kw ) : nb_copy = copy . deepcopy ( nb ) try : self . output_dir = self . get_output_dir ( nb ) except NotebookError as e : self . log . fatal ( e ) sys . exit ( 1 ) assert self . output_dir resources = self . _init_resources ( resources ) resources [ 'outputs' ] = { } if 'language' in nb [ 'metadata' ] : resources [ 'language' ] = nb [ 'metadata' ] [ 'language' ] . lower ( ) nb_copy , resources = self . _preprocess ( nb_copy , resources ) self . extra_terms = self . extract_terms ( nb_copy ) self . clear_output ( nb_copy ) nb_copy , resources = self . exec_notebook ( nb_copy , resources , self . notebook_dir ) eld = ExtractLibDirs ( ) eld . preprocess ( nb_copy , { } ) self . lib_dirs = eld . lib_dirs efm = ExtractFinalMetatabDoc ( ) efm . preprocess ( nb_copy , { } ) if not efm . doc : raise MetapackError ( "No metatab doc" ) self . doc = efm . doc for section , term , value in self . extra_terms : self . doc [ section ] . get_or_new_term ( term , value ) nb , _ = RemoveMetatab ( ) . preprocess ( nb , { } ) resources [ 'outputs' ] [ 'notebooks/{}.ipynb' . format ( self . package_name ) ] = nbformat . writes ( nb ) . encode ( 'utf-8' ) return efm . doc . as_csv ( ) , resources
Create a Metatab package from a notebook node
50,521
def preprocess_cell ( self , cell , resources , cell_index ) : from nbformat . notebooknode import NotebookNode attach_names = [ ] for k , attach in cell . get ( 'attachments' , { } ) . items ( ) : for mime_type in self . extract_output_types : if mime_type in attach : if not 'outputs' in cell : cell [ 'outputs' ] = [ ] o = NotebookNode ( { 'data' : NotebookNode ( { mime_type : attach [ mime_type ] } ) , 'metadata' : NotebookNode ( { 'filenames' : { mime_type : k } } ) , 'output_type' : 'display_data' } ) cell [ 'outputs' ] . append ( o ) attach_names . append ( ( mime_type , k ) ) nb , resources = super ( ) . preprocess_cell ( cell , resources , cell_index ) output_names = list ( resources . get ( 'outputs' , { } ) . keys ( ) ) if attach_names : for output_name , ( mimetype , an ) in zip ( reversed ( output_names ) , reversed ( attach_names ) ) : cell . source = re . sub ( '\(attachment:{}\)' . format ( an ) , '(__IMGDIR__/{})' . format ( output_name ) , cell . source ) return nb , resources
Also extracts attachments
50,522
def hilbert ( level , angle = 1 , s = None ) : class HilbertState ( object ) : def __init__ ( self , x = 0 , y = 0 , dx = 1 , dy = 0 ) : self . x , self . y , self . dx , self . dy = x , y , dx , dy if s is None : s = HilbertState ( ) yield s . x , s . y if level <= 0 : return s . dx , s . dy = s . dy * - angle , s . dx * angle for s . x , s . y in hilbert ( level - 1 , - angle , s ) : yield s . x , s . y s . x , s . y = s . x + s . dx , s . y + s . dy yield s . x , s . y s . dx , s . dy = s . dy * angle , s . dx * - angle for s . x , s . y in hilbert ( level - 1 , angle , s ) : yield s . x , s . y s . x , s . y = s . x + s . dx , s . y + s . dy yield s . x , s . y for s . x , s . y in hilbert ( level - 1 , angle , s ) : yield s . x , s . y s . dx , s . dy = s . dy * angle , s . dx * - angle s . x , s . y = s . x + s . dx , s . y + s . dy yield s . x , s . y for s . x , s . y in hilbert ( level - 1 , - angle , s ) : yield s . x , s . y s . dx , s . dy = s . dy * - angle , s . dx * angle
Generator of points along a 2D Hilbert curve .
50,523
def hilbert_chip_order ( machine ) : max_dimen = max ( machine . width , machine . height ) hilbert_levels = int ( ceil ( log ( max_dimen , 2.0 ) ) ) if max_dimen >= 1 else 0 return hilbert ( hilbert_levels )
A generator which iterates over a set of chips in a machine in a hilbert path .
50,524
def place ( vertices_resources , nets , machine , constraints , breadth_first = True ) : return sequential_place ( vertices_resources , nets , machine , constraints , ( None if not breadth_first else breadth_first_vertex_order ( vertices_resources , nets ) ) , hilbert_chip_order ( machine ) )
Places vertices in breadth - first order along a hilbert - curve path through the chips in the machine .
50,525
def minimise_tables ( routing_tables , target_lengths , methods = ( remove_default_entries , ordered_covering ) ) : if not isinstance ( target_lengths , dict ) : lengths = collections . defaultdict ( lambda : target_lengths ) else : lengths = target_lengths new_tables = dict ( ) for chip , table in iteritems ( routing_tables ) : try : new_table = minimise_table ( table , lengths [ chip ] , methods ) except MinimisationFailedError as exc : exc . chip = chip raise if new_table : new_tables [ chip ] = new_table return new_tables
Utility function which attempts to minimises routing tables for multiple chips .
50,526
def minimise_table ( table , target_length , methods = ( remove_default_entries , ordered_covering ) ) : methods = list ( methods ) methods . insert ( 0 , _identity ) if target_length is not None : best_achieved = len ( table ) for f in methods : try : new_table = f ( table , target_length ) return new_table except MinimisationFailedError as exc : if best_achieved is None or exc . final_length < best_achieved : best_achieved = exc . final_length raise MinimisationFailedError ( target_length , best_achieved ) else : return min ( ( f ( table , target_length ) for f in methods ) , key = len )
Apply different minimisation algorithms to minimise a single routing table .
50,527
def _identity ( table , target_length ) : if target_length is None or len ( table ) < target_length : return table raise MinimisationFailedError ( target_length , len ( table ) )
Identity minimisation function .
50,528
def configure_ckan ( m ) : from ckanapi import RemoteCKAN try : doc = MetapackDoc ( m . mt_file , cache = m . cache ) except ( IOError , MetatabError ) as e : err ( "Failed to open metatab '{}': {}" . format ( m . mt_file , e ) ) c = RemoteCKAN ( m . ckan_url , apikey = m . api_key ) groups = { g [ 'name' ] : g for g in c . action . group_list ( all_fields = True ) } for g in doc [ 'Groups' ] : if g . value not in groups : prt ( 'Creating group: ' , g . value ) c . action . group_create ( name = g . value , title = g . get_value ( 'title' ) , description = g . get_value ( 'description' ) , id = g . get_value ( 'id' ) , image_url = g . get_value ( 'image_url' ) ) orgs = { o [ 'name' ] : o for o in c . action . organization_list ( all_fields = True ) } for o in doc [ 'Organizations' ] : if o . value not in orgs : prt ( 'Creating organization: ' , o . value ) c . action . organization_create ( name = o . value , title = o . get_value ( 'title' ) , description = o . get_value ( 'description' ) , id = o . get_value ( 'id' ) , image_url = o . get_value ( 'image_url' ) )
Load groups and organizations from a file in Metatab format
50,529
def dump_ckan ( m ) : doc = MetapackDoc ( cache = m . cache ) doc . new_section ( 'Groups' , 'Title Description Id Image_url' . split ( ) ) doc . new_section ( 'Organizations' , 'Title Description Id Image_url' . split ( ) ) c = RemoteCKAN ( m . ckan_url , apikey = m . api_key ) for g in c . action . group_list ( all_fields = True ) : print ( g . keys ( ) ) for o in c . action . organization_list ( all_fields = True ) : print ( g . keys ( ) )
Create a groups and organization file
50,530
def update_mt_arg ( self , metatabfile ) : o = MetapackCliMemo ( self . args ) o . set_mt_arg ( metatabfile ) return o
Return a new memo with a new metatabfile argument
50,531
def declaration_path ( name ) : from os . path import dirname , join , exists import metatab . declarations from metatab . exc import IncludeError d = dirname ( metatab . declarations . __file__ ) path = join ( d , name ) if not exists ( path ) : path = join ( d , name + '.csv' ) if not exists ( path ) : raise IncludeError ( "No local declaration file for name '{}' " . format ( name ) ) return path
Return the path to an included declaration
50,532
def flatten ( d , sep = '.' ) : def _flatten ( e , parent_key = '' , sep = '.' ) : import collections prefix = parent_key + sep if parent_key else '' if isinstance ( e , collections . MutableMapping ) : return tuple ( ( prefix + k2 , v2 ) for k , v in e . items ( ) for k2 , v2 in _flatten ( v , k , sep ) ) elif isinstance ( e , collections . MutableSequence ) : return tuple ( ( prefix + k2 , v2 ) for i , v in enumerate ( e ) for k2 , v2 in _flatten ( v , str ( i ) , sep ) ) else : return ( parent_key , ( e , ) ) , return tuple ( ( k , v [ 0 ] ) for k , v in _flatten ( d , '' , sep ) )
Flatten a data structure into tuples
50,533
def make_dir_structure ( base_dir ) : def maybe_makedir ( * args ) : p = join ( base_dir , * args ) if exists ( p ) and not isdir ( p ) : raise IOError ( "File '{}' exists but is not a directory " . format ( p ) ) if not exists ( p ) : makedirs ( p ) maybe_makedir ( DOWNLOAD_DIR ) maybe_makedir ( PACKAGE_DIR ) maybe_makedir ( OLD_DIR )
Make the build directory structure .
50,534
def guess_format ( url ) : import requests from requests . exceptions import InvalidSchema from rowgenerators import parse_url_to_dict parts = parse_url_to_dict ( url ) if parts . get ( 'path' ) : type , encoding = mimetypes . guess_type ( url ) elif parts [ 'scheme' ] in ( 'http' , 'https' ) : type , encoding = 'text/html' , None else : type , encoding = None , None if type is None : try : r = requests . head ( url , allow_redirects = False ) type = r . headers [ 'Content-Type' ] if ';' in type : type , encoding = [ e . strip ( ) for e in type . split ( ';' ) ] except InvalidSchema : pass return type , mime_map . get ( type )
Try to guess the format of a resource possibly with a HEAD request
50,535
def get_materialized_data_cache ( doc = None ) : from metapack . constants import MATERIALIZED_DATA_PREFIX from os . path import join if not doc : from metapack import Downloader downloader = Downloader ( ) return downloader . cache . getsyspath ( MATERIALIZED_DATA_PREFIX ) else : dr = doc . _cache . getsyspath ( join ( MATERIALIZED_DATA_PREFIX , doc . name ) ) ensure_dir ( dr ) return dr
Return the cache directory where data can be written during a build usually for a Jupyter notebook that generates many files for each execution
50,536
def traverse ( self ) : to_visit = deque ( [ ( None , self ) ] ) while to_visit : direction , node = to_visit . popleft ( ) out_directions = set ( ) for child_direction , child in node . children : if child_direction is not None : out_directions . add ( child_direction ) if isinstance ( child , RoutingTree ) : assert child_direction is not None to_visit . append ( ( child_direction , child ) ) yield direction , node . chip , out_directions
Traverse the tree yielding the direction taken to a node the co - ordinates of that node and the directions leading from the Node .
50,537
def wrap_url ( s , l ) : parts = s . split ( '/' ) if len ( parts ) == 1 : return parts [ 0 ] else : i = 0 lines = [ ] for j in range ( i , len ( parts ) + 1 ) : tv = '/' . join ( parts [ i : j ] ) nv = '/' . join ( parts [ i : j + 1 ] ) if len ( nv ) > l or nv == tv : i = j lines . append ( tv ) return '/\n' . join ( lines )
Wrap a URL string
50,538
def add ( self , uuid ) : if uuid : try : x = hash64 ( uuid ) except UnicodeEncodeError : x = hash64 ( uuid . encode ( 'ascii' , 'ignore' ) ) j = x & ( ( 1 << self . b ) - 1 ) w = x >> self . b self . M [ j ] = max ( self . M [ j ] , self . _get_rho ( w , self . bitcount_arr ) )
Adds a key to the HyperLogLog
50,539
def estimate ( self ) : E = self . alpha * float ( self . m ** 2 ) / np . power ( 2.0 , - self . M ) . sum ( ) if E <= 2.5 * self . m : V = self . m - np . count_nonzero ( self . M ) return int ( self . m * np . log ( self . m / float ( V ) ) ) if V > 0 else int ( E ) elif E <= float ( long ( 1 ) << self . precision ) / 30.0 : return int ( E ) else : return int ( - ( long ( 1 ) << self . precision ) * np . log ( 1.0 - E / ( long ( 1 ) << self . precision ) ) )
Returns the estimate of the cardinality
50,540
def base_url ( self ) : if self . doc . package_url : return self . doc . package_url return self . doc . _ref
Base URL for resolving resource URLs
50,541
def env ( self ) : from copy import copy env = copy ( self . doc . env ) assert env is not None , 'Got a null execution context' env . update ( self . _envvar_env ) env . update ( self . all_props ) return env
The execution context for rowprocessors and row - generating notebooks and functions .
50,542
def _resolved_url ( self ) : if not self . url : return None u = parse_app_url ( self . url ) if u . scheme == 'index' : u = u . resolve ( ) if u . scheme != 'file' : return u elif u . resource_format == 'ipynb' : t = self . doc . package_url . inner . join_dir ( self . url ) t = t . as_type ( type ( u ) ) t . fragment = u . fragment return t elif u . proto == 'metatab' : u = self . expanded_url return u . get_resource ( ) . get_target ( ) elif u . proto == 'metapack' : u = self . expanded_url if u . resource : return u . resource . resolved_url . get_resource ( ) . get_target ( ) else : return u if u . scheme == 'file' : return self . expanded_url elif False : assert isinstance ( self . doc . package_url , MetapackPackageUrl ) , ( type ( self . doc . package_url ) , self . doc . package_url ) try : t = self . doc . package_url . resolve_url ( self . url ) t . scheme_extension = parse_app_url ( self . url ) . scheme_extension try : if not any ( t . fragment ) and any ( u . fragment ) : t . fragment = u . fragment except TypeError : if not t . fragment and u . fragment : t . fragment = u . fragment t = parse_app_url ( str ( t ) ) return t except ResourceError as e : raise else : raise ResourceError ( 'Unknown case for url {} ' . format ( self . url ) )
Return a URL that properly combines the base_url and a possibly relative resource url
50,543
def schema_term ( self ) : if not self . name : raise MetapackError ( "Resource for url '{}' doe not have name" . format ( self . url ) ) t = self . doc . find_first ( 'Root.Table' , value = self . get_value ( 'name' ) ) frm = 'name' if not t : t = self . doc . find_first ( 'Root.Table' , value = self . get_value ( 'schema' ) ) frm = 'schema' if not t : frm = None return t
Return the Table term for this resource which is referenced either by the table property or the schema property
50,544
def headers ( self ) : t = self . schema_term if t : return [ self . _name_for_col_term ( c , i ) for i , c in enumerate ( t . children , 1 ) if c . term_is ( "Table.Column" ) ] else : return None
Return the headers for the resource . Returns the AltName if specified ; if not then the Name and if that is empty a name based on the column position . These headers are specifically applicable to the output table and may not apply to the resource source . For those headers use source_headers
50,545
def source_headers ( self ) : t = self . schema_term if t : return [ self . _name_for_col_term ( c , i ) for i , c in enumerate ( t . children , 1 ) if c . term_is ( "Table.Column" ) and c . get_value ( 'name' ) != EMPTY_SOURCE_HEADER ] else : return None
Returns the headers for the resource source . Specifically does not include any header that is the EMPTY_SOURCE_HEADER value of _NONE_
50,546
def columns ( self ) : try : r = self . expanded_url . resource . columns ( ) return list ( r ) except AttributeError as e : pass return self . schema_columns
Return column information from the schema or from an upstream package
50,547
def schema_columns ( self ) : t = self . schema_term columns = [ ] if t : for i , c in enumerate ( t . children ) : if c . term_is ( "Table.Column" ) : p = c . all_props p [ 'pos' ] = i p [ 'name' ] = c . value p [ 'header' ] = self . _name_for_col_term ( c , i ) columns . append ( p ) return columns
Return column information only from this schema
50,548
def row_processor_table ( self , ignore_none = False ) : from rowgenerators . rowpipe import Table type_map = { None : None , 'string' : 'str' , 'text' : 'str' , 'number' : 'float' , 'integer' : 'int' } def map_type ( v ) : return type_map . get ( v , v ) if self . schema_term : t = Table ( self . get_value ( 'name' ) ) col_n = 0 for c in self . schema_term . children : if ignore_none and c . name == EMPTY_SOURCE_HEADER : continue if c . term_is ( 'Table.Column' ) : t . add_column ( self . _name_for_col_term ( c , col_n ) , datatype = map_type ( c . get_value ( 'datatype' ) ) , valuetype = map_type ( c . get_value ( 'valuetype' ) ) , transform = c . get_value ( 'transform' ) , width = c . get_value ( 'width' ) ) col_n += 1 return t else : return None
Create a row processor from the schema to convert the text values from the CSV into real types
50,549
def raw_row_generator ( self ) : from rowgenerators import get_generator self . doc . set_sys_path ( ) ru = self . resolved_url try : resource = ru . resource return resource . row_generator except AttributeError : pass ut = ru . get_resource ( ) . get_target ( ) source_url = parse_app_url ( self . url ) ut . encoding = self . get_value ( 'encoding' ) or ( source_url . encoding if source_url else None ) g = get_generator ( ut , resource = self , doc = self . _doc , working_dir = self . _doc . doc_dir , env = self . env ) assert g , ut return g
Like rowgenerator but does not try to create a row processor table
50,550
def _get_header ( self ) : try : header_lines = [ int ( e ) for e in str ( self . get_value ( 'headerlines' , 0 ) ) . split ( ',' ) ] except ValueError as e : header_lines = [ 0 ] header_rows = islice ( self . row_generator , min ( header_lines ) , max ( header_lines ) + 1 ) from tableintuit import RowIntuiter headers = RowIntuiter . coalesce_headers ( header_rows ) return headers
Get the header from the defined header rows for use on references or resources where the schema has not been run
50,551
def iterdict ( self ) : from collections import OrderedDict headers = None for row in self : if headers is None : headers = row continue yield OrderedDict ( zip ( headers , row ) )
Iterate over the resource in dict records
50,552
def iterrows ( self ) : row_proxy = None headers = None for row in self : if not headers : headers = row row_proxy = RowProxy ( headers ) continue yield row_proxy . set_row ( row )
Iterate over the resource as row proxy objects which allow accessing columns as attributes
50,553
def iterrowproxy ( self , cls = RowProxy ) : row_proxy = None headers = None for row in self : if not headers : headers = row row_proxy = cls ( headers ) continue yield row_proxy . set_row ( row )
Iterate over the resource as row proxy objects which allow accessing columns as attributes . Like iterrows but allows for setting a specific RowProxy class .
50,554
def iterstruct ( self ) : from rowgenerators . rowpipe . json import add_to_struct json_headers = self . json_headers for row in islice ( self , 1 , None ) : d = { } for pos , jh in json_headers : add_to_struct ( d , jh , row [ pos ] ) yield d
Yield data structures built from the JSON header specifications in a table
50,555
def iterjson ( self , * args , ** kwargs ) : from rowgenerators . rowpipe . json import VTEncoder import json if 'cls' not in kwargs : kwargs [ 'cls' ] = VTEncoder for s in self . iterstruct : yield ( json . dumps ( s , * args , ** kwargs ) )
Yields the data structures from iterstruct as JSON strings
50,556
def iteryaml ( self , * args , ** kwargs ) : from rowgenerators . rowpipe . json import VTEncoder import yaml if 'cls' not in kwargs : kwargs [ 'cls' ] = VTEncoder for s in self . iterstruct : yield ( yaml . safe_dump ( s ) )
Yields the data structures from iterstruct as YAML strings
50,557
def dataframe ( self , dtype = False , parse_dates = True , * args , ** kwargs ) : import pandas as pd rg = self . row_generator t = self . resolved_url . get_resource ( ) . get_target ( ) if t . target_format == 'csv' : return self . read_csv ( dtype , parse_dates , * args , ** kwargs ) try : return rg . dataframe ( * args , ** kwargs ) except AttributeError : pass headers = next ( islice ( self , 0 , 1 ) ) data = islice ( self , 1 , None ) df = pd . DataFrame ( list ( data ) , columns = headers , * args , ** kwargs ) self . errors = df . metatab_errors = rg . errors if hasattr ( rg , 'errors' ) and rg . errors else { } return df
Return a pandas dataframe from the resource
50,558
def geoframe ( self , * args , ** kwargs ) : from geopandas import GeoDataFrame import geopandas as gpd from shapely . geometry . polygon import BaseGeometry from shapely . wkt import loads gdf = None try : gdf = self . resolved_url . geoframe ( * args , ** kwargs ) except AttributeError : pass if gdf is None : try : gdf = self . resolved_url . geo_generator . geoframe ( * args , ** kwargs ) except AttributeError : pass if gdf is None : try : gdf = self . row_generator . geoframe ( * args , ** kwargs ) except AttributeError : pass if gdf is None : try : gdf = GeoDataFrame ( self . dataframe ( * args , ** kwargs ) ) first = next ( gdf . iterrows ( ) ) [ 1 ] [ 'geometry' ] if isinstance ( first , str ) : shapes = [ loads ( row [ 'geometry' ] ) for i , row in gdf . iterrows ( ) ] elif not isinstance ( first , BaseGeometry ) : shapes = [ row [ 'geometry' ] . shape for i , row in gdf . iterrows ( ) ] else : shapes = gdf [ 'geometry' ] gdf [ 'geometry' ] = gpd . GeoSeries ( shapes ) gdf . set_geometry ( 'geometry' ) if gdf . crs is None : gdf . crs = { 'init' : 'epsg:4326' } except KeyError as e : raise ResourceError ( "Failed to create GeoDataFrame for resource '{}': No geometry column" . format ( self . name ) ) except ( KeyError , TypeError ) as e : raise ResourceError ( "Failed to create GeoDataFrame for resource '{}': {}" . format ( self . name , str ( e ) ) ) assert gdf . crs is not None return gdf
Return a Geo dataframe
50,559
def read_csv ( self , dtype = False , parse_dates = True , * args , ** kwargs ) : import pandas t = self . resolved_url . get_resource ( ) . get_target ( ) kwargs = self . _update_pandas_kwargs ( dtype , parse_dates , kwargs ) return pandas . read_csv ( t . fspath , * args , ** kwargs )
Fetch the target and pass through to pandas . read_csv
50,560
def read_fwf ( self , * args , ** kwargs ) : import pandas t = self . resolved_url . get_resource ( ) . get_target ( ) return pandas . read_fwf ( t . fspath , * args , ** kwargs )
Fetch the target and pass through to pandas . read_fwf .
50,561
def petl(self, *args, **kwargs):
    """Return a PETL source object for this resource's target file.

    Supports 'txt' and 'csv' target formats.

    :param args: passed through to ``petl.fromtext`` / ``petl.fromcsv``.
    :param kwargs: passed through to ``petl.fromtext`` / ``petl.fromcsv``.
    :return: a PETL table container.
    :raises Exception: if the target format is not supported.
    """
    import petl

    t = self.resolved_url.get_resource().get_target()

    if t.target_format == 'txt':
        return petl.fromtext(str(t.fspath), *args, **kwargs)
    elif t.target_format == 'csv':
        return petl.fromcsv(str(t.fspath), *args, **kwargs)
    else:
        # Include the offending format so the failure is diagnosable;
        # the previous message was just "Can't handle".
        raise Exception(
            "Can't handle target format '{}'".format(t.target_format))
Return a PETL source object
50,562
def context(self):
    """Build the string-interpolation context from the resource's schema.

    :return: a dict with ``SQL_COLUMNS`` (comma-separated ``sqlselect``
        expressions) and ``ALL_COLUMNS`` (comma-separated column names),
        or an empty dict when the resource has no schema term.
    """
    t = self.schema_term

    if not t:
        return {}

    sql_columns = []
    all_columns = []

    for i, c in enumerate(t.children):
        if c.term_is("Table.Column"):
            p = c.all_props

            # Only columns that define a 'sqlselect' property contribute
            # to SQL_COLUMNS.
            if p.get('sqlselect'):
                sql_columns.append(p.get('sqlselect'))

            all_columns.append(c.name)

    return {'SQL_COLUMNS': ', '.join(sql_columns),
            'ALL_COLUMNS': ', '.join(all_columns)}
Build the interpolation context from the schemas
50,563
def caller_locals():
    """Return the local variables of the caller's caller.

    Walks two frames up from this function: one hop to whichever helper
    called us, one more to that helper's caller.
    """
    import inspect

    frame = inspect.currentframe()

    try:
        # f_back -> our caller; f_back.f_back -> its caller.
        return frame.f_back.f_back.f_locals
    finally:
        # Drop the frame reference promptly to avoid a reference cycle
        # (recommended by the inspect documentation).
        del frame
Get the local variables in the caller's frame.
50,564
def open_package(locals=None, dr=None):
    """Open a Metapack package.

    First looks for a ``metatab_doc`` variable in the caller's locals —
    this is set when a notebook is run as a resource.  Failing that,
    walks up the directory tree from *dr* (default: the current working
    directory) looking for a source package, and prefers an already
    built package under its ``_packages`` directory when one exists.

    :param locals: mapping searched for ``metatab_doc``; defaults to the
        caller's local variables.
    :param dr: directory to start the upward search from.
    :return: an open Metapack document.
    :raises PackageError: if no package can be found, or a source
        package lacks a ``Root.Name`` term.
    """
    if locals is None:
        locals = caller_locals()

    try:
        # Running as a resource: the build injects 'metatab_doc'.
        return op(locals['metatab_doc'])
    except KeyError:
        package_name = None
        build_package_dir = None
        source_package = None

        if dr is None:
            dr = getcwd()

        # Walk up the directory tree looking for a metatab file.
        for i, e in enumerate(walk_up(dr)):
            # Any of the known metatab file names marks a source package.
            intr = set([DEFAULT_METATAB_FILE, LINES_METATAB_FILE,
                        IPYNB_METATAB_FILE]) & set(e[2])

            if intr:
                source_package = join(e[0], list(intr)[0])

                p = op(source_package)
                package_name = p.find_first_value("Root.Name")

                if not package_name:
                    raise PackageError(
                        "Source package in {} does not have root.Name term"
                        .format(e[0]))

                if PACKAGE_PREFIX in e[1]:
                    build_package_dir = join(e[0], PACKAGE_PREFIX)

                break

            # Give up after a few directory levels.
            if i > 2:
                break

        if build_package_dir and package_name and \
                exists(join(build_package_dir, package_name)):
            # Prefer the already-built package.
            built_package = join(build_package_dir, package_name)
            try:
                return op(built_package)
            except RowGeneratorError as e:
                # Built package is unreadable; fall back to the source.
                pass

        if source_package:
            return op(source_package)

        raise PackageError(
            "Failed to find package, either in locals() or above dir '{}' "
            .format(dr))
Try to open a package with the metatab_doc variable which is set when a Notebook is run as a resource . If that does not exist try the local _packages directory
50,565
def rebuild_schema(doc, r, df):
    """Rebuild the schema for a resource from a dataframe.

    Replaces the columns of the resource's ``Table`` schema term with
    one column per index level and dataframe column, carrying over
    non-name properties from any pre-existing columns.

    :param doc: the Metatab document to modify.
    :param r: the resource, or its name.
    :param df: the dataframe the schema is derived from.
    """
    import numpy as np

    # Re-fetch the resource from the given doc; r may be a name.
    try:
        r = doc.resource(r.name)
    except AttributeError:
        r = doc.resource(r)

    def alt_col_name(name, i):
        # Lower-snake-case alternative for a column name; positional
        # fallback when the name is empty.
        import re

        if not name:
            return 'col{}'.format(i)

        return re.sub('_+', '_',
                      re.sub('[^\w_]', '_', str(name)).lower()).rstrip('_')

    # Map numpy dtypes to Metatab datatype names.
    df_types = {
        np.dtype('O'): 'text',
        np.dtype('int64'): 'integer',
        np.dtype('float64'): 'number'
    }

    try:
        df_index_frame = df.index.to_frame()
    except AttributeError:
        df_index_frame = None

    def get_col_dtype(c):
        # Resolve the Metatab datatype for column c: check dataframe
        # columns first, then the index frame, then the index itself.
        c = str(c)

        try:
            return df_types[df[c].dtype]
        except KeyError:
            pass

        try:
            return df_types[df_index_frame[c].dtype]
        except TypeError:
            # No index frame available.
            pass

        if c == 'id' or c == df.index.name:
            return df_types[df.index.dtype]

        return 'unknown'

    columns = []
    schema_term = r.schema_term[0]

    if schema_term:
        # Remember the old columns' properties, then remove them.
        old_cols = {c['name'].value: c.properties
                    for c in schema_term.children}

        for c in schema_term.children:
            schema_term.remove_child(c)

        schema_term.children = []
    else:
        old_cols = {}
        schema_term = doc['Schema'].new_term('Table', r.schema_name)

    # Index levels come first; unnamed levels become "id".
    index_names = [n if n else "id" for n in df.index.names]

    for i, col in enumerate(index_names + list(df.columns)):
        acn = alt_col_name(col, i) if alt_col_name(col, i) != str(col) else ''

        d = {'name': col, 'datatype': get_col_dtype(col), 'altname': acn}

        # Find the matching old column, by name or altname.
        if col in old_cols.keys():
            lookup_name = col
        elif acn in old_cols.keys():
            lookup_name = acn
        else:
            lookup_name = None

        if lookup_name and lookup_name in old_cols:
            # NOTE(review): the outer loop over schema_term.properties
            # repeats the same inner copy once per property and shadows
            # k/v; it looks redundant — confirm before simplifying.
            for k, v in schema_term.properties.items():
                old_col = old_cols.get(lookup_name)

                for k, v in old_col.items():
                    # Carry over every truthy non-name property.
                    if k != 'name' and v:
                        d[k] = v

        columns.append(d)

    # Emit the new Column terms.
    for c in columns:
        name = c['name']
        del c['name']
        datatype = c['datatype']
        del c['datatype']
        altname = c['altname']
        del c['altname']

        schema_term.new_child('Column', name,
                              datatype=datatype, altname=altname, **c)
Rebuild the schema for a resource based on a dataframe
50,566
def rewrite_schema(r, df, doc=None):
    """Rebuild the schema for resource *r* from dataframe *df* and write
    the document back to its source.

    :param r: the resource (or resource name) whose schema is rebuilt.
    :param df: the dataframe to derive the schema from.
    :param doc: the package document; defaults to the open source package.
    """
    from metapack.cli.core import write_doc

    target_doc = open_source_package() if doc is None else doc

    rebuild_schema(target_doc, r, df)
    write_doc(target_doc, target_doc.ref)
Rebuild the schema for a resource based on a dataframe and re - write the doc
50,567
def interactive_rewrite_schema(r, df, doc=None):
    """Rebuild and re-write the schema for *r* from *df*, but only when
    the notebook is running interactively, not during a package build.

    :return: True when the schema was rewritten; False when running as
        part of a build.
    """
    # A build injects 'metatab_doc' into the notebook namespace; when it
    # is present we are not interactive, so do nothing.
    if 'metatab_doc' in caller_locals():
        return False

    target_doc = open_source_package() if doc is None else doc
    rewrite_schema(r, df, target_doc)

    return True
Rebuild the schema for a resource based on a dataframe and re - write the doc but only if running the notebook interactively not while building
50,568
def get_dataframes():
    """Yield ``(name, dataframe)`` pairs for every pandas DataFrame in
    the caller's local scope, skipping names starting with underscore.
    """
    for var_name, value in caller_locals().items():
        if var_name.startswith('_'):
            continue

        if isinstance(value, pd.core.frame.DataFrame):
            yield var_name, value
Yield tuples of dataframe variable name and the dataframe . Skips variables with names that start with an underscore
50,569
def get_notebook_rel_path(pkg=None):
    """Return the current notebook's path relative to the source package.

    :param pkg: the package document; defaults to the open source package.
    """
    package = pkg or open_source_package()
    package_root = str(package.package_url.fspath)

    # Strip the package root prefix and any leading/trailing slashes.
    return get_notebook_path().replace(package_root, '').strip('/')
Get the path of a notebook relative to the current source package.
50,570
def add_dataframe(df, name, pkg=None, description=''):
    """Add a dataframe to a source package as a ``Root.Datafile`` resource.

    Creates (or replaces) a Datafile term referencing the current
    notebook plus the dataframe name, rebuilds the schema from the
    dataframe's columns, and re-writes the package CSV.

    :param df: the dataframe to add.
    :param name: the resource name, normally the dataframe's variable
        name.
    :param pkg: the package document; defaults to the open source package.
    :param description: description for the Datafile and Table terms.
    """
    from warnings import warn
    from metapack.cli.core import alt_col_name, type_map
    import numpy as np

    if name is None or df is None:
        # BUG FIX: this message previously formatted an undefined
        # variable 'ref', raising NameError instead of warning.
        warn("Did not find dataframe for reference '{}' ".format(name))
        return

    pkg = pkg or open_source_package()

    resource_ref = 'file:' + get_notebook_rel_path(pkg) + '#' + name

    t = pkg.find_first('Root.Datafile', value=resource_ref)
    col_props = {}

    if t:
        print("Datafile exists for url '{}', deleting".format(resource_ref))

        if t.schema_term:
            # Remember the old column properties so they survive the
            # rebuild.
            col_props = {c['name']: c for c in t.columns()}
            pkg.remove_term(t.schema_term)

        pkg.remove_term(t)

    t = pkg['Resources'].new_term('Root.Datafile', resource_ref,
                                  name=name, description=description)

    st = pkg['Schema'].new_term('Table', t.schema_name,
                                description=description)

    # Note: the loop variable no longer shadows the 'name' parameter.
    for i, col_name in enumerate(df.columns):
        props = col_props.get(col_name, {})

        try:
            # .item() converts a numpy scalar to its native Python type.
            # (Replaces np.asscalar, which was removed from numpy.)
            native_type = type(df[col_name].dtype.type(0).item()).__name__
        except ValueError:
            native_type = df[col_name].dtype.name
        except AttributeError:
            # Object columns: fall back to the first value's type.
            native_type = type(df[col_name][0]).__name__

        # These properties are regenerated rather than carried over.
        for pn in 'datatype name pos header'.split():
            if pn in props:
                del props[pn]

        if 'altname' in props:
            altname = props['altname']
            del props['altname']
        else:
            raw_alt_name = alt_col_name(col_name, i)
            altname = raw_alt_name if raw_alt_name != col_name else ''

        col = df[col_name]

        if hasattr(col, 'description'):  # custom property
            props['description'] = col.description

        st.new_child('Column', col_name,
                     datatype=type_map.get(native_type, native_type),
                     altname=altname, **props)

    pkg.write_csv()
Add a dataframe to a source package. Pass in either the name of the dataframe or the dataframe itself. If the dataframe is passed in, the name will be the dataframe's variable name. The function will re-write the source package with the new resource.
50,571
def add_int_enums_to_docstring(enum):
    """Decorator for an IntEnum class that appends a numpydoc-style
    ``Attributes`` section listing every member and its integer value,
    so that Sphinx enumerates the enumeration values.

    :param enum: the IntEnum class to document.
    :return: the same class, with its docstring extended.
    """
    existing = enum.__doc__ if enum.__doc__ is not None else ""

    parts = ["\n\n", "Attributes\n", "----------\n"]
    parts.extend("{} = {}\n".format(member.name, int(member))
                 for member in enum)

    enum.__doc__ = existing + "".join(parts)
    return enum
Decorator for IntEnum which re - writes the documentation string so that Sphinx enumerates all the enumeration values .
50,572
def add_signature_to_docstring(f, include_self=False, kw_only_args={}):
    """Decorator which prepends the call signature of *f* to the
    decorated function's docstring.

    Intended for ``*args, **kwargs`` wrappers whose effective signature
    is that of the wrapped function *f*.

    :param f: function whose signature is reproduced.
    :param include_self: when False, a leading 'self' argument (with no
        default) is omitted from the rendered signature.
    :param kw_only_args: mapping of keyword-only argument names to their
        already-formatted default values.  (The shared ``{}`` default is
        safe: it is only ever read, never mutated.)
    """
    def decorate(f_wrapper):
        # BUG FIX: inspect.getargspec was removed in Python 3.11; use
        # getfullargspec instead (also drops the six.iteritems usage).
        spec = inspect.getfullargspec(f)
        args = list(spec.args)
        varargs = spec.varargs
        keywords = spec.varkw
        defaults = list(spec.defaults) if spec.defaults is not None else []

        # Simulated keyword-only args must not clash with real ones.
        assert set(args).isdisjoint(set(kw_only_args))
        assert varargs is None or varargs not in kw_only_args
        assert keywords is None or keywords not in kw_only_args

        if not include_self:
            # Drop a leading 'self' (but never one that has a default).
            if (len(args) >= 1 and
                    args[0] == "self" and
                    len(args) > len(defaults)):
                args.pop(0)

        signature = "{}(".format(f_wrapper.__name__)

        # Positional arguments without defaults...
        for arg in args[:-len(defaults)] if defaults else args:
            signature += "{}, ".format(arg)

        # ...then the ones with defaults.
        for arg, default in zip(args[-len(defaults):], defaults):
            signature += "{}={}, ".format(arg, repr(default))

        if kw_only_args or varargs is not None:
            if varargs is None and kw_only_args:
                # Use '*_' as a stand-in so the extra args render as
                # keyword-only; '_' must therefore be unused elsewhere.
                assert "_" not in args
                assert "_" not in kw_only_args
                assert "_" != keywords
                signature += "*_, "
            else:
                signature += "*{}, ".format(varargs)

        for keyword, default in kw_only_args.items():
            signature += "{}={}, ".format(keyword, default)

        if keywords is not None:
            signature += "**{}, ".format(keywords)

        signature = "{})".format(signature.rstrip(", "))

        if f_wrapper.__doc__ is None:
            f_wrapper.__doc__ = signature
        elif not f_wrapper.__doc__.lstrip().startswith(
                "{}(".format(f_wrapper.__name__)):
            # Don't duplicate a signature the author already wrote.
            f_wrapper.__doc__ = "{}\n{}".format(signature, f_wrapper.__doc__)

        return f_wrapper
    return decorate
Decorator which adds the function signature of f to the decorated function s docstring .
50,573
def _if_not_closed(f):
    """Method decorator: run the wrapped method only while the memory
    view is open and its parent object has not been freed; otherwise
    raise OSError.
    """
    @add_signature_to_docstring(f)
    @functools.wraps(f)
    def guarded(self, *args, **kwargs):
        if self.closed or self._parent._freed:
            raise OSError
        return f(self, *args, **kwargs)

    return guarded
Run the method only if the memory view hasn't been closed and the parent object has not been freed.
50,574
def _if_not_freed(f):
    """Method decorator: run the wrapped method only while the object
    has not been freed; otherwise raise OSError.
    """
    @add_signature_to_docstring(f)
    @functools.wraps(f)
    def guarded(self, *args, **kwargs):
        if self._freed:
            raise OSError
        return f(self, *args, **kwargs)

    return guarded
Run the method only if the object has not been freed.
50,575
def unpack_routing_table_entry(packed):
    """Unpack a routing table entry read from a SpiNNaker machine.

    :param packed: packed entry bytestring in the format described by
        ``consts.RTE_PACK_STRING``.
    :return: None when the entry's route has an all-ones top byte
        (treated as an unused entry), otherwise a tuple
        ``(RoutingTableEntry, app_id, core)``.
    """
    # Unpack the routing table entry.
    _, free, route, key, mask = struct.unpack(consts.RTE_PACK_STRING, packed)

    # An all-ones top byte in the route marks the entry as unused.
    if route & 0xff000000 == 0xff000000:
        return None

    # Convert the route bitmask into a set of Routes enum members.
    routes = {r for r in routing_table.Routes if (route >> r) & 0x1}
    rte = routing_table.RoutingTableEntry(routes, key, mask)

    # The 'free' field also carries the owning app ID and core number.
    app_id = free & 0xff
    core = (free >> 8) & 0x0f

    return (rte, app_id, core)
Unpack a routing table entry read from a SpiNNaker machine .
50,576
def send_scp(self, *args, **kwargs):
    """Transmit an SCP packet and return the response.

    The target chip and core are given by the required keyword
    arguments ``x``, ``y`` and ``p``; everything else is forwarded to
    ``_send_scp``.
    """
    chip_x = kwargs.pop("x")
    chip_y = kwargs.pop("y")
    core = kwargs.pop("p")

    return self._send_scp(chip_x, chip_y, core, *args, **kwargs)
Transmit an SCP Packet and return the response .
50,577
def _get_connection ( self , x , y ) : if ( self . _width is None or self . _height is None or self . _root_chip is None ) : return self . connections [ None ] else : eth_chip = spinn5_local_eth_coord ( x , y , self . _width , self . _height , * self . _root_chip ) conn = self . connections . get ( eth_chip ) if conn is not None : return conn else : return self . connections [ None ]
Get the appropriate connection for a chip .
50,578
def boot(self, width=None, height=None, only_if_needed=True,
         check_booted=True, **boot_kwargs):
    """Boot a SpiNNaker machine.

    :param width: deprecated; no longer needed.
    :param height: deprecated; no longer needed.
    :param only_if_needed: when True, first check whether the machine
        already responds as a SpiNNaker machine and skip the boot if so.
    :param check_booted: when True, block until the machine reports a
        valid P2P address after booting.
    :param boot_kwargs: extra arguments passed to ``boot.boot``.
    :return: True if the machine was sent boot commands, False if it
        was already booted.
    :raises SpiNNakerBootError: if the remote host is not a SpiNNaker
        machine, or the boot could not be verified.
    """
    if width is not None or height is not None:
        warnings.warn("Machine width and height are no longer needed when "
                      "booting a machine.", DeprecationWarning)

    if only_if_needed:
        # Check if the machine is already booted, using a one-attempt
        # controller so an unbooted machine fails fast.
        quick_fail_mc = MachineController(self.initial_host, n_tries=1)
        try:
            info = quick_fail_mc.get_software_version(255, 255, 0)
            if "SpiNNaker" not in info.version_string:
                raise SpiNNakerBootError(
                    "Remote host is not a SpiNNaker machine and so cannot "
                    "be booted. (Are you using a BMP IP/hostname?)")
            # Already booted; nothing to do.
            return False
        except SCPError:
            # The machine is not responding to SCP, so it needs booting.
            pass

    # Actually boot the machine.
    boot_kwargs.setdefault("boot_port", self.boot_port)
    self.structs = boot.boot(self.initial_host, **boot_kwargs)
    assert len(self.structs) > 0

    if check_booted:
        try:
            # Poll until the machine reports a real P2P address;
            # (255, 255) is treated as "not booted yet".
            p2p_address = (255, 255)
            while p2p_address == (255, 255):
                time.sleep(0.1)
                p2p_address = \
                    self.get_software_version(255, 255, 0).position
        except SCPError:
            raise SpiNNakerBootError(
                "The remote machine could not be booted.")

    # The machine was sent boot commands.
    return True
Boot a SpiNNaker machine .
50,579
def discover_connections(self, x=255, y=255):
    """Attempt to discover all available Ethernet connections to a
    machine.

    Reads the P2P routing table from chip (x, y) to determine the
    machine's dimensions and which chips are alive, then tries to open
    an SCP connection to every working Ethernet-connected chip that
    does not already have one.

    :param x: x-coordinate of the chip whose P2P table is consulted.
    :param y: y-coordinate of the chip whose P2P table is consulted.
    :return: the number of new connections successfully opened.
    """
    # Chips with a P2P route are assumed to be working.
    working_chips = set(
        (x, y)
        for (x, y), route in iteritems(self.get_p2p_routing_table(x, y))
        if route != consts.P2PTableEntry.none)

    # Machine dimensions are implied by the furthest working chip.
    self._width = max(x for x, y in working_chips) + 1
    self._height = max(y for x, y in working_chips) + 1

    num_new_connections = 0

    for x, y in spinn5_eth_coords(self._width, self._height,
                                  *self.root_chip):
        if (x, y) in working_chips and (x, y) not in self.connections:
            # Ask the chip for its Ethernet IP address.
            try:
                ip = self.get_ip_address(x, y)
            except SCPError:
                continue

            if ip is not None:
                # Open a connection and verify it actually works.
                self.connections[(x, y)] = \
                    SCPConnection(ip, self.scp_port,
                                  self.n_tries, self.timeout)
                try:
                    self.get_software_version(x, y, 0)
                    num_new_connections += 1
                except SCPError:
                    # Connection doesn't work; close it and move on.
                    self.connections.pop((x, y)).close()

    return num_new_connections
Attempt to discover all available Ethernet connections to a machine .
50,580
def application(self, app_id):
    """Return a new context that uses the given application ID and
    sends the application a "stop" signal when the context is closed.

    :param app_id: the application ID the context operates under.
    """
    context = self(app_id=app_id)
    # Stop the application when the context block exits.
    context.before_close(lambda: self.send_signal("stop"))
    return context
Update the context to use the given application ID and stop the application when done .
50,581
def get_software_version(self, x=255, y=255, processor=0):
    """Get the software version for a given SpiNNaker core.

    :param x: x-coordinate of the chip.
    :param y: y-coordinate of the chip.
    :param processor: processor number on the chip.
    :return: a :class:`CoreInfo` describing the core and its software.
    """
    sver = self._send_scp(x, y, processor, SCPCommands.sver)

    # arg1 packs: P2P address in the top 16 bits, physical CPU number
    # in the next byte, virtual CPU number in the bottom byte.
    p2p = sver.arg1 >> 16
    p2p_address = (p2p >> 8, p2p & 0x00ff)
    pcpu = (sver.arg1 >> 8) & 0xff
    vcpu = sver.arg1 & 0xff

    # Buffer size is in the low half-word of arg2.
    buffer_size = (sver.arg2 & 0xffff)

    software_name, version, version_labels = \
        unpack_sver_response_version(sver)

    return CoreInfo(p2p_address, pcpu, vcpu, version, buffer_size,
                    sver.arg3, software_name, version_labels)
Get the software version for a given SpiNNaker core .
50,582
def get_ip_address(self, x, y):
    """Get the IP address of a particular SpiNNaker chip's Ethernet
    link.

    :return: the IP address, or None when the chip's Ethernet link is
        not up.
    """
    info = self.get_chip_info(x=x, y=y)

    if info.ethernet_up:
        return info.ip_address
    return None
Get the IP address of a particular SpiNNaker chip s Ethernet link .
50,583
def write_across_link(self, address, data, x, y, link):
    """Write a bytestring into memory on the chip reached by sending
    ``link_write`` commands through chip (x, y) down the given link.

    :param address: word-aligned destination address.
    :param data: bytestring to write; must be a whole number of words.
    :param x: x-coordinate of the chip the command is sent through.
    :param y: y-coordinate of the chip the command is sent through.
    :param link: the link to send the write down.
    :raises ValueError: if the address is unaligned or the data is not
        a whole number of words.
    """
    if address % 4:
        raise ValueError("Addresses must be word-aligned.")
    if len(data) % 4:
        raise ValueError("Data must be a whole number of words.")

    length_bytes = len(data)
    cur_byte = 0

    # Write in chunks that fit in one SCP payload, rounded down to a
    # whole number of words.
    while length_bytes > 0:
        to_write = min(length_bytes, (self.scp_data_length & ~0b11))

        cur_data = data[cur_byte:cur_byte + to_write]
        self._send_scp(x, y, 0, SCPCommands.link_write,
                       arg1=address, arg2=to_write, arg3=int(link),
                       data=cur_data, expected_args=0)

        # Move to the next chunk.
        address += to_write
        cur_byte += to_write
        length_bytes -= to_write
Write a bytestring to an address in memory on a neighbouring chip.
50,584
def read_across_link(self, address, length_bytes, x, y, link):
    """Read a bytestring from memory on the chip reached by sending
    ``link_read`` commands through chip (x, y) down the given link.

    :param address: word-aligned source address.
    :param length_bytes: number of bytes to read; must be a multiple of
        the word size.
    :param x: x-coordinate of the chip the command is sent through.
    :param y: y-coordinate of the chip the command is sent through.
    :param link: the link to send the read down.
    :return: the data read, as bytes.
    :raises ValueError: if the address is unaligned or the length is
        not a whole number of words.
    """
    if address % 4:
        raise ValueError("Addresses must be word-aligned.")
    if length_bytes % 4:
        raise ValueError("Lengths must be multiples of words.")

    # Accumulate the read data here, via a sliding memoryview.
    data = bytearray(length_bytes)
    mem = memoryview(data)

    while length_bytes > 0:
        # Read no more than fits one SCP payload, in whole words.
        to_read = min(length_bytes, (self.scp_data_length & ~0b11))
        response = self._send_scp(x, y, 0, SCPCommands.link_read,
                                  arg1=address, arg2=to_read,
                                  arg3=int(link), expected_args=0)

        # Accumulate the incoming data and advance the memoryview
        # through the buffer.
        mem[:to_read] = response.data
        mem = mem[to_read:]

        address += to_read
        length_bytes -= to_read

    return bytes(data)
Read a bytestring from an address in memory on a neighbouring chip.
50,585
def read_struct_field(self, struct_name, field_name, x, y, p=0):
    """Read the value of a field in a struct maintained by SARK.

    :param struct_name: name of the struct (e.g. "sv").
    :param field_name: name of the field to read.
    :return: the field's value; a tuple for array fields.
    """
    field, address, pack_chars = \
        self._get_struct_field_and_address(struct_name, field_name)
    length = struct.calcsize(pack_chars)

    # Perform the read.
    data = self.read(address, length, x, y, p)

    # Unpack the data.
    unpacked = struct.unpack(pack_chars, data)

    # Single-element fields are returned as a scalar.
    if field.length == 1:
        return unpacked[0]
    else:
        return unpacked
Read the value out of a struct maintained by SARK .
50,586
def write_struct_field(self, struct_name, field_name, values, x, y, p=0):
    """Write a value into a field of a struct maintained by SARK.

    :param values: a single value, or — for array fields — a sequence
        whose length matches the field's length.
    """
    field, address, pack_chars = \
        self._get_struct_field_and_address(struct_name, field_name)

    if field.length != 1:
        assert len(values) == field.length
        data = struct.pack(pack_chars, *values)
    else:
        data = struct.pack(pack_chars, values)

    # Perform the write.
    self.write(address, data, x, y, p)
Write a value into a struct .
50,587
def _get_vcpu_field_and_address(self, field_name, x, y, p):
    """Return the field object, absolute address and pack string for a
    field of core p's VCPU struct on chip (x, y).
    """
    vcpu_struct = self.structs[b"vcpu"]
    field = vcpu_struct[six.b(field_name)]
    # The VCPU structs are laid out contiguously from vcpu_base, one
    # per core; add the field's offset within core p's struct.
    address = (self.read_struct_field("sv", "vcpu_base", x, y) +
               vcpu_struct.size * p) + field.offset
    # All VCPU fields are read/written little-endian.
    pack_chars = b"<" + field.pack_chars
    return field, address, pack_chars
Get the field and address for a VCPU struct field .
50,588
def read_vcpu_struct_field(self, field_name, x, y, p):
    """Read a value out of the VCPU struct for a specific core.

    :return: the field's value; byte-string fields are NUL-stripped and
        decoded as UTF-8; array fields are returned as a tuple.
    """
    field, address, pack_chars = \
        self._get_vcpu_field_and_address(field_name, x, y, p)

    # Perform the read.
    length = struct.calcsize(pack_chars)
    data = self.read(address, length, x, y)

    # Unpack the data.
    unpacked = struct.unpack(pack_chars, data)

    if field.length == 1:
        return unpacked[0]
    else:
        # String fields: strip trailing NULs and decode.
        if b"s" in pack_chars:
            return unpacked[0].strip(b"\x00").decode("utf-8")

        return unpacked
Read a value out of the VCPU struct for a specific core .
50,589
def write_vcpu_struct_field(self, field_name, value, x, y, p):
    """Write a value into the VCPU struct for a specific core.

    Strings are UTF-8 encoded before packing; array fields take a
    sequence of values.
    """
    field, address, pack_chars = \
        self._get_vcpu_field_and_address(field_name, x, y, p)

    if b"s" in pack_chars:
        # String fields take a str; encode it.
        data = struct.pack(pack_chars, value.encode('utf-8'))
    elif field.length == 1:
        data = struct.pack(pack_chars, value)
    else:
        # Array field: pack every element.
        data = struct.pack(pack_chars, *value)

    # Perform the write.
    self.write(address, data, x, y)
Write a value to the VCPU struct for a specific core .
50,590
def get_processor_status(self, p, x, y):
    """Get the status of a given core and the application executing on
    it.

    Reads core p's whole VCPU struct and repackages the raw fields as a
    :class:`ProcessorStatus`.
    """
    # Locate core p's VCPU struct.
    address = (self.read_struct_field("sv", "vcpu_base", x, y) +
               self.structs[b"vcpu"].size * p)

    # Read the whole struct in one go.
    data = self.read(address, self.structs[b"vcpu"].size, x, y)

    # Unpack each field from its offset within the raw data.
    state = {
        name.decode('utf-8'): struct.unpack(
            f.pack_chars,
            data[f.offset:f.offset + struct.calcsize(f.pack_chars)])[0]
        for (name, f) in iteritems(self.structs[b"vcpu"].fields)
    }

    # Collapse the numbered register/user fields into lists.
    state["registers"] = [state.pop("r{}".format(i)) for i in range(8)]
    state["user_vars"] = [state.pop("user{}".format(i)) for i in range(4)]

    # Convert raw values to friendlier types.
    state["app_name"] = state["app_name"].strip(b'\x00').decode('utf-8')
    state["cpu_state"] = consts.AppState(state["cpu_state"])
    state["rt_code"] = consts.RuntimeException(state["rt_code"])

    # Split the packed software version into (major, minor, patch).
    sw_ver = state.pop("sw_ver")
    state["version"] = ((sw_ver >> 16) & 0xFF,
                        (sw_ver >> 8) & 0xFF,
                        (sw_ver >> 0) & 0xFF)

    # Rename struct fields to their public ProcessorStatus names.
    for newname, oldname in [("iobuf_address", "iobuf"),
                             ("program_state_register", "psr"),
                             ("stack_pointer", "sp"),
                             ("link_register", "lr"), ]:
        state[newname] = state.pop(oldname)

    # Discard the struct's padding field.
    state.pop("__PAD")

    return ProcessorStatus(**state)
Get the status of a given core and the application executing on it .
50,591
def get_iobuf(self, p, x, y):
    """Return the messages io_printf'd into a core's IOBUF buffer,
    decoded as UTF-8.

    :param p: processor number on the chip.
    :param x: x-coordinate of the chip.
    :param y: y-coordinate of the chip.
    """
    raw = self.get_iobuf_bytes(p, x, y)
    return raw.decode("utf-8")
Read the messages io_printf'd into the IOBUF buffer on a specified core.
50,592
def get_iobuf_bytes(self, p, x, y):
    """Read the raw bytes io_printf'd into the IOBUF buffer on a core.

    The IOBUF data lives in a linked list of fixed-size blocks: each
    block begins with a 16-byte header whose first word points at the
    next block (0 terminates the list) and whose fourth word gives the
    number of valid data bytes in the block.
    """
    # Size of each IOBUF block (excluding its header).
    iobuf_size = self.read_struct_field("sv", "iobuf_size", x, y)

    # Head of the block list, from the core's VCPU struct.
    address = self.read_vcpu_struct_field("iobuf", x, y, p)

    iobuf = b""
    while address:
        # Read the block (header plus payload).
        iobuf_data = self.read(address, iobuf_size + 16, x, y)
        address, time, ms, length = struct.unpack("<4I", iobuf_data[:16])
        # Keep only the valid portion of the payload.
        iobuf += iobuf_data[16:16 + length]

    return iobuf
Read raw bytes io_printf'd into the IOBUF buffer on a specified core.
50,593
def get_router_diagnostics(self, x, y):
    """Get the values of the router diagnostic counters for chip (x, y).

    :return: a :class:`RouterDiagnostics` holding the sixteen 32-bit
        counters.
    """
    # Read the 16 counter words in one go.
    # NOTE(review): 0xe1000300 is assumed to be the router diagnostic
    # register bank — confirm against the SpiNNaker datasheet before
    # changing.
    data = self.read(0xe1000300, 64, x=x, y=y)

    # Convert to 16 ints and return.
    return RouterDiagnostics(*struct.unpack("<16I", data))
Get the values of the router diagnostic counters .
50,594
def iptag_set(self, iptag, addr, port, x, y):
    """Set the value of an IPTag.

    :param iptag: index of the IPTag to set.
    :param addr: hostname or dotted-quad IP address to forward packets
        to (resolved via gethostbyname).
    :param port: UDP port to forward packets to.
    """
    # Resolve the hostname and pack the IP as 4 big-endian bytes, then
    # reinterpret it as a little-endian word for the SCP argument.
    ip_addr = struct.pack(
        '!4B', *map(int, socket.gethostbyname(addr).split('.')))
    self._send_scp(x, y, 0, SCPCommands.iptag,
                   int(consts.IPTagCommands.set) << 16 | iptag,
                   port, struct.unpack('<I', ip_addr)[0])
Set the value of an IPTag .
50,595
def iptag_get(self, iptag, x, y):
    """Get the value of an IPTag.

    :param iptag: index of the IPTag to look up.
    :return: an :class:`IPTag` built from the response payload.
    """
    ack = self._send_scp(x, y, 0, SCPCommands.iptag,
                         int(consts.IPTagCommands.get) << 16 | iptag, 1,
                         expected_args=0)
    return IPTag.from_bytestring(ack.data)
Get the value of an IPTag .
50,596
def iptag_clear(self, iptag, x, y):
    """Clear an IPTag.

    :param iptag: index of the IPTag to clear.
    """
    self._send_scp(x, y, 0, SCPCommands.iptag,
                   int(consts.IPTagCommands.clear) << 16 | iptag)
Clear an IPTag .
50,597
def fill(self, address, data, size, x, y, p):
    """Fill a region of memory with a repeated byte value.

    :param address: start address of the region.
    :param data: the byte value to fill with.
    :param size: number of bytes to fill.
    """
    if size % 4 or address % 4:
        # The SCP fill command is only used for word-aligned regions of
        # whole-word size; otherwise expand the byte and do a plain
        # write.
        data = struct.pack('<B', data) * size
        self.write(address, data, x, y, p)
    else:
        self._send_scp(x, y, p, SCPCommands.fill, address, data, size)
Fill a region of memory with the specified byte .
50,598
def sdram_alloc(self, size, tag=0, x=Required, y=Required,
                app_id=Required, clear=False):
    """Allocate a region of SDRAM for an application.

    :param size: number of bytes to allocate.
    :param tag: optional tag (0-255); 0 means untagged.
    :param clear: when True, zero the allocated region before returning.
    :return: the address of the allocated block.
    :raises SpiNNakerMemoryError: if the allocation fails; the error
        records whether the requested tag was already in use.
    """
    assert 0 <= tag < 256

    # arg1 packs the app ID in the high byte, the operation in the low.
    arg1 = app_id << 8 | consts.AllocOperations.alloc_sdram

    # Send the request; arg1 of the reply is the allocated address.
    rv = self._send_scp(x, y, 0, SCPCommands.alloc_free, arg1, size, tag)
    if rv.arg1 == 0:
        # Allocation failed.
        tag_in_use = False
        if tag != 0:
            # A tag was requested: check the allocation table to see
            # whether the failure was a tag collision rather than a
            # lack of memory.
            alloc_tags = self.read_struct_field("sv", "alloc_tag", x, y)
            index = (app_id << 8) + tag
            # NOTE(review): 'index' is used as a byte offset; if
            # alloc_tag is an array of 32-bit words this may need
            # index * 4 — confirm against SARK.
            entry = self.read(alloc_tags + index, 4, x, y)
            # BUG FIX: read() returns bytes, so the previous comparison
            # ``entry != 0`` was always True; unpack the word first.
            tag_in_use = struct.unpack("<I", entry)[0] != 0

        raise SpiNNakerMemoryError(size, x, y, tag, tag_in_use)

    # Get the address.
    address = rv.arg1

    if clear:
        # Zero the memory before handing it to the caller.
        self.fill(address, 0, size, x, y, 0)

    return address
Allocate a region of SDRAM for an application .
50,599
def sdram_free(self, ptr, x=Required, y=Required):
    """Free an allocated block of SDRAM.

    :param ptr: address of the block to free, as returned by
        :meth:`sdram_alloc`.
    """
    self._send_scp(x, y, 0, SCPCommands.alloc_free,
                   consts.AllocOperations.free_sdram_by_ptr, ptr)
Free an allocated block of memory in SDRAM .