idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
59,400
def dist_sq(self, other=None):
    """Return the squared length of this vector, or of (self - other).

    Useful for fast length comparison, since it avoids the sqrt.
    """
    vec = self - other if other else self
    return sum(component * component for component in vec)
For fast length comparison
59,401
def yaw_pitch(self):
    """Calculate the yaw and pitch (in degrees) pointing along this vector.

    Returns a YawPitch; the zero vector maps to YawPitch(0, 0).
    """
    if not self:
        return YawPitch(0, 0)
    # Horizontal distance in the XZ plane.
    ground_distance = math.sqrt(self.x ** 2 + self.z ** 2)
    if ground_distance:
        # NOTE(review): angle conventions appear to follow Minecraft-style
        # yaw/pitch — confirm against the YawPitch definition.
        alpha1 = -math.asin(self.x / ground_distance) / math.pi * 180
        alpha2 = math.acos(self.z / ground_distance) / math.pi * 180
        if alpha2 > 90:
            yaw = 180 - alpha1
        else:
            yaw = alpha1
        pitch = math.atan2(-self.y, ground_distance) / math.pi * 180
    else:
        # Purely vertical vector: yaw is arbitrary, use 0; pitch depends
        # on the sign of y.
        yaw = 0
        y = round(self.y)
        if y > 0:
            pitch = -90
        elif y < 0:
            pitch = 90
        else:
            pitch = 0
    return YawPitch(yaw, pitch)
Calculate the yaw and pitch of this vector
59,402
def make_slot_check(wanted):
    """Create and return a predicate that takes a slot and checks whether
    it matches ``wanted``.

    ``wanted`` may be: an existing check function (returned unchanged),
    an item id int, a Slot, an Item/Block, an item name string, or an
    (item_id, metadata) pair.
    """
    if isinstance(wanted, types.FunctionType):
        return wanted  # already a check function
    if isinstance(wanted, int):
        item, meta = wanted, None
    elif isinstance(wanted, Slot):
        item, meta = wanted.item_id, wanted.damage
    elif isinstance(wanted, (Item, Block)):
        item, meta = wanted.id, wanted.metadata
    elif isinstance(wanted, str):
        item_or_block = get_item_or_block(wanted, init=True)
        item, meta = item_or_block.id, item_or_block.metadata
    else:
        try:
            item, meta = wanted
        except TypeError:
            raise ValueError('Illegal args for make_slot_check(): %s' % wanted)
    # A meta of None acts as a wildcard for the slot's damage value.
    return lambda slot: item == slot.item_id and meta in (None, slot.damage)
Creates and returns a function that takes a slot and checks if it matches the wanted item .
59,403
def _make_window(window_dict):
    """Create a new Window subclass for the given window description and
    register it as an attribute of this module.

    Slot ranges become ``*_slot``/``*_slots`` properties and window
    properties become read-only properties named after them.
    """
    cls_name = '%sWindow' % camel_case(str(window_dict['name']))
    bases = (Window,)
    attrs = {
        '__module__': sys.modules[__name__],
        'name': str(window_dict['name']),
        'inv_type': str(window_dict['id']),
        'inv_data': window_dict,
    }

    # Factory binds index/size per slot entry (avoids the late-binding
    # closure pitfall).
    def make_slot_method(index, size=1):
        if size == 1:
            return lambda self: self.slots[index]
        else:
            return lambda self: self.slots[index:(index + size)]

    for slots in window_dict.get('slots', []):
        index = slots['index']
        size = slots.get('size', 1)
        attr_name = snake_case(str(slots['name']))
        attr_name += '_slot' if size == 1 else '_slots'
        slots_method = make_slot_method(index, size)
        slots_method.__name__ = attr_name
        attrs[attr_name] = property(slots_method)
    for i, prop_name in enumerate(window_dict.get('properties', [])):
        # Same late-binding protection for the property index.
        def make_prop_method(i):
            return lambda self: self.properties[i]
        prop_method = make_prop_method(i)
        prop_name = snake_case(str(prop_name))
        prop_method.__name__ = prop_name
        attrs[prop_name] = property(prop_method)
    cls = type(cls_name, bases, attrs)
    assert not hasattr(sys.modules[__name__], cls_name), \
        'Window "%s" already registered at %s' % (cls_name, __name__)
    setattr(sys.modules[__name__], cls_name, cls)
    return cls
Creates a new class for that window and registers it at this module .
59,404
def get_dict(self):
    """Format the slot for network packing.

    Empty slots serialize to just their id; populated slots also carry
    damage, amount and, when present, NBT data.
    """
    packed = {'id': self.item_id}
    if self.item_id == constants.INV_ITEMID_EMPTY:
        return packed
    packed['damage'] = self.damage
    packed['amount'] = self.amount
    if self.nbt is not None:
        packed['enchants'] = self.nbt
    return packed
Formats the slot for network packing .
59,405
def on_success(self, inv_plugin, emit_set_slot):
    """Called when the click was successful; apply it to the inventory
    and emit a set_slot event for every slot the click changed.
    """
    self.dirty = set()
    self.apply(inv_plugin)
    for slot in self.dirty:
        emit_set_slot(slot)
Called when the click was successful and should be applied to the inventory .
59,406
def authenticate(self):
    """Generate an access token from username/password.

    On success the token and profile data are stored on self and True is
    returned; any error response yields False.
    """
    payload = {
        'agent': {
            'name': 'Minecraft',
            'version': self.ygg_version,
        },
        'username': self.username,
        'password': self.password,
        'clientToken': self.client_token,
    }
    rep = self._ygg_req('/authenticate', payload)
    if not rep or 'error' in rep:
        return False
    self.access_token = rep['accessToken']
    self.client_token = rep['clientToken']
    self.available_profiles = rep['availableProfiles']
    self.selected_profile = rep['selectedProfile']
    return True
Generate an access token using a username and password. Any existing client token is invalidated if a client token is not provided.
59,407
def validate(self):
    """Check whether the stored access token is still valid.

    The /validate endpoint returns an empty reply for a valid token, so
    an empty response means success.
    """
    rep = self._ygg_req('/validate', dict(accessToken=self.access_token))
    return not bool(rep)
Check if an access token is valid
59,408
def total_stored(self, wanted, slots=None):
    """Calculate the total number of matching items in the given slots
    (default: all slots of the current window).
    """
    search = self.window.slots if slots is None else slots
    check = make_slot_check(wanted)
    return sum(slot.amount for slot in search if check(slot))
Calculates the total number of items of that type in the current window or given slot range .
59,409
def find_slot(self, wanted, slots=None):
    """Return the first slot containing the wanted item, or None.

    Search order is delegated to find_slots().
    """
    return next(self.find_slots(wanted, slots), None)
Searches the given slots, or, if not given: the active hotbar slot, the hotbar, the inventory, and the open window — in this order.
59,410
def find_slots(self, wanted, slots=None):
    """Yield all slots containing the wanted item.

    Searches the given slots, or if not given: active hotbar slot,
    hotbar, inventory, open window — in this order.
    """
    if slots is None:
        slots = self.inv_slots_preferred + self.window.window_slots
    check = make_slot_check(wanted)
    for slot in slots:
        if check(slot):
            yield slot
Yields all slots containing the item. Searches the given slots, or, if not given: the active hotbar slot, the hotbar, the inventory, and the open window — in this order.
59,411
def click_slot(self, slot, right=False):
    """Left-click or right-click the slot (given as Slot or slot index)."""
    if isinstance(slot, int):
        slot = self.window.slots[slot]
    if right:
        button = constants.INV_BUTTON_RIGHT
    else:
        button = constants.INV_BUTTON_LEFT
    return self.send_click(windows.SingleClick(slot, button))
Left - click or right - click the slot .
59,412
def drop_slot(self, slot=None, drop_stack=False):
    """Drop one item (or the whole stack) from the slot.

    With no slot given, drops from the cursor slot if it holds
    something, otherwise from the active hotbar slot.
    """
    if slot is None:
        slot = (self.active_slot if self.cursor_slot.is_empty
                else self.cursor_slot)
    elif isinstance(slot, int):
        slot = self.window.slots[slot]
    if slot == self.cursor_slot:
        # The cursor slot is dropped via a click rather than a DropClick.
        return self.click_slot(self.cursor_slot, not drop_stack)
    return self.send_click(windows.DropClick(slot, drop_stack))
Drop one or all items of the slot .
59,413
def inv_slots_preferred(self):
    """List of all inventory slots in preferred search order: the active
    hotbar slot, then the rest of the hotbar, then the main inventory.

    Does not include the open window's additional slots.
    """
    hotbar_rest = [s for s in self.window.hotbar_slots
                   if s != self.active_slot]
    return [self.active_slot] + hotbar_rest + \
        list(self.window.inventory_slots)
List of all available inventory slots in the preferred search order . Does not include the additional slots from the open window .
59,414
def get_block_entity_data(self, pos_or_x, y=None, z=None):
    """Access block entity data at the given position.

    Accepts either one position iterable or separate x, y, z values;
    coordinates are floored to block coordinates. Returns None when no
    block entity is known there.
    """
    if None not in (y, z):
        pos_or_x = pos_or_x, y, z
    key = tuple(int(floor(coord)) for coord in pos_or_x)
    return self.block_entities.get(key, None)
Access block entity data .
59,415
def set_block_entity_data(self, pos_or_x, y=None, z=None, data=None):
    """Update block entity data at the given position.

    Returns the previous data stored for that block position (or None).
    """
    if None not in (y, z):
        pos_or_x = pos_or_x, y, z
    key = tuple(int(floor(coord)) for coord in pos_or_x)
    previous = self.block_entities.get(key, None)
    self.block_entities[key] = data
    return previous
Update block entity data .
59,416
def parse_vlq(self, segment):
    """Parse a string of VLQ-encoded data into a list of integers.

    Raises SourceMapDecodeError if the segment ends mid-value.
    """
    values = []
    cur, shift = 0, 0
    for char in segment:
        digit = B64[ord(char)]
        cont = digit >> 5
        cur += (digit & 0b11111) << shift
        shift += 5
        if not cont:
            # The low bit of a finished value is its sign flag.
            sign = cur & 1
            cur >>= 1
            if sign:
                cur = -cur
            values.append(cur)
            cur, shift = 0, 0
    if cur or shift:
        raise SourceMapDecodeError('leftover cur/shift in vlq decode')
    return values
Parse a string of VLQ - encoded data .
59,417
def decode(self, source):
    """Decode a source map object into a SourceMapIndex."""
    # Strip the XSSI-protection prefix some servers prepend.
    if source[:4] == ")]}'" or source[:3] == ")]}":
        source = source.split('\n', 1)[1]
    smap = json.loads(source)
    sources = smap['sources']
    sourceRoot = smap.get('sourceRoot')
    names = list(map(text_type, smap['names']))
    mappings = smap['mappings']
    lines = mappings.split(';')
    if sourceRoot is not None:
        sources = list(map(partial(os.path.join, sourceRoot), sources))
    tokens = []
    line_index = []
    index = {}
    # All VLQ fields are deltas relative to the previous segment.
    dst_col, src_id, src_line, src_col, name_id = 0, 0, 0, 0, 0
    for dst_line, line in enumerate(lines):
        line_index.append([])
        segments = line.split(',')
        dst_col = 0  # column delta resets at each generated line
        for segment in segments:
            if not segment:
                continue
            parse = self.parse_vlq(segment)
            dst_col += parse[0]
            src = None
            name = None
            if len(parse) > 1:
                try:
                    src_id += parse[1]
                    if not 0 <= src_id < len(sources):
                        raise SourceMapDecodeError(
                            "Segment %s references source %d; there are "
                            "%d sources" % (segment, src_id, len(sources)))
                    src = sources[src_id]
                    src_line += parse[2]
                    src_col += parse[3]
                    if len(parse) > 4:
                        name_id += parse[4]
                        if not 0 <= name_id < len(names):
                            raise SourceMapDecodeError(
                                "Segment %s references name %d; there are "
                                "%d names" % (segment, name_id, len(names)))
                        name = names[name_id]
                except IndexError:
                    raise SourceMapDecodeError(
                        "Invalid segment %s, parsed as %r" % (segment, parse))
            try:
                assert dst_line >= 0, ('dst_line', dst_line)
                assert dst_col >= 0, ('dst_col', dst_col)
                assert src_line >= 0, ('src_line', src_line)
                assert src_col >= 0, ('src_col', src_col)
            except AssertionError as e:
                # NOTE(review): e.message is Python 2 only — on Python 3
                # this handler would itself raise AttributeError.
                raise SourceMapDecodeError(
                    "Segment %s has negative %s (%d), in file %s" %
                    (segment, e.message[0], e.message[1], src))
            token = Token(dst_line, dst_col, src, src_line, src_col, name)
            tokens.append(token)
            # Fast lookups: by (line, col) pair and by generated line.
            index[(dst_line, dst_col)] = token
            line_index[dst_line].append(dst_col)
    return SourceMapIndex(smap, tokens, line_index, index, sources)
Decode a source map object into a SourceMapIndex .
59,418
def discover(source):
    """Given a JavaScript file, find the sourceMappingURL line."""
    lines = source.splitlines()
    # The pragma lives at the very top or bottom of the file.
    candidates = lines if len(lines) <= 10 else lines[:5] + lines[-5:]
    for line in set(candidates):
        if line[:21] in ('//# sourceMappingURL=', '//@ sourceMappingURL='):
            return line[21:].rstrip()
    return None
Given a JavaScript file find the sourceMappingURL line
59,419
def clean():
    """Remove build/dist/egg-info garbage and the generated doc trees."""
    d = ['build', 'dist', 'scikits.audiolab.egg-info', HTML_DESTDIR,
         PDF_DESTDIR]
    for i in d:
        paver.path.path(i).rmtree()
    # Also wipe the sphinx build directory under docs/.
    (paver.path.path('docs') / options.sphinx.builddir).rmtree()
Remove build dist egg - info garbage .
59,420
def add_attendees(self, attendees, required=True):
    """Add new attendees to the event and mark the attendee list dirty."""
    incoming = self._build_resource_dictionary(attendees, required=required)
    for email, attendee in incoming.items():
        self._attendees[email] = attendee
    self._dirty_attributes.add(u'attendees')
Adds new attendees to the event .
59,421
def remove_attendees(self, attendees):
    """Remove the given attendees from the event, ignoring unknown ones."""
    doomed = self._build_resource_dictionary(attendees)
    for email in doomed.keys():
        self._attendees.pop(email, None)
    self._dirty_attributes.add(u'attendees')
Removes attendees from the event .
59,422
def add_resources(self, resources):
    """Add new resources to the event and mark the resource list dirty."""
    incoming = self._build_resource_dictionary(resources)
    self._resources.update(incoming)
    self._dirty_attributes.add(u'resources')
Adds new resources to the event .
59,423
def remove_resources(self, resources):
    """Remove the given resources from the event, ignoring unknown ones."""
    doomed = self._build_resource_dictionary(resources)
    for email in doomed.keys():
        self._resources.pop(email, None)
    self._dirty_attributes.add(u'resources')
Removes resources from the event .
59,424
def validate(self):
    """Validate that all required fields are present and well typed.

    Raises ValueError for missing or inconsistent dates and TypeError
    for badly typed optional fields.
    """
    for value, message in ((self.start, "Event has no start date"),
                           (self.end, "Event has no end date")):
        if not value:
            raise ValueError(message)
    if self.end < self.start:
        raise ValueError("Start date is after end date")
    reminder = self.reminder_minutes_before_start
    if reminder and not isinstance(reminder, int):
        raise TypeError("reminder_minutes_before_start must be of type int")
    if self.is_all_day and not isinstance(self.is_all_day, bool):
        raise TypeError("is_all_day must be of type bool")
Validates that all required fields are present
59,425
def info_factory(name, libnames, headers, frameworks=None,
                 section=None, classname=None):
    """Create a system_info subclass for numpy.distutils configuration.

    Parameters
    ----------
    name : str
        Dependency name; used for the default section and class name.
    libnames : list of str
        Library names to search for in the library directories.
    headers : list of str
        Header files to locate in the include directories.
    frameworks : list of str, optional
        macOS framework names used as a fallback when no libraries are
        requested.
    section, classname : str, optional
        Config section and class name; default to ``name`` and
        ``'<name>_info'``.
    """
    if not classname:
        classname = '%s_info' % name
    if not section:
        section = name
    if not frameworks:
        # BUG FIX: was 'framesworks = []' — the typo left the intended
        # default unassigned and `frameworks` possibly None.
        frameworks = []

    class _ret(system_info):
        def __init__(self):
            system_info.__init__(self)

        def library_extensions(self):
            return system_info.library_extensions(self)

        def calc_info(self):
            if libnames:
                # Prefer configured libraries, fall back to the defaults.
                libs = self.get_libs('libraries', '')
                if not libs:
                    libs = libnames
                lib_dirs = self.get_lib_dirs()
                tmp = None
                for d in lib_dirs:
                    tmp = self.check_libs(d, libs)
                    if tmp is not None:
                        info = tmp
                        break
                if tmp is None:
                    return
                include_dirs = self.get_include_dirs()
                inc_dir = None
                for d in include_dirs:
                    p = self.combine_paths(d, headers)
                    if p:
                        inc_dir = os.path.dirname(p[0])
                        dict_append(info, include_dirs=[d])
                        break
                if inc_dir is None:
                    log.info(' %s not found' % name)
                    return
                self.set_info(**info)
            else:
                # No libraries requested: try macOS frameworks instead.
                if frameworks:
                    fargs = []
                    for f in frameworks:
                        p = "/System/Library/Frameworks/%s.framework" % f
                        if os.path.exists(p):
                            fargs.append("-framework")
                            fargs.append(f)
                    if fargs:
                        self.set_info(extra_link_args=fargs)
                return

    _ret.__name__ = classname
    _ret.section = section
    return _ret
Create a system_info class .
59,426
def load_all_details(self):
    """Execute the event lookups for all known events, replacing the
    shallow event objects with fully populated ones.

    Returns self so calls can be chained.
    """
    log.debug(u"Loading all details")
    # NOTE(review): block extent reconstructed — the reload appears to
    # happen only when there are events to reload; confirm upstream.
    if self.count > 0:
        # Throw away the shallow event objects before reloading.
        del(self.events[:])
        log.debug(u"Requesting all event details for events: {event_list}".format(event_list=str(self.event_ids)))
        body = soap_request.get_item(exchange_id=self.event_ids,
                                     format=u'AllProperties')
        response_xml = self.service.send(body)
        self._parse_response_for_all_events(response_xml)
    return self
This function will execute all the event lookups for known events .
59,427
def seek(self, offset, whence=0, mode='rw'):
    """Seek within the audio data (like file.seek, but frame-based).

    Wraps the underlying sndfile seek and re-raises failures as
    PyaudioIOError.
    """
    try:
        st = self._sndfile.seek(offset, whence, mode)
    except IOError as e:
        # BUG FIX: was 'except IOError, e' — Python 2-only syntax that
        # is a SyntaxError on Python 3.
        raise PyaudioIOError(str(e))
    return st
similar to python seek function taking only in account audio data .
59,428
def read_frames(self, nframes, dtype=np.float64):
    """Read ``nframes`` frames of audio data from the file.

    ``dtype`` selects the sample type of the returned data.
    """
    frames = self._sndfile.read_frames(nframes, dtype)
    return frames
Read nframes frames of the file .
59,429
def write_frames(self, input, nframes=-1):
    """Write audio data to the file.

    ``input`` must be rank 1 (mono) or rank 2 (multi-channel); with
    ``nframes`` == -1 the whole array is written.
    """
    if nframes == -1:
        if input.ndim not in (1, 2):
            raise ValueError("Input has to be rank 1 (mono) or rank 2 "
                             "(multi-channels)")
        nframes = input.size if input.ndim == 1 else input.shape[0]
    return self._sndfile.write_frames(input[:nframes, ...])
write data to file .
59,430
def delete_field(field_uri):
    """Build a DeleteItemField request node for the given field URI.

    Needed when a field's value must be overwritten rather than
    appended to.
    """
    return T.DeleteItemField(T.FieldURI(FieldURI=field_uri))
Helper function to request deletion of a field . This is necessary when you want to overwrite values instead of appending .
59,431
def get_occurrence(exchange_id, instance_index, format=u"Default"):
    """Request calendar item occurrences matching the recurring master id
    and the given instance indexes.
    """
    root = M.GetItem(M.ItemShape(T.BaseShape(format)), M.ItemIds())
    items_node = root.xpath("//m:ItemIds", namespaces=NAMESPACES)[0]
    for idx in instance_index:
        occurrence = T.OccurrenceItemId(RecurringMasterId=exchange_id,
                                        InstanceIndex=str(idx))
        items_node.append(occurrence)
    return root
Requests one or more calendar items from the store matching the master & index .
59,432
def new_event(event):
    """Request that a new calendar event be created in the Exchange store."""
    # Distinguished folders (e.g. the default calendar) need a different
    # id node type than plain folder ids.
    id = T.DistinguishedFolderId(Id=event.calendar_id) \
        if event.calendar_id in DISTINGUISHED_IDS \
        else T.FolderId(Id=event.calendar_id)
    start = convert_datetime_to_utc(event.start)
    end = convert_datetime_to_utc(event.end)
    root = M.CreateItem(
        M.SavedItemFolderId(id),
        M.Items(
            T.CalendarItem(
                T.Subject(event.subject),
                T.Body(event.body or u'', BodyType="HTML"),
            )
        ),
        SendMeetingInvitations="SendToAllAndSaveCopy"
    )
    calendar_node = root.xpath(u'/m:CreateItem/m:Items/t:CalendarItem',
                               namespaces=NAMESPACES)[0]
    if event.reminder_minutes_before_start:
        calendar_node.append(T.ReminderIsSet('true'))
        calendar_node.append(T.ReminderMinutesBeforeStart(
            str(event.reminder_minutes_before_start)))
    else:
        calendar_node.append(T.ReminderIsSet('false'))
    calendar_node.append(T.Start(start.strftime(EXCHANGE_DATETIME_FORMAT)))
    calendar_node.append(T.End(end.strftime(EXCHANGE_DATETIME_FORMAT)))
    if event.is_all_day:
        calendar_node.append(T.IsAllDayEvent('true'))
    calendar_node.append(T.Location(event.location or u''))
    # Attendee/resource groups are only emitted when non-empty.
    if event.required_attendees:
        calendar_node.append(resource_node(
            element=T.RequiredAttendees(),
            resources=event.required_attendees))
    if event.optional_attendees:
        calendar_node.append(resource_node(
            element=T.OptionalAttendees(),
            resources=event.optional_attendees))
    if event.resources:
        calendar_node.append(resource_node(
            element=T.Resources(),
            resources=event.resources))
    if event.recurrence:
        # Build the recurrence pattern node for the requested frequency.
        if event.recurrence == u'daily':
            recurrence = T.DailyRecurrence(
                T.Interval(str(event.recurrence_interval)),
            )
        elif event.recurrence == u'weekly':
            recurrence = T.WeeklyRecurrence(
                T.Interval(str(event.recurrence_interval)),
                T.DaysOfWeek(event.recurrence_days),
            )
        elif event.recurrence == u'monthly':
            recurrence = T.AbsoluteMonthlyRecurrence(
                T.Interval(str(event.recurrence_interval)),
                T.DayOfMonth(str(event.start.day)),
            )
        elif event.recurrence == u'yearly':
            recurrence = T.AbsoluteYearlyRecurrence(
                T.DayOfMonth(str(event.start.day)),
                T.Month(event.start.strftime("%B")),
            )
        calendar_node.append(
            T.Recurrence(
                recurrence,
                T.EndDateRecurrence(
                    T.StartDate(event.start.strftime(EXCHANGE_DATE_FORMAT)),
                    T.EndDate(event.recurrence_end_date.strftime(EXCHANGE_DATE_FORMAT)),
                )
            )
        )
    return root
Requests a new event be created in the store .
59,433
def update_property_node(node_to_insert, field_uri):
    """Generate a SetItemField node telling Exchange to overwrite the
    contents of the given field with the supplied node.
    """
    return T.SetItemField(T.FieldURI(FieldURI=field_uri),
                          T.CalendarItem(node_to_insert))
Helper function - generates a SetItemField which tells Exchange you want to overwrite the contents of a field .
59,434
def _validate_example(rh, method, example_type):
    """Validate a method's input/output example against its schema.

    Returns the example pretty-printed as JSON, or None when the method
    has no example of that type.
    """
    example = getattr(method, example_type + "_example")
    if example is None:
        return None
    schema = getattr(method, example_type + "_schema")
    try:
        validate(example, schema)
    except ValidationError as e:
        raise ValidationError(
            "{}_example for {}.{} could not be validated.\n{}".format(
                example_type, rh.__name__, method.__name__, str(e)))
    return json.dumps(example, indent=4, sort_keys=True)
Validates example against schema
59,435
def _get_rh_methods(rh):
    """Yield (name, method) pairs for HTTP methods on ``rh`` decorated
    with schema.validate (i.e. carrying an input_schema attribute).
    """
    for name, member in vars(rh).items():
        if name not in HTTP_METHODS:
            continue
        if is_method(member) and hasattr(member, "input_schema"):
            yield (name, member)
Yield all HTTP methods in rh that are decorated with schema . validate
59,436
def _escape_markdown_literals ( string ) : literals = list ( "\\`*_{}[]()<>#+-.!:|" ) escape = lambda c : '\\' + c if c in literals else c return "" . join ( map ( escape , string ) )
Escape any markdown literals in string by prepending with \\
59,437
def _cleandoc ( doc ) : indent_length = lambda s : len ( s ) - len ( s . lstrip ( " " ) ) not_empty = lambda s : s != "" lines = doc . split ( "\n" ) indent = min ( map ( indent_length , filter ( not_empty , lines ) ) ) return "\n" . join ( s [ indent : ] for s in lines )
Remove uniform indents from doc lines that are not empty
59,438
def get_api_docs(routes):
    """Generate GitHub-Markdown API documentation from the schemas and
    docstrings of each APIHandler's RequestHandler methods.
    """
    docs = []
    for url, rh, methods in sorted(map(_get_tuple_from_route, routes),
                                   key=lambda route: route[0]):
        if issubclass(rh, APIHandler):
            docs.append(_get_route_doc(url, rh, methods))
    documentation = (
        "**This documentation is automatically generated.**\n\n" +
        "**Output schemas only represent `data` and not the full output; " +
        "see output examples and the JSend specification.**\n" +
        "\n<br>\n<br>\n".join(docs)
    )
    return documentation
Generates GitHub Markdown formatted API documentation using provided schemas in RequestHandler methods and their docstrings .
59,439
def error(self, message, data=None, code=None):
    """Write a JSend-style 'error' response and finish the request.

    ``data`` and ``code`` are included only when truthy.
    """
    result = {'status': 'error', 'message': message}
    for key, value in (('data', data), ('code', code)):
        if value:
            result[key] = value
    self.write(result)
    self.finish()
An error occurred in processing the request i . e . an exception was thrown .
59,440
def input_schema_clean(input_, input_schema):
    """Merge schema default values into the input data.

    Only applies to object schemas; anything else (or a schema without
    defaults) returns ``input_`` unchanged.
    """
    if input_schema.get('type') != 'object':
        return input_
    try:
        defaults = get_object_defaults(input_schema)
    except NoObjectDefaults:
        return input_
    return deep_update(defaults, input_)
Updates schema default values with input data .
59,441
def validate(input_schema=None, output_schema=None,
             input_example=None, output_example=None,
             validator_cls=None, format_checker=None,
             on_empty_404=False, use_defaults=False):
    """Parameterized decorator for JSON-schema validation of request
    input and handler output.
    """
    @container
    def _validate(rh_method):
        @wraps(rh_method)
        @tornado.gen.coroutine
        def _wrapper(self, *args, **kwargs):
            if input_schema is not None:
                try:
                    encoding = "UTF-8"
                    input_ = json.loads(self.request.body.decode(encoding))
                except ValueError as e:
                    raise jsonschema.ValidationError(
                        "Input is malformed; could not decode JSON object."
                    )
                if use_defaults:
                    # Fill in schema defaults before validating.
                    input_ = input_schema_clean(input_, input_schema)
                jsonschema.validate(
                    input_,
                    input_schema,
                    cls=validator_cls,
                    format_checker=format_checker
                )
            else:
                input_ = None
            # Expose the decoded body on the handler.
            setattr(self, "body", input_)
            output = rh_method(self, *args, **kwargs)
            if is_future(output):
                output = yield output
            if not output and on_empty_404:
                raise APIError(404, "Resource not found.")
            if output_schema is not None:
                try:
                    jsonschema.validate(
                        {"result": output},
                        {
                            "type": "object",
                            "properties": {"result": output_schema},
                            "required": ["result"]
                        }
                    )
                except jsonschema.ValidationError as e:
                    # An invalid output is the server's fault, so raise a
                    # TypeError rather than a client-facing ValidationError.
                    raise TypeError(str(e))
            self.success(output)
        # Preserve the schemas/examples for documentation generation.
        setattr(_wrapper, "input_schema", input_schema)
        setattr(_wrapper, "output_schema", output_schema)
        setattr(_wrapper, "input_example", input_example)
        setattr(_wrapper, "output_example", output_example)
        return _wrapper
    return _validate
Parameterized decorator for schema validation
59,442
def read(filename):
    """Read ``filename`` from the project root dir and return its
    contents as a string.

    Uses a context manager so the handle is closed deterministically
    (the original leaked the open file object).
    """
    with codecs.open(os.path.join(__DIR__, filename), 'r') as f:
        return f.read()
Read and return filename in root dir of project and return string
59,443
def deep_update ( source , overrides ) : for key , value in overrides . items ( ) : if isinstance ( value , collections . Mapping ) and value : returned = deep_update ( source . get ( key , { } ) , value ) source [ key ] = returned else : source [ key ] = overrides [ key ] return source
Update a nested dictionary or similar mapping .
59,444
def is_handler_subclass(cls, classnames=("ViewHandler", "APIHandler")):
    """Determine whether ``cls`` (or any class in a list of classes)
    derives from one of the named handler classes.

    Raises TypeError for anything that is neither a list nor a class.
    """
    if isinstance(cls, list):
        return any(is_handler_subclass(c) for c in cls)
    if isinstance(cls, type):
        mro_names = (c.__name__ for c in inspect.getmro(cls))
        return any(name in classnames for name in mro_names)
    raise TypeError(
        "Unexpected type `{}` for class `{}`".format(type(cls), cls))
Determines if cls is indeed a subclass of classnames
59,445
def write_error(self, status_code, **kwargs):
    """Override of RequestHandler.write_error producing JSend output.

    APIError/ValidationError become 'fail' responses (validation errors
    forced to 400); everything else becomes an 'error' response whose
    details are included only in debug mode.
    """
    def get_exc_message(exception):
        if hasattr(exception, "log_message"):
            return exception.log_message
        return str(exception)

    self.clear()
    self.set_status(status_code)
    exception = kwargs["exc_info"][1]
    if any(isinstance(exception, c) for c in [APIError, ValidationError]):
        # Validation failures are client errors regardless of the
        # original status code.
        if isinstance(exception, ValidationError):
            self.set_status(400)
        self.fail(get_exc_message(exception))
    else:
        self.error(
            message=self._reason,
            data=get_exc_message(exception) if self.settings.get("debug")
            else None,
            code=status_code)
Override of RequestHandler . write_error
59,446
def gen_submodule_names(package):
    """Walk ``package`` and yield the dotted names of all submodules."""
    prefix = package.__name__ + '.'
    for _importer, modname, _ispkg in pkgutil.walk_packages(
            path=package.__path__, prefix=prefix, onerror=lambda x: None):
        yield modname
Walk package and yield names of all submodules
59,447
def get_module_routes(module_name, custom_routes=None, exclusions=None,
                      arg_pattern=r'(?P<{}>[a-zA-Z0-9_\-]+)'):
    """Create and return URL routes for every handler class in
    ``module_name``.

    Auto-generated routes are derived from the module path, the handler
    class name (or its __url_names__/__urls__) and the HTTP method
    argument names; ``custom_routes`` are appended and ``exclusions``
    skipped.
    """
    def has_method(module, cls_name, method_name):
        return all([
            method_name in vars(getattr(module, cls_name)),
            is_method(reduce(getattr, [module, cls_name, method_name]))
        ])

    def yield_args(module, cls_name, method_name):
        # Prefer the argspec captured by the coroutine wrapper, falling
        # back to introspecting the unwrapped method.
        # NOTE(review): inspect.getargspec was removed in Python 3.11 —
        # confirm the supported interpreter range.
        wrapped_method = reduce(getattr, [module, cls_name, method_name])
        method = extract_method(wrapped_method)
        argspec_args = getattr(method, "__argspec_args",
                               inspect.getargspec(method).args)
        return [a for a in argspec_args if a not in ["self"]]

    def generate_auto_route(module, module_name, cls_name, method_name,
                            url_name):
        def get_handler_name():
            # "__self__" means: derive the URL segment from the class
            # name, stripping a trailing 'handler'.
            if url_name == "__self__":
                if cls_name.lower().endswith('handler'):
                    return cls_name.lower().replace('handler', '', 1)
                return cls_name.lower()
            else:
                return url_name

        def get_arg_route():
            # Method arguments become named capture groups in the URL.
            if yield_args(module, cls_name, method_name):
                return "/{}/?$".format("/".join([
                    arg_pattern.format(argname) for argname in
                    yield_args(module, cls_name, method_name)
                ]))
            return r"/?"

        return "/{}/{}{}".format(
            "/".join(module_name.split(".")[1:]),
            get_handler_name(),
            get_arg_route()
        )

    if not custom_routes:
        custom_routes = []
    if not exclusions:
        exclusions = []
    module = importlib.import_module(module_name)
    custom_routes_s = [c.__name__ for r, c in custom_routes]
    rhs = {cls_name: cls for (cls_name, cls)
           in inspect.getmembers(module, inspect.isclass)}
    # For each handler class: explicit __urls__ plus one auto route per
    # __url_names__ entry per implemented HTTP method, de-duplicated.
    auto_routes = list(chain(*[
        list(set(chain(*[
            [(generate_auto_route(module, module_name, cls_name,
                                  method_name, url_name),
              getattr(module, cls_name))
             for url_name in getattr(module, cls_name).__url_names__] +
            [(url, getattr(module, cls_name))
             for url in getattr(module, cls_name).__urls__]
            for method_name in HTTP_METHODS
            if has_method(module, cls_name, method_name)
        ])))
        for cls_name, cls in rhs.items()
        if is_handler_subclass(cls) and
        cls_name not in (custom_routes_s + exclusions)
    ]))
    routes = auto_routes + custom_routes
    return routes
Create and return routes for module_name
59,448
def coroutine(func, replace_callback=True):
    """Tornado-version-compatible wrapper for tornado.gen.coroutine.

    Records the wrapped function's positional argument names on the
    wrapper so route generation can still introspect it.
    """
    if TORNADO_MAJOR != 4:
        wrapper = gen.coroutine(func)
    else:
        wrapper = gen.coroutine(func, replace_callback)
    # BUG FIX: inspect.getargspec was removed in Python 3.11; prefer
    # getfullargspec (same .args field) when available.
    argspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    wrapper.__argspec_args = argspec(func).args
    return wrapper
Tornado - JSON compatible wrapper for tornado . gen . coroutine
59,449
def main():
    """Entry point for gns3-converter: parse args, configure logging and
    convert the main topology plus any snapshots.
    """
    arg_parse = setup_argparse()
    args = arg_parse.parse_args()
    if not args.quiet:
        print('GNS3 Topology Converter')
    if args.debug:
        logging_level = logging.DEBUG
    else:
        logging_level = logging.WARNING
    logging.basicConfig(level=logging_level, format=LOG_MSG_FMT,
                        datefmt=LOG_DATE_FMT)
    logging.getLogger(__name__)
    # Resolve the default 'topology.net' relative to the cwd.
    if args.topology == 'topology.net':
        args.topology = os.path.join(os.getcwd(), 'topology.net')
    # Convert the main topology first, then each snapshot.
    topology_files = [{'file': topology_abspath(args.topology),
                       'snapshot': False}]
    topology_files.extend(get_snapshots(args.topology))
    topology_name = name(args.topology, args.name)
    for topology in topology_files:
        do_conversion(topology, topology_name, args.output, args.debug)
Entry point for gns3 - converter
59,450
def setup_argparse():
    """Set up the argparse argument parser for the converter CLI."""
    parser = argparse.ArgumentParser(
        description='Convert old ini-style GNS3 topologies (<=0.8.7) to '
                    'the newer version 1+ JSON format')
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-n', '--name',
                        help='Topology name (default uses the name of the '
                             'old project directory)')
    parser.add_argument('-o', '--output', help='Output directory')
    parser.add_argument('topology', nargs='?', default='topology.net',
                        help='GNS3 .net topology file '
                             '(default: topology.net)')
    parser.add_argument('--debug', action='store_true',
                        help='Enable debugging output')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Quiet-mode (no output to console)')
    return parser
Setup the argparse argument parser
59,451
def do_conversion(topology_def, topology_name, output_dir=None, debug=False,
                  quiet=False):
    """Convert one topology (or snapshot) to the new JSON format and
    save it.
    """
    gns3_conv = Converter(topology_def['file'], debug)
    old_top = gns3_conv.read_topology()
    new_top = JSONTopology()
    topology = gns3_conv.process_topology(old_top)
    new_top.nodes = gns3_conv.generate_nodes(topology)
    new_top.links = gns3_conv.generate_links(new_top.nodes)
    # Artwork: notes, shapes and embedded images.
    new_top.notes = gns3_conv.generate_notes(topology['artwork']['NOTE'])
    new_top.shapes = gns3_conv.generate_shapes(topology['artwork']['SHAPE'])
    new_top.images = gns3_conv.generate_images(topology['artwork']['PIXMAP'])
    new_top.name = topology_name
    save(output_dir, gns3_conv, new_top, topology_def['snapshot'], quiet)
Convert the topology
59,452
def get_snapshots(topology):
    """Return descriptors for any snapshot topologies stored beside
    ``topology``.
    """
    snapshots = []
    snap_dir = os.path.join(topology_dirname(topology), 'snapshots')
    if not os.path.exists(snap_dir):
        return snapshots
    for entry in os.listdir(snap_dir):
        snap_top = os.path.join(snap_dir, entry, 'topology.net')
        if os.path.exists(snap_top):
            snapshots.append({'file': snap_top, 'snapshot': True})
    return snapshots
Return the paths of any snapshot topologies
59,453
def name(topology_file, topology_name=None):
    """Return the name to save the converted topology as: the supplied
    name, or else the old project's directory name.
    """
    if topology_name is None:
        logging.debug('topology name not supplied')
        return os.path.basename(topology_dirname(topology_file))
    logging.debug('topology name supplied')
    return topology_name
Calculate the name to save the converted topology as using either either a specified name or the directory name of the current project
59,454
def snapshot_name(topo_name):
    """Extract the snapshot name from a snapshot topology path.

    Raises ConvertError when the directory does not follow the
    'topology_<name>_snapshot_<timestamp>' convention.
    """
    topo_name = os.path.basename(topology_dirname(topo_name))
    # BUG FIX: raw string — '\d' in a plain literal is an invalid escape
    # sequence that newer Pythons warn about (and will reject).
    snap_re = re.compile(r'^topology_(.+)(_snapshot_)(\d{6}_\d{6})$')
    result = snap_re.search(topo_name)
    if result is None:
        raise ConvertError('Unable to get snapshot name')
    return result.group(1) + '_' + result.group(3)
Get the snapshot name
59,455
def save ( output_dir , converter , json_topology , snapshot , quiet ) : try : old_topology_dir = topology_dirname ( converter . topology ) if output_dir : output_dir = os . path . abspath ( output_dir ) else : output_dir = os . getcwd ( ) topology_name = json_topology . name topology_files_dir = os . path . join ( output_dir , topology_name + '-files' ) if snapshot : snap_name = snapshot_name ( converter . topology ) output_dir = os . path . join ( topology_files_dir , 'snapshots' , snap_name ) topology_files_dir = os . path . join ( output_dir , topology_name + '-files' ) if not os . path . exists ( output_dir ) : os . makedirs ( output_dir ) config_err = copy_configs ( converter . configs , old_topology_dir , topology_files_dir ) copy_vpcs_configs ( old_topology_dir , topology_files_dir ) copy_topology_image ( old_topology_dir , output_dir ) if not snapshot : copy_instructions ( old_topology_dir , output_dir ) image_err = copy_images ( converter . images , old_topology_dir , topology_files_dir ) make_vbox_dirs ( json_topology . get_vboxes ( ) , output_dir , topology_name ) make_qemu_dirs ( json_topology . get_qemus ( ) , output_dir , topology_name ) if config_err : logging . warning ( 'Some router startup configurations could not be ' 'found to be copied to the new topology' ) if image_err : logging . warning ( 'Some images could not be found to be copied to ' 'the new topology' ) filename = '%s.gns3' % topology_name file_path = os . path . join ( output_dir , filename ) with open ( file_path , 'w' ) as file : json . dump ( json_topology . get_topology ( ) , file , indent = 4 , sort_keys = True ) if not snapshot and not quiet : print ( 'Your topology has been converted and can found in:\n' ' %s' % output_dir ) except OSError as error : logging . error ( error )
Save the converted topology
59,456
def copy_configs ( configs , source , target ) : config_err = False if len ( configs ) > 0 : config_dir = os . path . join ( target , 'dynamips' , 'configs' ) os . makedirs ( config_dir ) for config in configs : old_config_file = os . path . join ( source , config [ 'old' ] ) new_config_file = os . path . join ( config_dir , os . path . basename ( config [ 'new' ] ) ) if os . path . isfile ( old_config_file ) : shutil . copy ( old_config_file , new_config_file ) else : config_err = True logging . error ( 'Unable to find %s' % config [ 'old' ] ) return config_err
Copy dynamips configs to converted topology
59,457
def copy_vpcs_configs ( source , target ) : vpcs_files = glob . glob ( os . path . join ( source , 'configs' , '*.vpc' ) ) vpcs_hist = os . path . join ( source , 'configs' , 'vpcs.hist' ) vpcs_config_path = os . path . join ( target , 'vpcs' , 'multi-host' ) if os . path . isfile ( vpcs_hist ) : vpcs_files . append ( vpcs_hist ) if len ( vpcs_files ) > 0 : os . makedirs ( vpcs_config_path ) for old_file in vpcs_files : new_file = os . path . join ( vpcs_config_path , os . path . basename ( old_file ) ) shutil . copy ( old_file , new_file )
Copy any VPCS configs to the converted topology
59,458
def copy_topology_image ( source , target ) : files = glob . glob ( os . path . join ( source , '*.png' ) ) for file in files : shutil . copy ( file , target )
Copy any images of the topology to the converted topology
59,459
def copy_images ( images , source , target ) : image_err = False if len ( images ) > 0 : images_dir = os . path . join ( target , 'images' ) os . makedirs ( images_dir ) for image in images : if os . path . isabs ( image ) : old_image_file = image else : old_image_file = os . path . join ( source , image ) new_image_file = os . path . join ( images_dir , os . path . basename ( image ) ) if os . path . isfile ( os . path . abspath ( old_image_file ) ) : shutil . copy ( old_image_file , new_image_file ) else : image_err = True logging . error ( 'Unable to find %s' % old_image_file ) return image_err
Copy images to converted topology
59,460
def make_vbox_dirs ( max_vbox_id , output_dir , topology_name ) : if max_vbox_id is not None : for i in range ( 1 , max_vbox_id + 1 ) : vbox_dir = os . path . join ( output_dir , topology_name + '-files' , 'vbox' , 'vm-%s' % i ) os . makedirs ( vbox_dir )
Create VirtualBox working directories if required
59,461
def make_qemu_dirs ( max_qemu_id , output_dir , topology_name ) : if max_qemu_id is not None : for i in range ( 1 , max_qemu_id + 1 ) : qemu_dir = os . path . join ( output_dir , topology_name + '-files' , 'qemu' , 'vm-%s' % i ) os . makedirs ( qemu_dir )
Create Qemu VM working directories if required
59,462
def add_wic ( self , old_wic , wic ) : new_wic = 'wic' + old_wic [ - 1 ] self . node [ 'properties' ] [ new_wic ] = wic
Convert the old style WIC slot to a new style WIC slot and add the WIC to the node properties
59,463
def add_slot_ports ( self , slot ) : slot_nb = int ( slot [ 4 ] ) slot_adapter = self . node [ 'properties' ] [ slot ] num_ports = ADAPTER_MATRIX [ slot_adapter ] [ 'ports' ] port_type = ADAPTER_MATRIX [ slot_adapter ] [ 'type' ] ports = [ ] for i in range ( num_ports ) : port_name = PORT_TYPES [ port_type ] + '%s/%s' % ( slot_nb , i ) port_temp = { 'name' : port_name , 'id' : self . port_id , 'port_number' : i , 'slot_number' : slot_nb } ports . append ( port_temp ) self . port_id += 1 self . node [ 'ports' ] . extend ( ports )
Add the ports to be added for a adapter card
59,464
def add_info_from_hv ( self ) : if 'image' in self . hypervisor : self . node [ 'properties' ] [ 'image' ] = os . path . basename ( self . hypervisor [ 'image' ] ) if 'idlepc' in self . hypervisor : self . node [ 'properties' ] [ 'idlepc' ] = self . hypervisor [ 'idlepc' ] if 'ram' in self . hypervisor : self . node [ 'properties' ] [ 'ram' ] = self . hypervisor [ 'ram' ] if 'npe' in self . hypervisor : self . device_info [ 'npe' ] = self . hypervisor [ 'npe' ] if 'chassis' in self . hypervisor : self . device_info [ 'chassis' ] = self . hypervisor [ 'chassis' ] if self . device_info [ 'model' ] == 'c3600' : self . node [ 'properties' ] [ 'chassis' ] = self . device_info [ 'chassis' ]
Add the information we need from the old hypervisor section
59,465
def add_device_items ( self , item , device ) : if item in ( 'aux' , 'console' ) : self . node [ 'properties' ] [ item ] = device [ item ] elif item . startswith ( 'slot' ) : self . node [ 'properties' ] [ item ] = device [ item ] elif item == 'connections' : self . connections = device [ item ] elif INTERFACE_RE . search ( item ) or VBQ_INT_RE . search ( item ) : self . interfaces . append ( { 'from' : item , 'to' : device [ item ] } ) elif NUMBER_RE . search ( item ) : if self . device_info [ 'type' ] == 'EthernetSwitch' : self . calc_ethsw_port ( item , device [ item ] ) elif self . device_info [ 'type' ] == 'FrameRelaySwitch' : self . calc_frsw_port ( item , device [ item ] ) elif MAPINT_RE . search ( item ) : self . add_mapping ( ( item , device [ item ] ) ) elif item == 'cnfg' : new_config = os . path . join ( 'configs' , 'i%s_startup-config.cfg' % self . node [ 'id' ] ) self . node [ 'properties' ] [ 'startup_config' ] = new_config self . config . append ( { 'old' : fix_path ( device [ item ] ) , 'new' : new_config } ) elif item . startswith ( 'wic' ) : self . add_wic ( item , device [ item ] ) elif item == 'symbol' : self . set_symbol ( device [ item ] ) elif item == 'nics' : self . node [ 'properties' ] [ 'adapters' ] = device [ item ] elif item == 'image' : self . node [ 'properties' ] [ 'vmname' ] = device [ item ] elif item == 'vbox_id' or item == 'qemu_id' : self . node [ item ] = device [ item ]
Add the various items from the device to the node
59,466
def add_to_virtualbox ( self ) : if 'vmname' not in self . node [ 'properties' ] : self . node [ 'properties' ] [ 'vmname' ] = self . hypervisor [ 'VBoxDevice' ] [ 'image' ] if 'adapters' not in self . node [ 'properties' ] : self . node [ 'properties' ] [ 'adapters' ] = self . hypervisor [ 'VBoxDevice' ] [ 'nics' ] if 'console' not in self . node [ 'properties' ] : self . node [ 'properties' ] [ 'console' ] = self . base_ports [ 'vbox_console' ] + self . node [ 'vbox_id' ] - 1
Add additional parameters that were in the VBoxDevice section or not present
59,467
def add_to_qemu ( self ) : device = self . device_info [ 'ext_conf' ] node_prop = self . node [ 'properties' ] hv_device = self . hypervisor [ device ] if 'hda_disk_image' not in node_prop : if 'image' in hv_device : node_prop [ 'hda_disk_image' ] = hv_device [ 'image' ] elif 'image1' in hv_device : node_prop [ 'hda_disk_image' ] = hv_device [ 'image1' ] if 'hdb_disk_image' not in node_prop and 'image2' in hv_device : node_prop [ 'hdb_disk_image' ] = hv_device [ 'image2' ] if 'ram' not in node_prop and 'ram' in hv_device : node_prop [ 'ram' ] = hv_device [ 'ram' ] else : node_prop [ 'ram' ] = 256 if 'options' not in node_prop and 'options' in hv_device : node_prop [ 'options' ] = hv_device [ 'options' ] if 'kernel_image' not in node_prop and 'kernel' in hv_device : node_prop [ 'kernel_image' ] = hv_device [ 'kernel' ] if 'kernel_command_line' not in node_prop and 'kernel_cmdline' in hv_device : node_prop [ 'kernel_command_line' ] = hv_device [ 'kernel_cmdline' ] if 'initrd' not in node_prop and 'initrd' in hv_device : node_prop [ 'initrd' ] = hv_device [ 'initrd' ] if 'adapters' not in node_prop and 'nics' in hv_device : node_prop [ 'adapters' ] = hv_device [ 'nics' ] elif 'adapters' not in node_prop and 'nics' not in hv_device : node_prop [ 'adapters' ] = 6 if 'adapter_type' not in node_prop and 'netcard' in hv_device : node_prop [ 'adapter_type' ] = hv_device [ 'netcard' ] if 'console' not in node_prop : node_prop [ 'console' ] = self . base_ports [ 'qemu_console' ] + self . node [ 'qemu_id' ] - 1 if 'qemu_path' not in node_prop : qemu_path = self . hypervisor [ 'qemu_path' ] if 'flavor' in hv_device : qemu_path = re . sub ( r'qemu-system-.*' , 'qemu-system' + hv_device [ 'flavor' ] , qemu_path ) node_prop [ 'qemu_path' ] = qemu_path
Add additional parameters to a QemuVM Device that were present in its global conf section
59,468
def add_vm_ethernet_ports ( self ) : for i in range ( self . node [ 'properties' ] [ 'adapters' ] ) : port = { 'id' : self . port_id , 'name' : 'Ethernet%s' % i , 'port_number' : i } self . node [ 'ports' ] . append ( port ) self . port_id += 1
Add ethernet ports to Virtualbox and Qemu nodes
59,469
def set_qemu_symbol ( self ) : valid_devices = { 'ASA' : 'asa' , 'PIX' : 'PIX_firewall' , 'JUNOS' : 'router' , 'IDS' : 'ids' } if self . device_info [ 'from' ] in valid_devices and 'default_symbol' not in self . node and 'hover_symbol' not in self . node : self . set_symbol ( valid_devices [ self . device_info [ 'from' ] ] )
Set the appropriate symbol for QEMU Devices
59,470
def set_symbol ( self , symbol ) : if symbol == 'EtherSwitch router' : symbol = 'multilayer_switch' elif symbol == 'Host' : symbol = 'computer' normal = ':/symbols/%s.normal.svg' % symbol selected = ':/symbols/%s.selected.svg' % symbol self . node [ 'default_symbol' ] = normal self . node [ 'hover_symbol' ] = selected
Set a symbol for a device
59,471
def calc_ethsw_port ( self , port_num , port_def ) : port_def = port_def . split ( ' ' ) if len ( port_def ) == 4 : destination = { 'device' : port_def [ 2 ] , 'port' : port_def [ 3 ] } else : destination = { 'device' : 'NIO' , 'port' : port_def [ 2 ] } port = { 'id' : self . port_id , 'name' : str ( port_num ) , 'port_number' : int ( port_num ) , 'type' : port_def [ 0 ] , 'vlan' : int ( port_def [ 1 ] ) } self . node [ 'ports' ] . append ( port ) self . calc_link ( self . node [ 'id' ] , self . port_id , port [ 'name' ] , destination ) self . port_id += 1
Split and create the port entry for an Ethernet Switch
59,472
def calc_mb_ports ( self ) : model = self . device_info [ 'model' ] chassis = self . device_info [ 'chassis' ] num_ports = MODEL_MATRIX [ model ] [ chassis ] [ 'ports' ] ports = [ ] if num_ports > 0 : port_type = MODEL_MATRIX [ model ] [ chassis ] [ 'type' ] for i in range ( num_ports ) : port_temp = { 'name' : PORT_TYPES [ port_type ] + '0/' + str ( i ) , 'id' : self . port_id , 'port_number' : i , 'slot_number' : 0 } ports . append ( port_temp ) self . port_id += 1 self . node [ 'ports' ] . extend ( ports )
Add the default ports to add to a router
59,473
def calc_link ( self , src_id , src_port , src_port_name , destination ) : if destination [ 'device' ] == 'NIO' : destination [ 'port' ] = destination [ 'port' ] . lower ( ) link = { 'source_node_id' : src_id , 'source_port_id' : src_port , 'source_port_name' : src_port_name , 'source_dev' : self . node [ 'properties' ] [ 'name' ] , 'dest_dev' : destination [ 'device' ] , 'dest_port' : destination [ 'port' ] } self . links . append ( link )
Add a link item for processing later
59,474
def set_description ( self ) : if self . device_info [ 'type' ] == 'Router' : self . node [ 'description' ] = '%s %s' % ( self . device_info [ 'type' ] , self . device_info [ 'model' ] ) else : self . node [ 'description' ] = self . device_info [ 'desc' ]
Set the node description
59,475
def set_type ( self ) : if self . device_info [ 'type' ] == 'Router' : self . node [ 'type' ] = self . device_info [ 'model' ] . upper ( ) else : self . node [ 'type' ] = self . device_info [ 'type' ]
Set the node type
59,476
def calc_device_links ( self ) : for connection in self . interfaces : int_type = connection [ 'from' ] [ 0 ] int_name = connection [ 'from' ] . replace ( int_type , PORT_TYPES [ int_type . upper ( ) ] ) src_port = None for port in self . node [ 'ports' ] : if int_name == port [ 'name' ] : src_port = port [ 'id' ] break dest_temp = connection [ 'to' ] . split ( ' ' ) if len ( dest_temp ) == 2 : conn_to = { 'device' : dest_temp [ 0 ] , 'port' : dest_temp [ 1 ] } else : conn_to = { 'device' : 'NIO' , 'port' : dest_temp [ 0 ] } self . calc_link ( self . node [ 'id' ] , src_port , int_name , conn_to )
Calculate a router or VirtualBox link
59,477
def calc_cloud_connection ( self ) : self . node [ 'properties' ] [ 'nios' ] = [ ] if self . connections is None : return None else : self . connections = self . connections . split ( ' ' ) for connection in sorted ( self . connections ) : connection = connection . split ( ':' ) connection_len = len ( connection ) if connection_len == 4 : nio = '%s:%s' % ( connection [ 2 ] , connection [ 3 ] ) elif connection_len == 6 : nio = '%s:%s:%s:%s' % ( connection [ 2 ] . lower ( ) , connection [ 3 ] , connection [ 4 ] , connection [ 5 ] ) else : return RuntimeError ( 'Error: Unknown connection string length ' '(Length: %s)' % connection_len ) self . node [ 'properties' ] [ 'nios' ] . append ( nio ) self . node [ 'ports' ] . append ( { 'id' : self . port_id , 'name' : nio , 'stub' : True } ) self . port_id += 1 return None
Add the ports and nios for a cloud connection
59,478
def process_mappings ( self ) : for mapping_a in self . mappings : for mapping_b in self . mappings : if mapping_a [ 'source' ] == mapping_b [ 'dest' ] : self . mappings . remove ( mapping_b ) break self . node [ 'properties' ] [ 'mappings' ] = { } mappings = self . node [ 'properties' ] [ 'mappings' ] for mapping in self . mappings : mappings [ mapping [ 'source' ] ] = mapping [ 'dest' ]
Process the mappings for a Frame Relay switch . Removes duplicates and adds the mappings to the node properties
59,479
def fix_path ( path ) : if '\\' in path : path = path . replace ( '\\' , '/' ) path = os . path . normpath ( path ) return path
Fix windows path s . Linux path s will remain unaltered
59,480
def read_topology ( self ) : configspec = resource_stream ( __name__ , 'configspec' ) try : handle = open ( self . _topology ) handle . close ( ) try : config = ConfigObj ( self . _topology , configspec = configspec , raise_errors = True , list_values = False , encoding = 'utf-8' ) except SyntaxError : logging . error ( 'Error loading .net file' ) sys . exit ( 1 ) except IOError : logging . error ( 'Cannot open topology file' ) sys . exit ( 1 ) vtor = Validator ( ) res = config . validate ( vtor , preserve_errors = True ) if res : logging . debug ( 'Validation passed' ) elif not res : for entry in flatten_errors ( config , res ) : ( section_list , key , error ) = entry if key is not None : section_list . append ( key ) else : section_list . append ( '[missing section]' ) section_string = ', ' . join ( section_list ) if error is False : error = 'Missing value or section' print ( section_string , ' = ' , error ) input ( 'Press ENTER to continue' ) sys . exit ( 1 ) configspec . close ( ) return config
Read the ini - style topology file using ConfigObj
59,481
def process_topology ( self , old_top ) : sections = self . get_sections ( old_top ) topo = LegacyTopology ( sections , old_top ) for instance in sorted ( sections ) : if instance . startswith ( 'vbox' ) or instance . startswith ( 'qemu' ) : if instance . startswith ( 'qemu' ) and 'qemupath' in old_top [ instance ] : topo . add_qemu_path ( instance ) for device in EXTRA_CONF : try : if isinstance ( old_top [ instance ] [ device ] , dict ) : topo . add_conf_item ( instance , device ) old_top [ instance ] . pop ( device ) except KeyError : pass for item in sorted ( old_top [ instance ] ) : if isinstance ( old_top [ instance ] [ item ] , dict ) : if item in MODEL_TRANSFORM : topo . add_conf_item ( instance , item ) elif instance == 'GNS3-DATA' and ( item . startswith ( 'SHAPE' ) or item . startswith ( 'NOTE' ) or item . startswith ( 'PIXMAP' ) ) : topo . add_artwork_item ( instance , item ) else : topo . add_physical_item ( instance , item ) return topo . topology
Processes the sections returned by get_instances
59,482
def generate_links ( self , nodes ) : new_links = [ ] for link in self . links : if INTERFACE_RE . search ( link [ 'dest_port' ] ) or VBQ_INT_RE . search ( link [ 'dest_port' ] ) : int_type = link [ 'dest_port' ] [ 0 ] dest_port = link [ 'dest_port' ] . replace ( int_type , PORT_TYPES [ int_type . upper ( ) ] ) else : dest_port = link [ 'dest_port' ] dest_details = self . convert_destination_to_id ( link [ 'dest_dev' ] , dest_port , nodes ) desc = 'Link from %s port %s to %s port %s' % ( link [ 'source_dev' ] , link [ 'source_port_name' ] , dest_details [ 'name' ] , dest_port ) new_links . append ( { 'description' : desc , 'destination_node_id' : dest_details [ 'id' ] , 'destination_port_id' : dest_details [ 'pid' ] , 'source_port_id' : link [ 'source_port_id' ] , 'source_node_id' : link [ 'source_node_id' ] } ) link_id = 1 for link in new_links : t_link = str ( link [ 'source_node_id' ] ) + ':' + str ( link [ 'source_port_id' ] ) for link2 in new_links : d_link = str ( link2 [ 'destination_node_id' ] ) + ':' + str ( link2 [ 'destination_port_id' ] ) if t_link == d_link : new_links . remove ( link2 ) break link [ 'id' ] = link_id link_id += 1 self . add_node_connection ( link , nodes ) return new_links
Generate a list of links
59,483
def device_id_from_name ( device_name , nodes ) : device_id = None for node in nodes : if device_name == node [ 'properties' ] [ 'name' ] : device_id = node [ 'id' ] break return device_id
Get the device ID when given a device name
59,484
def port_id_from_name ( port_name , device_id , nodes ) : port_id = None for node in nodes : if device_id == node [ 'id' ] : for port in node [ 'ports' ] : if port_name == port [ 'name' ] : port_id = port [ 'id' ] break break return port_id
Get the port ID when given a port name
59,485
def convert_destination_to_id ( destination_node , destination_port , nodes ) : device_id = None device_name = None port_id = None if destination_node != 'NIO' : for node in nodes : if destination_node == node [ 'properties' ] [ 'name' ] : device_id = node [ 'id' ] device_name = destination_node for port in node [ 'ports' ] : if destination_port == port [ 'name' ] : port_id = port [ 'id' ] break break else : for node in nodes : if node [ 'type' ] == 'Cloud' : for port in node [ 'ports' ] : if destination_port . lower ( ) == port [ 'name' ] . lower ( ) : device_id = node [ 'id' ] device_name = node [ 'properties' ] [ 'name' ] port_id = port [ 'id' ] break info = { 'id' : device_id , 'name' : device_name , 'pid' : port_id } return info
Convert a destination to device and port ID
59,486
def get_node_name_from_id ( node_id , nodes ) : node_name = '' for node in nodes : if node [ 'id' ] == node_id : node_name = node [ 'properties' ] [ 'name' ] break return node_name
Get the name of a node when given the node_id
59,487
def get_port_name_from_id ( node_id , port_id , nodes ) : port_name = '' for node in nodes : if node [ 'id' ] == node_id : for port in node [ 'ports' ] : if port [ 'id' ] == port_id : port_name = port [ 'name' ] break return port_name
Get the name of a port for a given node and port ID
59,488
def add_node_connection ( self , link , nodes ) : src_desc = 'connected to %s on port %s' % ( self . get_node_name_from_id ( link [ 'destination_node_id' ] , nodes ) , self . get_port_name_from_id ( link [ 'destination_node_id' ] , link [ 'destination_port_id' ] , nodes ) ) dest_desc = 'connected to %s on port %s' % ( self . get_node_name_from_id ( link [ 'source_node_id' ] , nodes ) , self . get_port_name_from_id ( link [ 'source_node_id' ] , link [ 'source_port_id' ] , nodes ) ) for node in nodes : if node [ 'id' ] == link [ 'source_node_id' ] : for port in node [ 'ports' ] : if port [ 'id' ] == link [ 'source_port_id' ] : port [ 'link_id' ] = link [ 'id' ] port [ 'description' ] = src_desc break elif node [ 'id' ] == link [ 'destination_node_id' ] : for port in node [ 'ports' ] : if port [ 'id' ] == link [ 'destination_port_id' ] : port [ 'link_id' ] = link [ 'id' ] port [ 'description' ] = dest_desc break
Add a connection to a node
59,489
def generate_shapes ( shapes ) : new_shapes = { 'ellipse' : [ ] , 'rectangle' : [ ] } for shape in shapes : tmp_shape = { } for shape_item in shapes [ shape ] : if shape_item != 'type' : tmp_shape [ shape_item ] = shapes [ shape ] [ shape_item ] new_shapes [ shapes [ shape ] [ 'type' ] ] . append ( tmp_shape ) return new_shapes
Generate the shapes for the topology
59,490
def generate_notes ( notes ) : new_notes = [ ] for note in notes : tmp_note = { } for note_item in notes [ note ] : tmp_note [ note_item ] = notes [ note ] [ note_item ] new_notes . append ( tmp_note ) return new_notes
Generate the notes list
59,491
def generate_images ( self , pixmaps ) : new_images = [ ] for image in pixmaps : tmp_image = { } for img_item in pixmaps [ image ] : if img_item == 'path' : path = os . path . join ( 'images' , os . path . basename ( pixmaps [ image ] [ img_item ] ) ) tmp_image [ 'path' ] = fix_path ( path ) self . images . append ( pixmaps [ image ] [ img_item ] ) else : tmp_image [ img_item ] = pixmaps [ image ] [ img_item ] new_images . append ( tmp_image ) return new_images
Generate the images list and store the images to copy
59,492
def add_qemu_path ( self , instance ) : tmp_conf = { 'qemu_path' : self . old_top [ instance ] [ 'qemupath' ] } if len ( self . topology [ 'conf' ] ) == 0 : self . topology [ 'conf' ] . append ( tmp_conf ) else : self . topology [ 'conf' ] [ self . hv_id ] . update ( tmp_conf )
Add the qemu path to the hypervisor conf data
59,493
def add_conf_item ( self , instance , item ) : tmp_conf = { } if item not in EXTRA_CONF : tmp_conf [ 'model' ] = MODEL_TRANSFORM [ item ] for s_item in sorted ( self . old_top [ instance ] [ item ] ) : if self . old_top [ instance ] [ item ] [ s_item ] is not None : tmp_conf [ s_item ] = self . old_top [ instance ] [ item ] [ s_item ] if item in EXTRA_CONF : tmp_conf = { item : tmp_conf } if len ( self . topology [ 'conf' ] ) == 0 : self . topology [ 'conf' ] . append ( tmp_conf ) else : self . topology [ 'conf' ] [ self . hv_id ] . update ( tmp_conf ) else : self . topology [ 'conf' ] . append ( tmp_conf ) self . hv_id = len ( self . topology [ 'conf' ] ) - 1
Add a hypervisor configuration item
59,494
def device_typename ( item ) : dev_type = { 'ROUTER' : { 'from' : 'ROUTER' , 'desc' : 'Router' , 'type' : 'Router' , 'label_x' : 19.5 } , 'QEMU' : { 'from' : 'QEMU' , 'desc' : 'QEMU VM' , 'type' : 'QemuVM' , 'ext_conf' : 'QemuDevice' , 'label_x' : - 12 } , 'ASA' : { 'from' : 'ASA' , 'desc' : 'QEMU VM' , 'type' : 'QemuVM' , 'ext_conf' : '5520' , 'label_x' : 2.5 } , 'PIX' : { 'from' : 'PIX' , 'desc' : 'QEMU VM' , 'type' : 'QemuVM' , 'ext_conf' : '525' , 'label_x' : - 12 } , 'JUNOS' : { 'from' : 'JUNOS' , 'desc' : 'QEMU VM' , 'type' : 'QemuVM' , 'ext_conf' : 'O-series' , 'label_x' : - 12 } , 'IDS' : { 'from' : 'IDS' , 'desc' : 'QEMU VM' , 'type' : 'QemuVM' , 'ext_conf' : 'IDS-4215' , 'label_x' : - 12 } , 'VBOX' : { 'from' : 'VBOX' , 'desc' : 'VirtualBox VM' , 'type' : 'VirtualBoxVM' , 'ext_conf' : 'VBoxDevice' , 'label_x' : - 4.5 } , 'FRSW' : { 'from' : 'FRSW' , 'desc' : 'Frame Relay switch' , 'type' : 'FrameRelaySwitch' , 'label_x' : 7.5 } , 'ETHSW' : { 'from' : 'ETHSW' , 'desc' : 'Ethernet switch' , 'type' : 'EthernetSwitch' , 'label_x' : 15.5 } , 'Hub' : { 'from' : 'Hub' , 'desc' : 'Ethernet hub' , 'type' : 'EthernetHub' , 'label_x' : 12.0 } , 'ATMSW' : { 'from' : 'ATMSW' , 'desc' : 'ATM switch' , 'type' : 'ATMSwitch' , 'label_x' : 2.0 } , 'ATMBR' : { 'from' : 'ATMBR' , 'desc' : 'ATMBR' , 'type' : 'ATMBR' } , 'Cloud' : { 'from' : 'Cloud' , 'desc' : 'Cloud' , 'type' : 'Cloud' , 'label_x' : 47.5 } } item_type = item . split ( ' ' ) [ 0 ] name = item . replace ( '%s ' % dev_type [ item_type ] [ 'from' ] , '' ) return name , dev_type [ item_type ]
Convert the old names to new - style names and types
59,495
def get_topology ( self ) : topology = { 'name' : self . _name , 'resources_type' : 'local' , 'topology' : { } , 'type' : 'topology' , 'version' : '1.0' } if self . _links : topology [ 'topology' ] [ 'links' ] = self . _links if self . _nodes : topology [ 'topology' ] [ 'nodes' ] = self . _nodes if self . _servers : topology [ 'topology' ] [ 'servers' ] = self . _servers if self . _notes : topology [ 'topology' ] [ 'notes' ] = self . _notes if self . _shapes [ 'ellipse' ] : topology [ 'topology' ] [ 'ellipses' ] = self . _shapes [ 'ellipse' ] if self . _shapes [ 'rectangle' ] : topology [ 'topology' ] [ 'rectangles' ] = self . _shapes [ 'rectangle' ] if self . _images : topology [ 'topology' ] [ 'images' ] = self . _images return topology
Get the converted topology ready for JSON encoding
59,496
def get_vboxes ( self ) : vbox_list = [ ] vbox_max = None for node in self . nodes : if node [ 'type' ] == 'VirtualBoxVM' : vbox_list . append ( node [ 'vbox_id' ] ) if len ( vbox_list ) > 0 : vbox_max = max ( vbox_list ) return vbox_max
Get the maximum ID of the VBoxes
59,497
def get_qemus ( self ) : qemu_vm_list = [ ] qemu_vm_max = None for node in self . nodes : if node [ 'type' ] == 'QemuVM' : qemu_vm_list . append ( node [ 'qemu_id' ] ) if len ( qemu_vm_list ) > 0 : qemu_vm_max = max ( qemu_vm_list ) return qemu_vm_max
Get the maximum ID of the Qemu VMs
59,498
def getElements ( self , name = '' ) : 'Get a list of child elements' if not name : return self . children else : elements = [ ] for element in self . children : if element . name == name : elements . append ( element ) return elements
Get a list of child elements
59,499
def StartElement ( self , name , attributes ) : 'SAX start element even handler' element = Element ( name . encode ( ) , attributes ) if len ( self . nodeStack ) > 0 : parent = self . nodeStack [ - 1 ] parent . AddChild ( element ) else : self . root = element self . nodeStack . append ( element )
SAX start element even handler