idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
60,400
def _delete_sbo_tar_gz(self):
    """Remove the downloaded SlackBuild tar.gz archive after it has been
    extracted (skipped when running in auto mode)."""
    archive = self.meta.build_path + self.script
    if not self.auto and os.path.isfile(archive):
        os.remove(archive)
Delete the SlackBuild tar.gz file after it is untarred
60,401
def _delete_dir(self):
    """Remove a stale build directory left over from a previous run
    (skipped when running in auto mode)."""
    build_dir = self.meta.build_path + self.prgnam
    if not self.auto and os.path.isdir(build_dir):
        shutil.rmtree(build_dir)
Delete old folder if exists before start build
60,402
def listed(self):
    """Print every package currently present in the blacklist."""
    print("\nPackages in the blacklist:\n")
    green = self.meta.color["GREEN"]
    endc = self.meta.color["ENDC"]
    for entry in self.get_black():
        if entry:
            print("{0}{1}{2}".format(green, entry, endc))
            self.quit = True
    if self.quit:
        print("")
Print blacklist packages
60,403
def add(self, pkgs):
    """Append the given packages to the blacklist file, skipping any that
    are already blacklisted; prints each newly added name.

    FIX: dropped the redundant explicit close() that sat inside the
    ``with`` block (the context manager already closes the file).
    """
    blacklist = self.get_black()
    print("\nAdd packages in the blacklist:\n")
    with open(self.blackfile, "a") as black_conf:
        # set() removes duplicates passed on the command line
        for pkg in set(pkgs):
            if pkg not in blacklist:
                print("{0}{1}{2}".format(self.meta.color["GREEN"], pkg,
                                         self.meta.color["ENDC"]))
                black_conf.write(pkg + "\n")
                self.quit = True
    if self.quit:
        print("")
Add blacklist packages if not exist
60,404
def remove(self, pkgs):
    """Rewrite the blacklist file, dropping the given packages and
    printing each removed name.

    FIX: dropped the redundant explicit close() that sat inside the
    ``with`` block (the context manager already closes the file).
    """
    print("\nRemove packages from the blacklist:\n")
    with open(self.blackfile, "w") as black_conf:
        for line in self.black_conf.splitlines():
            if line not in pkgs:
                black_conf.write(line + "\n")
            else:
                print("{0}{1}{2}".format(self.meta.color["RED"], line,
                                         self.meta.color["ENDC"]))
                self.quit = True
    if self.quit:
        print("")
Remove packages from blacklist
60,405
def packages(self, pkgs, repo):
    """Return the subset of pkgs that is blacklisted, either directly or
    for the given repository."""
    self.black = []
    for entry in self.get_black():
        priority = entry.split(":")
        for pkg in pkgs:
            self.__priority(priority, repo, pkg)
            self.__blackpkg(entry, repo, pkg)
    return self.black
Return packages in blacklist or by repository
60,406
def __priority(self, pr, repo, pkg):
    """Match pkg against a repo-qualified blacklist entry pr (already
    split as [repo, pattern]) and record it on a hit; '*' acts as a
    prefix/suffix/substring wildcard."""
    if pr[0] != repo:
        return
    pattern = pr[1]
    if pattern.startswith("*") and pattern.endswith("*"):
        if pattern[1:-1] in pkg:
            self.black.append(self.__add(repo, pkg))
    elif pattern.endswith("*"):
        if pkg.startswith(pattern[:-1]):
            self.black.append(self.__add(repo, pkg))
    elif pattern.startswith("*"):
        if pkg.endswith(pattern[1:]):
            self.black.append(self.__add(repo, pkg))
    elif "*" not in pattern:
        self.black.append(self.__add(repo, pkg))
Add packages in blacklist by priority
60,407
def __blackpkg(self, bl, repo, pkg):
    """Match pkg against a plain (non repo-qualified) blacklist entry bl;
    '*' acts as a prefix/suffix/substring wildcard, and literal entries
    are recorded verbatim once."""
    if bl.startswith("*") and bl.endswith("*"):
        if bl[1:-1] in pkg:
            self.black.append(self.__add(repo, pkg))
    elif bl.endswith("*"):
        if pkg.startswith(bl[:-1]):
            self.black.append(self.__add(repo, pkg))
    elif bl.startswith("*"):
        if pkg.endswith(bl[1:]):
            self.black.append(self.__add(repo, pkg))
    if bl not in self.black and "*" not in bl:
        self.black.append(bl)
Add packages in blacklist
60,408
def view(self):
    """Print the package description for the active repository, exiting
    with status 1 when no description is found."""
    print("")
    description, count = "", 0
    if self.repo == "sbo":
        description = SBoGrep(self.name).description()
    else:
        packages_txt = Utils().read_file(self.lib)
        prefix = self.name + ":"
        for line in packages_txt.splitlines():
            if line.startswith(prefix):
                # skip "name: " before appending the description text
                description += line[len(self.name) + 2:] + "\n"
                count += 1
                if count == 11:
                    break
    if description:
        print("{0}{1}{2}".format(self.COLOR, description,
                                 self.meta.color["ENDC"]))
    else:
        self.msg.pkg_not_found("", self.name, "No matching", "\n")
        raise SystemExit(1)
    if description and self.repo == "sbo":
        print("")
Print package description by repository
60,409
def start(self):
    """Download every URL in self.url with the configured downloader
    (wget, aria2c, curl or httpie) and verify each file afterwards.

    BUG FIX: the progress ``print`` was a broken string literal — the
    closing quote was missing, so ``.format(...)`` ended up inside the
    string, and the file name had no placeholder.  Reconstructed with a
    ``{4}`` placeholder for the file name.
    """
    dwn_count = 1
    self._directory_prefix()
    for dwn in self.url:
        self.file_name = dwn.split("/")[-1].replace("%2B", "+")
        if dwn.startswith("file:///"):
            # local source: build a tarball instead of downloading
            source_dir = dwn[7:-7].replace(slack_ver(), "")
            self._make_tarfile(self.file_name, source_dir)
        self._check_certificate()
        print("\n[{0}/{1}][ {2}Download{3} ] {4}".format(
            dwn_count, len(self.url), self.meta.color["GREEN"],
            self.meta.color["ENDC"], self.file_name))
        if self.downder in ["wget"]:
            subprocess.call("{0} {1} {2}{3} {4}".format(
                self.downder, self.downder_options,
                self.dir_prefix, self.path, dwn), shell=True)
        if self.downder in ["aria2c"]:
            # aria2c wants the directory without the trailing slash
            subprocess.call("{0} {1} {2}{3} {4}".format(
                self.downder, self.downder_options,
                self.dir_prefix, self.path[:-1], dwn), shell=True)
        elif self.downder in ["curl", "http"]:
            subprocess.call("{0} {1} {2}{3} {4}".format(
                self.downder, self.downder_options,
                self.path, self.file_name, dwn), shell=True)
        self._check_if_downloaded()
        dwn_count += 1
Download files using wget or other downloader . Optional curl aria2c and httpie
60,410
def _make_tarfile(self, output_filename, source_dir):
    """Pack source_dir into a gzip-compressed tarball named
    output_filename, using the directory's basename as archive root."""
    with tarfile.open(output_filename, "w:gz") as archive:
        archive.add(source_dir, arcname=os.path.basename(source_dir))
Create . tar . gz file
60,411
def _directory_prefix(self):
    """Pick the downloader-specific 'target directory' option flag;
    only wget and aria2c need one."""
    prefixes = {"wget": "--directory-prefix=", "aria2c": "--dir="}
    if self.downder in prefixes:
        self.dir_prefix = prefixes[self.downder]
Downloader options for specific directory
60,412
def _check_if_downloaded(self):
    """Warn when the expected file did not arrive and let the user
    choose whether to continue or abort."""
    if not os.path.isfile(self.path + self.file_name):
        print("")
        self.msg.template(78)
        print("| Download '{0}' file [ {1}FAILED{2} ]".format(
            self.file_name, self.meta.color["RED"],
            self.meta.color["ENDC"]))
        self.msg.template(78)
        print("")
        if self.msg.answer() not in ["y", "Y"]:
            raise SystemExit()
Check if file downloaded
60,413
def _check_certificate(self):
    """Append wget's certificate/Oracle-licence-cookie options when the
    file is an Oracle JDK source from the sbo repository, after asking
    the user for confirmation."""
    if (self.file_name.startswith("jdk-") and self.repo == "sbo"
            and self.downder == "wget"):
        certificate = (' --no-check-certificate --header="Cookie: '
                       'oraclelicense=accept-securebackup-cookie"')
        self.msg.template(78)
        print("| '{0}' need to go ahead downloading".format(
            certificate[:23].strip()))
        self.msg.template(78)
        print("")
        self.downder_options += certificate
        if self.msg.answer() not in ["y", "Y"]:
            raise SystemExit()
Check for certificates options for wget
60,414
def slack_package(prgnam):
    """Return (as a one-element list) the path of the newest _SBo binary
    package built for prgnam in the output directory; abort via
    Msg().build_FAILED when none exists."""
    candidates = []
    for pkg in find_package(prgnam, _meta_.output):
        if pkg.startswith(prgnam) and pkg[:-4].endswith("_SBo"):
            candidates.append(pkg)
    newest, cache = "", "0"
    for candidate in candidates:
        if LooseVersion(candidate) > LooseVersion(cache):
            newest = candidate
            cache = newest
    if not newest:
        Msg().build_FAILED(prgnam)
        raise SystemExit(1)
    return ["".join(_meta_.output + newest)]
Return maximum binary Slackware package from output directory
60,415
def server(self):
    """Return the Content-Length (int) of the remote registry file, or a
    single-space string when the size cannot be determined.
    NOTE(review): Python 2 urllib2 API — presumably the module runs
    under Python 2 only."""
    try:
        response = urllib2.urlopen(self.registry)
        headers = response.info()
        return int(headers.getheaders("Content-Length")[0])
    except (urllib2.URLError, IndexError):
        return " "
Returns the size of remote files
60,416
def units(comp_sum, uncomp_sum):
    """Sum two lists of Kb-valued strings and scale each total to Kb, Mb
    or Gb; returns ([comp_unit, uncomp_unit], [compressed, uncompressed])."""
    def scale(values):
        # start in Mb, promote to Gb above 1024, demote to Kb below 1
        total = round(sum(map(float, values)) / 1024, 2)
        unit = "Mb"
        if total > 1024:
            total, unit = round(total / 1024, 2), "Gb"
        if total < 1:
            total, unit = sum(map(int, values)), "Kb"
        return unit, total

    comp_unit, compressed = scale(comp_sum)
    uncomp_unit, uncompressed = scale(uncomp_sum)
    return [comp_unit, uncomp_unit], [compressed, uncompressed]
Calculate package size
60,417
def status(sec):
    """Animate one cycle of the 4-frame progress toolbar (| / - \\) when
    the progress bar is enabled, sleeping sec seconds between frames."""
    if _meta_.prg_bar in ["on", "ON"]:
        for sym in ("|", "/", "-", "\\"):
            sys.stdout.write("\b{0}{1}{2}".format(
                _meta_.color["GREY"], sym, _meta_.color["ENDC"]))
            sys.stdout.flush()
            time.sleep(float(sec))
Toolbar progressive status
60,418
def continue_to_install(self):
    """Build and install the collected packages after user confirmation;
    stop early after downloading when --download-only was given."""
    anything_to_do = (self.count_uni > 0 or self.count_upg > 0
                      or "--download-only" in self.flag
                      or "--rebuild" in self.flag)
    if anything_to_do:
        if self.master_packages and self.msg.answer() in ["y", "Y"]:
            installs, upgraded = self.build_install()
            if "--download-only" in self.flag:
                raise SystemExit()
            self.msg.reference(installs, upgraded)
            write_deps(self.deps_dict)
            delete(self.build_folder)
Continue to install ?
60,419
def clear_masters(self):
    """Drop duplicates and packages already scheduled as dependencies
    from the master package list.

    BUG FIX: the original removed items from ``self.master_packages``
    while iterating over it, which skips the element that follows each
    removal; filtering into a new list handles consecutive dependency
    entries correctly.
    """
    deduplicated = Utils().remove_dbs(self.master_packages)
    self.master_packages = [pkg for pkg in deduplicated
                            if pkg not in self.dependencies]
Clear master slackbuilds if already exist in dependencies or if added to install two or more times
60,420
def matching(self):
    """Collect every non-blacklisted package whose name contains one of
    the not-found SBo names."""
    for missing in self.package_not_found:
        for candidate in self.data:
            if missing in candidate and candidate not in self.blacklist:
                self.package_found.append(candidate)
Return found matching SBo packages
60,421
def sbo_version_source(self, slackbuilds):
    """Return parallel lists: 'name-version' strings and source URLs,
    ticking the progress toolbar per SlackBuild."""
    sbo_versions, sources = [], []
    for sbo in slackbuilds:
        status(0.02)
        sbo_versions.append("{0}-{1}".format(sbo, SBoGrep(sbo).version()))
        sources.append(SBoGrep(sbo).source())
    return [sbo_versions, sources]
Create sbo name with version
60,422
def one_for_all(self, deps):
    """Flatten the nested dependency lists (deps is reversed in place so
    deepest dependencies come first) and strip duplicate entries."""
    deps.reverse()
    flattened = Utils().dimensional_list(deps)
    return Utils().remove_dbs(flattened)
Dependencies can themselves depend on other dependencies, so the collected lists are nested inside one another. This loop flattens them into a one-dimensional list and removes duplicate packages from the dependencies.
60,423
def tag(self, sbo):
    """Return the status colour for sbo — green: installed, yellow:
    upgradable, red: not installed — updating the matching counters."""
    sbo_name = "-".join(sbo.split("-")[:-1])
    installed_name = GetFromInstalled(sbo_name).name()
    if find_package(sbo, self.meta.pkg_path):
        paint = self.meta.color["GREEN"]
        self.count_ins += 1
        if "--rebuild" in self.flag:
            self.count_upg += 1
    elif sbo_name == installed_name:
        paint = self.meta.color["YELLOW"]
        self.count_upg += 1
    else:
        paint = self.meta.color["RED"]
        self.count_uni += 1
    return paint
Tag with color green if package already installed color yellow for packages to upgrade and color red if not installed .
60,424
def select_arch(self, src):
    """Return the unsupported/untested marker found in src, if any,
    otherwise the build architecture (last matching marker wins)."""
    chosen = self.arch
    for marker in self.unst:
        if marker in src:
            chosen = marker
    return chosen
Looks if sources unsupported or untested from arch else select arch .
60,425
def filenames(self, sources):
    """Return the basename component of every source link."""
    return [src.split("/")[-1] for src in sources]
Return filenames from sources links
60,426
def not_downgrade(self, prgnam):
    """Return True (after warning) when the SBo version of prgnam is
    older than the installed one, so the package is not downgraded."""
    name = "-".join(prgnam.split("-")[:-1])
    sbo_ver = prgnam.split("-")[-1]
    # strip the leading separator from the stored version; "0" if absent
    ins_ver = GetFromInstalled(name).version()[1:] or "0"
    if LooseVersion(sbo_ver) < LooseVersion(ins_ver):
        self.msg.template(78)
        print("| Package {0} don't downgrade, "
              "setting by user".format(name))
        self.msg.template(78)
        return True
Don't downgrade packages if the SBo version is lower than the installed one
60,427
def sbosrcarsh(self, prgnam, sbo_link, src_link):
    """Rewrite each source link to point at the sbosrcarch mirror under
    <category>/<name>/."""
    name = "-".join(prgnam.split("-")[:-1])
    category = "{0}/{1}/".format(sbo_link.split("/")[-2], name)
    return ["{0}{1}{2}".format(self.meta.sbosrcarch_link, category,
                               link.split("/")[-1])
            for link in src_link]
Alternative repository for sbo sources
60,428
def prog_version():
    """Print program version, licence and contact information."""
    print("Version : {0}\n"
          "Licence : {1}\n"
          "Email : {2}\n"
          "Maintainer: {3}".format(_meta_.__version__, _meta_.__license__,
                                   _meta_.__email__, _meta_.__maintainer__))
Print version license and email
60,429
def pkg_not_found(self, bol, pkg, message, eol):
    """Print a 'no such package' message wrapped by bol/eol."""
    print("{0}No such package {1}: {2}{3}".format(bol, pkg, message, eol))
Print message when package not found
60,430
def build_FAILED(self, prgnam):
    """Print the build-failure banner with a pointer to the build logs."""
    red = self.meta.color["RED"]
    endc = self.meta.color["ENDC"]
    self.template(78)
    print("| Some error on the package {0} [ {1}FAILED{2} ]".format(
        prgnam, red, endc))
    self.template(78)
    print("| See the log file in '{0}/var/log/slpkg/sbo/build_logs{1}' "
          "directory or read the README file".format(
              self.meta.color["CYAN"], endc))
    self.template(78)
    print("")
Print error message if build failed
60,431
def security_pkg(self, pkg):
    """Print a warning banner urging the user to read the README before
    proceeding with a security-sensitive package.

    BUG FIX: ``.format`` was applied to the *result* of ``print(...)``,
    which only works with the Python 2 print statement and raises
    AttributeError on Python 3; the calls are moved inside ``print`` to
    match the rest of the module.
    """
    print("")
    self.template(78)
    print("| {0}{1}*** WARNING ***{2}".format(
        " " * 27, self.meta.color["RED"], self.meta.color["ENDC"]))
    self.template(78)
    print("| Before proceed with the package '{0}' will you must read\n"
          "| the README file. You can use the command "
          "'slpkg -n {1}'".format(pkg, pkg))
    self.template(78)
    print("")
Warning message for some special reasons
60,432
def reference(self, install, upgrade):
    """Summarise which packages were installed and which were upgraded.
    NOTE(review): itertools.izip_longest is Python 2 only."""
    self.template(78)
    print("| Total {0} {1} installed and {2} {3} upgraded".format(
        len(install), self.pkg(len(install)),
        len(upgrade), self.pkg(len(upgrade))))
    self.template(78)
    for installed, upgraded in itertools.izip_longest(install, upgrade):
        if upgraded:
            print("| Package {0} upgraded successfully".format(upgraded))
        if installed:
            print("| Package {0} installed successfully".format(installed))
    self.template(78)
    print("")
Reference list with packages installed and upgraded
60,433
def matching(self, packages):
    """Explain that the exact package name was not found and that
    matching packages follow (dependencies unresolved)."""
    print("\nNot found package with the name [ {0}{1}{2} ]. "
          "Matching packages:\nNOTE: Not dependenc"
          "ies are resolved\n".format(self.meta.color["CYAN"],
                                      "".join(packages),
                                      self.meta.color["ENDC"]))
Message for matching packages
60,434
def mirrors(name, location):
    """Build the URL of name under location on the configured Slackware
    mirror, selecting the tree by architecture and the path component by
    release (version for stable, release name otherwise)."""
    rel = _meta_.slack_rel
    ver = slack_ver()
    repo = Repo().slack()
    if _meta_.arch == "x86_64":
        tree = "slackware64"
    elif _meta_.arch.startswith("arm"):
        tree = "slackwarearm"
    else:
        tree = "slackware"
    tail = ver if rel == "stable" else rel
    return repo + "{0}-{1}/{2}{3}".format(tree, tail, location, name)
Select Slackware official mirror packages based architecture and version .
60,435
def select(self):
    """Show the detected packages and the Slackware command menu, read
    the user's choice and execute it.
    NOTE(review): raw_input is Python 2 only."""
    print("\nDetected Slackware binary package for installation:\n")
    for pkg in self.packages:
        print(" " + pkg.split("/")[-1])
    print("")
    self.msg.template(78)
    print("| Choose a Slackware command:")
    self.msg.template(78)
    for com in sorted(self.commands):
        print("| {0}{1}{2}) {3}{4}{5}".format(
            self.meta.color["RED"], com, self.meta.color["ENDC"],
            self.meta.color["GREEN"], self.commands[com],
            self.meta.color["ENDC"]))
    self.msg.template(78)
    try:
        self.choice = raw_input(" > ")
    except EOFError:
        print("")
        raise SystemExit()
    if self.choice in self.commands.keys():
        # move the cursor up one line and echo the chosen command
        sys.stdout.write(" \x1b[1A{0}{1}{2}\n\n".format(
            self.meta.color["CYAN"], self.commands[self.choice],
            self.meta.color["ENDC"]))
        sys.stdout.flush()
        self.execute()
Select Slackware command
60,436
def execute(self):
    """Dispatch the selected menu choice to the package manager:
    'i' installs, 'u'/'r' upgrade with the command-specific flag."""
    if self.choice in self.commands.keys():
        if self.choice == "i":
            PackageManager(self.packages).install("")
        elif self.choice in ["u", "r"]:
            PackageManager(self.packages).upgrade(
                self.commands[self.choice][11:])
Execute Slackware command
60,437
def read_enabled(self):
    """Collect the uncommented repository names that follow the tag line
    in the configuration text."""
    for line in self.conf.splitlines():
        line = line.lstrip()
        if self.tag in line:
            self.tag_line = True
        if (line and self.tag_line and not line.startswith("#")
                and self.tag not in line):
            self.enabled.append(line)
    self.tag_line = False
Read enabled repositories
60,438
def read_disabled(self):
    """Collect the commented-out repository names that follow the tag
    line in the configuration text."""
    for line in self.conf.splitlines():
        line = line.lstrip()
        if self.tag in line:
            self.tag_line = True
        if self.tag_line and line.startswith("#"):
            # strip all '#' characters and surrounding whitespace
            self.disabled.append("".join(line.split("#")).strip())
    self.tag_line = False
Read disabled repositories
60,439
def update_repos(self):
    """Rewrite the repositories configuration file: uncomment selected
    repositories, comment out unselected ones, copy everything else."""
    conf_file = "{0}{1}".format(self.meta.conf_path,
                                self.repositories_conf)
    with open(conf_file, "w") as new_conf:
        for line in self.conf.splitlines():
            line = line.lstrip()
            if self.tag in line:
                self.tag_line = True
            if self.tag_line and line.startswith("#"):
                repo = "".join(line.split("#")).strip()
                if repo in self.selected:
                    # selected: write it back uncommented
                    new_conf.write(repo + "\n")
                    continue
            if (self.tag_line and not line.startswith("#")
                    and line != self.tag):
                repo = line.strip()
                if repo not in self.selected:
                    # not selected: comment it out
                    new_conf.write("# " + line + "\n")
                    continue
            new_conf.write(line + "\n")
Update repositories . conf file with enabled or disabled repositories
60,440
def reference(self):
    """Report which repositories ended up enabled and the totals."""
    total_enabled = ", ".join(self.selected)
    if len(total_enabled) < 1:
        total_enabled = ("{0}Are you crazy? This is a package "
                         "manager for packages :p{1}".format(
                             self.meta.color["RED"],
                             self.meta.color["ENDC"]))
    self.msg.template(78)
    print("| Enabled repositories:")
    self.msg.template(78)
    print("| {0}".format(total_enabled))
    self.msg.template(78)
    print("{0}Total {1}/{2} repositories enabled.{3}\n".format(
        self.meta.color["GREY"], len(self.selected),
        len(self.enabled + self.disabled), self.meta.color["ENDC"]))
Reference enabled repositories
60,441
def sbo_search_pkg(name):
    """Return the repository URL of the named SlackBuild by scanning
    SLACKBUILDS.TXT, or '' when it is not found."""
    repo = Repo().default_repository()["sbo"]
    sbo_url = "{0}{1}/".format(repo, slack_ver())
    slackbuilds_txt = Utils().read_file(
        _meta_.lib_path + "sbo_repo/SLACKBUILDS.TXT")
    for line in slackbuilds_txt.splitlines():
        if line.startswith("SLACKBUILD LOCATION"):
            # characters 23+ hold the relative path; its last component
            # is the SlackBuild name
            location = line[23:].strip()
            sbo_name = location.split("/")[-1].replace("\n", "").strip()
            if name == sbo_name:
                return sbo_url + location + "/"
    return ""
Search for package path from SLACKBUILDS . TXT file and return url
60,442
def router_main(self):
    """Receive and log packets in a loop until a shutdown signal."""
    while True:
        try:
            timestamp, dev, pkt = self.net.recv_packet(timeout=1.0)
        except NoPackets:
            log_debug("No packets available in recv_packet")
            continue
        except Shutdown:
            log_debug("Got shutdown signal")
            break
        log_debug("Got a packet: {}".format(str(pkt)))
Main method for router ; we stay in a loop in this method receiving packets until the end of time .
60,443
def find_source_files(input_path, excludes):
    """Walk input_path and return the paths of all .java files, pruning
    directories matched by excludes."""
    java_files = []
    input_path = os.path.normpath(os.path.abspath(input_path))
    for dirpath, dirnames, filenames in os.walk(input_path):
        if is_excluded(dirpath, excludes):
            # clearing dirnames in place stops os.walk from descending
            del dirnames[:]
            continue
        java_files.extend(os.path.join(dirpath, filename)
                          for filename in filenames
                          if filename.endswith(".java"))
    return java_files
Get a list of filenames for all Java source files within the given directory .
60,444
def from_bytes(rawbytes):
    """Parse a raw byte string into an IPOptionList of IPOption objects,
    dispatching on the option-number field of each option-type octet.

    FIX: removed the unused locals ``optcopied`` (bit 7) and
    ``optclass`` (bits 5-6), which were computed but never read.
    """
    ipopts = IPOptionList()
    i = 0
    while i < len(rawbytes):
        opttype = rawbytes[i]
        # low 5 bits of the option-type octet select the option class
        optnum = IPOptionNumber(opttype & 0x1f)
        obj = IPOptionClasses[optnum]()
        i += obj.from_bytes(rawbytes[i:])
        ipopts.append(obj)
    return ipopts
Takes a byte string as a parameter and returns a list of IPOption objects .
60,445
def to_bytes(self):
    """Serialise the held IPOption objects and zero-pad the result to a
    multiple of 4 bytes, as the IPv4 header requires.

    BUG FIX: the original always appended ``4 - (len(raw) % 4)`` pad
    bytes, adding 4 needless zero bytes whenever the options already
    ended on a 32-bit boundary; ``-len(raw) % 4`` pads only when needed.
    """
    if not self._options:
        return b''
    raw = b''.join(ipopt.to_bytes() for ipopt in self._options)
    return raw + b'\x00' * (-len(raw) % 4)
Takes a list of IPOption objects and returns a packed byte string of options appropriately padded if necessary .
60,446
def _start_usercode(entryfunction, netobj, codeargdict):
    """Invoke the user's entry function with the calling convention its
    signature supports, warning when command-line args were supplied but
    the function cannot accept them."""
    code = entryfunction.__code__
    takenet = code.co_argcount >= 1
    # CO_VARARGS / CO_VARKEYWORDS flags
    takestarargs = code.co_flags & 0x04 == 0x04
    takekw = code.co_flags & 0x08 == 0x08
    args = codeargdict['args']
    kwargs = codeargdict['kwargs']
    if args and not takestarargs:
        log_warn("User code arguments passed on command line, "
                 "but the user code doesn't take arguments")
    if kwargs and not takekw:
        log_warn("User code keyword args passed on command line, "
                 "but the user code doesn't take kwargs")
    if not takenet:
        raise RuntimeError("Your code does not appear to accept at "
                           "least one parameter for the net object")
    if takestarargs and takekw:
        entryfunction(netobj, *args, **kwargs)
    elif takestarargs:
        entryfunction(netobj, *args)
    elif takekw:
        entryfunction(netobj, **kwargs)
    else:
        entryfunction(netobj)
figure out how to correctly start the user code . warn if args are passed on the command line but the code doesn t accept them .
60,447
def interface_by_name(self, name):
    """Return the interface object registered under the given device
    name; raise KeyError when no such device exists."""
    if name not in self._devinfo:
        raise KeyError("No device named {}".format(name))
    return self._devinfo[name]
Given a device name return the corresponding interface object
60,448
def interface_by_ipaddr(self, ipaddr):
    """Return the interface that owns the given IP address; raise
    KeyError when no interface matches."""
    ipaddr = IPAddr(ipaddr)
    for iface in self._devinfo.values():
        if iface.ipaddr == ipaddr:
            return iface
    raise KeyError("No device has IP address {}".format(ipaddr))
Given an IP address return the interface that owns this address
60,449
def interface_by_macaddr(self, macaddr):
    """Return the interface that owns the given MAC address; raise
    KeyError when no interface matches."""
    macaddr = EthAddr(macaddr)
    for iface in self._devinfo.values():
        if iface.ethaddr == macaddr:
            return iface
    raise KeyError("No device has MAC address {}".format(macaddr))
Given a MAC address return the interface that owns this address
60,450
def from_bytes(rawbytes):
    """Parse a raw byte string into an ICMPv6OptionList of ICMPv6Option
    objects, dispatching on each option-type octet."""
    icmpv6popts = ICMPv6OptionList()
    i = 0
    while i < len(rawbytes):
        optnum = ICMPv6OptionNumber(rawbytes[i])
        option = ICMPv6OptionClasses[optnum]()
        i += option.from_bytes(rawbytes[i:])
        icmpv6popts.append(option)
    return icmpv6popts
Takes a byte string as a parameter and returns a list of ICMPv6Option objects .
60,451
def to_bytes(self):
    """Concatenate the serialised ICMPv6 options into one byte string
    (no padding is added)."""
    if not self._options:
        return b''
    return b''.join(opt.to_bytes() for opt in self._options)
Takes a list of ICMPv6Option objects and returns a packed byte string of options (no padding is applied).
60,452
def _unpack_bitmap(bitmap, xenum):
    """Return the set of xenum members whose value bits are all set in
    the integer bitmap."""
    return {enval for enval in xenum
            if enval.value & bitmap == enval.value}
Given an integer bitmap and an enumerated type build a set that includes zero or more enumerated type values corresponding to the bitmap .
60,453
def _make_wildcard_attr_map():
    """Map snake_case OpenflowMatch attribute names to the matching
    OpenflowWildcard members, skipping the 'All'/'Mask' aggregates."""
    xmap = {}
    for wc in OpenflowWildcard:
        if wc.name.endswith('All') or wc.name.endswith('Mask'):
            continue
        # CamelCase -> snake_case, e.g. NwSrc -> _nw_src
        snake = ''.join('_' + ch.lower() if ch.isupper() else ch
                        for ch in wc.name)
        xmap[snake] = wc
    return xmap
Create a dictionary that maps an attribute name in OpenflowMatch with a non - prefix - related wildcard bit from the above OpenflowWildcard enumeration .
60,454
def matches_packet(self, pkt):
    """Return True if pkt satisfies every non-wildcarded field of this
    match object; nw_src/nw_dst use prefix (subnet) matching when their
    wildcard bit-count is non-zero."""
    match = []
    wildbits = _make_bitmap(self._wildcards)
    for mf, pkttuple in OpenflowMatch._match_field_to_packet.items():
        mf = "_{}".format(mf)
        if mf == '_nw_src' or mf == '_nw_dst':
            wattr = "{}_wildcard".format(mf)
            bits = 32 - getattr(self, wattr)
            if bits < 32:
                # partial wildcard: test subnet containment instead of
                # exact equality, then skip the generic comparison
                netaddr = ip_network(
                    "{}/{}".format(getattr(self, mf), bits), strict=False)
                for pktcls, field in pkttuple:
                    if pkt.has_header(pktcls):
                        match.append(
                            getattr(pkt[pktcls], field) in netaddr)
                continue
        elif _wildcard_attr_map[mf].value & wildbits:
            # field fully wildcarded: ignore it
            continue
        for pktcls, field in pkttuple:
            if pkt.has_header(pktcls):
                match.append(
                    getattr(pkt[pktcls], field) == getattr(self, mf))
    return all(match)
Return True if the given packet matches this match object .
60,455
def build_from_packet(pkt):
    """Return a new OpenflowMatch populated from the header fields
    present in pkt."""
    m = OpenflowMatch()
    for mf, pkttuple in OpenflowMatch._match_field_to_packet.items():
        for pktcls, field in pkttuple:
            if pkt.has_header(pktcls):
                setattr(m, mf, getattr(pkt[pktcls], field))
    return m
Build and return a new OpenflowMatch object based on the packet object passed as a parameter .
60,456
def pre_serialize(self, raw, pkt, i):
    """Set the header length to the payload size plus the fixed minimum
    header size before serialisation."""
    self.length = len(raw) + OpenflowHeader._MINLEN
Set the length of the header based on the payload size plus the fixed minimum header size.
60,457
def discoverdevs(self):
    """Enumerate the local pcap-capable devices via pcap_findalldevs and
    record each as a PcapInterface; may only be called once."""
    if len(self._interfaces):
        raise PcapException("Device discovery should only be done once.")
    ppintf = self._ffi.new("pcap_if_t * *")
    errbuf = self._ffi.new("char []", 128)
    rv = self._libpcap.pcap_findalldevs(ppintf, errbuf)
    if rv:
        raise PcapException(
            "pcap_findalldevs returned failure: {}".format(
                self._ffi.string(errbuf)))
    pintf = ppintf[0]
    tmp = pintf
    pindex = 0
    while tmp != self._ffi.NULL:
        xname = self._ffi.string(tmp.name)
        xname = xname.decode('ascii', 'ignore')
        if self._windoze:
            # Windows device names are unwieldy; expose portN instead
            ext_name = "port{}".format(pindex)
        else:
            ext_name = xname
        pindex += 1
        if tmp.description == self._ffi.NULL:
            xdesc = ext_name
        else:
            xdesc = self._ffi.string(tmp.description)
            xdesc = xdesc.decode('ascii', 'ignore')
        # flag bits: 0x1 loopback, 0x2 up, 0x4 running
        isloop = (tmp.flags & 0x1) == 0x1
        isup = (tmp.flags & 0x2) == 0x2
        isrunning = (tmp.flags & 0x4) == 0x4
        self._interfaces.append(
            PcapInterface(ext_name, xname, xdesc, isloop, isup, isrunning))
        tmp = tmp.next
    self._libpcap.pcap_freealldevs(pintf)
Find all the pcap - eligible devices on the local system .
60,458
def set_bpf_filter_on_all_devices(filterstr):
    """Apply the given BPF filter string to every currently open pcap
    device, holding the class lock for the duration."""
    with PcapLiveDevice._lock:
        for dev in PcapLiveDevice._OpenDevices.values():
            _PcapFfi.instance()._set_filter(dev, filterstr)
Long method name but self - explanatory . Set the bpf filter on all devices that have been opened .
60,459
def create_ip_arp_request(srchw, srcip, targetip):
    """Return a broadcast Ethernet+ARP request packet asking which host
    owns targetip, sourced from srchw/srcip."""
    ether = Ethernet()
    ether.src = srchw
    ether.dst = SpecialEthAddr.ETHER_BROADCAST.value
    ether.ethertype = EtherType.ARP
    arp = Arp()
    arp.operation = ArpOperation.Request
    arp.senderhwaddr = srchw
    arp.senderprotoaddr = srcip
    arp.targethwaddr = SpecialEthAddr.ETHER_BROADCAST.value
    arp.targetprotoaddr = targetip
    return ether + arp
Create and return a packet containing an Ethernet header and ARP header .
60,460
def setup_logging(debug, logfile=None):
    """Configure the root logger's format and level; log to logfile when
    one is given, otherwise to the console.

    REFACTOR: the two near-identical ``basicConfig`` calls are collapsed
    into one, passing ``filename`` only when a log file was requested.
    """
    level = logging.DEBUG if debug else logging.INFO
    kwargs = {}
    if logfile is not None:
        kwargs["filename"] = logfile
    logging.basicConfig(format="%(asctime)s %(levelname)8s %(message)s",
                        datefmt="%H:%M:%S %Y/%m/%d", level=level, **kwargs)
Setup logging format and log level .
60,461
def _spawn_threads(self):
    """Start one low-level receiver thread per pcap device and remember
    the thread handles."""
    for devname, pdev in self._pcaps.items():
        worker = threading.Thread(
            target=LLNetReal._low_level_dispatch,
            args=(pdev, devname, self._pktqueue))
        worker.start()
        self._threads.append(worker)
Internal method . Creates threads to handle low - level network receive .
60,462
def _make_pcaps(self):
    """Open a PcapLiveDevice for every known interface; loopback
    interfaces additionally get a raw UDP socket for sending."""
    self._pcaps = {}
    for devname, intf in self._devinfo.items():
        if intf.iftype == InterfaceType.Loopback:
            self._localsend[devname] = _RawSocket(
                devname, protocol=IPProtocol.UDP)
        self._pcaps[devname] = PcapLiveDevice(devname)
Internal method . Create libpcap devices for every network interface we care about and set them in non - blocking mode .
60,463
def _sig_handler(self, signum, stack):
    """SIGINT handler: clear the running flag and, if the packet queue
    is empty, push a sentinel so a blocked consumer wakes up."""
    log_debug("Got SIGINT.")
    if signum == signal.SIGINT:
        LLNetReal.running = False
        if self._pktqueue.qsize() == 0:
            self._pktqueue.put((None, None, None))
Handle process INT signal .
60,464
def _low_level_dispatch(pcapdev, devname, pktqueue):
    """Receiver-thread body: pull packets from one pcap device and push
    them onto the shared queue until the global running flag clears,
    then log final device statistics."""
    while LLNetReal.running:
        pktinfo = pcapdev.recv_packet(timeout=0.2)
        if pktinfo is None:
            continue
        pktqueue.put((devname, pcapdev.dlt, pktinfo))
    log_debug("Receiver thread for {} exiting".format(devname))
    stats = pcapdev.stats()
    log_debug("Final device statistics {}: {} received, {} dropped, "
              "{} dropped/if".format(devname, stats.ps_recv,
                                     stats.ps_drop, stats.ps_ifdrop))
Thread entrypoint for doing low - level receive and dispatch for a single pcap device .
60,465
def __output_see(self, see):
    """Render a Javadoc @see value: HTML links are converted to reST,
    quoted strings pass through verbatim, anything else becomes a
    :java:ref: role."""
    if see.startswith('<a href'):
        return self.__html_to_rst(see)
    if '"' in see:
        return see
    return ':java:ref:`%s`' % (see.replace('#', '.').replace(' ', ''),)
Convert the argument to a
60,466
def compile_type_document(self, imports_block, package, name, declaration):
    """Compile a complete reST document for one type and its members.

    :param imports_block: pre-rendered java:import directives for the file
    :param package: package the type lives in
    :param name: dotted name of the type (includes any outer types)
    :param declaration: javalang AST node for the type declaration
    :return: a util.Document covering the type plus its enum constants,
        fields, constructors, and methods
    """
    # Everything before the last dot names the enclosing (outer) type.
    outer_type = name.rpartition('.')[0]
    document = util.Document()
    document.add(imports_block)
    document.add_heading(name, '=')
    method_summary = util.StringBuilder()
    document.add_object(method_summary)
    package_dir = util.Directive('java:package', package)
    package_dir.add_option('noindex')
    document.add_object(package_dir)
    type_dir = self.compile_type(declaration)
    if outer_type:
        type_dir.add_option('outertype', outer_type)
    document.add_object(type_dir)
    # Enum constants only exist for enum declarations.
    if isinstance(declaration, javalang.tree.EnumDeclaration):
        enum_constants = list(declaration.body.constants)
        enum_constants.sort(key=lambda c: c.name)
        document.add_heading('Enum Constants')
        for enum_constant in enum_constants:
            if self.member_headers:
                document.add_heading(enum_constant.name, '^')
            c = self.compile_enum_constant(name, enum_constant)
            c.add_option('outertype', name)
            document.add_object(c)
    # Members are filtered (self.filter) and sorted by name before output.
    fields = list(filter(self.filter, declaration.fields))
    if fields:
        document.add_heading('Fields', '-')
        fields.sort(key=lambda f: f.declarators[0].name)
        for field in fields:
            if self.member_headers:
                document.add_heading(field.declarators[0].name, '^')
            f = self.compile_field(field)
            f.add_option('outertype', name)
            document.add_object(f)
    constructors = list(filter(self.filter, declaration.constructors))
    if constructors:
        document.add_heading('Constructors', '-')
        constructors.sort(key=lambda c: c.name)
        for constructor in constructors:
            if self.member_headers:
                document.add_heading(constructor.name, '^')
            c = self.compile_constructor(constructor)
            c.add_option('outertype', name)
            document.add_object(c)
    methods = list(filter(self.filter, declaration.methods))
    if methods:
        document.add_heading('Methods', '-')
        methods.sort(key=lambda m: m.name)
        for method in methods:
            if self.member_headers:
                document.add_heading(method.name, '^')
            m = self.compile_method(method)
            m.add_option('outertype', name)
            document.add_object(m)
    return document
Compile a complete document documenting a type and its members
60,467
def compile(self, ast):
    """Compile autodoc documents for a Java syntax tree.

    :param ast: javalang compilation-unit AST
    :return: dict mapping fully qualified type names to
        (package, name, document_text) tuples, one per type declaration
    :raises ValueError: if the file has no package declaration
    """
    documents = {}
    imports = util.StringBuilder()
    for imp in ast.imports:
        # Static and wildcard imports can't be turned into class refs.
        if imp.static or imp.wildcard:
            continue
        # Split the dotted import path: lowercase-leading segments form
        # the package; the first capitalized segment onward is the class.
        package_parts = []
        cls_parts = []
        for part in imp.path.split('.'):
            if cls_parts or part[0].isupper():
                cls_parts.append(part)
            else:
                package_parts.append(part)
        if cls_parts == []:
            # No capitalized segment: treat the last segment as the class.
            cls_parts.append(package_parts.pop())
        package = '.'.join(package_parts)
        cls = '.'.join(cls_parts)
        imports.append(util.Directive('java:import', package + ' ' + cls).build())
    import_block = imports.build()
    if not ast.package:
        raise ValueError('File must have package declaration')
    package = ast.package.name
    type_declarations = []
    for path, node in ast.filter(javalang.tree.TypeDeclaration):
        if not self.filter(node):
            continue
        # Dotted name includes any enclosing types found along the path.
        classes = [n.name for n in path if isinstance(n, javalang.tree.TypeDeclaration)]
        classes.append(node.name)
        name = '.'.join(classes)
        type_declarations.append((package, name, node))
    for package, name, declaration in type_declarations:
        full_name = package + '.' + name
        document = self.compile_type_document(import_block, package, name, declaration)
        documents[full_name] = (package, name, document.build())
    return documents
Compile autodocs for the given Java syntax tree . Documents will be returned documenting each separate type .
60,468
def checksum(data, start=0, skip_word=None):
    """Compute the standard 16-bit Internet (ones-complement) checksum.

    :param data: bytes to checksum; an odd trailing byte is zero-padded
    :param start: initial accumulator value (default 0)
    :param skip_word: optional index of one 16-bit word to exclude
        (e.g. the checksum field itself)
    :return: 16-bit checksum in host byte order
    """
    # Interpret the even-length prefix as native-order 16-bit words.
    arr = array.array('H', data[:-1] if len(data) % 2 else data)
    if skip_word is None:
        start += sum(arr)
    else:
        start += sum(word for i, word in enumerate(arr) if i != skip_word)
    if len(data) % 2:
        # Pad the odd final byte with a zero byte and fold it in.
        start += struct.unpack('H', data[-1:] + b'\x00')[0]
    # Fold carries back into the low 16 bits; twice covers all carries.
    start = (start >> 16) + (start & 0xffff)
    start += (start >> 16)
    return ntohs(~start & 0xffff)
Calculate standard internet checksum over data starting at start th byte
60,469
def javadoc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Sphinx role for linking to external Javadoc.

    :raises ValueError: if no Javadoc source covers the target
    """
    has_explicit_title, title, target = split_explicit_title(text)
    target = utils.unescape(target)
    title = utils.unescape(title)
    if not has_explicit_title:
        # A leading '~' abbreviates the displayed title to the last
        # dotted component, while the target keeps the full name.
        target = target.lstrip('~')
        if title[0] == '~':
            title = title[1:].rpartition('.')[2]
    env = inliner.document.settings.env
    ref = get_javadoc_ref(env.app, rawtext, target)
    if not ref:
        raise ValueError("no Javadoc source found for %s in javadoc_url_map" % (target,))
    ref.append(nodes.Text(title, title))
    return [ref], []
Role for linking to external Javadoc
60,470
def add(self, port, pkt):
    """Add a new (input port, packet) pair to the buffer.

    :param port: input port the packet arrived on
    :param pkt: packet to store (deep-copied so later caller mutation
        can't change the buffered copy)
    :return: the integer id assigned to the buffered packet (ids start at 1)
    :raises FullBuffer: when the buffer already holds _buffsize entries
    """
    # Renamed from ``id`` to avoid shadowing the builtin.
    bufid = len(self._buffer) + 1
    if bufid > self._buffsize:
        raise FullBuffer()
    self._buffer[bufid] = (port, deepcopy(pkt))
    return bufid
Add new input port + packet to buffer .
60,471
def _handle_datapath(self, inport, packet):
    """Handle a single packet arriving on the data plane.

    Resolves the input port name to a port number, looks the packet up
    in the flow tables, and either applies the matched actions or punts
    the packet to the controller via packet-in on a miss.
    """
    inport = self._switchyard_net.port_by_name(inport)
    portnum = inport.ifnum
    log_info("Processing packet: {}->{}".format(portnum, packet))
    actions = None
    # NOTE(review): every table is consulted but only the LAST table's
    # match result is kept (and tnum is unused) -- confirm whether the
    # lookup is meant to stop at the first matching table instead.
    for tnum, t in enumerate(self._tables):
        actions = t.match_packet(portnum, packet)
    if actions is None:
        # Table miss: send the packet to the controller.
        self._send_packet_in(portnum, packet)
    else:
        self._datapath_action(portnum, packet, actions=actions)
Handle single packet on the data plane .
60,472
def to_bytes(self):
    """Return the packed byte representation of the TCP header:
    fixed header fields followed by any TCP options."""
    return self._make_header(self._checksum) + self._options.to_bytes()
Return packed byte representation of the TCP header .
60,473
def infer_netmask(addr):
    """Guess the prefix length of an IPv4 address from its class.

    :param addr: address as an int (or anything int() accepts)
    :return: number of network bits: 0 for 0.0.0.0, 8 for class A,
        16 for class B, 24 for class C, 32 for class D/E
    """
    addr = int(addr)
    if addr == 0:
        # The all-zeros address has no network bits.
        return 0
    if (addr & (1 << 31)) == 0:
        return 8    # class A: leading bit 0
    if (addr & (3 << 30)) == 2 << 30:
        return 16   # class B: leading bits 10
    if (addr & (7 << 29)) == 6 << 29:
        return 24   # class C: leading bits 110
    # class D/E (multicast/experimental): treat as a host-specific /32.
    return 32
Uses network classes to guess the number of network bits
60,474
def isBridgeFiltered(self):
    """Check whether this address is an IEEE 802.1D MAC Bridge Filtered
    group address, i.e. in 01:80:C2:00:00:00 -- 01:80:C2:00:00:0F."""
    octets = self.__value
    return (octets[0] == 0x01 and octets[1] == 0x80 and octets[2] == 0xC2
            and octets[3] == 0x00 and octets[4] == 0x00
            and octets[5] <= 0x0F)
Checks if address is an IEEE 802 . 1D MAC Bridge Filtered MAC Group Address
60,475
def toStr(self, separator=':'):
    """Return the address as twelve lowercase hex digits, two per
    octet, joined by *separator*."""
    parts = ['{:02x}'.format(octet) for octet in self.__value]
    return separator.join(parts)
Returns the address as string consisting of 12 hex chars separated by separator .
60,476
def to_bytes(self):
    """Return the packed byte representation of the Ethernet header:
    destination MAC, source MAC, then the 16-bit EtherType."""
    fields = (self._dst.packed, self._src.packed, self._ethertype.value)
    return struct.pack(Ethernet._PACKFMT, *fields)
Return packed byte representation of the Ethernet header .
60,477
def _init():
    """One-time static initialization of the ApplicationLayer registry;
    subsequent calls are no-ops."""
    if ApplicationLayer._isinit:
        return
    ApplicationLayer._isinit = True
    # Per-socket queues for data headed up to applications, plus a
    # single shared queue for data coming down from applications.
    ApplicationLayer._to_app = {}
    ApplicationLayer._from_app = Queue()
Internal switchyard static initialization method .
60,478
def recv_from_app(timeout=_default_timeout):
    """Receive application-layer data destined for the network.

    Blocks up to *timeout* seconds on the from-app queue.

    :raises NoPackets: if nothing arrives before the timeout expires
    """
    try:
        return ApplicationLayer._from_app.get(timeout=timeout)
    except Empty:
        pass
    # Fall through: the queue stayed empty for the whole timeout.
    raise NoPackets()
Called by a network stack implementer to receive application - layer data for sending on to a remote location .
60,479
def send_to_app(proto, local_addr, remote_addr, data):
    """Push application-layer data up from the stack.

    Tries the exact local (proto, ip, port) first, then the wildcard
    address 0.0.0.0 on the same port.  Returns True if a registered
    socket queue accepted the data, False otherwise.
    """
    proto = IPProtocol(proto)
    local_addr = _normalize_addrs(local_addr)
    remote_addr = _normalize_addrs(remote_addr)
    wildcard = _normalize_addrs(("0.0.0.0", local_addr[1]))
    for candidate in (local_addr, wildcard):
        key = (proto, candidate[0], candidate[1])
        with _lock:
            sockqueue = ApplicationLayer._to_app.get(key, None)
        if sockqueue is not None:
            sockqueue.put((local_addr, remote_addr, data))
            return True
    log_warn("No socket queue found for local proto/address: {}".format(key))
    return False
Called by a network stack implementer to push application - layer data up from the stack .
60,480
def _registry_update(s, oldid):
    """Re-key socket *s*'s queue in the registry after the socket was
    re-bound to a different local port.  Returns None."""
    with _lock:
        # Move the existing queue from the old id to the new one.
        queue = ApplicationLayer._to_app.pop(oldid)
        ApplicationLayer._to_app[s._sockid()] = queue
Internal method used to update an existing socket registry when the socket is re - bound to a different local port number . Requires the socket object and old sockid . Returns None .
60,481
def _unregister_socket(s):
    """Remove socket *s* from the ApplicationLayer registry, warning if
    its upward queue still holds undelivered data."""
    with _lock:
        queue = ApplicationLayer._to_app.pop(s._sockid())
    if not queue.empty():
        log_warn("Socket being destroyed still has data enqueued for application layer.")
Internal method used to remove the socket from AppLayer registry . Warns if the upward socket queue has any left - over data .
60,482
def bind(self, address):
    """Re-bind this socket to a new local address.

    :param address: 2-tuple of (ip address, port number)
    :return: 0 on success, -1 if the port is already in use
    """
    in_use = _gather_ports().union(ApplicationLayer._emuports())
    if address[1] in in_use:
        log_warn("Port is already in use.")
        return -1
    # Capture the old registry key before the local address changes.
    oldid = self._sockid()
    self._local_addr = _normalize_addrs(address)
    self.__set_fw_rules()
    ApplicationLayer._registry_update(self, oldid)
    return 0
Alter the local address with which this socket is associated . The address parameter is a 2 - tuple consisting of an IP address and port number .
60,483
def recv(self, buffersize, flags=0):
    """Receive data on the socket; *flags* is currently ignored.
    Returns only the data payload (addresses are discarded)."""
    local, remote, payload = self._recv(buffersize)
    return payload
Receive data on the socket . The buffersize and flags arguments are currently ignored . Only returns the data .
60,484
def settimeout(self, timeout):
    """Set the timeout/blocking behavior for this socket.

    None means block indefinitely; 0 means non-blocking; any other
    value sets a blocking timeout of that many seconds.
    """
    if timeout is None:
        # Block forever; the previous timeout value is left untouched.
        self._block = True
        return
    value = float(timeout)
    if value == 0.0:
        # Zero timeout selects non-blocking mode.
        self._block = False
        return
    self._timeout = value
    self._block = True
Set the timeout value for this socket .
60,485
def to_bytes(self):
    """Return the packed byte representation of the ARP header:
    hw/proto types and lengths, operation, then sender and target
    hardware/protocol addresses."""
    fields = (self._hwtype.value, self._prototype.value,
              self._hwaddrlen, self._protoaddrlen,
              self._operation.value,
              self._senderhwaddr.packed, self._senderprotoaddr.packed,
              self._targethwaddr.packed, self._targetprotoaddr.packed)
    return struct.pack(Arp._PACKFMT, *fields)
Return packed byte representation of the ARP header .
60,486
def _process_table_cells(self, table):
    """Compile every row of an HTML *table* into lists of Cell tuples.

    Only <td>/<th> children are kept; header cells below the first row
    are rendered bold inline.
    """
    rows = []
    for rownum, tr in enumerate(table.find_all('tr')):
        row = []
        for child in tr.contents:
            cell_type = getattr(child, 'name', None)
            if cell_type not in ('td', 'th'):
                continue
            rowspan = int(child.attrs.get('rowspan', 1))
            colspan = int(child.attrs.get('colspan', 1))
            contents = self._process_children(child).strip()
            # Header cells after the first row become bold inline text.
            if cell_type == 'th' and rownum > 0:
                contents = self._inline('**', contents)
            row.append(Cell(cell_type, rowspan, colspan, contents))
        rows.append(row)
    return rows
Compile all the table cells .
60,487
def import_or_die(module_name, entrypoint_names):
    """Import user code and return one of its entrypoint functions.

    :param module_name: path to the user's .py file (or module name)
    :param entrypoint_names: candidate function/symbol names to return;
        if empty, the module is imported for side effects only
    :raises ImportError: if the import fails or none of the entrypoint
        names exists in the module
    """
    log_debug("Importing {}".format(module_name))
    module_name = os.path.abspath(module_name)
    if module_name.endswith('.py'):
        module_name, ext = os.path.splitext(module_name)
    modname = os.path.basename(module_name)
    dirname = os.path.dirname(module_name)
    # Make the module's directory importable.
    if dirname and dirname not in sys.path:
        sys.path.append(dirname)
    if modname in sys.modules:
        # Already imported (e.g. a re-run): reload so user edits take effect.
        user_module = sys.modules.get(modname)
        user_module = importlib.reload(user_module)
    else:
        try:
            # NOTE(review): mypaths is computed but never used -- looks
            # like leftover debugging; confirm before removing.
            mypaths = [x for x in sys.path if ("Cellar" not in x and "packages" not in x)]
            user_module = importlib.__import__(modname)
        except ImportError as e:
            log_failure("Fatal error: couldn't import module (error: {}) while executing {}".format(str(e), modname))
            raise ImportError(e)
    # No entrypoints requested: importing the module was the whole job.
    if not entrypoint_names:
        return
    existing_names = dir(user_module)
    for method in entrypoint_names:
        if method in existing_names:
            return getattr(user_module, method)
    # None of the candidates was found: build a helpful error message.
    if len(entrypoint_names) > 1:
        entrypoints = "one of {}".format(', '.join(entrypoint_names))
    else:
        entrypoints = entrypoint_names[0]
    raise ImportError("Required entrypoint function or symbol ({}) not found in your code".format(entrypoints))
Import user code ; return reference to usercode function .
60,488
def _runcmd(progargs, stdinput=None):
    """Run *progargs* through the shell, optionally feeding each string
    in *stdinput* to the process's stdin.

    :param progargs: command to run (passed with shell=True)
    :param stdinput: optional list of lines to write to stdin
    :return: (exit_code, output) where output is combined stdout/stderr
    """
    stdin = None
    if stdinput is not None:
        assert isinstance(stdinput, list)
        stdin = PIPE
    err = 0
    output = b''
    log_debug("Calling {} with input {}".format(' '.join(progargs), stdinput))
    try:
        p = Popen(progargs, shell=True, stdin=stdin, stderr=STDOUT, stdout=PIPE, universal_newlines=True)
        if stdinput is not None:
            for cmd in stdinput:
                print(cmd, file=p.stdin)
            p.stdin.close()
        output = p.stdout.read()
        p.stdout.close()
        err = p.wait(timeout=1.0)
    except OSError as e:
        err = e.errno
        # BUG FIX: OSError has no 'stderror' attribute; the original
        # raised AttributeError here instead of logging the failure.
        log_warn("Error calling {}: {}".format(progargs, e.strerror))
    except Exception as e:
        errstr = str(e)
        log_warn("Error calling {}: {}".format(progargs, errstr))
        err = -1
    log_debug("Result of command (errcode {}): {}".format(err, output))
    return err, output
Run the command progargs with optional input to be fed in to stdin .
60,489
def block(self):
    """Install this object's firewall rules into the 'switchyard' pf
    anchor by piping them to pfctl (pfctl -a switchyard -f -)."""
    status, output = _runcmd("/sbin/pfctl -aswitchyard -f -", self._rules)
    log_debug("Installing rules: {}".format(output))
pfctl - a switchyard - f - < rules . txt pfctl - a switchyard - F rules pfctl - t switchyard - F r
60,490
def show_graph(cn_topo, showintfs=False, showaddrs=False):
    """Draw the topology and display it in a matplotlib window."""
    __do_draw(cn_topo, showaddrs=showaddrs, showintfs=showintfs)
    pyp.show()
Display the topology
60,491
def save_graph(cn_topo, filename, showintfs=False, showaddrs=False):
    """Draw the topology and save the rendering to an image file."""
    __do_draw(cn_topo, showaddrs=showaddrs, showintfs=showintfs)
    pyp.savefig(filename)
Save the topology to an image file
60,492
def load_from_file(filename):
    """Load a topology from *filename* and return it.

    The file must contain JSON produced by save_to_file/serialize.
    """
    # BUG FIX: the 'rU' open mode was deprecated since Python 3.4 and
    # removed in 3.11; plain 'r' already does universal newline handling.
    with open(filename, 'r') as infile:
        tdata = infile.read()
    return Topology.unserialize(tdata)
Load a topology from filename and return it .
60,493
def save_to_file(cn_topo, filename):
    """Serialize topology *cn_topo* to JSON and write it to *filename*."""
    with open(filename, 'w') as outfile:
        outfile.write(cn_topo.serialize())
Save a topology to a file .
60,494
def __addNode(self, name, cls):
    """Add a node called *name* of node class *cls* to the topology.

    :raises Exception: if a node with that name already exists
    """
    if name in self.nodes:
        raise Exception("A node by the name {} already exists. Can't add a duplicate.".format(name))
    self.__nxgraph.add_node(name)
    nodedata = self.__nxgraph.node[name]
    nodedata['label'] = name
    nodedata['nodeobj'] = cls()
    nodedata['type'] = cls.__name__
Add a node to the topology
60,495
def addHost(self, name=None):
    """Add a new host node to the topology and return its name.

    If *name* is None, the next unused auto-generated name 'hN' is used.
    """
    if name is None:
        # Probe successive candidates until one is free.
        while True:
            name = 'h' + str(self.__hnum)
            self.__hnum += 1
            if name not in self.__nxgraph:
                break
    self.__addNode(name, Host)
    return name
Add a new host node to the topology .
60,496
def addLink(self, node1, node2, capacity, delay):
    """Add a bidirectional link between node1 and node2 with the given
    capacity and delay.

    Creates one new interface on each endpoint (auto-assigning MAC
    addresses when auto-MAC mode is on) and records which interface
    faces which node on the new edge.

    :raises Exception: if either endpoint node does not exist
    """
    for n in (node1, node2):
        if not self.__nxgraph.has_node(n):
            raise Exception("No node {} exists for building a link".format(n))
    macs = [None, None]
    if self.__auto_macs:
        # Generate sequential MACs from the global interface counter,
        # formatted as aa:bb:cc:dd:ee:ff.
        for i in range(len(macs)):
            macstr = '{:012x}'.format(self.__ifnum)
            self.__ifnum += 1
            macaddr = ':'.join([macstr[j:(j + 2)] for j in range(0, len(macstr), 2)])
            macs[i] = macaddr
    node1if = self.__nxgraph.node[node1]['nodeobj'].addInterface(ethaddr=macs[0])
    node2if = self.__nxgraph.node[node2]['nodeobj'].addInterface(ethaddr=macs[1])
    self.__nxgraph.add_edge(node1, node2)
    # Remember which interface name belongs to which endpoint node on
    # the edge's attribute dict (keyed by node name).
    self.__nxgraph[node1][node2][node1] = node1if
    self.__nxgraph[node1][node2][node2] = node2if
    self.setLinkCharacteristics(node1, node2, capacity, delay)
Add a bidirectional link between node1 and node2 with the given capacity and delay to the topology .
60,497
def serialize(self):
    """Return the topology as a JSON string in node-link format."""
    graph_data = json_graph.node_link_data(self.__nxgraph)
    return json.dumps(graph_data, cls=Encoder)
Return a JSON string of the serialized topology
60,498
def unserialize(jsonstr):
    """Rebuild a Topology from a JSON string produced by serialize().

    :raises Exception: if a serialized node is missing its 'nodeobj'
        or 'type' information
    """
    topod = json.loads(jsonstr)
    G = json_graph.node_link_graph(topod)
    for n, ndict in G.nodes(data=True):
        if 'nodeobj' not in ndict or 'type' not in ndict:
            raise Exception("Required type information is not present in serialized node {} :{}".format(n, ndict))
        nobj = ndict['nodeobj']
        # SECURITY NOTE(review): eval() on the serialized 'type' string
        # executes arbitrary expressions if the input is untrusted; a
        # whitelist lookup of known node classes would be safer.
        cls = eval(ndict['type'])
        # Reconstruct the node object from its serialized attributes.
        ndict['nodeobj'] = cls(**dict(nobj))
    t = Topology(nxgraph=G)
    return t
Unserialize a JSON string representation of a topology
60,499
def getInterfaceAddresses(self, node, interface):
    """Return (ethaddr, ipaddr, netmask) for *interface* on *node*."""
    nodeobj = self.getNode(node)['nodeobj']
    intf = nodeobj.getInterface(interface)
    return intf.ethaddr, intf.ipaddr, intf.netmask
Return the Ethernet and IP + mask addresses assigned to a given interface on a node .