idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
56,600
def get_platform():
    """Return the current OS platform as a short string ('ubuntu' or 'centos').

    Debian hosts are reported as 'ubuntu'; any other distribution raises
    RuntimeError.

    NOTE(review): platform.linux_distribution() was deprecated in Python 3.5
    and removed in 3.8 -- confirm this module only runs on older interpreters
    (or migrate to the 'distro' package).
    """
    tuple_platform = platform.linux_distribution()
    current_platform = tuple_platform[0]
    if "Ubuntu" in current_platform:
        return "ubuntu"
    elif "CentOS" in current_platform:
        return "centos"
    elif "debian" in current_platform:
        # Debian is treated the same as Ubuntu downstream.
        return "ubuntu"
    else:
        raise RuntimeError("This module is not supported on {}.".format(current_platform))
Return the current OS platform .
56,601
def current_version_string():
    """Current system python version as string 'major.minor.micro'."""
    parts = (sys.version_info.major,
             sys.version_info.minor,
             sys.version_info.micro)
    return ".".join(str(part) for part in parts)
Current system python version as string major . minor . micro
56,602
def get_audits():
    """Get MySQL hardening config audits.

    :returns: list of audit objects, or an empty list when the mysql
        client binary is not installed on this node.
    """
    mysql_installed = subprocess.call(['which', 'mysql'],
                                      stdout=subprocess.PIPE) == 0
    if not mysql_installed:
        log("MySQL does not appear to be installed on this node - "
            "skipping mysql hardening", level=WARNING)
        return []

    settings = utils.get_settings('mysql')
    hardening_settings = settings['hardening']
    my_cnf = hardening_settings['mysql-conf']

    return [
        FilePermissionAudit(paths=[my_cnf], user='root', group='root',
                            mode=0o0600),
        TemplatedFile(hardening_settings['hardening-conf'],
                      MySQLConfContext(),
                      TEMPLATES_DIR,
                      mode=0o0750,
                      user='mysql',
                      group='root',
                      service_actions=[{'service': 'mysql',
                                        'actions': ['restart']}]),
        # MySQL and Percona charms do not allow changing the root dir.
        DirectoryPermissionAudit('/var/lib/mysql',
                                 user='mysql',
                                 group='mysql',
                                 recursive=False,
                                 mode=0o755),
        DirectoryPermissionAudit('/etc/mysql',
                                 user='root',
                                 group='root',
                                 recursive=False,
                                 mode=0o700),
    ]
Get MySQL hardening config audits .
56,603
def service_reload(service_name, restart_on_failure=False, **kwargs):
    """Reload a system service, optionally falling back to restart if
    reload fails.

    :param service_name: the name of the service to reload.
    :param restart_on_failure: try a restart when the reload fails.
    :param kwargs: passed through to the underlying service() calls.
    :returns: True when the reload (or fallback restart) succeeded.
    """
    reloaded = service('reload', service_name, **kwargs)
    if not reloaded and restart_on_failure:
        reloaded = service('restart', service_name, **kwargs)
    return reloaded
Reload a system service optionally falling back to restart if reload fails .
56,604
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", **kwargs):
    """Pause a system service.

    Stops the service if it is running, then prevents it from starting again
    at boot, handling systemd, Upstart and SysV init systems.

    :param service_name: the name of the service to pause.
    :param init_dir: path to the Upstart init directory.
    :param initd_dir: path to the SysV init.d directory.
    :param kwargs: extra key=value arguments passed to service_running/stop.
    :returns: True if the service was already stopped or stopped cleanly.
    :raises ValueError: when the init system for the service can't be detected.
    """
    stopped = True
    if service_running(service_name, **kwargs):
        stopped = service_stop(service_name, **kwargs)
    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
    sysv_file = os.path.join(initd_dir, service_name)
    if init_is_systemd():
        # Disable AND mask so neither boot nor a manual 'start' revives it.
        service('disable', service_name)
        service('mask', service_name)
    elif os.path.exists(upstart_file):
        # Upstart: a '<name>.override' file containing "manual" blocks
        # automatic starts.
        override_path = os.path.join(init_dir, '{}.override'.format(service_name))
        with open(override_path, 'w') as fh:
            fh.write("manual\n")
    elif os.path.exists(sysv_file):
        subprocess.check_call(["update-rc.d", service_name, "disable"])
    else:
        raise ValueError(
            "Unable to detect {0} as SystemD, Upstart {1} or"
            " SysV {2}".format(service_name, upstart_file, sysv_file))
    return stopped
Pause a system service .
56,605
def service_resume(service_name, init_dir="/etc/init", initd_dir="/etc/init.d", **kwargs):
    """Resume a system service.

    Re-enables the service at boot (undoing service_pause) and starts it if
    it is not already running; handles systemd, Upstart and SysV.

    :param service_name: the name of the service to resume.
    :param init_dir: path to the Upstart init directory.
    :param initd_dir: path to the SysV init.d directory.
    :param kwargs: extra key=value arguments passed to service_running/start.
    :returns: True when the service ends up running.
    :raises ValueError: when the init system for the service can't be detected.
    """
    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
    sysv_file = os.path.join(initd_dir, service_name)
    if init_is_systemd():
        # Unmask before enabling (reverse order of service_pause).
        service('unmask', service_name)
        service('enable', service_name)
    elif os.path.exists(upstart_file):
        # Remove the Upstart '<name>.override' created by service_pause().
        override_path = os.path.join(init_dir, '{}.override'.format(service_name))
        if os.path.exists(override_path):
            os.unlink(override_path)
    elif os.path.exists(sysv_file):
        subprocess.check_call(["update-rc.d", service_name, "enable"])
    else:
        raise ValueError(
            "Unable to detect {0} as SystemD, Upstart {1} or"
            " SysV {2}".format(service_name, upstart_file, sysv_file))
    started = service_running(service_name, **kwargs)
    if not started:
        started = service_start(service_name, **kwargs)
    return started
Resume a system service .
56,606
def service(action, service_name, **kwargs):
    """Control a system service.

    :param action: the action to take on the service, e.g. 'restart'.
    :param service_name: the name of the service to perform the action on.
    :param kwargs: appended to the command as key=value parameters
        (only meaningful for the non-systemd 'service' tool).
    :returns: True when the command exited with status 0.
    """
    if init_is_systemd():
        cmd = ['systemctl', action, service_name]
    else:
        cmd = ['service', service_name, action]
    cmd.extend('%s=%s' % (key, value)
               for key, value in six.iteritems(kwargs))
    return subprocess.call(cmd) == 0
Control a system service .
56,607
def service_running(service_name, **kwargs):
    """Determine whether a system service is running.

    :param service_name: the name of the service.
    :param kwargs: additional key=value args appended to the Upstart
        'status' command (ignored for systemd and SysV).
    :returns: True when the service is reported running, else False.
    """
    if init_is_systemd():
        return service('is-active', service_name)
    else:
        if os.path.exists(_UPSTART_CONF.format(service_name)):
            try:
                cmd = ['status', service_name]
                for key, value in six.iteritems(kwargs):
                    parameter = '%s=%s' % (key, value)
                    cmd.append(parameter)
                output = subprocess.check_output(
                    cmd, stderr=subprocess.STDOUT).decode('UTF-8')
            except subprocess.CalledProcessError:
                return False
            else:
                # Upstart wording varies across releases; accept any of the
                # known "running" phrasings.
                if ("start/running" in output or
                        "is running" in output or
                        "up and running" in output):
                    return True
        elif os.path.exists(_INIT_D_CONF.format(service_name)):
            return service('status', service_name)
        return False
Determine whether a system service is running .
56,608
def adduser(username, password=None, shell='/bin/bash',
            system_user=False, primary_group=None,
            secondary_groups=None, uid=None, home_dir=None):
    """Add a user to the system.

    Will log but otherwise succeed if the user already exists.

    :param str username: Username to create
    :param str password: Password for the user; when None a system user
        is created instead
    :param str shell: The default shell for the user
    :param bool system_user: Whether to create a login or system user
    :param str primary_group: Primary group for user; defaults to username
        when a group of that name already exists
    :param list secondary_groups: Optional list of additional groups
    :param int uid: UID for user being created
    :param str home_dir: Home directory for user
    :returns: The password database entry struct, as returned by pwd.getpwnam
    """
    try:
        # An existing user (or uid) short-circuits creation.
        user_info = pwd.getpwnam(username)
        log('user {0} already exists!'.format(username))
        if uid:
            user_info = pwd.getpwuid(int(uid))
            log('user with uid {0} already exists!'.format(uid))
    except KeyError:
        log('creating user {0}'.format(username))
        cmd = ['useradd']
        if uid:
            cmd.extend(['--uid', str(uid)])
        if home_dir:
            cmd.extend(['--home', str(home_dir)])
        if system_user or password is None:
            # No password supplied implies a system account.
            cmd.append('--system')
        else:
            cmd.extend([
                '--create-home',
                '--shell', shell,
                '--password', password,
            ])
        if not primary_group:
            try:
                grp.getgrnam(username)
                # A group named after the user already exists; use it.
                primary_group = username
            except KeyError:
                pass
        if primary_group:
            cmd.extend(['-g', primary_group])
        if secondary_groups:
            cmd.extend(['-G', ','.join(secondary_groups)])
        cmd.append(username)
        subprocess.check_call(cmd)
        user_info = pwd.getpwnam(username)
    return user_info
Add a user to the system .
56,609
def user_exists(username):
    """Check if a user exists.

    :param username: account name to look up.
    :returns: True when the user is present in the password database.
    """
    try:
        pwd.getpwnam(username)
    except KeyError:
        return False
    return True
Check if a user exists
56,610
def uid_exists(uid):
    """Check if a uid exists.

    :param uid: numeric user id to look up.
    :returns: True when the uid is present in the password database.
    """
    try:
        pwd.getpwuid(uid)
    except KeyError:
        return False
    return True
Check if a uid exists
56,611
def group_exists(groupname):
    """Check if a group exists.

    :param groupname: group name to look up.
    :returns: True when the group is present in the group database.
    """
    try:
        grp.getgrnam(groupname)
    except KeyError:
        return False
    return True
Check if a group exists
56,612
def gid_exists(gid):
    """Check if a gid exists.

    :param gid: numeric group id to look up.
    :returns: True when the gid is present in the group database.
    """
    try:
        grp.getgrgid(gid)
    except KeyError:
        return False
    return True
Check if a gid exists
56,613
def add_group(group_name, system_group=False, gid=None):
    """Add a group to the system.

    Will log but otherwise succeed if the group already exists.

    :param str group_name: group to create
    :param bool system_group: create a system group
    :param int gid: GID for the group being created
    :returns: The group's database entry struct, as returned by grp.getgrnam
    """
    try:
        group_info = grp.getgrnam(group_name)
        log('group {0} already exists!'.format(group_name))
        if gid:
            group_info = grp.getgrgid(gid)
            log('group with gid {0} already exists!'.format(gid))
    except KeyError:
        log('creating group {0}'.format(group_name))
        # Actual creation is delegated to the module-level helper.
        add_new_group(group_name, system_group, gid)
        group_info = grp.getgrnam(group_name)
    return group_info
Add a group to the system
56,614
def chage(username, lastday=None, expiredate=None, inactive=None,
          mindays=None, maxdays=None, root=None, warndays=None):
    """Change user password expiry information via chage(1).

    Only options whose value is truthy are passed on the command line.

    :raises subprocess.CalledProcessError: when chage exits non-zero.
    """
    # (flag, value) pairs in the same order the original built the command.
    option_pairs = (
        ('--root', root),
        ('--lastday', lastday),
        ('--expiredate', expiredate),
        ('--inactive', inactive),
        ('--mindays', mindays),
        ('--maxdays', maxdays),
        ('--warndays', warndays),
    )
    cmd = ['chage']
    for flag, value in option_pairs:
        if value:
            cmd.extend([flag, value])
    cmd.append(username)
    subprocess.check_call(cmd)
Change user password expiry information
56,615
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
    """Replicate the contents of a path with rsync(1).

    :param flags: rsync flag string, default recursive.
    :param options: extra rsync options; defaults to delete + executability.
    :param timeout: optional seconds after which timeout(1) kills rsync.
    :returns: rsync's stdout, decoded and stripped.
    """
    options = options or ['--delete', '--executability']
    cmd = ['/usr/bin/rsync', flags]
    if timeout:
        # Prefix with timeout(1) so a hung transfer cannot block forever.
        cmd = ['timeout', str(timeout)] + cmd
    cmd += options
    cmd += [from_path, to_path]
    log(" ".join(cmd))
    output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    return output.decode('UTF-8').strip()
Replicate the contents of a path
56,616
def write_file(path, content, owner='root', group='root', perms=0o444):
    """Create or overwrite a file with the contents of a byte string.

    The file is rewritten only when its content differs; otherwise only
    ownership and permissions are corrected when they have drifted.

    :param path: target file path.
    :param content: bytes (or str on py3, encoded as UTF-8) to write.
    :param owner: owning user name.
    :param group: owning group name.
    :param perms: permission bits to apply.
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    # Capture current state; best-effort since the file may not exist yet.
    existing_content = None
    existing_uid, existing_gid, existing_perms = None, None, None
    try:
        with open(path, 'rb') as target:
            existing_content = target.read()
        stat = os.stat(path)
        existing_uid, existing_gid, existing_perms = (
            stat.st_uid, stat.st_gid, stat.st_mode)
    except Exception:
        pass
    if content != existing_content:
        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
            level=DEBUG)
        with open(path, 'wb') as target:
            # Set ownership/permissions before writing so the new content is
            # never visible with the wrong mode.
            os.fchown(target.fileno(), uid, gid)
            os.fchmod(target.fileno(), perms)
            if six.PY3 and isinstance(content, six.string_types):
                content = content.encode('UTF-8')
            target.write(content)
        return
    # Content unchanged: fix up ownership/permission drift only.
    if existing_uid != uid:
        log("Changing uid on already existing content: {} -> {}"
            .format(existing_uid, uid), level=DEBUG)
        os.chown(path, uid, -1)
    if existing_gid != gid:
        log("Changing gid on already existing content: {} -> {}"
            .format(existing_gid, gid), level=DEBUG)
        os.chown(path, -1, gid)
    # NOTE(review): st_mode includes file-type bits (e.g. 0o100644), so this
    # comparison against bare permission bits looks like it always differs
    # and re-chmods every call -- confirm whether that is intended.
    if existing_perms != perms:
        log("Changing permissions on existing content: {} -> {}"
            .format(existing_perms, perms), level=DEBUG)
        os.chmod(path, perms)
Create or overwrite a file with the contents of a byte string .
56,617
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
    """Mount a filesystem at a particular mountpoint.

    :param options: optional mount options string passed via -o.
    :param persist: when True, also add an /etc/fstab entry.
    :param filesystem: fs type recorded in fstab when persisting.
    :returns: True on success, False (after logging) on mount failure.
    """
    cmd_args = ['mount']
    if options is not None:
        cmd_args += ['-o', options]
    cmd_args += [device, mountpoint]
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
        return False
    if persist:
        return fstab_add(device, mountpoint, filesystem, options=options)
    return True
Mount a filesystem at a particular mountpoint
56,618
def umount(mountpoint, persist=False):
    """Unmount a filesystem.

    :param persist: when True, also remove the /etc/fstab entry.
    :returns: True on success, False (after logging) on umount failure.
    """
    try:
        subprocess.check_output(['umount', mountpoint])
    except subprocess.CalledProcessError as e:
        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
        return False
    if persist:
        return fstab_remove(mountpoint)
    return True
Unmount a filesystem
56,619
def fstab_mount(mountpoint):
    """Mount a filesystem using its /etc/fstab entry.

    :param mountpoint: the fstab-listed mount point to mount.
    :returns: True on success, False (after logging) on failure.
    """
    cmd_args = ['mount', mountpoint]
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        # Fixed copy/paste from umount(): this is a mount error, not unmount.
        log('Error mounting {}\n{}'.format(mountpoint, e.output))
        return False
    return True
Mount filesystem using fstab
56,620
def file_hash(path, hash_type='md5'):
    """Generate a hash checksum of the contents of 'path', or None if the
    file is not found.

    :param path: file to hash.
    :param hash_type: any algorithm name hashlib exposes, e.g. 'md5',
        'sha256'.
    :returns: hex digest string, or None when the file does not exist.
    """
    if not os.path.exists(path):
        return None
    h = getattr(hashlib, hash_type)()
    with open(path, 'rb') as source:
        # Read in fixed-size chunks so arbitrarily large files do not have
        # to fit in memory (previous version read the whole file at once).
        for chunk in iter(lambda: source.read(65536), b''):
            h.update(chunk)
    return h.hexdigest()
Generate a hash checksum of the contents of path or None if not found .
56,621
def check_hash(path, checksum, hash_type='md5'):
    """Validate a file using a cryptographic checksum.

    :param checksum: expected hex digest of the file.
    :param hash_type: algorithm name understood by file_hash().
    :raises ChecksumError: when the computed digest differs.
    """
    actual = file_hash(path, hash_type)
    if actual != checksum:
        raise ChecksumError("'%s' != '%s'" % (checksum, actual))
Validate a file using a cryptographic checksum .
56,622
def restart_on_change(restart_map, stopstart=False, restart_functions=None):
    """Decorator: restart services when watched configuration files change.

    Delegates to restart_on_change_helper, which hashes the files in
    restart_map before and after the decorated call.
    """
    def wrap(f):
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            def invoke():
                return f(*args, **kwargs)
            return restart_on_change_helper(invoke, restart_map,
                                            stopstart, restart_functions)
        return wrapped_f
    return wrap
Restart services based on configuration files changing
56,623
def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
                             restart_functions=None):
    """Helper function to perform the restart_on_change function.

    :param lambda_f: zero-argument callable performing the actual work.
    :param restart_map: {file_path: [service_name, ...]} mapping.
    :param stopstart: stop then start services instead of a single restart.
    :param restart_functions: optional {service_name: callable(service_name)}
        overrides for individual services.
    :returns: whatever lambda_f returned.
    """
    if restart_functions is None:
        restart_functions = {}
    # Snapshot hashes of all watched files before running the workload.
    checksums = {path: path_hash(path) for path in restart_map}
    r = lambda_f()
    # Collect the service lists of every file whose hash changed.
    restarts = [restart_map[path] for path in restart_map
                if path_hash(path) != checksums[path]]
    # Flatten and de-duplicate while preserving first-appearance order.
    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
    if services_list:
        actions = ('stop', 'start') if stopstart else ('restart',)
        for service_name in services_list:
            if service_name in restart_functions:
                # Per-service override wins over the generic action.
                restart_functions[service_name](service_name)
            else:
                for action in actions:
                    service(action, service_name)
    return r
Helper function to perform the restart_on_change function .
56,624
def pwgen(length=None):
    """Generate a random password.

    :param length: number of characters; when omitted a random length
        between 35 and 44 is used.
    :returns: password string drawn from a restricted alphanumeric set.
    """
    if length is None:
        length = random.choice(range(35, 45))
    # Exclude look-alike characters and vowels (avoids accidental words).
    excluded = set('l0QD1vAEIOUaeiou')
    pool = [c for c in string.ascii_letters + string.digits
            if c not in excluded]
    rng = random.SystemRandom()
    return ''.join(rng.choice(pool) for _ in range(length))
Generate a random password.
56,625
def is_phy_iface(interface):
    """Returns True if interface is not virtual, otherwise False.

    Looks the interface up under /sys/class/net; entries whose realpath
    lives under a '/virtual/' device tree are treated as virtual.
    """
    if not interface:
        return False
    sys_net = '/sys/class/net'
    if not os.path.isdir(sys_net):
        return False
    for iface_path in glob.glob(os.path.join(sys_net, '*')):
        if '/virtual/' in os.path.realpath(iface_path):
            continue
        if os.path.basename(iface_path) == interface:
            return True
    return False
Returns True if interface is not virtual otherwise False .
56,626
def get_bond_master(interface):
    """Returns bond master if interface is bond slave, otherwise None.

    A virtual interface never reports a bond master.
    """
    if not interface:
        return None
    iface_path = '/sys/class/net/%s' % (interface)
    if not os.path.exists(iface_path):
        return None
    if '/virtual/' in os.path.realpath(iface_path):
        return None
    master = os.path.join(iface_path, 'master')
    if os.path.exists(master):
        master = os.path.realpath(master)
        # Confirm the master interface really is a bond.
        if os.path.exists(os.path.join(master, 'bonding')):
            return os.path.basename(master)
    return None
Returns bond master if interface is bond slave otherwise None .
56,627
def chdir(directory):
    """Change the current working directory to `directory` for the duration
    of a code block, restoring the previous directory when the block exits.

    Useful to run commands from a specified directory.
    """
    previous = os.getcwd()
    os.chdir(directory)
    try:
        yield
    finally:
        os.chdir(previous)
Change the current working directory to a different directory for a code block and return the previous directory after the block exits. Useful to run commands from a specified directory.
56,628
def chownr(path, owner, group, follow_links=True, chowntopdir=False):
    """Recursively change user and group ownership of files and directories
    in the given path.

    Doesn't chown the path itself by default, only its children; broken
    symlinks are always skipped.

    :param path: directory tree root.
    :param owner: owning user name.
    :param group: owning group name.
    :param follow_links: chown link targets (os.chown) instead of the links
        themselves (os.lchown).
    :param chowntopdir: also chown `path` itself.
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    if follow_links:
        chown = os.chown
    else:
        chown = os.lchown
    if chowntopdir:
        # lexists-but-not-exists means a dangling symlink; leave it alone.
        broken_symlink = os.path.lexists(path) and not os.path.exists(path)
        if not broken_symlink:
            chown(path, uid, gid)
    for root, dirs, files in os.walk(path, followlinks=follow_links):
        for name in dirs + files:
            full = os.path.join(root, name)
            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
            if not broken_symlink:
                chown(full, uid, gid)
Recursively change user and group ownership of files and directories in a given path. Doesn't chown the path itself by default, only its children.
56,629
def lchownr(path, owner, group):
    """Recursively change user and group ownership of files and directories
    in a given path, not following symbolic links.

    See the documentation for os.lchown for more information.
    """
    chownr(path, owner, group, follow_links=False)
Recursively change user and group ownership of files and directories in a given path not following symbolic links . See the documentation for os . lchown for more information .
56,630
def owner(path):
    """Returns a tuple containing the username & groupname owning the path.

    :raises OSError: when the path does not exist.
    :raises KeyError: when the uid/gid has no database entry.
    """
    st = os.stat(path)
    username = pwd.getpwuid(st.st_uid).pw_name
    groupname = grp.getgrgid(st.st_gid).gr_name
    return username, groupname
Returns a tuple containing the username & groupname owning the path .
56,631
def get_total_ram():
    """The total amount of system RAM in bytes.

    Parsed from the MemTotal line of /proc/meminfo (Linux only).
    """
    with open('/proc/meminfo', 'r') as f:
        for line in f:
            if not line:
                continue
            key, value, unit = line.split()
            if key == 'MemTotal:':
                # /proc/meminfo reports kB; convert to bytes.
                assert unit == 'kB', 'Unknown unit'
                return int(value) * 1024
    raise NotImplementedError()
The total amount of system RAM in bytes .
56,632
def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
    """Adds the specified path to mlocate's updatedb.conf PRUNEPATHS list."""
    # Nothing to do when the config file is absent (or is a directory).
    if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
        return
    with open(updatedb_path, 'r+') as f_id:
        rewritten = updatedb(f_id.read(), path)
        f_id.seek(0)
        f_id.write(rewritten)
        f_id.truncate()
Adds the specified path to mlocate's updatedb.conf PRUNEPATHS list.
56,633
def install_ca_cert(ca_cert, name=None):
    """Install the given cert as a trusted CA.

    The cert is written to /usr/local/share/ca-certificates and the system
    CA store refreshed. No-op when an identical cert (by md5) is already
    installed, or when ca_cert is empty.

    :param ca_cert: PEM data, str or bytes.
    :param name: basename for the .crt file; defaults to 'juju-<charm name>'.
    """
    if not ca_cert:
        return
    if not isinstance(ca_cert, bytes):
        ca_cert = ca_cert.encode('utf8')
    if not name:
        name = 'juju-{}'.format(charm_name())
    cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name)
    # md5 is used only as a change detector here, not for security.
    new_hash = hashlib.md5(ca_cert).hexdigest()
    if file_hash(cert_file) == new_hash:
        return
    log("Installing new CA cert at: {}".format(cert_file), level=INFO)
    write_file(cert_file, ca_cert)
    subprocess.check_call(['update-ca-certificates', '--fresh'])
Install the given cert as a trusted CA .
56,634
def get_audits():
    """Get OS hardening Secure TTY audits.

    :returns: list with a single TemplatedFile audit for /etc/securetty.
    """
    return [
        TemplatedFile('/etc/securetty', SecureTTYContext(),
                      template_dir=TEMPLATES_DIR,
                      mode=0o0400, user='root', group='root'),
    ]
Get OS hardening Secure TTY audits .
56,635
def dict_keys_without_hyphens(a_dict):
    """Return a new dict with underscores instead of hyphens in keys."""
    return {key.replace('-', '_'): val for key, val in a_dict.items()}
Return a new dict with underscores instead of hyphens in keys.
56,636
def update_relations(context, namespace_separator=':'):
    """Update the context with the relation data.

    Mutates `context` in place, adding:
      - 'current_relation': raw data of the relation being handled (if any)
      - namespaced '<type>:<key>' entries for that data (hyphens -> underscores)
      - 'relations_full': the complete relations structure from hookenv
      - 'relations': per-relation-name lists of unit data dicts, each with
        '__relid__' and '__unit__' injected, excluding the local unit.
    """
    relation_type = charmhelpers.core.hookenv.relation_type()
    relations = []
    context['current_relation'] = {}
    if relation_type is not None:
        relation_data = charmhelpers.core.hookenv.relation_get()
        context['current_relation'] = relation_data
        # Namespace each key with the relation type, e.g. 'db:host'.
        relation_data = dict(
            ("{relation_type}{namespace_separator}{key}".format(
                relation_type=relation_type,
                key=key,
                namespace_separator=namespace_separator), val)
            for key, val in relation_data.items())
        relation_data = dict_keys_without_hyphens(relation_data)
        context.update(relation_data)
        relations = charmhelpers.core.hookenv.relations_of_type(relation_type)
        relations = [dict_keys_without_hyphens(rel) for rel in relations]
    context['relations_full'] = charmhelpers.core.hookenv.relations()
    local_unit = charmhelpers.core.hookenv.local_unit()
    # Rebuild 'relations' as {relation_name: [unit_data, ...]} from the
    # full structure (note: intentionally replaces the list built above).
    relations = {}
    for rname, rids in context['relations_full'].items():
        relations[rname] = []
        for rid, rdata in rids.items():
            data = rdata.copy()
            if local_unit in rdata:
                # Exclude our own unit's data from the peer view.
                data.pop(local_unit)
            for unit_name, rel_data in data.items():
                new_data = {'__relid__': rid, '__unit__': unit_name}
                new_data.update(rel_data)
                relations[rname].append(new_data)
    context['relations'] = relations
Update the context with the relation data .
56,637
def juju_state_to_yaml(yaml_path, namespace_separator=':',
                       allow_hyphens_in_keys=True, mode=None):
    """Update the juju config and state in a yaml file.

    Merges charm config, unit identity/addresses and relation data into any
    variables already present in the file, then rewrites it.

    :param yaml_path: output file; parent directory is created if missing.
    :param namespace_separator: separator used for relation-key namespacing.
    :param allow_hyphens_in_keys: when False, hyphens in config keys are
        rewritten to underscores.
    :param mode: optional permission bits applied to the file.
    """
    config = charmhelpers.core.hookenv.config()
    # NOTE(review): 'charm_dir' is stored without being called -- if it is a
    # function this records the function object, not the path. Confirm.
    config['charm_dir'] = charm_dir
    config['local_unit'] = charmhelpers.core.hookenv.local_unit()
    config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip()
    config['unit_public_address'] = charmhelpers.core.hookenv.unit_get(
        'public-address')
    # Dump unicode scalars as plain YAML strings.
    yaml.add_representer(six.text_type,
                         lambda dumper, value: dumper.represent_scalar(
                             six.u('tag:yaml.org,2002:str'), value))
    yaml_dir = os.path.dirname(yaml_path)
    if not os.path.exists(yaml_dir):
        os.makedirs(yaml_dir)
    if os.path.exists(yaml_path):
        with open(yaml_path, "r") as existing_vars_file:
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input; this file is presumably charm-owned -- confirm.
            existing_vars = yaml.load(existing_vars_file.read())
    else:
        # Touch the file so chmod below always has a target.
        with open(yaml_path, "w+"):
            pass
        existing_vars = {}
    if mode is not None:
        os.chmod(yaml_path, mode)
    if not allow_hyphens_in_keys:
        config = dict_keys_without_hyphens(config)
    existing_vars.update(config)
    update_relations(existing_vars, namespace_separator)
    with open(yaml_path, "w+") as fp:
        fp.write(yaml.dump(existing_vars, default_flow_style=False))
Update the juju config and state in a yaml file .
56,638
def get_audits():
    """Get Apache hardening config audits.

    :returns: list of audit objects; empty when apache2 is not installed.
    """
    # Skip entirely when the apache2 binary is absent.
    if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0:
        log("Apache server does not appear to be installed on this node - "
            "skipping apache hardening", level=INFO)
        return []
    context = ApacheConfContext()
    settings = utils.get_settings('apache')
    audits = [
        FilePermissionAudit(paths=os.path.join(
                            settings['common']['apache_dir'], 'apache2.conf'),
                            user='root', group='root', mode=0o0640),
        TemplatedFile(os.path.join(settings['common']['apache_dir'],
                                   'mods-available/alias.conf'),
                      context,
                      TEMPLATES_DIR,
                      mode=0o0640,
                      user='root',
                      service_actions=[{'service': 'apache2',
                                        'actions': ['restart']}]),
        TemplatedFile(os.path.join(settings['common']['apache_dir'],
                                   'conf-enabled/99-hardening.conf'),
                      context,
                      TEMPLATES_DIR,
                      mode=0o0640,
                      user='root',
                      service_actions=[{'service': 'apache2',
                                        'actions': ['restart']}]),
        DirectoryPermissionAudit(settings['common']['apache_dir'],
                                 user='root',
                                 group='root',
                                 mode=0o0750),
        DisabledModuleAudit(settings['hardening']['modules_to_disable']),
        NoReadWriteForOther(settings['common']['apache_dir']),
        DeletedFile(['/var/www/html/index.html'])
    ]
    return audits
Get Apache hardening config audits .
56,639
def get_audits():
    """Get OS hardening access audits.

    :returns: list of audit objects covering system binary paths,
        /etc/shadow and (optionally) /bin/su.
    """
    settings = utils.get_settings('os')
    # System binary directories that should be read-only, plus any extras
    # configured by the operator.
    path_folders = {'/usr/local/sbin',
                    '/usr/local/bin',
                    '/usr/sbin',
                    '/usr/bin',
                    '/bin'}
    path_folders.update(settings['environment']['extra_user_paths'])
    audits = [
        ReadOnly(path_folders),
        FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600),
    ]
    if 'change_user' not in settings['security']['users_allow']:
        # Restrict su to root-only when user switching isn't allowed.
        audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750))
    return audits
Get OS hardening access audits .
56,640
def harden(overrides=None):
    """Hardening decorator.

    Wraps a hook function so that, before it runs, the hardening modules
    selected either by `overrides` or by the charm's 'harden' config option
    are executed (in catalog order: os, ssh, mysql, apache).

    :param overrides: optional list of module names that takes precedence
        over the 'harden' config value.
    """
    if overrides is None:
        overrides = []

    def _harden_inner1(f):
        # Log the "Hardening function" message only once per decorated fn.
        _logged = {'done': False}

        def _harden_inner2(*args, **kwargs):
            # Global kill-switch used by unit tests.
            if _DISABLE_HARDENING_FOR_UNIT_TEST:
                return f(*args, **kwargs)
            if not _logged['done']:
                log("Hardening function '%s'" % (f.__name__), level=DEBUG)
                _logged['done'] = True
            RUN_CATALOG = OrderedDict([('os', run_os_checks),
                                       ('ssh', run_ssh_checks),
                                       ('mysql', run_mysql_checks),
                                       ('apache', run_apache_checks)])
            # Copy overrides so repeated calls don't mutate the closure list.
            enabled = overrides[:] or (config("harden") or "").split()
            if enabled:
                modules_to_run = []
                # Modules always run in catalog order, regardless of the
                # order they were enabled in.
                for module, func in six.iteritems(RUN_CATALOG):
                    if module in enabled:
                        enabled.remove(module)
                        modules_to_run.append(func)
                if enabled:
                    # Anything left over was not a known module name.
                    log("Unknown hardening modules '%s' - ignoring" %
                        (', '.join(enabled)), level=WARNING)
                for hardener in modules_to_run:
                    log("Executing hardening module '%s'" %
                        (hardener.__name__), level=DEBUG)
                    hardener()
            else:
                log("No hardening applied to '%s'" % (f.__name__),
                    level=DEBUG)
            return f(*args, **kwargs)
        return _harden_inner2

    return _harden_inner1
Hardening decorator .
56,641
def parse_mappings(mappings, key_rvalue=False):
    """Parse space-delimited 'lvalue:rvalue' mappings into a dict.

    By default mappings are lvalue-keyed. When key_rvalue is True the
    rvalue becomes the dict key and entries without a ':' are skipped.

    :param mappings: string of whitespace-separated pairs, or falsy.
    :param key_rvalue: key the result by rvalue instead of lvalue.
    :returns: dict of stripped keys to stripped values.
    """
    parsed = {}
    for entry in (mappings or '').split():
        lvalue, sep, rvalue = entry.partition(':')
        if key_rvalue:
            if not sep:
                # No ':' present - nothing usable as a key.
                continue
            key, value = rvalue, lvalue
        else:
            key, value = lvalue, rvalue
        parsed[key.strip()] = value.strip()
    return parsed
By default mappings are lvalue keyed .
56,642
def parse_data_port_mappings(mappings, default_bridge='br-data'):
    """Parse data port mappings.

    Mappings must be a space-delimited list of bridge:port pairs; the
    returned dict is keyed by port (parse_mappings key_rvalue=True).

    :param mappings: string of bridge:port pairs, or a bare value treated
        as a single port on the default bridge.
    :param default_bridge: bridge used for a bare (colon-less) mapping.
    :returns: dict mapping port -> bridge.
    """
    _mappings = parse_mappings(mappings, key_rvalue=True)
    if not _mappings or list(_mappings.values()) == ['']:
        if not mappings:
            return {}
        # Bare value: treat the first token as a port on the default bridge.
        _mappings = {mappings.split()[0]: default_bridge}
    ports = _mappings.keys()
    # NOTE(review): dict keys are unique by construction, so this check can
    # never fail -- duplicate ports silently collapse inside parse_mappings
    # instead of raising here. Confirm whether that is intended.
    if len(set(ports)) != len(ports):
        raise Exception("It is not allowed to have the same port configured "
                        "on more than one bridge")
    return _mappings
Parse data port mappings .
56,643
def parse_vlan_range_mappings(mappings):
    """Parse vlan range mappings.

    Mappings must be a space-delimited list of provider:start:end entries;
    the range part is returned as a tuple of its ':'-separated pieces.

    :returns: dict mapping provider -> tuple(range components).
    """
    _mappings = parse_mappings(mappings)
    return {provider: tuple(vlan_range.split(':'))
            for provider, vlan_range in six.iteritems(_mappings)}
Parse vlan range mappings .
56,644
def extract_tarfile(archive_name, destpath):
    """Unpack a tar archive, optionally compressed.

    Compression (gz/bz2/...) is auto-detected by tarfile.open.

    :param archive_name: path to the tar archive.
    :param destpath: directory to extract into (created as needed).
    """
    # Context manager ensures the archive handle is closed even if
    # extraction fails (the previous implementation leaked it).
    # NOTE: extractall trusts member paths; only use on trusted archives.
    with tarfile.open(archive_name) as archive:
        archive.extractall(destpath)
Unpack a tar archive optionally compressed
56,645
def extract_zipfile(archive_name, destpath):
    """Unpack a zip file.

    :param archive_name: path to the zip archive.
    :param destpath: directory to extract into (created as needed).
    """
    # Context manager ensures the archive handle is closed even if
    # extraction fails (the previous implementation leaked it).
    # NOTE: extractall trusts member paths; only use on trusted archives.
    with zipfile.ZipFile(archive_name) as archive:
        archive.extractall(destpath)
Unpack a zip file
56,646
def get_address_in_network(network, fallback=None, fatal=False):
    """Get an IPv4 or IPv6 address within the network from the host.

    :param network: CIDR presentation format; multiple networks may be
        given space-delimited and are tried in order.
    :param fallback: address returned when no match is found.
    :param fatal: when True and nothing is found, call no_ip_found_error_out
        instead of returning None.
    :returns: matching local address string, fallback, or None.
    """
    if network is None:
        if fallback is not None:
            return fallback
        if fatal:
            no_ip_found_error_out(network)
        else:
            return None
    networks = network.split() or [network]
    for network in networks:
        _validate_cidr(network)
        network = netaddr.IPNetwork(network)
        for iface in netifaces.interfaces():
            try:
                addresses = netifaces.ifaddresses(iface)
            except ValueError:
                # Interface vanished between interfaces() and now; skip it.
                continue
            if network.version == 4 and netifaces.AF_INET in addresses:
                for addr in addresses[netifaces.AF_INET]:
                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                        addr['netmask']))
                    if cidr in network:
                        return str(cidr.ip)
            if network.version == 6 and netifaces.AF_INET6 in addresses:
                for addr in addresses[netifaces.AF_INET6]:
                    cidr = _get_ipv6_network_from_address(addr)
                    if cidr and cidr in network:
                        return str(cidr.ip)
    if fallback is not None:
        return fallback
    if fatal:
        no_ip_found_error_out(network)
    return None
Get an IPv4 or IPv6 address within the network from the host .
56,647
def is_ipv6(address):
    """Determine whether the provided address is IPv6 or not.

    :returns: True for a valid IPv6 address, False for IPv4 or garbage.
    """
    try:
        return netaddr.IPAddress(address).version == 6
    except netaddr.AddrFormatError:
        return False
Determine whether provided address is IPv6 or not .
56,648
def is_address_in_network(network, address):
    """Determine whether the provided address is within a network range.

    :param network: CIDR presentation format, e.g. '192.168.1.0/24'.
    :param address: an individual IPv4 or IPv6 address (no mask).
    :returns: True when address falls inside network.
    :raises ValueError: when either argument cannot be parsed.
    """
    try:
        net = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)
    try:
        ip = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)
    return ip in net
Determine whether the provided address is within a network range .
56,649
def _get_for_address(address, key):
    """Retrieve an attribute of, or the physical interface that, the IP
    address provided could be bound to.

    :param address: IPv4 or IPv6 address.
    :param key: 'iface' for the interface name, or a netifaces address
        field name such as 'netmask'.
    :returns: the requested value, or None when no interface matches.
    """
    address = netaddr.IPAddress(address)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if address.version == 4 and netifaces.AF_INET in addresses:
            # Only the first IPv4 address on the interface is considered.
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            cidr = network.cidr
            if address in cidr:
                if key == 'iface':
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]
        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                network = _get_ipv6_network_from_address(addr)
                if not network:
                    continue
                cidr = network.cidr
                if address in cidr:
                    if key == 'iface':
                        return iface
                    elif key == 'netmask' and cidr:
                        # For IPv6, report the prefix length from the cidr.
                        return str(cidr).split('/')[1]
                    else:
                        return addr[key]
    return None
Retrieve an attribute of or the physical interface that the IP address provided could be bound to .
56,650
def resolve_network_cidr(ip_address):
    """Resolves the full address cidr of an ip_address based on configured
    network interfaces.

    :returns: CIDR string for the network containing ip_address.
    """
    netmask = get_netmask_for_address(ip_address)
    network = netaddr.IPNetwork("%s/%s" % (ip_address, netmask))
    return str(network.cidr)
Resolves the full address cidr of an ip_address based on configured network interfaces
56,651
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """Return the assigned IP address for a given interface, if any.

    :param iface: interface name; a path-style name ('dev/eth0') is reduced
        to its last component.
    :param inet_type: netifaces address family name, e.g. 'AF_INET' or
        'AF_INET6'.
    :param inc_aliases: also include alias interfaces ('eth0:1', ...).
    :param fatal: raise Exception instead of returning [] on any miss.
    :param exc_list: addresses to exclude from the result.
    :returns: sorted list of address strings (possibly empty).
    """
    # Extract nic if passed /dev/ethX style.
    if '/' in iface:
        iface = iface.split('/')[-1]
    if not exc_list:
        exc_list = []
    try:
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception("Unknown inet type '%s'" % str(inet_type))
    interfaces = netifaces.interfaces()
    if inc_aliases:
        ifaces = []
        for _iface in interfaces:
            # Match the interface itself or any 'iface:N' alias of it.
            if iface == _iface or _iface.split(':')[0] == iface:
                ifaces.append(_iface)
        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)
        ifaces.sort()
    else:
        if iface not in interfaces:
            if fatal:
                raise Exception("Interface '%s' not found " % (iface))
            else:
                return []
        else:
            ifaces = [iface]
    addresses = []
    for netiface in ifaces:
        net_info = netifaces.ifaddresses(netiface)
        if inet_num in net_info:
            for entry in net_info[inet_num]:
                if 'addr' in entry and entry['addr'] not in exc_list:
                    addresses.append(entry['addr'])
    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))
    return sorted(addresses)
Return the assigned IP address for a given interface if any .
56,652
def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured.

    :param addr: IPv4 or IPv6 address string (without zone suffix).
    :returns: interface name carrying the address.
    :raises Exception: when no interface has the address.
    """
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        for inet_type in addresses:
            for _addr in addresses[inet_type]:
                _addr = _addr['addr']
                # Strip the IPv6 zone suffix ('%<iface>') from link-local
                # addresses before comparing.
                ll_key = re.compile("(.+)%.*")
                raw = re.match(ll_key, _addr)
                if raw:
                    _addr = raw.group(1)
                if _addr == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, iface))
                    return iface
    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)
Work out on which interface the provided address is configured .
56,653
def sniff_iface(f):
    """Ensure decorated function is called with a value for iface.

    If no iface (or a falsy one) is provided, inject the net iface inferred
    from the unit's private address.
    """
    @functools.wraps(f)  # preserve wrapped function's name/docstring
    def iface_sniffer(*args, **kwargs):
        if not kwargs.get('iface', None):
            kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
        return f(*args, **kwargs)

    return iface_sniffer
Ensure decorated function is called with a value for iface .
56,654
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    :param iface: interface name, passed through to get_iface_addr; may be
        re-inferred from a link-local address's zone suffix.
    :param inc_aliases: include alias interfaces.
    :param fatal: raise instead of returning [] when nothing suitable found.
    :param exc_list: addresses to exclude.
    :param dynamic_only: only accept 'scope global ... dynamic' addresses
        whose suffix matches the interface's EUI-64 part.
    :returns: list of global IPv6 addresses (possibly empty).
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)
    if addresses:
        global_addrs = []
        for addr in addresses:
            # Link-local addresses carry the EUI-64 mac part and the iface
            # as a '%<iface>' zone suffix; remember both.
            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
            m = re.match(key_scope_link_local, addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)
        if global_addrs:
            # Cross-check against `ip addr show` to filter for scope-global
            # (and optionally dynamic, non-temporary) addresses.
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd).decode('UTF-8')
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            # NOTE(review): when no link-local address was
                            # seen above, eui_64_mac is unbound here and
                            # dynamic_only=True would raise NameError --
                            # confirm whether that path can occur.
                            if not dynamic_only or m.group(1).endswith(eui_64_mac):
                                addrs.append(addr)
            if addrs:
                return addrs
    if fatal:
        raise Exception("Interface '%s' does not have a scope global "
                        "non-temporary ipv6 address." % iface)
    return []
Get assigned IPv6 address for a given interface .
56,655
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """Return the names of all bridge devices on the system.

    A device is a bridge when its sysfs entry contains a 'bridge'
    subdirectory.
    """
    pattern = "%s/*/bridge" % vnic_dir
    bridges = []
    for entry in glob.glob(pattern):
        bridges.append(entry.replace(vnic_dir, '').split('/')[1])
    return bridges
Return a list of bridges on the system .
56,656
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """Return the nics comprising the given bridge on the system.

    Reads the bridge's 'brif' sysfs directory, whose entries are the
    enslaved interfaces.
    """
    pattern = "%s/%s/brif/*" % (vnic_dir, bridge)
    return [entry.split('/')[-1] for entry in glob.glob(pattern)]
Return a list of nics comprising a given bridge on the system .
56,657
def is_ip(address):
    """Return True if ``address`` is a valid IP address, else False."""
    try:
        netaddr.IPAddress(address)
    except (netaddr.AddrFormatError, ValueError):
        return False
    return True
Returns True if address is a valid IP address .
56,658
def get_host_ip(hostname, fallback=None):
    """Resolve the IP for a given hostname, or return it if already an IP.

    Tries a DNS query first, then the system resolver.

    :param hostname: hostname or IP address.
    :param fallback: value returned when resolution fails.
    :returns: IP address string, or ``fallback`` on failure.
    """
    if is_ip(hostname):
        return hostname

    ip_addr = ns_query(hostname)
    if not ip_addr:
        try:
            ip_addr = socket.gethostbyname(hostname)
        except Exception:
            log("Failed to resolve hostname '%s'" % (hostname),
                level=WARNING)
            return fallback
    return ip_addr
Resolves the IP for a given hostname or returns the input if it is already an IP .
56,659
def get_hostname(address, fqdn=True):
    """Resolve the hostname for a given IP, or return the input if it is
    already a hostname.

    :param address: IP address (or hostname, returned as-is).
    :param fqdn: when True return the fully-qualified name, otherwise only
        the short (first label) name.
    :returns: hostname string, or None when reverse resolution fails.
    """
    if is_ip(address):
        try:
            import dns.reversename
        except ImportError:
            # dnspython is installed on demand rather than being a hard
            # charm dependency.
            if six.PY2:
                apt_install("python-dnspython", fatal=True)
            else:
                apt_install("python3-dnspython", fatal=True)
            import dns.reversename

        rev = dns.reversename.from_address(address)
        result = ns_query(rev)

        if not result:
            # Fall back to the system resolver.
            try:
                result = socket.gethostbyaddr(address)[0]
            except Exception:
                return None
    else:
        result = address

    if fqdn:
        # Strip any trailing dot from the DNS answer.
        if result.endswith('.'):
            return result[:-1]
        else:
            return result
    else:
        return result.split('.')[0]
Resolves hostname for given IP or returns the input if it is already a hostname .
56,660
def get_relation_ip(interface, cidr_network=None):
    """Return this unit's IP for the given relation interface.

    :param interface: relation/binding name to look up.
    :param cidr_network: optional CIDR; when set, pick the local address
        that falls within that network.
    :returns: IPv4 or IPv6 address string.
    """
    try:
        # Prefer the Juju network binding for this interface when supported.
        address = network_get_primary_address(interface)
    except NotImplementedError:
        address = get_host_ip(unit_get('private-address'))
    except NoNetworkBinding:
        log("No network binding for {}".format(interface), WARNING)
        address = get_host_ip(unit_get('private-address'))

    if config('prefer-ipv6'):
        # IPv6 overrides any binding/CIDR selection.
        assert_charm_supports_ipv6()
        return get_ipv6_addr()[0]
    elif cidr_network:
        return get_address_in_network(cidr_network, address)

    return address
Return this unit s IP for the given interface .
56,661
def ensure_compliance(self):
    """Ensure that all registered paths comply with the audit criteria.

    Non-existent paths are skipped unless ``always_comply`` is set.
    """
    for p in self.paths:
        if os.path.exists(p):
            if self.is_compliant(p):
                continue

            log('File %s is not in compliance.' % p, level=INFO)
        else:
            if not self.always_comply:
                log("Non-existent path '%s' - skipping compliance check"
                    % (p), level=INFO)
                continue

        if self._take_action():
            log("Applying compliance criteria to '%s'" % (p), level=INFO)
            self.comply(p)
Ensure that the all registered files comply to registered criteria .
56,662
def is_compliant(self, path):
    """Check whether ownership and permissions of ``path`` meet the audit
    criteria.

    :param path: file path to check.
    :returns: True when owner, group and mode all match.
    """
    stat = self._get_stat(path)
    user = self.user
    group = self.group
    compliant = True
    if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid:
        log('File %s is not owned by %s:%s.' % (path, user.pw_name,
                                                group.gr_name), level=INFO)
        compliant = False

    # Mask down to permission bits (incl. setuid/setgid/sticky).
    perms = stat.st_mode & 0o7777
    if perms != self.mode:
        log('File %s has incorrect permissions, currently set to %s' %
            (path, oct(stat.st_mode & 0o7777)), level=INFO)
        compliant = False

    return compliant
Checks if the path is in compliance .
56,663
def comply(self, path):
    """Issue a chown and chmod to the file path specified."""
    utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name,
                             self.mode)
Issues a chown and chmod to the file paths specified .
56,664
def is_compliant(self, path):
    """Check whether the directory (and, when recursive, its subtree) is
    compliant.

    :param path: directory to check.
    :raises ValueError: if ``path`` is not a directory.
    :returns: True when the criteria are met.
    """
    if not os.path.isdir(path):
        log('Path specified %s is not a directory.' % path, level=ERROR)
        raise ValueError("%s is not a directory." % path)

    if not self.recursive:
        return super(DirectoryPermissionAudit, self).is_compliant(path)

    compliant = True
    for root, dirs, _ in os.walk(path):
        # NOTE(review): only leaf directories (those with no child dirs)
        # are checked here; intermediate dirs are skipped — confirm this
        # is the intended recursive semantics.
        if len(dirs) > 0:
            continue

        if not super(DirectoryPermissionAudit, self).is_compliant(root):
            compliant = False
            continue

    return compliant
Checks if the directory is compliant .
56,665
def is_compliant(self, path):
    """Determine if the templated file at ``path`` is compliant.

    The file is compliant when the source template checksum, the rendered
    contents and the file permissions are all up to date.
    """
    checks = (self.templates_match(path),
              self.contents_match(path),
              self.permissions_match(path))
    return all(checks)
Determines if the templated file is compliant .
56,666
def run_service_actions(self):
    """Run any service actions requested for this templated file.

    Failures are logged as warnings and do not abort remaining actions.
    """
    if not self.service_actions:
        return

    for svc_action in self.service_actions:
        name = svc_action['service']
        actions = svc_action['actions']
        log("Running service '%s' actions '%s'" % (name, actions),
            level=DEBUG)
        for action in actions:
            cmd = ['service', name, action]
            try:
                check_call(cmd)
            except CalledProcessError as exc:
                # Best-effort: log and continue with the next action.
                log("Service name='%s' action='%s' failed - %s" %
                    (name, action, exc), level=WARNING)
Run any actions on services requested .
56,667
def comply(self, path):
    """Render the template to ``path`` and enforce its permissions.

    Also runs any requested service actions and records the rendered
    file's checksum for later compliance checks.
    """
    dirname = os.path.dirname(path)
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    self.pre_write()
    render_and_write(self.template_dir, path, self.context())
    utils.ensure_permissions(path, self.user, self.group, self.mode)
    self.run_service_actions()
    self.save_checksum(path)
    self.post_write()
Ensures the contents and the permissions of the file .
56,668
def templates_match(self, path):
    """Determine whether the stored checksum of the source template is
    current.

    Side effect: a missing or stale checksum is (re)saved to the unit kv
    store, and False is returned in either case.
    """
    template_path = get_template_path(self.template_dir, path)
    key = 'hardening:template:%s' % template_path
    template_checksum = file_hash(template_path)
    kv = unitdata.kv()
    stored_tmplt_checksum = kv.get(key)
    if not stored_tmplt_checksum:
        # First sighting of this template: record it and report stale so
        # the file gets (re)rendered.
        kv.set(key, template_checksum)
        kv.flush()
        log('Saved template checksum for %s.' % template_path,
            level=DEBUG)
        return False
    elif stored_tmplt_checksum != template_checksum:
        kv.set(key, template_checksum)
        kv.flush()
        log('Updated template checksum for %s.' % template_path,
            level=DEBUG)
        return False

    return True
Determines if the template files are the same .
56,669
def contents_match(self, path):
    """Determine whether the file at ``path`` still matches its recorded
    checksum.

    :returns: False when no checksum has been recorded yet or it differs.
    """
    checksum = file_hash(path)
    kv = unitdata.kv()
    stored_checksum = kv.get('hardening:%s' % path)
    if not stored_checksum:
        log('Checksum for %s has not been calculated.' % path, level=DEBUG)
        return False
    elif stored_checksum != checksum:
        log('Checksum mismatch for %s.' % path, level=DEBUG)
        return False

    return True
Determines if the file content is the same .
56,670
def permissions_match(self, path):
    """Determine whether the file owner and permissions match the audit
    criteria, by delegating to a FilePermissionAudit."""
    audit = FilePermissionAudit(path, self.user, self.group, self.mode)
    return audit.is_compliant(path)
Determines if the file owner and permissions match .
56,671
def save_checksum(self, path):
    """Calculate and persist the checksum for ``path`` in the unit kv
    store."""
    checksum = file_hash(path)
    kv = unitdata.kv()
    kv.set('hardening:%s' % path, checksum)
    kv.flush()
Calculates and saves the checksum for the path specified .
56,672
def bool_from_string(value):
    """Interpret a string value as a boolean.

    :param value: string to interpret.
    :raises ValueError: if value is not a string or not recognisable.
    :returns: True or False.
    """
    if not isinstance(value, six.string_types):
        raise ValueError(
            "Unable to interpret non-string value '%s' as boolean" % (value))

    normalised = six.text_type(value).strip().lower()

    if normalised in ['y', 'yes', 'true', 't', 'on']:
        return True

    if normalised in ['n', 'no', 'false', 'f', 'off']:
        return False

    raise ValueError(
        "Unable to interpret string value '%s' as boolean" % (normalised))
Interpret string value as boolean .
56,673
def bytes_from_string(value):
    """Interpret a human-readable string value as bytes.

    Accepts an optional binary suffix (K/KB/M/MB/G/GB/T/TB/P/PB, case
    insensitive), e.g. '10MB' -> 10485760. A bare integer string is taken
    as a plain byte count.

    :param value: string to interpret.
    :raises ValueError: if value is not a string or cannot be parsed.
    :returns: size in bytes as an int.
    """
    BYTE_POWER = {
        'K': 1,
        'KB': 1,
        'M': 2,
        'MB': 2,
        'G': 3,
        'GB': 3,
        'T': 4,
        'TB': 4,
        'P': 5,
        'PB': 5,
    }
    if isinstance(value, six.string_types):
        value = six.text_type(value)
    else:
        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
        raise ValueError(msg)
    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
    if matches:
        # Fix: accept lowercase suffixes ('10m') and raise the documented
        # ValueError for unknown suffixes instead of an uncaught KeyError.
        suffix = matches.group(2).upper()
        if suffix not in BYTE_POWER:
            raise ValueError(
                "Unable to interpret string value '%s' as bytes" % (value))
        size = int(matches.group(1)) * (1024 ** BYTE_POWER[suffix])
    else:
        # No suffix: assume a plain byte count.
        try:
            size = int(value)
        except ValueError:
            msg = "Unable to interpret string value '%s' as bytes" % (value)
            raise ValueError(msg)
    return size
Interpret human readable string value as bytes .
56,674
def audit(*args):
    """Decorator to register an audit.

    Records the decorated function in the module-level ``_audits`` dict
    keyed by its name; ``args`` are filter predicates applied to the audit
    options at run time.

    :raises RuntimeError: on a duplicate test name or a non-callable filter.
    """
    def wrapper(f):
        test_name = f.__name__
        if _audits.get(test_name):
            raise RuntimeError(
                "Test name '{}' used more than once".format(test_name))
        non_callables = [fn for fn in args if not callable(fn)]
        if non_callables:
            raise RuntimeError(
                "Configuration includes non-callable filters: {}".format(
                    non_callables))
        _audits[test_name] = Audit(func=f, filters=args)
        return f
    return wrapper
Decorator to register an audit .
56,675
def is_audit_type(*args):
    """Return a filter predicate matching the given audit types.

    The returned callable takes the audit options dict and reports whether
    its 'audit_type' is one of ``args``.
    """
    def _matches(audit_options):
        return audit_options.get('audit_type') in args

    return _matches
This audit is included in the specified kinds of audits .
56,676
def run(audit_options):
    """Run the configured audits with the specified audit_options.

    :param audit_options: dict of configuration for running the audits; may
        contain an 'excludes' list of (hyphenated) audit names to skip.
    :returns: dict of per-audit results keyed by the hyphenated audit name.
    """
    errors = {}
    results = {}
    for name, audit in sorted(_audits.items()):
        result_name = name.replace('_', '-')
        if result_name in audit_options.get('excludes', []):
            # Fix: the original implicit string concatenation produced
            # "...it isexcluded..." - add the missing space.
            print("Skipping {} because it is "
                  "excluded in audit config".format(result_name))
            continue
        # Only run the audit when every filter predicate accepts the options.
        if all(p(audit_options) for p in audit.filters):
            try:
                audit.func(audit_options)
                print("{}: PASS".format(name))
                results[result_name] = {
                    'success': True,
                }
            except AssertionError as e:
                print("{}: FAIL ({})".format(name, e))
                results[result_name] = {
                    'success': False,
                    'message': e,
                }
            except Exception as e:
                print("{}: ERROR ({})".format(name, e))
                errors[name] = e
                results[result_name] = {
                    'success': False,
                    'message': e,
                }
    for name, error in errors.items():
        print("=" * 20)
        print("Error in {}: ".format(name))
        traceback.print_tb(error.__traceback__)
        print()
    return results
Run the configured audits with the specified audit_options .
56,677
def action_parse_results(result):
    """Parse the output of ``run`` in the context of a Juju action.

    Sets a per-test action result and fails the whole action when any test
    did not succeed.

    :returns: 0 when all tests passed, 1 otherwise.
    """
    passed = True
    for test, outcome in result.items():
        if outcome['success']:
            hookenv.action_set({test: 'PASS'})
        else:
            hookenv.action_set(
                {test: 'FAIL - {}'.format(outcome['message'])})
            passed = False
    if not passed:
        hookenv.action_fail("One or more tests failed")
    return 0 if passed else 1
Parse the result of run in the context of an action .
56,678
def generate_selfsigned(keyfile, certfile, keysize="1024",
                        config=None, subject=None, cn=None):
    """Generate a self-signed SSL keypair using openssl.

    Exactly one of ``config``, ``subject`` or ``cn`` should be supplied:
    a path to an openssl config file, a dict of subject fields (``cn`` is
    mandatory within it), or a bare common name.

    :param keyfile: path to write the private key to.
    :param certfile: path to write the certificate to.
    :param keysize: RSA key size in bits (string).
    :returns: True on success, False on invalid arguments or openssl failure.
    """
    cmd = []
    if config:
        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
               "-keyout", keyfile, "-out", certfile, "-config", config]
    elif subject:
        ssl_subject = ""
        if "country" in subject:
            ssl_subject = ssl_subject + "/C={}".format(subject["country"])
        if "state" in subject:
            ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
        if "locality" in subject:
            ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
        if "organization" in subject:
            ssl_subject = ssl_subject + "/O={}".format(
                subject["organization"])
        if "organizational_unit" in subject:
            ssl_subject = ssl_subject + "/OU={}".format(
                subject["organizational_unit"])
        if "cn" in subject:
            ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
        else:
            hookenv.log("When using \"subject\" argument you must "
                        "provide \"cn\" field at very least")
            return False
        if "email" in subject:
            ssl_subject = ssl_subject + "/emailAddress={}".format(
                subject["email"])

        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
               "-keyout", keyfile, "-out", certfile, "-subj", ssl_subject]
    elif cn:
        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
               "-keyout", keyfile, "-out", certfile, "-subj",
               "/CN={}".format(cn)]

    if not cmd:
        # Fix: the original implicit concatenation produced
        # "...provided,unable..." - add the missing space.
        hookenv.log("No config, subject or cn provided, "
                    "unable to generate self signed SSL certificates")
        return False
    try:
        subprocess.check_call(cmd)
        return True
    except Exception as e:
        print("Execution of openssl command failed:\n{}".format(e))
        return False
Generate selfsigned SSL keypair
56,679
def ssh_directory_for_unit(application_name, user=None):
    """Return (creating if needed) the directory that stores ssh assets for
    the application.

    Ensures empty ``authorized_keys`` and ``known_hosts`` files exist.

    :param application_name: name of the application.
    :param user: optional user name appended as a '_<user>' suffix.
    :returns: path to the application's ssh directory.
    """
    if user:
        application_name = "{}_{}".format(application_name, user)
    _dir = os.path.join(NOVA_SSH_DIR, application_name)
    for d in [NOVA_SSH_DIR, _dir]:
        if not os.path.isdir(d):
            os.mkdir(d)
    for f in ['authorized_keys', 'known_hosts']:
        f = os.path.join(_dir, f)
        if not os.path.isfile(f):
            # Touch the file so later reads/appends succeed.
            open(f, 'w').close()
    return _dir
Return the directory used to store ssh assets for the application .
56,680
def ssh_known_host_key(host, application_name, user=None):
    """Return the first entry in known_hosts for ``host``.

    :param host: hostname to look up in the known_hosts file.
    :param application_name: application whose known_hosts file is used.
    :param user: optional user scoping for the known_hosts file.
    :returns: the first matching line, or None when no entry exists.
    """
    cmd = [
        'ssh-keygen', '-f', known_hosts(application_name, user),
        '-H', '-F', host]
    try:
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        # ssh-keygen -F exits 1 when no matching entry is found; treat that
        # as "no key" rather than an error.
        if e.returncode == 1:
            output = e.output
        else:
            raise
    if isinstance(output, bytes):
        # Fix: on Python 3 check_output returns bytes, and
        # bytes.split('\n') below would raise TypeError - decode first.
        output = output.decode('utf-8')
    output = output.strip()

    if output:
        lines = output.split('\n')
        if len(lines) >= 1:
            return lines[0]

    return None
Return the first entry in known_hosts for host .
56,681
def remove_known_host(host, application_name, user=None):
    """Remove the known_hosts entry for ``host`` via ssh-keygen -R."""
    log('Removing SSH known host entry for compute host at %s' % host)
    hosts_file = known_hosts(application_name, user)
    subprocess.check_call(['ssh-keygen', '-f', hosts_file, '-R', host])
Remove the entry in known_hosts for host .
56,682
def is_same_key(key_1, key_2):
    """Compare the key material of two known_hosts entries.

    Each entry has the form '<hashed-host>= <key>'; only the part after
    the '= ' separator is compared.
    """
    def material(entry):
        return entry.split('= ')[1]

    return material(key_1) == material(key_2)
Extract the key from two host entries and compare them .
56,683
def add_known_host(host, application_name, user=None):
    """Add the given host's RSA key to the known_hosts file.

    Scans the remote host's key; if an entry already exists and is stale
    it is replaced, if it is current nothing is done.

    :raises Exception: if the remote host key cannot be obtained.
    """
    cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
    try:
        remote_key = subprocess.check_output(cmd).strip()
    except Exception as e:
        log('Could not obtain SSH host key from %s' % host, level=ERROR)
        raise e

    current_key = ssh_known_host_key(host, application_name, user)
    if current_key and remote_key:
        if is_same_key(remote_key, current_key):
            log('Known host key for compute host %s up to date.' % host)
            return
        else:
            # Stale entry: drop it before appending the fresh key.
            remove_known_host(host, application_name, user)

    log('Adding SSH host key to known hosts for compute node at %s.' % host)
    with open(known_hosts(application_name, user), 'a') as out:
        out.write("{}\n".format(remote_key))
Add the given host key to the known hosts file .
56,684
def ssh_authorized_key_exists(public_key, application_name, user=None):
    """Return True when the given key is present in the authorized_keys
    file for the application."""
    path = authorized_keys(application_name, user)
    with open(path) as keys:
        contents = keys.read()
    return ('%s' % public_key) in contents
Check if given key is in the authorized_key file .
56,685
def add_authorized_key(public_key, application_name, user=None):
    """Append the given key to the application's authorized_keys file."""
    path = authorized_keys(application_name, user)
    with open(path, 'a') as keys:
        keys.write("{}\n".format(public_key))
Add given key to the authorized_key file .
56,686
def ssh_known_hosts_lines(application_name, user=None):
    """Return the non-empty lines of the application's known_hosts file,
    with trailing whitespace stripped."""
    with open(known_hosts(application_name, user)) as hosts:
        return [line.rstrip() for line in hosts if line.rstrip()]
Return contents of known_hosts file for given application .
56,687
def ssh_authorized_keys_lines(application_name, user=None):
    """Return the non-empty lines of the application's authorized_keys
    file, with trailing whitespace stripped."""
    with open(authorized_keys(application_name, user)) as keys:
        return [line.rstrip() for line in keys if line.rstrip()]
Return contents of authorized_keys file for given application .
56,688
def ssh_compute_remove(public_key, application_name, user=None):
    """Remove the given public key from the application's authorized_keys
    file.

    No-op when the key files do not exist or the key is not present.

    :param public_key: key to remove.
    :param application_name: application whose key file is edited.
    :param user: optional user scoping for the key file.
    """
    if not (os.path.isfile(authorized_keys(application_name, user)) or
            os.path.isfile(known_hosts(application_name, user))):
        return

    # Fix: pass the caller's user through instead of hard-coding user=None,
    # so we read the same file that we later write back.
    keys = ssh_authorized_keys_lines(application_name, user=user)
    keys = [k.strip() for k in keys]

    if public_key not in keys:
        return

    # Fix: filter into a new list instead of calling list.remove() while
    # iterating, which can skip adjacent duplicate entries.
    keys = [key for key in keys if key != public_key]

    with open(authorized_keys(application_name, user), 'w') as _keys:
        keys = '\n'.join(keys)
        if not keys.endswith('\n'):
            keys += '\n'
        _keys.write(keys)
Remove given public key from authorized_keys file .
56,689
def apt_cache(in_memory=True, progress=None):
    """Build and return an apt cache.

    :param in_memory: when True, disable the on-disk package cache files.
    :param progress: optional apt progress instance.
    :returns: apt_pkg.Cache instance.
    """
    from apt import apt_pkg
    apt_pkg.init()
    if in_memory:
        apt_pkg.config.set("Dir::Cache::pkgcache", "")
        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
    return apt_pkg.Cache(progress)
Build and return an apt cache .
56,690
def apt_mark(packages, mark, fatal=False):
    """Flag one or more packages using apt-mark.

    :param packages: package name or iterable of names.
    :param mark: mark to apply (e.g. 'hold').
    :param fatal: when True raise on a non-zero exit status.
    """
    log("Marking {} as {}".format(packages, mark))
    cmd = ['apt-mark', mark]
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)

    runner = subprocess.check_call if fatal else subprocess.call
    runner(cmd, universal_newlines=True)
Flag one or more packages using apt - mark .
56,691
def import_key(key):
    """Import an apt GPG key, from full ASCII-armored material or by key id.

    Full key material is written directly in binary form; a bare key id is
    fetched from a keyserver first.

    :param key: ASCII-armored key block, or a key id.
    :raises GPGKeyError: if the armor markers are malformed.
    """
    key = key.strip()
    if '-' in key or '\n' in key:
        # Full key material was provided inline.
        log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
        if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
                '-----END PGP PUBLIC KEY BLOCK-----' in key):
            log("Writing provided PGP key in the binary format", level=DEBUG)
            if six.PY3:
                key_bytes = key.encode('utf-8')
            else:
                key_bytes = key
            key_name = _get_keyid_by_gpg_key(key_bytes)
            key_gpg = _dearmor_gpg_key(key_bytes)
            _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
        else:
            raise GPGKeyError("ASCII armor markers missing from GPG key")
    else:
        # Only a key id: fetch the full material from a keyserver.
        log("PGP key found (looks like Radix64 format)", level=WARNING)
        log("SECURELY importing PGP key from keyserver; "
            "full key not provided.", level=WARNING)
        key_asc = _get_key_by_keyid(key)
        key_gpg = _dearmor_gpg_key(key_asc)
        _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
Import an ASCII Armor key .
56,692
def _dearmor_gpg_key(key_asc):
    """Convert a GPG key from ASCII armor to binary format.

    :param key_asc: ASCII-armored key material.
    :raises GPGKeyError: when gpg reports the input is not valid OpenPGP
        data.
    :returns: binary key material produced by ``gpg --dearmor``.
    """
    ps = subprocess.Popen(['gpg', '--dearmor'],
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          stdin=subprocess.PIPE)
    out, err = ps.communicate(input=key_asc)
    if six.PY3:
        err = err.decode('utf-8')
    if 'gpg: no valid OpenPGP data found.' in err:
        raise GPGKeyError('Invalid GPG key material. Check your network setup'
                          ' (MTU, routing, DNS) and/or proxy server settings'
                          ' as well as destination keyserver status.')
    else:
        return out
Converts a GPG key in the ASCII armor format to the binary format .
56,693
def _write_apt_gpg_keyfile(key_name, key_material):
    """Write binary GPG key material into apt's trusted keyring directory.

    :param key_name: basename for the keyfile (without extension).
    :param key_material: dearmored (binary) key material.
    """
    path = '/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name)
    with open(path, 'wb') as keyf:
        keyf.write(key_material)
Writes GPG key material into a file at a provided path .
56,694
def _add_apt_repository(spec):
    """Add ``spec`` using add-apt-repository.

    A '{series}' placeholder in spec is substituted with the current
    Ubuntu release codename before the repository is added.
    """
    if '{series}' in spec:
        spec = spec.replace('{series}', get_distrib_codename())
    _run_with_retries(['add-apt-repository', '--yes', spec],
                      cmd_env=env_proxy_settings(['https']))
Add the spec using add_apt_repository
56,695
def _add_cloud_distro_check(cloud_archive_release, openstack_release):
    """Add the cloud pocket after validating the release combination
    against the running Ubuntu release."""
    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
    pocket = "{}-{}".format(cloud_archive_release, openstack_release)
    _add_cloud_pocket(pocket)
Add the cloud pocket but also check the cloud_archive_release against the current distro and use the openstack_release as the full lookup .
56,696
def _verify_is_ubuntu_rel(release, os_release):
    """Verify that ``release`` matches the current Ubuntu release.

    :param release: expected Ubuntu codename.
    :param os_release: OpenStack release name (for the error message).
    :raises SourceConfigError: if the codenames do not match.
    """
    ubuntu_rel = get_distrib_codename()
    if release != ubuntu_rel:
        # Fix: the original implicit concatenation produced
        # "...this Ubuntuversion..." - add the missing space.
        raise SourceConfigError(
            'Invalid Cloud Archive release specified: {}-{} on this Ubuntu '
            'version ({})'.format(release, os_release, ubuntu_rel))
Verify that the release is in the same as the current ubuntu release .
56,697
def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
                      retry_message="", cmd_env=None):
    """Run a command, retrying on given exit codes until success or the
    retry limit is reached.

    :param cmd: command argv list.
    :param max_retries: number of retries after which the last
        CalledProcessError is re-raised.
    :param retry_exitcodes: exit codes that trigger a retry.
    :param retry_message: message logged between attempts (a default is
        built from ``cmd`` when empty).
    :param cmd_env: extra environment variables for the command.
    """
    env = None
    kwargs = {}
    if cmd_env:
        env = os.environ.copy()
        env.update(cmd_env)
        kwargs['env'] = env

    if not retry_message:
        retry_message = "Failed executing '{}'".format(" ".join(cmd))
    retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY)

    retry_count = 0
    result = None

    # None acts as a "not yet run" sentinel; keep looping while the last
    # result is still one of the retryable exit codes.
    retry_results = (None,) + retry_exitcodes
    while result in retry_results:
        try:
            result = subprocess.check_call(cmd, **kwargs)
        except subprocess.CalledProcessError as e:
            retry_count = retry_count + 1
            if retry_count > max_retries:
                raise
            result = e.returncode
            log(retry_message)
            time.sleep(CMD_RETRY_DELAY)
Run a command and retry until success or max_retries is reached .
56,698
def _run_apt_command(cmd, fatal=False):
    """Run an apt command, forcing a non-interactive frontend.

    :param cmd: apt command argv list.
    :param fatal: when True, retry while the DPKG lock is held and raise on
        persistent failure; otherwise run once and ignore the exit status.
    """
    cmd_env = {
        'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}

    if fatal:
        _run_with_retries(
            cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
            retry_message="Couldn't acquire DPKG lock")
    else:
        env = os.environ.copy()
        env.update(cmd_env)
        subprocess.call(cmd, env=env)
Run an apt command with optional retries .
56,699
def get_upstream_version(package):
    """Determine the upstream version of an installed package.

    :param package: package name to query.
    :returns: upstream version string, or None when the package is unknown
        or not installed.
    """
    import apt_pkg
    cache = apt_cache()
    try:
        pkg = cache[package]
    except Exception:
        # The package may not be known to this apt cache.
        return None

    if not pkg.current_ver:
        # Known but not currently installed.
        return None

    return apt_pkg.upstream_version(pkg.current_ver.ver_str)
Determine upstream version based on installed package