idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
56,800
def config_flags_parser(config_flags):
    """Parse a config-flags string into an OrderedDict.

    Two formats are supported: a YAML mapping (detected by a ':' appearing
    before any '='), and a comma-separated 'key=value' list where values may
    themselves contain commas.

    :param config_flags: raw flags string from charm config
    :returns: OrderedDict of flag name -> value
    :raises OSContextError: if the string is not in key=value format
    """
    # If a colon appears before the first '=', treat the whole string as YAML.
    colon = config_flags.find(':')
    equals = config_flags.find('=')
    if colon > 0:
        if colon < equals or equals < 0:
            return ordered(yaml.safe_load(config_flags))

    if config_flags.find('==') >= 0:
        juju_log("config_flags is not in expected format (key=value)",
                 level=ERROR)
        raise OSContextError

    # Strip liberally: values may contain commas, so we split on '=' and
    # recover each key as the token after the last comma of the previous
    # fragment.
    post_strippers = ' ,'
    split = config_flags.strip(' =').split('=')
    limit = len(split)
    flags = OrderedDict()
    for i in range(0, limit - 1):
        current = split[i]
        # NOTE: renamed from 'next' - do not shadow the builtin next().
        nxt = split[i + 1]
        vindex = nxt.rfind(',')
        if (i == limit - 2) or (vindex < 0):
            # Last fragment, or no trailing comma: the whole token is a value.
            value = nxt
        else:
            value = nxt[:vindex]

        if i == 0:
            key = current
        else:
            # The key is whatever follows the last comma of this fragment.
            index = current.rfind(',')
            if index < 0:
                juju_log("Invalid config value(s) at index %s" % (i),
                         level=ERROR)
                raise OSContextError
            key = current[index + 1:]

        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
    return flags
Parses config flags string into dict .
56,801
def os_application_version_set(package):
    """Set the application version for Juju 2.0 and later.

    Uses the upstream version of ``package``; falls back to the OpenStack
    release codename when no package version can be determined.
    """
    pkg_version = get_upstream_version(package)
    # Evaluate the fallback lazily so os_release() is only consulted on miss.
    application_version_set(pkg_version if pkg_version else os_release(package))
Set version of application for Juju 2 . 0 and later
56,802
def enable_memcache(source=None, release=None, package=None):
    """Determine whether memcache should be enabled on the local unit.

    :param source: install source to derive a release from, as a last resort
    :param release: OpenStack release codename, if already known
    :param package: package to infer the installed release from
    :returns: True for mitaka and later releases
    """
    codename = release or os_release(package, base='icehouse')
    if not codename:
        codename = get_os_codename_install_source(source)
    return CompareOpenStackReleases(codename) >= 'mitaka'
Determine if memcache should be enabled on the local unit
56,803
def token_cache_pkgs(source=None, release=None):
    """Determine the additional packages needed for token caching.

    :returns: list of package names (empty when memcache is not enabled)
    """
    if enable_memcache(source=source, release=release):
        return ['memcached', 'python-memcache']
    return []
Determine additional packages needed for token caching
56,804
def snap_install_requested():
    """Determine whether installation from snaps was requested.

    :returns: True when openstack-origin is 'snap:...' with a valid channel
    """
    origin = config('openstack-origin') or ""
    if not origin.startswith('snap:'):
        return False
    track = origin[5:]
    # 'snap:track/channel' - default to the stable channel when omitted.
    channel = track.split('/')[1] if '/' in track else 'stable'
    return valid_snap_channel(channel)
Determine if installing from snaps
56,805
def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
    """Generate a dictionary of snap install information from an origin.

    :param snaps: iterable of snap names
    :param src: origin string, expected to start with 'snap:'
    :param mode: snap confinement mode
    :returns: mapping of snap name -> {'channel': ..., 'mode': ...}
    """
    if not src.startswith('snap:'):
        juju_log("Snap source is not a snap origin", 'WARN')
        return {}
    channel_flag = '--channel={}'.format(src[5:])
    info = {}
    for snap in snaps:
        info[snap] = {'channel': channel_flag, 'mode': mode}
    return info
Generate a dictionary of snap install information from origin
56,806
def install_os_snaps(snaps, refresh=False):
    """Install or refresh OpenStack snaps with their channel and mode.

    :param snaps: mapping of snap name -> {'channel': ..., 'mode': ...}
    :param refresh: refresh already-installed snaps instead of installing
    """
    def _ensure_flag(value):
        # Normalise bare values into '--' command-line flags.
        return value if value.startswith('--') else '--{}'.format(value)

    action = snap_refresh if refresh else snap_install
    for name, opts in snaps.items():
        action(name, _ensure_flag(opts['channel']), _ensure_flag(opts['mode']))
Install OpenStack snaps from channel and with mode
56,807
def series_upgrade_complete(resume_unit_helper=None, configs=None):
    """Run the common series-upgrade completion tasks.

    Clears the paused/upgrading unit flags, then rewrites configuration and
    resumes the unit when a config renderer is supplied.
    """
    clear_unit_paused()
    clear_unit_upgrading()
    if configs:
        configs.write_all()
        if resume_unit_helper:
            resume_unit_helper(configs)
Run common series upgrade complete tasks .
56,808
def get_certificate_request(json_encode=True):
    """Generate a certificate request based on the network configuration.

    Requests a cert for the machine hostname, and for each endpoint type
    either a configured override CN (with the endpoint addresses as SANs)
    or extra SANs folded into the hostname request.

    :param json_encode: whether the returned payload is JSON-encoded
    :returns: the batched certificate request
    """
    req = CertRequest(json_encode=json_encode)
    # Always request a certificate for the machine's own hostname.
    req.add_hostname_cn()
    for net_type in [INTERNAL, ADMIN, PUBLIC]:
        net_config = config(ADDRESS_MAP[net_type]['override'])
        try:
            net_addr = resolve_address(endpoint_type=net_type)
            ip = network_get_primary_address(ADDRESS_MAP[net_type]['binding'])
            addresses = [net_addr, ip]
            # Include the VIP of the network the binding lives in, if any.
            vip = get_vip_in_network(resolve_network_cidr(ip))
            if vip:
                addresses.append(vip)
            if net_config:
                # Override CN configured: dedicated cert for this endpoint.
                req.add_entry(net_type, net_config, addresses)
            else:
                # No override: add the addresses to the hostname cert's SANs.
                req.add_hostname_cn_ip(addresses)
        except NoNetworkBinding:
            log("Skipping request for certificate for ip in {} space, no "
                "local address found".format(net_type), WARNING)
    return req.get_request()
Generate a certificate request based on the network configuration
56,809
def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
    """Create symlinks for SAN records.

    For each endpoint address (and an optional custom hostname) without a
    cert of its own, symlink the hostname's cert/key pair to that name.

    :param ssl_dir: directory holding the cert_*/key_* files
    :param custom_hostname_link: extra hostname to link, if any
    """
    hostname = get_hostname(unit_get('private-address'))
    hostname_cert = os.path.join(ssl_dir, 'cert_{}'.format(hostname))
    hostname_key = os.path.join(ssl_dir, 'key_{}'.format(hostname))
    for net_type in [INTERNAL, ADMIN, PUBLIC]:
        try:
            addr = resolve_address(endpoint_type=net_type)
            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
            # Only link when the hostname cert exists and no per-address
            # cert has already been installed.
            if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
                os.symlink(hostname_cert, cert)
                os.symlink(hostname_key, key)
        except NoNetworkBinding:
            log("Skipping creating cert symlink for ip in {} space, no "
                "local address found".format(net_type), WARNING)
    if custom_hostname_link:
        custom_cert = os.path.join(ssl_dir,
                                   'cert_{}'.format(custom_hostname_link))
        custom_key = os.path.join(ssl_dir,
                                  'key_{}'.format(custom_hostname_link))
        if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert):
            os.symlink(hostname_cert, custom_cert)
            os.symlink(hostname_key, custom_key)
Create symlinks for SAN records
56,810
def install_certs(ssl_dir, certs, chain=None, user='root', group='root'):
    """Install cert bundles into ssl_dir, appending the chain if provided.

    :param ssl_dir: target directory for cert_*/key_* files
    :param certs: mapping of cn -> {'cert': ..., 'key': ...}
    :param chain: optional CA chain appended to each certificate
    :param user: file owner
    :param group: file group
    """
    for cn, bundle in certs.items():
        cert_content = bundle['cert']
        if chain:
            # Append chain so the full trust path is served with the cert.
            cert_content = cert_content + os.linesep + chain
        write_file(path=os.path.join(ssl_dir, 'cert_{}'.format(cn)),
                   owner=user, group=group,
                   content=cert_content, perms=0o640)
        write_file(path=os.path.join(ssl_dir, 'key_{}'.format(cn)),
                   owner=user, group=group,
                   content=bundle['key'], perms=0o640)
Install the certs passed into the ssl dir and append the chain if provided .
56,811
def process_certificates(service_name, relation_id, unit,
                         custom_hostname_link=None, user='root', group='root'):
    """Process the certificates supplied down the relation and install them.

    :returns: True when certificates were found and installed, else False
    """
    data = relation_get(rid=relation_id, unit=unit)
    ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
    mkdir(path=ssl_dir)
    local_name = local_unit().replace('/', '_')
    certs_json = data.get('{}.processed_requests'.format(local_name))
    chain = data.get('chain')
    ca = data.get('ca')
    if not certs_json:
        return False
    install_ca_cert(ca.encode())
    install_certs(ssl_dir, json.loads(certs_json), chain,
                  user=user, group=group)
    create_ip_cert_links(ssl_dir, custom_hostname_link=custom_hostname_link)
    return True
Process the certificates supplied down the relation
56,812
def get_requests_for_local_unit(relation_name=None):
    """Extract certificate data targeted at this unit down relation_name.

    :param relation_name: relation to inspect (default 'certificates')
    :returns: list of {'ca': ..., 'chain': ..., 'certs': ...} bundles
    """
    rel_name = relation_name or 'certificates'
    unit_name = local_unit().replace('/', '_')
    certs_key = '{}.processed_requests'.format(unit_name)
    bundles = []
    for rid in relation_ids(rel_name):
        for unit in related_units(rid):
            rdata = relation_get(rid=rid, unit=unit)
            if rdata.get(certs_key):
                bundles.append({
                    'ca': rdata['ca'],
                    'chain': rdata.get('chain'),
                    'certs': json.loads(rdata[certs_key]),
                })
    return bundles
Extract any certificates data targeted at this unit down relation_name .
56,813
def get_bundle_for_cn(cn, relation_name=None):
    """Extract the certificate bundle for the given cn.

    :param cn: canonical name to look up
    :param relation_name: relation to inspect
    :returns: {'cert', 'key', 'chain', 'ca'} bundle, or {} when not found
    """
    for entry in get_requests_for_local_unit(relation_name):
        if cn in entry['certs']:
            match = entry['certs'][cn]
            return {'cert': match['cert'],
                    'key': match['key'],
                    'chain': entry['chain'],
                    'ca': entry['ca']}
    return {}
Extract certificates for the given cn .
56,814
def add_entry(self, net_type, cn, addresses):
    """Add a certificate request (cn plus SAN addresses) to the batch.

    Note: ``net_type`` is accepted for interface compatibility but is not
    stored in the entry.
    """
    entry = {'cn': cn, 'addresses': addresses}
    self.entries.append(entry)
Add a request to the batch
56,815
def add_hostname_cn(self):
    """Add a request for the hostname of the machine, including any VIP."""
    private_ip = unit_get('private-address')
    sans = [private_ip]
    # Add the VIP of the network the unit address lives in, when present.
    vip = get_vip_in_network(resolve_network_cidr(private_ip))
    if vip:
        sans.append(vip)
    self.hostname_entry = {'cn': get_hostname(private_ip),
                           'addresses': sans}
Add a request for the hostname of the machine
56,816
def add_hostname_cn_ip(self, addresses):
    """Add addresses to the SAN list of the hostname request, de-duplicated."""
    existing = self.hostname_entry['addresses']
    for address in addresses:
        if address not in existing:
            existing.append(address)
Add an address to the SAN list for the hostname request
56,817
def get_request(self):
    """Generate the request from the batched-up entries.

    :returns: {'cert_requests': ...} where the payload is JSON-encoded when
              self.json_encode is set
    """
    if self.hostname_entry:
        self.entries.append(self.hostname_entry)
    request = {entry['cn']: {'sans': sorted(set(entry['addresses']))}
               for entry in self.entries}
    if self.json_encode:
        return {'cert_requests': json.dumps(request, sort_keys=True)}
    return {'cert_requests': request}
Generate request from the batched up entries
56,818
def get_audits():
    """Get OS hardening sysctl audits.

    :returns: list of audit objects; the modules template is only audited
              when kernel module loading is disabled in settings.
    """
    settings = utils.get_settings('os')
    audits = [
        SysctlConf(),
        FilePermissionAudit('/etc/sysctl.conf', user='root', group='root',
                            mode=0o0440),
    ]
    if not settings['security']['kernel_enable_module_loading']:
        audits.append(ModulesTemplate())
    return audits
Get OS hardening sysctl audits .
56,819
def _stat(file):
    """Get the Ownership information (user, group, mode) from a file."""
    output = subprocess.check_output(['stat', '-c', '%U %G %a', file])
    fields = output.decode('utf-8').strip().split(' ')
    return Ownership(*fields)
Get the Ownership information from a file .
56,820
def _config_ini ( path ) : conf = configparser . ConfigParser ( ) conf . read ( path ) return dict ( conf )
Parse an ini file
56,821
def _validate_file_mode(mode, file_name, optional=False):
    """Validate that file_name carries exactly the specified mode.

    :param mode: expected octal mode string
    :param file_name: file to check
    :param optional: when True, a missing file is not a failure
    """
    try:
        stat_info = _stat(file_name)
    except subprocess.CalledProcessError as e:
        print("Error reading file: {}".format(e))
        if not optional:
            assert False, "Specified file does not exist: {}".format(file_name)
    assert mode == stat_info.mode, \
        "{} has an incorrect mode: {} should be {}".format(
            file_name, stat_info.mode, mode)
    print("Validate mode of {}: PASS".format(file_name))
Validate that a specified file has the specified permissions .
56,822
def _config_section(config, section):
    """Read the configured ini file and return the named section (or None)."""
    ini_path = os.path.join(config.get('config_path'),
                            config.get('config_file'))
    return _config_ini(ini_path).get(section)
Read the configuration file and return a section .
56,823
def validate_file_permissions(config):
    """Verify that permissions on configuration files are secure enough.

    :param config: dict with a 'files' mapping of name -> options; options
                   may carry owner/group/mode plus an 'optional' flag
    :raises RuntimeError: on unknown option keys
    """
    files = config.get('files', {})
    for file_name, options in files.items():
        for key in options:
            if key not in ["owner", "group", "mode"]:
                raise RuntimeError(
                    "Invalid ownership configuration: {}".format(key))
        mode = options.get('mode', config.get('permissions', '600'))
        optional = options.get('optional', config.get('optional', 'False'))
        if '*' in file_name:
            # Expand globs; skip matches that carry their own explicit entry.
            for matched in glob.glob(file_name):
                if matched not in files and os.path.isfile(matched):
                    _validate_file_mode(mode, matched, optional)
        elif os.path.isfile(file_name):
            _validate_file_mode(mode, file_name, optional)
Verify that permissions on configuration files are secure enough .
56,824
def validate_uses_tls_for_keystone(audit_options):
    """Verify that TLS is used to communicate with Keystone."""
    section = _config_section(audit_options, 'keystone_authtoken')
    assert section is not None, "Missing section 'keystone_authtoken'"
    uses_tls = (not section.get('insecure') and
                "https://" in section.get("auth_uri"))
    assert uses_tls, "TLS is not used for Keystone"
Verify that TLS is used to communicate with Keystone .
56,825
def validate_uses_tls_for_glance(audit_options):
    """Verify that TLS is used to communicate with Glance."""
    section = _config_section(audit_options, 'glance')
    assert section is not None, "Missing section 'glance'"
    uses_tls = (not section.get('insecure') and
                "https://" in section.get("api_servers"))
    assert uses_tls, "TLS is not used for Glance"
Verify that TLS is used to communicate with Glance .
56,826
def is_ready(self):
    """Return True if data is available from any unit under self.name."""
    if self.get(self.name, []):
        return True
    hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__),
                hookenv.DEBUG)
    return False
Returns True if all of the required_keys are available from any units .
56,827
def _is_ready ( self , unit_data ) : return set ( unit_data . keys ( ) ) . issuperset ( set ( self . required_keys ) )
Helper method that tests a set of relation data and returns True if all of the required_keys are present .
56,828
def service_restart(service_name):
    """Restart (or start) a service, avoiding 'unknown service' log noise.

    Does nothing when the service is not available on this host.
    """
    if not host.service_available(service_name):
        return
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        host.service_start(service_name)
Wrapper around host . service_restart to prevent spurious unknown service messages in the logs .
56,829
def manage(self):
    """Handle the current hook by doing The Right Thing with the services.

    'stop' hooks stop all services; every other hook reconfigures services
    and publishes provider data. atstart/atexit callbacks are always run,
    including when a handler exits cleanly via sys.exit(0).
    """
    hookenv._run_atstart()
    try:
        hook_name = hookenv.hook_name()
        if hook_name == 'stop':
            self.stop_services()
        else:
            self.reconfigure_services()
            self.provide_data()
    except SystemExit as x:
        # A clean sys.exit() (code None/0) still gets its atexit callbacks
        # before the exception propagates.
        if x.code is None or x.code == 0:
            hookenv._run_atexit()
    hookenv._run_atexit()
Handle the current hook by doing The Right Thing with the registered services .
56,830
def provide_data(self):
    """Set the relation data for each provider in the provided_data list.

    Providers are expected to expose a ``name`` (relation name) and a
    ``provide_data()`` method returning the data to publish.
    """
    for service_name, service in self.services.items():
        service_ready = self.is_ready(service_name)
        for provider in service.get('provided_data', []):
            for relid in hookenv.relation_ids(provider.name):
                units = hookenv.related_units(relid)
                if not units:
                    continue
                remote_service = units[0].split('/')[0]
                # NOTE(review): getargspec is deprecated in modern Python;
                # kept here because the arity check drives the two provider
                # calling conventions below.
                argspec = getargspec(provider.provide_data)
                if len(argspec.args) > 1:
                    # Extended signature: pass remote service and readiness.
                    data = provider.provide_data(remote_service, service_ready)
                else:
                    data = provider.provide_data()
                if data:
                    hookenv.relation_set(relid, data)
Set the relation data for each provider in the provided_data list .
56,831
def reconfigure_services(self, *service_names):
    """Update files for one or more services and (re)start or stop them.

    With no names given, all registered services are processed.
    """
    for name in service_names or self.services.keys():
        if self.is_ready(name):
            self.fire_event('data_ready', name)
            self.fire_event('start', name,
                            default=[service_restart, manage_ports])
            self.save_ready(name)
        else:
            # Only emit data_lost on the transition out of readiness.
            if self.was_ready(name):
                self.fire_event('data_lost', name)
            self.fire_event('stop', name,
                            default=[manage_ports, service_stop])
            self.save_lost(name)
Update all files for one or more registered services and if ready optionally restart them .
56,832
def stop_services(self, *service_names):
    """Stop one or more registered services by name (all if none given)."""
    targets = service_names or self.services.keys()
    for name in targets:
        self.fire_event('stop', name, default=[manage_ports, service_stop])
Stop one or more registered services by name .
56,833
def get_service(self, service_name):
    """Return the service definition registered under service_name.

    :raises KeyError: when the service is not registered
    """
    service = self.services.get(service_name)
    if service:
        return service
    raise KeyError('Service not registered: %s' % service_name)
Given the name of a registered service return its service definition .
56,834
def fire_event(self, event_name, service_name, default=None):
    """Fire a data_ready, data_lost, start, or stop event on a service.

    Callbacks may be a single callable or an iterable; ManagerCallback
    instances receive the full (manager, service, event) context.
    """
    service = self.get_service(service_name)
    callbacks = service.get(event_name, default)
    if not callbacks:
        return
    if not isinstance(callbacks, Iterable):
        callbacks = [callbacks]
    for cb in callbacks:
        if isinstance(cb, ManagerCallback):
            cb(self, service_name, event_name)
        else:
            cb(service_name)
Fire a data_ready data_lost start or stop event on a given service .
56,835
def is_ready(self, service_name):
    """Determine readiness of a registered service via its required_data.

    Every element of required_data must be truthy for the service to be
    considered ready.
    """
    service = self.get_service(service_name)
    return all(map(bool, service.get('required_data', [])))
Determine if a registered service is ready by checking its required_data .
56,836
def save_ready(self, service_name):
    """Save an indicator that the given service is now data_ready.

    Reloads the persisted ready-set, adds the service, and writes it back.
    """
    self._load_ready_file()
    self._ready.add(service_name)
    self._save_ready_file()
Save an indicator that the given service is now data_ready .
56,837
def save_lost(self, service_name):
    """Save an indicator that the given service is no longer data_ready.

    Reloads the persisted ready-set, removes the service (if present), and
    writes it back.
    """
    self._load_ready_file()
    self._ready.discard(service_name)
    self._save_ready_file()
Save an indicator that the given service is no longer data_ready .
56,838
def is_enabled():
    """Check whether ufw is enabled.

    :returns: True when 'ufw status' reports an active firewall
    """
    output = subprocess.check_output(
        ['ufw', 'status'],
        universal_newlines=True,
        env={'LANG': 'en_US', 'PATH': os.environ['PATH']})
    return bool(re.search(r'^Status: active\n', output, re.M))
Check if ufw is enabled
56,839
def is_ipv6_ok(soft_fail=False):
    """Check if IPv6 support is present and ip6tables is functional.

    :param soft_fail: return False instead of raising when the ip6_tables
                      module cannot be loaded
    :returns: True when IPv6 firewalling is usable
    :raises UFWIPv6Error: when module loading fails and soft_fail is False
    """
    if not os.path.isdir('/proc/sys/net/ipv6'):
        # IPv6 is disabled at the kernel level.
        return False
    if is_module_loaded('ip6_tables'):
        return True
    try:
        modprobe('ip6_tables')
        return True
    except subprocess.CalledProcessError as ex:
        hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
                    level="WARN")
        if soft_fail:
            return False
        raise UFWIPv6Error("IPv6 firewall support broken")
Check if IPv6 support is present and ip6tables functional
56,840
def default_policy(policy='deny', direction='incoming'):
    """Change ufw's default policy for the given traffic direction.

    :param policy: one of allow/deny/reject
    :param direction: one of incoming/outgoing/routed
    :returns: True when ufw confirmed the change, False otherwise
    :raises UFWError: on unknown policy or direction values
    """
    if policy not in ['allow', 'deny', 'reject']:
        raise UFWError(('Unknown policy %s, valid values: '
                        'allow, deny, reject') % policy)
    if direction not in ['incoming', 'outgoing', 'routed']:
        raise UFWError(('Unknown direction %s, valid values: '
                        'incoming, outgoing, routed') % direction)

    output = subprocess.check_output(
        ['ufw', 'default', policy, direction],
        universal_newlines=True,
        env={'LANG': 'en_US', 'PATH': os.environ['PATH']})
    hookenv.log(output, level='DEBUG')

    # ufw confirms the change on stdout; look for its confirmation line.
    confirmed = re.findall(
        "^Default %s policy changed to '%s'\n" % (direction, policy),
        output, re.M)
    if confirmed:
        hookenv.log("ufw default policy for %s changed to %s"
                    % (direction, policy), level='INFO')
        return True
    hookenv.log("ufw couldn't change the default policy to %s for %s"
                % (policy, direction), level='WARN')
    return False
Changes the default policy for traffic direction
56,841
def revoke_access(src, dst='any', port=None, proto=None):
    """Revoke access to an address or subnet.

    Thin wrapper delegating to modify_access with action='delete'.

    :param src: source address/subnet the rule applies to
    :param dst: destination address ('any' by default)
    :param port: optional port
    :param proto: optional protocol
    """
    return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
Revoke access to an address or subnet
56,842
def rmmod(module, force=False):
    """Remove a module from the Linux kernel.

    :param module: module name
    :param force: pass -f to rmmod (dangerous; see rmmod(8))
    :returns: return value of subprocess.check_call
    """
    cmd = ['rmmod', '-f', module] if force else ['rmmod', module]
    log('Removing kernel module %s' % module, level=INFO)
    return subprocess.check_call(cmd)
Remove a module from the linux kernel
56,843
def is_module_loaded(module):
    """Check whether a kernel module is currently loaded (via lsmod)."""
    return re.search('^%s[ ]+' % module, lsmod(), re.M) is not None
Checks if a kernel module is already loaded
56,844
def get_config():
    """Gather and sanity-check volume configuration data.

    :returns: dict with 'ephemeral', 'device' and 'mountpoint' keys, or
              None when the configuration is inconsistent.
    """
    volume_config = {}
    config = hookenv.config()
    errors = False

    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
        volume_config['ephemeral'] = True
    else:
        volume_config['ephemeral'] = False

    # FIX: pre-initialise volume_map so a YAML parse failure below does not
    # leave it unbound and raise NameError at the 'is None' check.
    volume_map = {}
    try:
        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
    except yaml.YAMLError as e:
        hookenv.log("Error parsing YAML volume-map: {}".format(e),
                    hookenv.ERROR)
        errors = True
    if volume_map is None:
        # 'None' is valid YAML for an empty mapping.
        volume_map = {}
    elif not isinstance(volume_map, dict):
        hookenv.log("Volume-map should be a dictionary, not {}".format(
            type(volume_map)))
        errors = True

    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
    if volume_config['device'] and volume_config['ephemeral']:
        # Asked for ephemeral storage but also defined a volume ID.
        hookenv.log('A volume is defined for this unit, but ephemeral '
                    'storage was requested', hookenv.ERROR)
        errors = True
    elif not volume_config['device'] and not volume_config['ephemeral']:
        # Asked for permanent storage but did not define a volume ID.
        hookenv.log('Ephemeral storage was requested, but there is no volume '
                    'defined for this unit.', hookenv.ERROR)
        errors = True

    unit_mount_name = hookenv.local_unit().replace('/', '-')
    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)

    if errors:
        return None
    return volume_config
Gather and sanity - check volume configuration data
56,845
def get_audits():
    """Get OS hardening security limits audits.

    :returns: list of audit objects; the hardcore limits template is only
              audited when core dumps are disabled in settings.
    """
    settings = utils.get_settings('os')
    audits = [
        DirectoryPermissionAudit('/etc/security/limits.d',
                                 user='root', group='root', mode=0o755),
    ]
    if not settings['security']['kernel_enable_core_dump']:
        audits.append(
            TemplatedFile('/etc/security/limits.d/10.hardcore.conf',
                          SecurityLimitsContext(),
                          template_dir=TEMPLATES_DIR,
                          user='root', group='root', mode=0o0440))
    return audits
Get OS hardening security limits audits .
56,846
def _take_action ( self ) : if self . unless is None : return True if hasattr ( self . unless , '__call__' ) : return not self . unless ( ) return not self . unless
Determines whether to perform the action or not .
56,847
def install_alternative(name, target, source, priority=50):
    """Install an alternative configuration via update-alternatives.

    :param name: alternative name
    :param target: generic path managed by alternatives
    :param source: actual file to point the alternative at
    :param priority: alternative priority
    """
    if os.path.exists(target) and not os.path.islink(target):
        # Preserve a plain file before alternatives replaces it with a link.
        shutil.move(target, '{}.bak'.format(target))
    subprocess.check_call(['update-alternatives', '--force', '--install',
                           target, name, source, str(priority)])
Install alternative configuration
56,848
def ensure_packages(packages):
    """Install (but do not upgrade) any of the required packages missing."""
    missing = filter_installed_packages(packages)
    if missing:
        apt_install(missing, fatal=True)
Install but do not upgrade required plugin packages .
56,849
def _calculate_workers():
    """Determine worker count from the unit's CPU count and multiplier.

    :returns: at least 1 worker for a positive multiplier; capped at
              MAX_DEFAULT_WORKERS in containers when using the default.
    """
    multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER
    count = int(_num_cpus() * multiplier)
    if count == 0 and multiplier > 0:
        count = 1
    # Containers report the host CPU count; cap the implicit default there.
    if config('worker-multiplier') is None and is_container():
        count = min(count, MAX_DEFAULT_WORKERS)
    return count
Determine the number of worker processes based on the CPU count of the unit containing the application .
56,850
def get_related(self):
    """Set self.related True when any context interface has relation ids.

    :returns: the (possibly updated) value of self.related
    """
    self.related = False
    try:
        # Deliberately no early break: every interface is queried.
        for iface in self.interfaces:
            if relation_ids(iface):
                self.related = True
    except AttributeError as e:
        log("{} {}".format(self, e), 'INFO')
    return self.related
Check if any of the context interfaces have relation ids . Set self . related and return True if one of the interfaces has relation ids .
56,851
def canonical_names(self):
    """Figure out which canonical names clients will access this service by.

    Scans identity-service relation data for ssl_key_* keys and returns
    the sorted, de-duplicated CNs they encode.
    """
    prefix = 'ssl_key_'
    cns = []
    for r_id in relation_ids('identity-service'):
        for unit in related_units(r_id):
            rdata = relation_get(rid=r_id, unit=unit)
            for k in rdata:
                if k.startswith(prefix):
                    # FIX: use slicing rather than str.lstrip(prefix) -
                    # lstrip strips a *character set*, so CNs beginning
                    # with any of 's','l','_','k','e','y' were mangled
                    # (e.g. 'ssl_key_keystone' -> 'tone').
                    cns.append(k[len(prefix):])
    return sorted(set(cns))
Figure out which canonical names clients will access this service .
56,852
def _determine_ctxt(self):
    """Determine the volume API endpoint information.

    :returns: dict with 'volume_api_version' and 'volume_catalog_info'
    """
    release = os_release(self.pkg, base='icehouse')
    # Pike and later use the v3 volume API.
    version = '3' if CompareOpenStackReleases(release) >= 'pike' else '2'
    endpoint_type = ('internalURL' if config('use-internal-endpoints')
                     else 'publicURL')
    catalog_info = '{type}:{name}:{endpoint}'.format(
        type='volumev{version}'.format(version=version),
        name='cinderv{version}'.format(version=version),
        endpoint=endpoint_type)
    return {
        'volume_api_version': version,
        'volume_catalog_info': catalog_info,
    }
Determines the Volume API endpoint information .
56,853
def _determine_ctxt(self):
    """Validate that aa-profile-mode is disable, enforce, or complain.

    :returns: context dict when the mode is valid, otherwise None
    """
    if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
        ctxt = {'aa_profile_mode': config('aa-profile-mode'),
                'ubuntu_release': lsb_release()['DISTRIB_RELEASE']}
        if self.aa_profile:
            ctxt['aa_profile'] = self.aa_profile
    else:
        ctxt = None
    # FIX: the flattened original never returned ctxt, so the function
    # always yielded None regardless of the configured mode.
    return ctxt
Validate aa - profile - mode settings is disable enforce or complain .
56,854
def manually_disable_aa_profile(self):
    """Manually disable an apparmor profile.

    Symlinks the profile into /etc/apparmor.d/disable, the mechanism
    apparmor uses to mark a profile disabled.
    """
    profile_dir = '/etc/apparmor.d'
    disable_dir = '/etc/apparmor.d/disable'
    link = os.path.join(disable_dir, self.aa_profile)
    if not os.path.lexists(link):
        os.symlink(os.path.join(profile_dir, self.aa_profile), link)
Manually disable an apparmor profile .
56,855
def setup_aa_profile(self):
    """Setup an apparmor profile.

    The context dictionary contains the apparmor profile mode and name;
    calls out to aa-disable, aa-complain or aa-enforce accordingly. On
    failure of aa-disable, falls back to manually disabling the profile;
    other failures block the unit and re-raise.
    """
    # Populate self.ctxt via __call__.
    self()
    if not self.ctxt:
        log("Not enabling apparmor Profile")
        return
    self.install_aa_utils()
    cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])]
    cmd.append(self.ctxt['aa_profile'])
    log("Setting up the apparmor profile for {} in {} mode."
        "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode']))
    try:
        check_call(cmd)
    except CalledProcessError as e:
        if self.ctxt['aa_profile_mode'] == 'disable':
            # aa-disable can fail when the profile was never loaded; the
            # disable symlink achieves the same end state.
            log("Manually disabling the apparmor profile for {}."
                "".format(self.ctxt['aa_profile']))
            self.manually_disable_aa_profile()
            return
        status_set('blocked', "Apparmor profile {} failed to be set to {}."
                   "".format(self.ctxt['aa_profile'],
                             self.ctxt['aa_profile_mode']))
        raise e
Setup an apparmor profile . The ctxt dictionary will contain the apparmor profile mode and the apparmor profile name . Makes calls out to aa - disable aa - complain or aa - enforce to setup the apparmor profile .
56,856
def execd_module_paths(execd_dir=None):
    """Yield full paths to modules (subdirectories) within execd_dir.

    :param execd_dir: directory to scan (defaults to default_execd_dir())
    """
    base = execd_dir or default_execd_dir()
    if not os.path.exists(base):
        return
    for entry in os.listdir(base):
        candidate = os.path.join(base, entry)
        if os.path.isdir(candidate):
            yield candidate
Generate a list of full paths to modules within execd_dir .
56,857
def execd_submodule_paths(command, execd_dir=None):
    """Yield paths to executable `command` files within each execd module."""
    for module in execd_module_paths(execd_dir):
        candidate = os.path.join(module, command)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            yield candidate
Generate a list of full paths to the specified command within exec_dir .
56,858
def execd_run(command, execd_dir=None, die_on_error=True,
              stderr=subprocess.STDOUT):
    """Run `command` for each module within execd_dir that defines it.

    :param die_on_error: exit the hook with the command's return code on
                         failure
    """
    for submodule in execd_submodule_paths(command, execd_dir):
        try:
            subprocess.check_output(submodule, stderr=stderr,
                                    universal_newlines=True)
        except subprocess.CalledProcessError as e:
            hookenv.log("Error ({}) running {}. Output: {}".format(
                e.returncode, e.cmd, e.output))
            if die_on_error:
                sys.exit(e.returncode)
Run command for each module within execd_dir which defines it .
56,859
def getrange(self, key_prefix, strip=False):
    """Get all keys starting with a common prefix as a key -> value dict.

    :param key_prefix: common prefix to match
    :param strip: when True, remove the prefix from the returned keys
    """
    self.cursor.execute("select key, data from kv where key like ?",
                        ['%s%%' % key_prefix])
    rows = self.cursor.fetchall()
    if not rows:
        return {}
    offset = len(key_prefix) if strip else 0
    return {k[offset:]: json.loads(v) for k, v in rows}
Get a range of keys starting with a common prefix as a mapping of keys to values .
56,860
def update(self, mapping, prefix=""):
    """Set the values of multiple keys at once, applying an optional prefix."""
    for key, value in mapping.items():
        self.set("%s%s" % (prefix, key), value)
Set the values of multiple keys at once .
56,861
def unset(self, key):
    """Remove a key from the database entirely.

    Within a hook scope (self.revision set), a 'DELETED' tombstone is
    recorded in kv_revisions for the key.
    """
    self.cursor.execute('delete from kv where key=?', [key])
    if not (self.revision and self.cursor.rowcount):
        return
    self.cursor.execute(
        'insert into kv_revisions values (?, ?, ?)',
        [key, self.revision, json.dumps('DELETED')])
Remove a key from the database entirely .
56,862
def unsetrange(self, keys=None, prefix=""):
    """Remove a range of keys starting with a common prefix.

    With an explicit key list, each (prefixed) key is deleted and a
    'DELETED' tombstone recorded per key; otherwise everything matching
    the prefix is deleted and a single tombstone recorded for the pattern.
    """
    if keys is not None:
        keys = ['%s%s' % (prefix, key) for key in keys]
        # Build one parameterised IN (...) clause sized to the key list.
        self.cursor.execute(
            'delete from kv where key in (%s)' % ','.join(['?'] * len(keys)),
            keys)
        if self.revision and self.cursor.rowcount:
            self.cursor.execute(
                'insert into kv_revisions values %s' % ','.join(
                    ['(?, ?, ?)'] * len(keys)),
                list(itertools.chain.from_iterable(
                    (key, self.revision, json.dumps('DELETED'))
                    for key in keys)))
    else:
        self.cursor.execute('delete from kv where key like ?',
                            ['%s%%' % prefix])
        if self.revision and self.cursor.rowcount:
            # A single tombstone stands in for the whole prefix range.
            self.cursor.execute(
                'insert into kv_revisions values (?, ?, ?)',
                ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
Remove a range of keys starting with a common prefix from the database entirely .
56,863
def delta(self, mapping, prefix):
    """Return a DeltaSet of keys whose values changed vs stored state.

    New keys get Delta(None, value), removed keys Delta(old, None), and
    keys present on both sides only appear when the value differs.
    """
    previous = self.getrange(prefix, strip=True)
    prev_keys = set(previous) if previous else set()
    curr_keys = set(mapping)
    delta = DeltaSet()
    # Added keys
    for k in curr_keys - prev_keys:
        delta[k] = Delta(None, mapping[k])
    # Removed keys
    for k in prev_keys - curr_keys:
        delta[k] = Delta(previous[k], None)
    # Changed values
    for k in prev_keys & curr_keys:
        if mapping[k] != previous[k]:
            delta[k] = Delta(previous[k], mapping[k])
    return delta
return a delta containing values that have changed .
56,864
def hook_scope(self, name=""):
    """Scope all future interactions to the current hook execution revision.

    Generator context manager: records the hook in the hooks table, makes
    its row id the active revision, then flushes on clean exit or rolls
    back (flush(False)) when the body raises.
    """
    # Nested scopes are not supported.
    assert not self.revision
    self.cursor.execute('insert into hooks (hook, date) values (?, ?)',
                        (name or sys.argv[0],
                         datetime.datetime.utcnow().isoformat()))
    self.revision = self.cursor.lastrowid
    try:
        yield self.revision
        self.revision = None
    except Exception:
        # Abort the transaction and clear the scope before re-raising.
        self.flush(False)
        self.revision = None
        raise
    else:
        self.flush()
Scope all future interactions to the current hook execution revision .
56,865
def add_bridge(name, datapath_type=None):
    """Add the named bridge to openvswitch, optionally with a datapath type."""
    log('Creating bridge {}'.format(name))
    cmd = ["ovs-vsctl", "--", "--may-exist", "add-br", name]
    if datapath_type is not None:
        cmd.extend(['--', 'set', 'bridge', name,
                    'datapath_type={}'.format(datapath_type)])
    subprocess.check_call(cmd)
Add the named bridge to openvswitch
56,866
def add_bridge_port(name, port, promisc=False):
    """Add a port to the named openvswitch bridge and bring the link up."""
    log('Adding port {} to bridge {}'.format(port, name))
    subprocess.check_call(
        ["ovs-vsctl", "--", "--may-exist", "add-port", name, port])
    subprocess.check_call(["ip", "link", "set", port, "up"])
    promisc_state = "on" if promisc else "off"
    subprocess.check_call(["ip", "link", "set", port, "promisc", promisc_state])
Add a port to the named openvswitch bridge
56,867
def del_bridge_port(name, port):
    """Delete a port from the named openvswitch bridge and bring it down."""
    log('Deleting port {} from bridge {}'.format(port, name))
    for cmd in (["ovs-vsctl", "--", "--if-exists", "del-port", name, port],
                ["ip", "link", "set", port, "down"],
                ["ip", "link", "set", port, "promisc", "off"]):
        subprocess.check_call(cmd)
Delete a port from the named openvswitch bridge
56,868
def get_certificate():
    """Read the openvswitch certificate from disk.

    :returns: the PEM block between the BEGIN/END markers, or None when no
              certificate file exists
    :raises RuntimeError: when the file lacks valid markers
    """
    if not os.path.exists(CERT_PATH):
        log('Certificate not found', level=WARNING)
        return None
    log('Reading ovs certificate from {}'.format(CERT_PATH))
    with open(CERT_PATH, 'r') as cert_file:
        raw = cert_file.read()
    begin_marker = "-----BEGIN CERTIFICATE-----"
    end_marker = "-----END CERTIFICATE-----"
    start = raw.find(begin_marker)
    stop = raw.rfind(end_marker)
    if start == -1 or stop == -1:
        raise RuntimeError("Certificate does not contain valid begin"
                           " and end markers.")
    return raw[start:stop + len(end_marker)]
Read openvswitch certificate from disk
56,869
def check_for_eni_source():
    """Ensure /etc/network/interfaces sources interfaces.d.

    Juju removes the source line when setting up interfaces; append it
    when missing.
    """
    source_line = 'source /etc/network/interfaces.d/*'
    with open('/etc/network/interfaces', 'r') as eni:
        for line in eni:
            # FIX: compare stripped - iterating a file yields lines with
            # their trailing newline, so a bare equality test against the
            # newline-less literal never matched and the line was appended
            # repeatedly.
            if line.strip() == source_line:
                return
    with open('/etc/network/interfaces', 'a') as eni:
        eni.write('\n' + source_line)
Juju removes the source line when setting up interfaces; replace it if missing
56,870
def delete_package(self, cache, pkg):
    """Purge a package from the system.

    Virtual packages are handled by recursively purging every package
    that provides them; packages that are not installed are skipped.
    """
    if self.is_virtual_package(pkg):
        log("Package '%s' appears to be virtual - purging provides" %
            pkg.name, level=DEBUG)
        for provided in pkg.provides_list:
            self.delete_package(cache, provided[2].parent_pkg)
        return
    if not pkg.current_ver:
        log("Package '%s' not installed" % pkg.name, level=DEBUG)
        return
    log("Purging package '%s'" % pkg.name, level=DEBUG)
    apt_purge(pkg.name)
Deletes the package from the system .
56,871
def _get_ip_address(self, request):
    """Get the remote IP address the request was generated from.

    Prefers the first entry of X-Forwarded-For (the originating client
    behind proxies); falls back to REMOTE_ADDR, defaulting to "".
    """
    forwarded = request.META.get("HTTP_X_FORWARDED_FOR", None)
    if not forwarded:
        return request.META.get("REMOTE_ADDR", "")
    return forwarded.split(",")[0].strip()
Get the remote ip address the request was generated from .
56,872
def _get_view_name(self, request):
    """Get the dotted module path + class name of the view handling
    this request's HTTP method, or None if no handler attribute exists.
    """
    handler_name = request.method.lower()
    try:
        bound_method = getattr(self, handler_name)
        owner = type(bound_method.__self__)
        return '{}.{}'.format(owner.__module__, owner.__name__)
    except AttributeError:
        return None
Get view name .
56,873
def _get_view_method(self, request):
    """Get the view method: the DRF action when the attribute exists
    (None if it is falsy), otherwise the lowercased HTTP verb.
    """
    if hasattr(self, 'action'):
        return self.action or None
    return request.method.lower()
Get view method .
56,874
def _get_response_ms(self):
    """Duration of the request/response cycle in milliseconds.

    Clamped at 0 so a negative delta (e.g. clock skew) never produces
    a negative duration.
    """
    elapsed = now() - self.log['requested_at']
    milliseconds = int(elapsed.total_seconds() * 1000)
    return max(milliseconds, 0)
Get the duration of the request/response cycle in milliseconds . In case of a negative duration 0 is returned .
56,875
def should_log(self, request, response):
    """Return a truthy value when this request should be logged.

    Logs everything when ``logging_methods`` is '__all__'; otherwise
    only methods contained in ``logging_methods``.
    """
    if self.logging_methods == '__all__':
        return True
    return request.method in self.logging_methods
Method that should return a value that evaluated to True if the request should be logged . By default check if the request method is in logging_methods .
56,876
def merge_dicts(dicts, op=operator.add):
    """Merge a list of dictionaries.

    Keys present in more than one dict have their values combined with
    ``op`` (addition by default), folding left to right.

    :param dicts: iterable of dictionaries
    :param op: binary function applied to the values of duplicate keys
    :returns: the merged dict, or None when ``dicts`` is empty
    """
    merged = None
    for d in dicts:
        if merged is None:
            merged = d.copy()
        else:
            # The original built ``dict(a.items() + b.items() + ...)``,
            # which raises TypeError on Python 3 where dict views do not
            # support '+'. Merge explicitly instead.
            for key, value in d.items():
                if key in merged:
                    merged[key] = op(merged[key], value)
                else:
                    merged[key] = value
    return merged
Merge a list of dictionaries .
56,877
def getLayerIndex(url):
    """Extract the trailing layer index from a service url.

    :param url: service url whose final path component is the layer index
    :returns: the layer index as an int, or 0 when it cannot be parsed
    """
    try:
        path_parts = str(urlparse.urlparse(url).path).split('/')
        candidate = path_parts[-1]
        if is_number(candidate):
            return int(candidate)
        # Fixed: a non-numeric tail previously fell off the end of the
        # function and implicitly returned None, while the error path
        # returned 0 — make both failure modes agree on 0.
        return 0
    except Exception:
        # Narrowed from a bare except; parse failures still yield 0.
        return 0
Extract the layer index from a url .
56,878
def getLayerName(url):
    """Extract the layer name from a service url.

    Returns the third path component from the end (e.g. the service
    name in .../<name>/MapServer/<index>), or the url itself when it
    cannot be parsed.
    """
    try:
        parts = str(urlparse.urlparse(url).path).split('/')
        # Deliberately computed as len-3 (not parts[-3]) so very short
        # paths wrap around instead of raising, matching prior behaviour.
        return parts[len(parts) - 3]
    except:  # noqa: E722 - best-effort, mirror original behaviour
        return url
    finally:
        gc.collect()
Extract the layer name from a url .
56,879
def random_string_generator(size=6, chars=string.ascii_uppercase):
    """Generate a random string of ``size`` characters drawn from ``chars``.

    :param size: length of the generated string (default 6)
    :param chars: character population to draw from (default A-Z)
    :raises ArcRestHelperError: wrapping any unexpected failure
    """
    try:
        picks = [random.choice(chars) for _ in range(size)]
        return ''.join(picks)
    except:
        line, filename, synerror = trace()
        raise ArcRestHelperError({
            "function": "random_string_generator",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
Generates a random string from a set of characters .
56,880
def random_int_generator(maxrange):
    """Return a random integer between 0 and ``maxrange`` inclusive.

    :raises ArcRestHelperError: wrapping any unexpected failure
    """
    try:
        return random.randint(0, maxrange)
    except:
        line, filename, synerror = trace()
        raise ArcRestHelperError({
            "function": "random_int_generator",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
Generates a random integer from 0 to maxrange inclusive .
56,881
def local_time_to_online(dt=None):
    """Convert a datetime object to a UTC millisecond timestamp for AGOL.

    Offsets the naive local timestamp into UTC using the process's
    timezone settings (honouring DST). Defaults to the current time.

    :raises ArcRestHelperError: wrapping any unexpected failure
    """
    try:
        if dt is None:
            dt = datetime.datetime.now()
        dst_active = time.daylight > 0 and time.localtime().tm_isdst > 0
        utc_offset = time.altzone if dst_active else time.timezone
        # Keep the two multiplications separate to match the original
        # floating-point evaluation order exactly.
        return (time.mktime(dt.timetuple()) * 1000) + (utc_offset * 1000)
    except:
        line, filename, synerror = trace()
        raise ArcRestHelperError({
            "function": "local_time_to_online",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
Converts datetime object to a UTC timestamp for AGOL .
56,882
def is_number(s):
    """Determine whether the input can be interpreted as a number.

    Tries ``float(s)`` first, then falls back to unicodedata for
    single numeric characters (e.g. vulgar fractions).
    """
    try:
        float(s)
        return True
    except ValueError:
        pass
    try:
        import unicodedata
        unicodedata.numeric(s)
    except (TypeError, ValueError):
        return False
    return True
Determines if the input is numeric
56,883
def init_config_json(config_file):
    """Deserialize a JSON configuration file.

    :param config_file: path to the configuration file
    :returns: parsed (ascii-converted) content, or None when the file
        does not exist
    :raises ArcRestHelperError: wrapping any unexpected failure
    """
    try:
        if not os.path.exists(config_file):
            return None
        with open(config_file) as json_file:
            data = json.load(json_file)
        return unicode_convert(data)
    except:
        line, filename, synerror = trace()
        raise ArcRestHelperError({
            "function": "init_config_json",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        gc.collect()
Deserializes a JSON configuration file .
56,884
def write_config_json(config_file, data):
    """Serialize an object to disk as JSON.

    :param config_file: destination file path
    :param data: JSON-serializable object to write
    :raises ArcRestHelperError: wrapping any unexpected failure
    """
    try:
        with open(config_file, 'w') as outfile:
            json.dump(data, outfile)
    except:
        line, filename, synerror = trace()
        raise ArcRestHelperError({
            # Fixed: the error payload previously reported
            # "init_config_json" (copy-paste from the sibling reader).
            "function": "write_config_json",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        gc.collect()
Serializes an object to disk .
56,885
def unicode_convert(obj):
    """Recursively convert unicode objects to ascii (best effort).

    Containers are rebuilt with converted keys/values; any failure
    returns the object unchanged.
    """
    try:
        if isinstance(obj, dict):
            return {unicode_convert(k): unicode_convert(v)
                    for k, v in obj.items()}
        if isinstance(obj, list):
            return [unicode_convert(item) for item in obj]
        if isinstance(obj, str):
            return obj
        if isinstance(obj, six.text_type):
            return obj.encode('utf-8')
        if isinstance(obj, six.integer_types):
            return obj
        return obj
    except:
        return obj
Converts unicode objects to ASCII .
56,886
def find_replace(obj, find, replace):
    """Recursively search ``obj`` and replace occurrences of ``find``.

    Dicts and lists are rebuilt element-by-element; exact matches are
    replaced outright, and other leaves are passed through string
    find/replace where possible.

    :raises ArcRestHelperError: wrapping any unexpected failure
    """
    try:
        if isinstance(obj, dict):
            return {
                find_replace(key, find, replace):
                    find_replace(value, find, replace)
                for key, value in obj.items()
            }
        if isinstance(obj, list):
            return [find_replace(item, find, replace) for item in obj]
        if obj == find:
            return unicode_convert(replace)
        try:
            return unicode_convert(find_replace_string(obj, find, replace))
        except:
            return unicode_convert(obj)
    except:
        line, filename, synerror = trace()
        raise ArcRestHelperError({
            "function": "find_replace",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
Searches an object and performs a find and replace .
56,887
def _tostr(self, obj):
    """Render ``obj`` as a string.

    Falsy values become the empty string; lists are rendered as a
    comma-separated string of their (recursively converted) elements.
    """
    if not obj:
        return ''
    if isinstance(obj, list):
        rendered = [self._tostr(element) for element in obj]
        return ', '.join(rendered)
    return str(obj)
Converts an object to a string ; if the object is a list , it creates a comma-separated string of its elements .
56,888
def _unzip_file(self, zip_file, out_folder):
    """Extract a zip archive into ``out_folder``.

    :returns: True on success, False on any failure
    """
    try:
        with zipfile.ZipFile(zip_file, 'r') as archive:
            archive.extractall(path=out_folder)
        return True
    except:
        return False
unzips a file to a given folder
56,889
def _list_files(self, path):
    """Return a sorted list of filesystem entries matching glob ``path``."""
    return sorted(glob.glob(pathname=path))
lists files in a given directory
56,890
def _get_content_type(self, filename):
    """Guess the MIME content type of a file.

    Falls back to csv/sd special cases and then to a generic
    "File/<ext>" when mimetypes cannot identify the file.
    """
    content_type = mimetypes.guess_type(filename)[0]
    if content_type is not None:
        return content_type
    _base, extension = os.path.splitext(filename)
    lowered = extension.lower()
    if lowered == ".csv":
        return "text/csv"
    if lowered == ".sd":
        return "File/sd"
    # Fallback keeps the extension's original case, as before.
    return "File/%s" % extension.replace('.', '')
gets the content type of a file
56,891
def value(self):
    """Return the simple marker symbol as a dictionary."""
    symbol = {
        "type": "esriSMS",
        "style": self._style,
        "color": self._color.value,
        "size": self._size,
        "angle": self._angle,
        "xoffset": self._xoffset,
        "yoffset": self._yoffset,
    }
    if self._outline is not None:
        # NOTE(review): the outline colour reuses the fill colour rather
        # than an outline-specific colour — mirrors original behaviour.
        symbol["outline"] = {
            "width": self._outline['width'],
            "color": self._color.value,
        }
    return symbol
returns the object as dictionary
56,892
def unfederate(self, serverId):
    """Unfederate an ArcGIS Server from Portal for ArcGIS.

    :param serverId: id of the federated server to remove
    :returns: JSON response from the portal admin API
    """
    url = self._url + "/servers/{serverid}/unfederate".format(
        serverid=serverId)
    params = {"f": "json"}
    return self._get(url=url,
                     param_dict=params,
                     proxy_port=self._proxy_port,
                     # Fixed: previously referenced the non-existent
                     # attribute ``_proxy_ur``.
                     proxy_url=self._proxy_url)
This operation unfederates an ArcGIS Server from Portal for ArcGIS
56,893
def validateAllServers(self):
    """Validate all ArcGIS Server sites federated with Portal for ArcGIS.

    :returns: JSON status information from the portal admin API
    """
    url = self._url + "/servers/validate"
    params = {"f": "json"}
    return self._get(url=url,
                     param_dict=params,
                     proxy_port=self._proxy_port,
                     # Fixed: was ``self._proxy_ur`` (typo), which would
                     # raise AttributeError at call time.
                     proxy_url=self._proxy_url)
This operation validates all ArcGIS Server sites federated with Portal for ArcGIS .
56,894
def editLogSettings(self, logLocation, logLevel="WARNING", maxLogFileAge=90):
    """Edit the log settings for the portal site.

    :param logLocation: directory the portal writes its logs to
    :param logLevel: log verbosity (default "WARNING")
    :param maxLogFileAge: days to keep log files (default 90)
    :returns: JSON response from the portal admin API
    """
    params = {
        "f": "json",
        "logDir": logLocation,
        "logLevel": logLevel,
        "maxLogFileAge": maxLogFileAge,
    }
    return self._post(url=self._url + "/settings/edit",
                      param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
edits the log settings for the portal site
56,895
def query(self, logLevel="WARNING", source="ALL", startTime=None,
          endTime=None, logCodes=None, users=None, messageCount=1000):
    """Query the portal log files via the REST endpoint.

    :param logLevel: minimum log level to return (default "WARNING")
    :param source: log source, or "ALL" for every source
    :param startTime: optional datetime lower bound
    :param endTime: optional datetime upper bound
    :param logCodes: optional comma-separated log codes to filter on
    :param users: optional comma-separated user names to filter on
    :param messageCount: page size; non-numeric/None falls back to 1000
    :returns: JSON response from the portal admin API
    """
    url = self._url + "/query"
    filter_value = {"codes": [], "users": [], "source": "*"}
    if source.lower() == "all":
        filter_value['source'] = "*"
    else:
        filter_value['source'] = [source]
    params = {"f": "json",
              "level": logLevel}
    if startTime is not None and isinstance(startTime, datetime):
        params['startTime'] = startTime.strftime("%Y-%m-%dT%H:%M:%S")
    if endTime is not None and isinstance(endTime, datetime):
        # Fixed: this previously formatted ``startTime``, so the
        # requested end bound was silently ignored.
        params['endTime'] = endTime.strftime("%Y-%m-%dT%H:%M:%S")
    if logCodes is not None:
        filter_value['codes'] = logCodes.split(',')
    if users is not None:
        filter_value['users'] = users.split(',')
    if messageCount is None:
        params['pageSize'] = 1000
    elif isinstance(messageCount, (int, long, float)):
        params['pageSize'] = int(messageCount)
    else:
        params['pageSize'] = 1000
    params['filter'] = filter_value
    return self._get(url=url,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_url=self._proxy_url,
                     proxy_port=self._proxy_port)
allows users to look at the log files from the REST endpoint
56,896
def deleteCertificate(self, certName):
    """Delete an SSL certificate from the key store.

    Once a certificate is deleted it cannot be retrieved or used to
    enable SSL.

    :param certName: alias of the certificate to delete
    :returns: JSON response from the portal admin API
    """
    url = self._url + "/sslCertificates/{cert}/delete".format(cert=certName)
    return self._post(url=url,
                      param_dict={"f": "json"},
                      proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
This operation deletes an SSL certificate from the key store . Once a certificate is deleted it cannot be retrieved or used to enable SSL .
56,897
def exportCertificate(self, certName, outFolder=None):
    """Download an SSL certificate as an X.509 file.

    :param certName: alias of the certificate to export
    :param outFolder: destination folder; defaults to the system temp
        directory when None
    :returns: result of the download request
    """
    if outFolder is None:
        outFolder = tempfile.gettempdir()
    url = self._url + "/sslCertificates/{cert}/export".format(cert=certName)
    return self._post(url=url,
                      param_dict={"f": "json"},
                      out_folder=outFolder,
                      proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
This operation downloads an SSL certificate . The file returned by the server is an X . 509 certificate . The downloaded certificate can be imported into a client that is making HTTP requests .
56,898
def generateCertificate(self, alias, commonName, organizationalUnit,
                        city, state, country, keyalg="RSA", keysize=1024,
                        sigalg="SHA256withRSA", validity=90):
    """Create a self-signed certificate in the portal keystore.

    Use this as-is or as a starting point for a CA-signed certificate.

    :param alias: keystore alias for the new certificate
    :param commonName: certificate common name (CN)
    :param organizationalUnit: organizational unit (OU)
    :param city: locality (L)
    :param state: state/province (ST)
    :param country: two-letter country code (C)
    :param keyalg: key algorithm (default "RSA")
    :param keysize: key size in bits (default 1024)
    :param sigalg: signature algorithm (default "SHA256withRSA")
    :param validity: validity period in days (default 90)
    :returns: JSON response from the portal admin API
    """
    params = {
        "f": "json",
        "alias": alias,
        "commonName": commonName,
        "organizationalUnit": organizationalUnit,
        "city": city,
        "state": state,
        "country": country,
        "keyalg": keyalg,
        "keysize": keysize,
        "sigalg": sigalg,
        "validity": validity,
    }
    # Fixed: the endpoint previously contained an embedded space
    # ("/SSLCertificate/ generateCertificate"), which yields an invalid
    # request URL. TODO(review): confirm exact endpoint casing against
    # the portal admin API docs.
    url = self._url + "/SSLCertificate/generateCertificate"
    return self._post(url=url,
                      param_dict=params,
                      proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
Use this operation to create a self - signed certificate or as a starting point for getting a production - ready CA - signed certificate . The portal will generate a certificate for you and store it in its keystore .
56,899
def getAppInfo(self, appId):
    """Return the OAuth properties of a registered application.

    Every application registered with Portal for ArcGIS has a unique
    client ID and a list of redirect URIs used for OAuth; this
    operation returns those properties for ``appId``.
    """
    url = self._url + "/oauth/getAppInfo"
    return self._get(url=url,
                     param_dict={"f": "json", "appID": appId},
                     proxy_url=self._proxy_url,
                     proxy_port=self._proxy_port)
Every application registered with Portal for ArcGIS has a unique client ID and a list of redirect URIs that are used for OAuth . This operation returns these OAuth - specific properties of an application . You can use this information to update the redirect URIs by using the Update App Info operation .