idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
56,500
def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, device_class=None):
    """Return the number of placement groups to use when creating the pool.

    :param pool_size: int divisor applied to the PG budget — replica count
        for replicated pools; presumably K+M for erasure pools (TODO confirm
        against callers).
    :param percent_data: share (0-100) of total cluster data this pool is
        expected to hold; None falls back to DEFAULT_POOL_WEIGHT.
    :param device_class: optional crush device class; when set, only OSDs of
        that class are counted and 'expected-osd-count' is not used.
    :returns: int PG count, rounded to a power of two (stepped up one power
        when more than 25%% above the nearest lower power).
    """
    validator(value=pool_size, valid_type=int)
    if percent_data is None:
        percent_data = DEFAULT_POOL_WEIGHT
    osd_list = get_osds(self.service, device_class)
    # Operator-declared eventual cluster size; 0 when unset.
    expected = config('expected-osd-count') or 0
    if osd_list:
        if device_class:
            osd_count = len(osd_list)
        else:
            # Use the larger of declared vs. observed so an under-deployed
            # cluster still sizes PGs for its eventual OSD count.
            osd_count = max(expected, len(osd_list))
        if not device_class and expected and osd_count != expected:
            log("Found more OSDs than provided expected count. "
                "Using the actual count instead", INFO)
    elif expected:
        # No OSDs visible yet; trust the operator's expected count.
        osd_count = expected
    else:
        # Nothing to base a calculation on: legacy fixed default.
        return LEGACY_PG_COUNT
    percent_data /= 100.0
    target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET
    num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size
    if num_pg < DEFAULT_MINIMUM_PGS:
        num_pg = DEFAULT_MINIMUM_PGS
    # Round to the nearest power of two; bump up when >25% over it.
    exponent = math.floor(math.log(num_pg, 2))
    nearest = 2 ** exponent
    if (num_pg - nearest) > (num_pg * 0.25):
        return int(nearest * 2)
    else:
        return int(nearest)
Return the number of placement groups to use when creating the pool .
56,501
def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, weight=None, group=None, namespace=None, app_name=None, max_bytes=None, max_objects=None):
    """Append a 'create-pool' operation for a replicated pool.

    Either an explicit ``pg_num`` or a relative ``weight`` may be supplied,
    never both.

    :raises ValueError: if both pg_num and weight are given.
    """
    if pg_num and weight:
        raise ValueError('pg_num and weight are mutually exclusive')
    operation = {
        'op': 'create-pool',
        'name': name,
        'replicas': replica_count,
        'pg_num': pg_num,
        'weight': weight,
        'group': group,
        'group-namespace': namespace,
        'app-name': app_name,
        'max-bytes': max_bytes,
        'max-objects': max_objects,
    }
    self.ops.append(operation)
Adds an operation to create a replicated pool .
56,502
def add_op_create_erasure_pool(self, name, erasure_profile=None, weight=None, group=None, app_name=None, max_bytes=None, max_objects=None):
    """Append a 'create-pool' operation for an erasure-coded pool."""
    operation = {
        'op': 'create-pool',
        'name': name,
        'pool-type': 'erasure',
        'erasure-profile': erasure_profile,
        'weight': weight,
        'group': group,
        'app-name': app_name,
        'max-bytes': max_bytes,
        'max-objects': max_objects,
    }
    self.ops.append(operation)
Adds an operation to create an erasure-coded pool.
56,503
def get_nagios_unit_name(relation_name='nrpe-external-master'):
    """Return the nagios unit name, prefixed with host context when set."""
    host_context = get_nagios_hostcontext(relation_name)
    if not host_context:
        return local_unit()
    return "%s:%s" % (host_context, local_unit())
Return the nagios unit name prepended with host_context if needed
56,504
def copy_nrpe_checks(nrpe_files_dir=None):
    """Copy the bundled NRPE check scripts into the Nagios plugins dir.

    :param nrpe_files_dir: directory holding the check_* scripts; when None,
        search under $CHARM_DIR (and $CHARM_DIR/hooks) for the bundled
        charmhelpers/contrib/openstack/files directory.
    :raises RuntimeError: if no candidate charmhelpers directory exists.
    """
    NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
    if nrpe_files_dir is None:
        # Probe both layouts: charm root and the legacy hooks/ subtree.
        for segment in ['.', 'hooks']:
            nrpe_files_dir = os.path.abspath(os.path.join(
                os.getenv('CHARM_DIR'), segment, 'charmhelpers', 'contrib',
                'openstack', 'files'))
            if os.path.isdir(nrpe_files_dir):
                break
        else:
            raise RuntimeError("Couldn't find charmhelpers directory")
    if not os.path.exists(NAGIOS_PLUGINS):
        os.makedirs(NAGIOS_PLUGINS)
    for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
        if os.path.isfile(fname):
            # copy2 preserves mode/mtime so checks remain executable.
            shutil.copy2(fname,
                        os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
Copy the nrpe checks into place
56,505
def write_vaultlocker_conf(context, priority=100):
    """Write vaultlocker configuration to disk and install an alternative.

    :param context: template context for vaultlocker.conf.j2.
    :param priority: alternatives priority for the rendered config.
    """
    charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
        hookenv.service_name())
    # Config contains secrets, so restrict both directory and file modes.
    host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
    templating.render(source='vaultlocker.conf.j2',
                      target=charm_vl_path,
                      context=context,
                      perms=0o600)
    # FIX: the original joined these two calls with a stray comma, creating
    # a pointless tuple expression instead of two statements.
    alternatives.install_alternative('vaultlocker.conf',
                                     '/etc/vaultlocker/vaultlocker.conf',
                                     charm_vl_path,
                                     priority)
Write vaultlocker configuration to disk and install alternative
56,506
def vault_relation_complete(backend=None):
    """Return True when the vault relation is complete."""
    kv_context = VaultKVContext(secret_backend=(backend or VAULTLOCKER_BACKEND))
    kv_context()
    return kv_context.complete
Determine whether vault relation is complete
56,507
def retrieve_secret_id(url, token):
    """Retrieve a response-wrapped secret_id from Vault.

    :param url: Vault server URL.
    :param token: one-shot wrapping token used to unwrap the secret_id.
    :returns: the secret_id string, or implicitly None on any non-200
        response (errors are not raised here).
    """
    import hvac
    client = hvac.Client(url=url, token=token)
    # NOTE(review): relies on hvac's private _post helper rather than a
    # public unwrap API — confirm this still exists in the pinned hvac.
    response = client._post('/v1/sys/wrapping/unwrap')
    if response.status_code == 200:
        data = response.json()
        return data['data']['secret_id']
Retrieve a response - wrapped secret_id from Vault
56,508
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    """Decorator: retry the wrapped callable on ``exc_type``.

    Allows ``num_retries`` retry attempts (with a linearly growing delay of
    ``base_delay * attempt``) before re-raising the exception.
    """
    def _decorator(f):
        def _wrapped(*args, **kwargs):
            remaining = num_retries
            attempt = 1
            while True:
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if not remaining:
                        raise
                    delay = base_delay * attempt
                    attempt += 1
                    log("Retrying '%s' %d more times (delay=%s)" %
                        (f.__name__, remaining, delay), level=INFO)
                    remaining -= 1
                    if delay:
                        time.sleep(delay)
        return _wrapped
    return _decorator
If the decorated function raises an exception of type exc_type, allow num_retries retry attempts before re-raising the exception.
56,509
def _snap_exec(commands):
    """Execute a snap command, retrying while the snap lock is held.

    :param commands: list of arguments passed to the ``snap`` binary.
    :returns: the final return code from ``snap``.
    :raises CouldNotAcquireLockException: after SNAP_NO_LOCK_RETRY_COUNT
        failed attempts to acquire the snap lock.
    """
    assert type(commands) == list
    retry_count = 0
    return_code = None
    while return_code is None or return_code == SNAP_NO_LOCK:
        try:
            return_code = subprocess.check_call(['snap'] + commands,
                                                env=os.environ)
        except subprocess.CalledProcessError as e:
            retry_count += 1
            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
                raise CouldNotAcquireLockException(
                    'Could not acquire lock after {} attempts'.format(
                        SNAP_NO_LOCK_RETRY_COUNT))
            return_code = e.returncode
            # FIX: level='WARN' was previously passed to str.format()
            # instead of log(), so the message was logged at the default
            # level with no placeholder for it.
            log('Snap failed to acquire lock, trying again in {} seconds.'
                .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
            sleep(SNAP_NO_LOCK_RETRY_DELAY)
    return return_code
Execute snap commands .
56,510
def snap_remove(packages, *flags):
    """Remove one or more snap packages, passing any extra flags through."""
    pkgs = packages if type(packages) is list else [packages]
    opts = list(flags)
    msg = 'Removing snap(s) "%s"' % ', '.join(pkgs)
    if opts:
        msg += ' with options "%s"' % ', '.join(opts)
    log(msg, level='INFO')
    return _snap_exec(['remove'] + opts + pkgs)
Remove a snap package .
56,511
def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, public_port, expected):
    """Validate a keystone v2 endpoint list against expected attributes.

    :param endpoints: iterable of endpoint objects from the catalog.
    :param admin_port: substring expected inside the matching adminurl.
    :param internal_port: substring expected inside the matching internalurl.
    :param public_port: substring expected inside the matching publicurl.
    :param expected: dict of attribute values the endpoint must carry.
    :returns: None on success, otherwise an error-description string.
    """
    self.log.debug('Validating endpoint data...')
    self.log.debug('actual: {}'.format(repr(endpoints)))
    found = False
    for ep in endpoints:
        self.log.debug('endpoint: {}'.format(repr(ep)))
        # `in` is a substring test: the ports are matched inside the URLs.
        if (admin_port in ep.adminurl and
                internal_port in ep.internalurl and
                public_port in ep.publicurl):
            found = True
            actual = {'id': ep.id,
                      'region': ep.region,
                      'adminurl': ep.adminurl,
                      'internalurl': ep.internalurl,
                      'publicurl': ep.publicurl,
                      'service_id': ep.service_id}
            ret = self._validate_dict_data(expected, actual)
            if ret:
                return 'unexpected endpoint data - {}'.format(ret)
    if not found:
        return 'endpoint not found'
Validate endpoint data .
56,512
def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, public_port, expected, expected_num_eps=3):
    """Validate keystone v3 endpoint data.

    Matches each endpoint by (port substring, interface) pair and checks
    its attributes against ``expected``.

    :param expected_num_eps: number of matching endpoints required.
    :returns: None on success, otherwise an error-description string.
    """
    self.log.debug('Validating v3 endpoint data...')
    self.log.debug('actual: {}'.format(repr(endpoints)))
    found = []
    for ep in endpoints:
        self.log.debug('endpoint: {}'.format(repr(ep)))
        if ((admin_port in ep.url and ep.interface == 'admin') or
                (internal_port in ep.url and ep.interface == 'internal') or
                (public_port in ep.url and ep.interface == 'public')):
            found.append(ep.interface)
            actual = {'id': ep.id,
                      'region': ep.region,
                      'region_id': ep.region_id,
                      'interface': self.not_null,
                      'url': ep.url,
                      'service_id': ep.service_id, }
            ret = self._validate_dict_data(expected, actual)
            if ret:
                return 'unexpected endpoint data - {}'.format(ret)
    if len(found) != expected_num_eps:
        return 'Unexpected number of endpoints found'
Validate keystone v3 endpoint data .
56,513
def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
    """Convert a v2 service-catalog endpoint dict into v3 shape, in place.

    Each service's single v2 entry (adminURL/publicURL/internalURL) becomes
    three v3 entries keyed by interface. ID and region_id can only be
    checked for non-null after conversion.

    :param ep_data: dict of {service: [v2_endpoint_dict]}; mutated and
        returned.
    """
    self.log.warn("Endpoint ID and Region ID validation is limited to not "
                  "null checks after v2 to v3 conversion")
    for svc in ep_data.keys():
        # Expect exactly one v2 endpoint record per service.
        assert len(ep_data[svc]) == 1, "Unknown data format"
        svc_ep_data = ep_data[svc][0]
        ep_data[svc] = [
            {'url': svc_ep_data['adminURL'],
             'interface': 'admin',
             'region': svc_ep_data['region'],
             'region_id': self.not_null,
             'id': self.not_null},
            {'url': svc_ep_data['publicURL'],
             'interface': 'public',
             'region': svc_ep_data['region'],
             'region_id': self.not_null,
             'id': self.not_null},
            {'url': svc_ep_data['internalURL'],
             'interface': 'internal',
             'region': svc_ep_data['region'],
             'region_id': self.not_null,
             'id': self.not_null}]
    return ep_data
Convert v2 endpoint data into v3 .
56,514
def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
    """Validate v2 service catalog endpoint data.

    :param expected: dict of {service: [endpoint_dict]} to verify.
    :param actual: service catalog data as returned by keystone.
    :returns: None when everything matches, otherwise an error string.
    """
    self.log.debug('Validating service catalog endpoint data...')
    self.log.debug('actual: {}'.format(repr(actual)))
    # FIX: initialise ret so an empty `expected` dict cannot raise
    # UnboundLocalError at the final return.
    ret = None
    # .items() works on both py2 and py3; drops the six dependency here.
    for k, v in expected.items():
        if k in actual:
            ret = self._validate_dict_data(expected[k][0], actual[k][0])
            if ret:
                return self.endpoint_error(k, ret)
        else:
            return "endpoint {} does not exist".format(k)
    return ret
Validate service catalog endpoint data .
56,515
def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
    """Validate keystone v3 catalog endpoint data.

    Endpoints are compared per interface after sorting both sides by
    interface name so ordering differences do not matter.

    :returns: None when everything matches, otherwise an error string.
    """
    self.log.debug('Validating v3 service catalog endpoint data...')
    self.log.debug('actual: {}'.format(repr(actual)))
    # FIX: initialise ret so an empty `expected` dict cannot raise
    # UnboundLocalError at the final return.
    ret = None
    # .items() works on both py2 and py3; drops the six dependency here.
    for k, v in expected.items():
        if k in actual:
            l_expected = sorted(v, key=lambda x: x['interface'])
            l_actual = sorted(actual[k], key=lambda x: x['interface'])
            if len(l_actual) != len(l_expected):
                return ("endpoint {} has differing number of interfaces "
                        " - expected({}), actual({})"
                        .format(k, len(l_expected), len(l_actual)))
            for i_expected, i_actual in zip(l_expected, l_actual):
                self.log.debug("checking interface {}"
                               .format(i_expected['interface']))
                ret = self._validate_dict_data(i_expected, i_actual)
                if ret:
                    return self.endpoint_error(k, ret)
        else:
            return "endpoint {} does not exist".format(k)
    return ret
Validate the keystone v3 catalog endpoint data .
56,516
def validate_tenant_data(self, expected, actual):
    """Validate a list of expected tenants against keystone tenant objects.

    :param expected: list of dicts with enabled/description/name/id keys.
    :param actual: iterable of tenant objects from keystone.
    :returns: None when everything matches, otherwise an error string.
    """
    self.log.debug('Validating tenant data...')
    self.log.debug('actual: {}'.format(repr(actual)))
    # FIX: initialise ret so an empty `expected` list cannot raise
    # UnboundLocalError at the final return.
    ret = None
    for e in expected:
        found = False
        for act in actual:
            a = {'enabled': act.enabled, 'description': act.description,
                 'name': act.name, 'id': act.id}
            if e['name'] == a['name']:
                found = True
                ret = self._validate_dict_data(e, a)
                if ret:
                    return "unexpected tenant data - {}".format(ret)
        if not found:
            return "tenant {} does not exist".format(e['name'])
    return ret
Validate tenant data .
56,517
def validate_user_data(self, expected, actual, api_version=None):
    """Validate a list of expected users against keystone user objects.

    For api_version 3 the user's default_project_id is compared; for v2
    the tenantId attribute is used instead.

    :returns: None when everything matches, otherwise an error string.
    """
    self.log.debug('Validating user data...')
    self.log.debug('actual: {}'.format(repr(actual)))
    # FIX: initialise ret so an empty `expected` list cannot raise
    # UnboundLocalError at the final return.
    ret = None
    for e in expected:
        found = False
        for act in actual:
            if e['name'] == act.name:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'id': act.id}
                if api_version == 3:
                    a['default_project_id'] = getattr(
                        act, 'default_project_id', 'none')
                else:
                    a['tenantId'] = act.tenantId
                found = True
                ret = self._validate_dict_data(e, a)
                if ret:
                    return "unexpected user data - {}".format(ret)
        if not found:
            return "user {} does not exist".format(e['name'])
    return ret
Validate user data .
56,518
def validate_flavor_data(self, expected, actual):
    """Validate that the expected flavor names are all present."""
    self.log.debug('Validating flavor data...')
    self.log.debug('actual: {}'.format(repr(actual)))
    flavor_names = [flavor.name for flavor in actual]
    return self._validate_list_data(expected, flavor_names)
Validate flavor data .
56,519
def tenant_exists(self, keystone, tenant):
    """Return True if the named tenant exists in keystone."""
    self.log.debug('Checking if tenant exists ({})...'.format(tenant))
    names = [t.name for t in keystone.tenants.list()]
    return tenant in names
Return True if tenant exists .
56,520
def keystone_wait_for_propagation(self, sentry_relation_pairs, api_version):
    """Verify api_version propagated to every identity-service relation.

    :param sentry_relation_pairs: list of (sentry, relation_name) tuples.
    :param api_version: expected value; compared as a string.
    :raises Exception: when any relation still carries a different value.
    """
    for (sentry, relation_name) in sentry_relation_pairs:
        rel = sentry.relation('identity-service', relation_name)
        self.log.debug('keystone relation data: {}'.format(rel))
        if rel.get('api_version') != str(api_version):
            raise Exception("api_version not propagated through relation"
                            " data yet ('{}' != '{}')."
                            "".format(rel.get('api_version'), api_version))
Iterate over list of sentry and relation tuples and verify that api_version has the expected value .
56,521
def keystone_configure_api_version(self, sentry_relation_pairs, deployment, api_version):
    """Set keystone's preferred-api-version and wait for propagation.

    Blocks until deployment status settles and the given relations all
    carry the new api_version value.
    """
    self.log.debug("Setting keystone preferred-api-version: '{}'"
                   .format(api_version))
    new_config = {'preferred-api-version': api_version}
    deployment.d.configure('keystone', new_config)
    deployment._auto_wait_for_status()
    self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
Configure preferred - api - version of keystone in deployment and monitor provided list of relation objects for propagation before returning to caller .
56,522
def authenticate_cinder_admin(self, keystone, api_version=2):
    """Authenticate the admin user against cinder (v1 or v2 client)."""
    self.log.debug('Authenticating cinder admin...')
    client_classes = {1: cinder_client.Client,
                      2: cinder_clientv2.Client}
    client_class = client_classes[api_version]
    return client_class(session=keystone.session)
Authenticates admin user with cinder .
56,523
def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, user_domain_name=None, domain_name=None, project_domain_name=None, project_name=None):
    """Authenticate with keystone and return an authenticated client.

    :param api_version: 2 or 3; any falsy value defaults to 2.
    :param admin_port: use the admin port (35357) instead of public (5000).
    :returns: a keystone v2 or v3 client with auth_ref populated.
    """
    self.log.debug('Authenticating with keystone...')
    if not api_version:
        api_version = 2
    sess, auth = self.get_keystone_session(
        keystone_ip=keystone_ip, username=username, password=password,
        api_version=api_version, admin_port=admin_port,
        user_domain_name=user_domain_name, domain_name=domain_name,
        project_domain_name=project_domain_name, project_name=project_name)
    if api_version == 2:
        client = keystone_client.Client(session=sess)
    else:
        client = keystone_client_v3.Client(session=sess)
    # Resolve access info eagerly so auth failures surface here rather
    # than on the first API call.
    client.auth_ref = auth.get_access(sess)
    return client
Authenticate with Keystone
56,524
def get_keystone_session(self, keystone_ip, username, password, api_version=False, admin_port=False, user_domain_name=None, domain_name=None, project_domain_name=None, project_name=None):
    """Return a (session, auth) pair for keystone v2 or v3.

    Only api_version == 2 selects the v2 Password plugin; every other
    value (including the False default) uses v3.
    """
    ep = self.get_keystone_endpoint(keystone_ip,
                                    api_version=api_version,
                                    admin_port=admin_port)
    if api_version == 2:
        auth = v2.Password(
            username=username,
            password=password,
            tenant_name=project_name,
            auth_url=ep)
        sess = keystone_session.Session(auth=auth)
    else:
        auth = v3.Password(
            user_domain_name=user_domain_name,
            username=username,
            password=password,
            domain_name=domain_name,
            project_domain_name=project_domain_name,
            project_name=project_name,
            auth_url=ep)
        sess = keystone_session.Session(auth=auth)
    return (sess, auth)
Return a keystone session object
56,525
def get_keystone_endpoint(self, keystone_ip, api_version=None, admin_port=False):
    """Return the keystone auth endpoint URL.

    :param keystone_ip: host address as bytes (it is decoded as utf-8).
    :param api_version: 2 appends /v2.0; anything else appends /v3.
    :param admin_port: use the admin port 35357 instead of public 5000.
    """
    port = 35357 if admin_port else 5000
    host = keystone_ip.strip().decode('utf-8')
    suffix = "/v2.0" if api_version == 2 else "/v3"
    return "http://{}:{}".format(host, port) + suffix
Return keystone endpoint
56,526
def get_default_keystone_session(self, keystone_sentry, openstack_release=None, api_version=2):
    """Return (session, client) for keystone using standard default creds.

    :param openstack_release: numeric release index; releases >= 11 force
        the v3 client — presumably v3-only releases, TODO confirm mapping.
    """
    self.log.debug('Authenticating keystone admin...')
    if api_version == 3 or (openstack_release and openstack_release >= 11):
        client_class = keystone_client_v3.Client
        api_version = 3
    else:
        client_class = keystone_client.Client
    keystone_ip = keystone_sentry.info['public-address']
    session, auth = self.get_keystone_session(
        keystone_ip,
        api_version=api_version,
        username='admin',
        password='openstack',
        project_name='admin',
        user_domain_name='admin_domain',
        project_domain_name='admin_domain')
    client = client_class(session=session)
    # Resolve the token now so auth errors surface immediately.
    client.auth_ref = auth.get_access(session)
    return session, client
Return a keystone session object and client object assuming standard default settings
56,527
def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant=None, api_version=None, keystone_ip=None, user_domain_name=None, project_domain_name=None, project_name=None):
    """Authenticate the admin user against the keystone admin endpoint.

    Fills in sensible v3 defaults (admin_domain / admin) for any domain or
    project argument left unset.
    """
    self.log.debug('Authenticating keystone admin...')
    if not keystone_ip:
        keystone_ip = keystone_sentry.info['public-address']
    # Legacy callers pass `tenant`; treat it as the project name.
    if not project_name:
        project_name = tenant
    if api_version == 3:
        user_domain_name = user_domain_name or 'admin_domain'
        project_domain_name = project_domain_name or 'admin_domain'
        project_name = project_name or 'admin'
    return self.authenticate_keystone(keystone_ip, user, password,
                                      api_version=api_version,
                                      user_domain_name=user_domain_name,
                                      project_domain_name=project_domain_name,
                                      project_name=project_name,
                                      admin_port=True)
Authenticates admin user with the keystone admin endpoint .
56,528
def authenticate_keystone_user(self, keystone, user, password, tenant):
    """Authenticate a regular user via the keystone public endpoint.

    The keystone host is extracted from the catalog's public identity URL.
    """
    self.log.debug('Authenticating keystone user ({})...'.format(user))
    ep = keystone.service_catalog.url_for(service_type='identity',
                                          interface='publicURL')
    keystone_ip = urlparse.urlparse(ep).hostname
    return self.authenticate_keystone(keystone_ip, user, password,
                                      project_name=tenant)
Authenticates a regular user with the keystone public endpoint .
56,529
def authenticate_glance_admin(self, keystone, force_v1_client=False):
    """Authenticate the admin user with glance, preferring the v2 client.

    :param force_v1_client: always use the legacy v1 client, which
        authenticates directly against the admin endpoint with a token.
    """
    self.log.debug('Authenticating glance admin...')
    ep = keystone.service_catalog.url_for(service_type='image',
                                          interface='adminURL')
    if not force_v1_client and keystone.session:
        return glance_clientv2.Client("2", session=keystone.session)
    else:
        return glance_client.Client(ep, token=keystone.auth_token)
Authenticates admin user with glance .
56,530
def authenticate_heat_admin(self, keystone):
    """Authenticate the admin user with heat."""
    self.log.debug('Authenticating heat admin...')
    ep = keystone.service_catalog.url_for(service_type='orchestration',
                                          interface='publicURL')
    kwargs = {'endpoint': ep}
    if keystone.session:
        kwargs['session'] = keystone.session
    else:
        kwargs['token'] = keystone.auth_token
    return heat_client.Client(**kwargs)
Authenticates the admin user with heat .
56,531
def authenticate_nova_user(self, keystone, user, password, tenant):
    """Authenticate a regular user with nova-api.

    Prefers session auth; otherwise picks keyword names based on the
    installed novaclient version (the argument names changed in v7).
    """
    self.log.debug('Authenticating nova user ({})...'.format(user))
    ep = keystone.service_catalog.url_for(service_type='identity',
                                          interface='publicURL')
    if keystone.session:
        return nova_client.Client(NOVA_CLIENT_VERSION,
                                  session=keystone.session,
                                  auth_url=ep)
    elif novaclient.__version__[0] >= "7":
        # NOTE(review): only the first character of the version string is
        # compared, so e.g. novaclient 10.x ("1...") would wrongly take the
        # legacy branch — confirm whether that range can occur here.
        return nova_client.Client(NOVA_CLIENT_VERSION,
                                  username=user, password=password,
                                  project_name=tenant, auth_url=ep)
    else:
        # Pre-7 novaclient used api_key/project_id keyword names.
        return nova_client.Client(NOVA_CLIENT_VERSION,
                                  username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)
Authenticates a regular user with nova - api .
56,532
def authenticate_swift_user(self, keystone, user, password, tenant):
    """Authenticate a regular user with the swift API."""
    self.log.debug('Authenticating swift user ({})...'.format(user))
    ep = keystone.service_catalog.url_for(service_type='identity',
                                          interface='publicURL')
    if keystone.session:
        return swiftclient.Connection(session=keystone.session)
    return swiftclient.Connection(authurl=ep,
                                  user=user,
                                  key=password,
                                  tenant_name=tenant,
                                  auth_version='2.0')
Authenticates a regular user with swift api .
56,533
def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
    """Create the specified nova flavor if it does not already exist.

    :param nova: authenticated nova client.
    :param name: flavor name; an existing flavor of this name is kept as-is.
    """
    try:
        # EAFP: if the flavor is found there is nothing to do.
        nova.flavors.find(name=name)
    except (exceptions.NotFound, exceptions.NoUniqueMatch):
        self.log.debug('Creating flavor ({})'.format(name))
        nova.flavors.create(name, ram, vcpus, disk, flavorid,
                            ephemeral, swap, rxtx_factor, is_public)
Create the specified flavor .
56,534
def glance_create_image(self, glance, image_name, image_url, download_dir='tests', hypervisor_type=None, disk_format='qcow2', architecture='x86_64', container_format='bare'):
    """Download an image, upload it to glance, and validate its attributes.

    Waits for the image to become 'active', then verifies name, status,
    visibility, container format and disk format; any mismatch aborts the
    test run via amulet.

    :returns: the created glance image object.
    """
    self.log.debug('Creating glance image ({}) from '
                   '{}...'.format(image_name, image_url))
    # Respect the amulet proxy when downloading the source image.
    http_proxy = os.getenv('AMULET_HTTP_PROXY')
    self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
    if http_proxy:
        proxies = {'http': http_proxy}
        opener = urllib.FancyURLopener(proxies)
    else:
        opener = urllib.FancyURLopener()
    abs_file_name = os.path.join(download_dir, image_name)
    # Reuse a previously downloaded copy when present.
    if not os.path.exists(abs_file_name):
        opener.retrieve(image_url, abs_file_name)
    glance_properties = {'architecture': architecture, }
    if hypervisor_type:
        glance_properties['hypervisor_type'] = hypervisor_type
    if float(glance.version) < 2.0:
        # Glance v1: single create call with inline data.
        with open(abs_file_name) as f:
            image = glance.images.create(
                name=image_name,
                is_public=True,
                disk_format=disk_format,
                container_format=container_format,
                properties=glance_properties,
                data=f)
    else:
        # Glance v2: create, then upload data and set properties separately.
        image = glance.images.create(
            name=image_name,
            visibility="public",
            disk_format=disk_format,
            container_format=container_format)
        glance.images.upload(image.id, open(abs_file_name, 'rb'))
        glance.images.update(image.id, **glance_properties)
    img_id = image.id
    ret = self.resource_reaches_status(glance.images, img_id,
                                       expected_stat='active',
                                       msg='Image status wait')
    if not ret:
        msg = 'Glance image failed to reach expected state.'
        amulet.raise_status(amulet.FAIL, msg=msg)
    self.log.debug('Validating image attributes...')
    val_img_name = glance.images.get(img_id).name
    val_img_stat = glance.images.get(img_id).status
    val_img_cfmt = glance.images.get(img_id).container_format
    val_img_dfmt = glance.images.get(img_id).disk_format
    if float(glance.version) < 2.0:
        val_img_pub = glance.images.get(img_id).is_public
    else:
        val_img_pub = glance.images.get(img_id).visibility == "public"
    msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
                'container fmt:{} disk fmt:{}'.format(
                    val_img_name, val_img_pub, img_id,
                    val_img_stat, val_img_cfmt, val_img_dfmt))
    if val_img_name == image_name and val_img_stat == 'active' and val_img_pub is True and val_img_cfmt == container_format and val_img_dfmt == disk_format:
        self.log.debug(msg_attr)
    else:
        msg = ('Image validation failed, {}'.format(msg_attr))
        amulet.raise_status(amulet.FAIL, msg=msg)
    return image
Download an image, upload it to glance, validate its status, and return an image object pointer. Defaults suit KVM; they can be overridden for LXD.
56,535
def create_cirros_image(self, glance, image_name, hypervisor_type=None):
    """Download the latest cirros image and upload it to glance.

    Deprecated: use glance_create_image instead.

    :returns: the glance image object from glance_create_image.
    """
    self.log.warn('/!\\ DEPRECATION WARNING: use '
                  'glance_create_image instead of '
                  'create_cirros_image.')
    self.log.debug('Creating glance cirros image '
                   '({})...'.format(image_name))
    http_proxy = os.getenv('AMULET_HTTP_PROXY')
    self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
    if http_proxy:
        proxies = {'http': http_proxy}
        opener = urllib.FancyURLopener(proxies)
    else:
        opener = urllib.FancyURLopener()
    # Discover the latest released cirros version number.
    f = opener.open('http://download.cirros-cloud.net/version/released')
    # NOTE(review): assumes read() yields text; on py3 urlopen-style reads
    # return bytes — confirm which urllib shim is imported here.
    version = f.read().strip()
    cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
    cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
                                          version, cirros_img)
    f.close()
    return self.glance_create_image(glance, image_name, cirros_url,
                                    hypervisor_type=hypervisor_type)
Download the latest cirros image and upload it to glance validate and return a resource pointer .
56,536
def delete_image(self, glance, image):
    """Delete the specified glance image (deprecated: use delete_resource)."""
    self.log.warn('/!\\ DEPRECATION WARNING: use '
                  'delete_resource instead of delete_image.')
    self.log.debug('Deleting glance image ({})...'.format(image))
    return self.delete_resource(glance.images, image, msg='glance image')
Delete the specified image .
56,537
def create_instance(self, nova, image_name, instance_name, flavor):
    """Boot a nova instance and wait for it to reach ACTIVE.

    Polls every 3 seconds for up to 60 iterations (~3 minutes).

    :returns: the instance object, or None if it never went ACTIVE.
    """
    self.log.debug('Creating instance '
                   '({}|{}|{})'.format(instance_name, image_name, flavor))
    image = nova.glance.find_image(image_name)
    flavor = nova.flavors.find(name=flavor)
    instance = nova.servers.create(name=instance_name, image=image,
                                   flavor=flavor)
    count = 1
    status = instance.status
    while status != 'ACTIVE' and count < 60:
        time.sleep(3)
        instance = nova.servers.get(instance.id)
        status = instance.status
        self.log.debug('instance status: {}'.format(status))
        count += 1
    if status != 'ACTIVE':
        self.log.error('instance creation timed out')
        return None
    return instance
Create the specified instance .
56,538
def delete_instance(self, nova, instance):
    """Delete the specified instance (deprecated: use delete_resource)."""
    self.log.warn('/!\\ DEPRECATION WARNING: use '
                  'delete_resource instead of delete_instance.')
    self.log.debug('Deleting instance ({})...'.format(instance))
    return self.delete_resource(nova.servers, instance, msg='nova instance')
Delete the specified instance .
56,539
def create_or_get_keypair(self, nova, keypair_name="testkey"):
    """Return the named nova keypair, creating it when absent."""
    try:
        existing = nova.keypairs.get(keypair_name)
    except Exception:
        # Lookup failed: assume the keypair is missing and create it.
        self.log.debug('Keypair ({}) does not exist, '
                       'creating it.'.format(keypair_name))
        return nova.keypairs.create(name=keypair_name)
    self.log.debug('Keypair ({}) already exists, '
                   'using it.'.format(keypair_name))
    return existing
Create a new keypair or return pointer if it already exists .
56,540
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, img_id=None, src_vol_id=None, snap_id=None):
    """Create a cinder volume, wait for 'available', validate, return it.

    At most one of img_id / src_vol_id / snap_id may be given (none means a
    plain empty volume); any other combination aborts the test run.

    :returns: the created volume object.
    """
    if img_id and not src_vol_id and not snap_id:
        self.log.debug('Creating cinder volume from glance image...')
        bootable = 'true'
    elif src_vol_id and not img_id and not snap_id:
        self.log.debug('Cloning cinder volume...')
        # Clones inherit bootability from the source volume.
        bootable = cinder.volumes.get(src_vol_id).bootable
    elif snap_id and not src_vol_id and not img_id:
        self.log.debug('Creating cinder volume from snapshot...')
        snap = cinder.volume_snapshots.find(id=snap_id)
        # Size and bootability come from the snapshot's origin volume.
        vol_size = snap.size
        snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
        bootable = cinder.volumes.get(snap_vol_id).bootable
    elif not img_id and not src_vol_id and not snap_id:
        self.log.debug('Creating cinder volume...')
        bootable = 'false'
    else:
        msg = ('Invalid method use - name:{} size:{} img_id:{} '
               'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, img_id,
                                                 src_vol_id, snap_id))
        amulet.raise_status(amulet.FAIL, msg=msg)
    try:
        # Try the display_name keyword first; fall back to `name` for
        # cinderclient versions that reject it (TypeError).
        vol_new = cinder.volumes.create(display_name=vol_name,
                                        imageRef=img_id,
                                        size=vol_size,
                                        source_volid=src_vol_id,
                                        snapshot_id=snap_id)
        vol_id = vol_new.id
    except TypeError:
        vol_new = cinder.volumes.create(name=vol_name,
                                        imageRef=img_id,
                                        size=vol_size,
                                        source_volid=src_vol_id,
                                        snapshot_id=snap_id)
        vol_id = vol_new.id
    except Exception as e:
        msg = 'Failed to create volume: {}'.format(e)
        amulet.raise_status(amulet.FAIL, msg=msg)
    ret = self.resource_reaches_status(cinder.volumes, vol_id,
                                       expected_stat="available",
                                       msg="Volume status wait")
    if not ret:
        msg = 'Cinder volume failed to reach expected state.'
        amulet.raise_status(amulet.FAIL, msg=msg)
    self.log.debug('Validating volume attributes...')
    val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id))
    val_vol_boot = cinder.volumes.get(vol_id).bootable
    val_vol_stat = cinder.volumes.get(vol_id).status
    val_vol_size = cinder.volumes.get(vol_id).size
    msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
                '{} size:{}'.format(val_vol_name, vol_id, val_vol_stat,
                                    val_vol_boot, val_vol_size))
    if val_vol_boot == bootable and val_vol_stat == 'available' and val_vol_name == vol_name and val_vol_size == vol_size:
        self.log.debug(msg_attr)
    else:
        msg = ('Volume validation failed, {}'.format(msg_attr))
        amulet.raise_status(amulet.FAIL, msg=msg)
    return vol_new
Create cinder volume optionally from a glance image OR optionally as a clone of an existing volume OR optionally from a snapshot . Wait for the new volume status to reach the expected status validate and return a resource pointer .
56,541
def delete_resource(self, resource, resource_id, msg="resource", max_wait=120):
    """Delete one openstack resource and confirm deletion within max_wait.

    Deletion is confirmed by watching the resource list shrink by exactly
    one entry, polling every 4 seconds.

    :returns: True on confirmed delete, False on timeout.
    """
    self.log.debug('Deleting OpenStack resource '
                   '{} ({})'.format(resource_id, msg))
    num_before = len(list(resource.list()))
    resource.delete(resource_id)
    tries = 0
    num_after = len(list(resource.list()))
    while num_after != (num_before - 1) and tries < (max_wait / 4):
        self.log.debug('{} delete check: '
                       '{} [{}:{}] {}'.format(msg, tries,
                                              num_before,
                                              num_after,
                                              resource_id))
        time.sleep(4)
        num_after = len(list(resource.list()))
        tries += 1
    self.log.debug('{}: expected, actual count = {}, '
                   '{}'.format(msg, num_before - 1, num_after))
    if num_after == (num_before - 1):
        return True
    else:
        self.log.error('{} delete timed out'.format(msg))
        return False
Delete one openstack resource, such as an instance, keypair, image, volume, stack, etc., and confirm deletion within the max wait time.
56,542
def resource_reaches_status(self, resource, resource_id, expected_stat='available', msg='resource', max_wait=120):
    """Poll an openstack resource until it reaches the expected status.

    Checks every 4 seconds for up to max_wait seconds.

    :returns: True when the status was reached, False on timeout.
    """
    attempts = 0
    current_stat = resource.get(resource_id).status
    while current_stat != expected_stat and attempts < (max_wait / 4):
        self.log.debug('{} status check: '
                       '{} [{}:{}] {}'.format(msg, attempts,
                                              current_stat,
                                              expected_stat,
                                              resource_id))
        time.sleep(4)
        current_stat = resource.get(resource_id).status
        attempts += 1
    self.log.debug('{}: expected, actual status = {}, '
                   '{}'.format(msg, current_stat, expected_stat))
    if current_stat == expected_stat:
        return True
    self.log.debug('{} never reached expected status: '
                   '{}'.format(resource_id, expected_stat))
    return False
Wait for an openstack resources status to reach an expected status within a specified time . Useful to confirm that nova instances cinder vols snapshots glance images heat stacks and other resources eventually reach the expected status .
56,543
def get_ceph_pools(self, sentry_unit):
    """Return a dict of ceph pools {pool_name: pool_id} from one ceph unit.

    Parses `ceph osd lspools` output; entries of the form "<id> <name>"
    separated by commas and/or newlines are accepted. Aborts the test run
    on a non-zero exit code.
    """
    pools = {}
    cmd = 'sudo ceph osd lspools'
    output, code = sentry_unit.run(cmd)
    if code != 0:
        msg = ('{} `{}` returned {} '
               '{}'.format(sentry_unit.info['unit_name'],
                           cmd, code, output))
        amulet.raise_status(amulet.FAIL, msg=msg)
    # Normalise newline-separated output into the comma-separated form.
    output = output.replace("\n", ",")
    for pool in str(output).split(','):
        pool_id_name = pool.split(' ')
        if len(pool_id_name) == 2:
            pool_id = pool_id_name[0]
            pool_name = pool_id_name[1]
            pools[pool_name] = int(pool_id)
    self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
                                            pools))
    return pools
Return a dict of ceph pools from a single ceph unit with pool name as keys pool id as vals .
56,544
# Run 'sudo ceph df --format=json' on the unit and return the parsed dict; aborts the amulet run on command failure.
def get_ceph_df ( self , sentry_unit ) : cmd = 'sudo ceph df --format=json' output , code = sentry_unit . run ( cmd ) if code != 0 : msg = ( '{} `{}` returned {} ' '{}' . format ( sentry_unit . info [ 'unit_name' ] , cmd , code , output ) ) amulet . raise_status ( amulet . FAIL , msg = msg ) return json . loads ( output )
Return dict of ceph df json output including ceph pool state .
56,545
# Return (pool_name, object_count, kb_used) for the given pool id from 'ceph df'. NOTE(review): implicitly returns None when the pool id is absent — callers should check.
def get_ceph_pool_sample ( self , sentry_unit , pool_id = 0 ) : df = self . get_ceph_df ( sentry_unit ) for pool in df [ 'pools' ] : if pool [ 'id' ] == pool_id : pool_name = pool [ 'name' ] obj_count = pool [ 'stats' ] [ 'objects' ] kb_used = pool [ 'stats' ] [ 'kb_used' ] self . log . debug ( 'Ceph {} pool (ID {}): {} objects, ' '{} kb used' . format ( pool_name , pool_id , obj_count , kb_used ) ) return pool_name , obj_count , kb_used
Take a sample of attributes of a ceph pool returning ceph pool name object count and disk space used for the specified pool ID number .
56,546
def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
    """Validate a [before, after-create, after-delete] triple of pool samples.

    The second sample must be strictly greater than the first, and the
    third strictly less than the second.

    :param samples: 3-element list of numeric pool samples
    :param sample_type: label used in log/error messages
    :returns: error string when the pattern does not hold, else None
    """
    original, created, deleted = range(3)
    grew = samples[created] > samples[original]
    shrank = samples[deleted] < samples[created]
    if grew and shrank:
        self.log.debug('Ceph {} samples (OK): '
                       '{}'.format(sample_type, samples))
        return None
    return ('Ceph {} samples ({}) '
            'unexpected.'.format(sample_type, samples))
Validate ceph pool samples taken over time such as pool object counts or pool kb used before adding after adding and after deleting items which affect those pool attributes . The 2nd element is expected to be greater than the 1st ; 3rd is expected to be less than the 2nd .
56,547
# Optionally sleep init_sleep seconds, then block until every rabbitmq-server unit reports extended status 'Unit is ready and clustered'.
def rmq_wait_for_cluster ( self , deployment , init_sleep = 15 , timeout = 1200 ) : if init_sleep : time . sleep ( init_sleep ) message = re . compile ( '^Unit is ready and clustered$' ) deployment . _auto_wait_for_status ( message = message , timeout = timeout , include_only = [ 'rabbitmq-server' ] )
Wait for rmq units extended status to show cluster readiness after an optional initial sleep period . Initial sleep is likely necessary to be effective following a config change as status message may not instantly update to non - ready .
56,548
# Run 'rabbitmqctl cluster_status' on the unit and return its full output as a string.
def get_rmq_cluster_status ( self , sentry_unit ) : cmd = 'rabbitmqctl cluster_status' output , _ = self . run_cmd_unit ( sentry_unit , cmd ) self . log . debug ( '{} cluster_status:\n{}' . format ( sentry_unit . info [ 'unit_name' ] , output ) ) return str ( output )
Execute rabbitmq cluster status command on a unit and return the full output .
56,549
def get_rmq_cluster_running_nodes(self, sentry_unit):
    """Return the running rabbitmq cluster nodes for a unit.

    Parses the ``rabbitmqctl cluster_status`` output of the given unit.

    :param sentry_unit: sentry unit to query
    :returns: list of node names, or an empty list when none are reported
    """
    status_text = self.get_rmq_cluster_status(sentry_unit)
    if 'running_nodes' not in status_text:
        return []
    # Slice out the Erlang term "{running_nodes,[...]}" and coerce the
    # single-quoted node list into JSON for parsing.
    start = status_text.find("{running_nodes,") + 15
    end = status_text.find("]},", start) + 1
    node_list_json = status_text[start:end].replace("'", '"')
    return json.loads(node_list_json)
Parse the rabbitmqctl cluster_status output string and return the list of running rabbitmq cluster nodes.
56,550
# Verify every unit's hostname appears as a running 'rabbit@<host>' node in every unit's cluster_status; returns joined error text, or implicitly None when all are present.
def validate_rmq_cluster_running_nodes ( self , sentry_units ) : host_names = self . get_unit_hostnames ( sentry_units ) errors = [ ] for query_unit in sentry_units : query_unit_name = query_unit . info [ 'unit_name' ] running_nodes = self . get_rmq_cluster_running_nodes ( query_unit ) for validate_unit in sentry_units : val_host_name = host_names [ validate_unit . info [ 'unit_name' ] ] val_node_name = 'rabbit@{}' . format ( val_host_name ) if val_node_name not in running_nodes : errors . append ( 'Cluster member check failed on {}: {} not ' 'in {}\n' . format ( query_unit_name , val_node_name , running_nodes ) ) if errors : return '' . join ( errors )
Check that all rmq unit hostnames are represented in the cluster_status output of all units .
56,551
# Inspect /etc/rabbitmq/rabbitmq.config on the unit for 'ssl' (and, when given, the port) and report whether ssl is enabled; aborts the run on an inconsistent combination.
def rmq_ssl_is_enabled_on_unit ( self , sentry_unit , port = None ) : host = sentry_unit . info [ 'public-address' ] unit_name = sentry_unit . info [ 'unit_name' ] conf_file = '/etc/rabbitmq/rabbitmq.config' conf_contents = str ( self . file_contents_safe ( sentry_unit , conf_file , max_wait = 16 ) ) conf_ssl = 'ssl' in conf_contents conf_port = str ( port ) in conf_contents if port and conf_port and conf_ssl : self . log . debug ( 'SSL is enabled @{}:{} ' '({})' . format ( host , port , unit_name ) ) return True elif port and not conf_port and conf_ssl : self . log . debug ( 'SSL is enabled @{} but not on port {} ' '({})' . format ( host , port , unit_name ) ) return False elif not port and conf_ssl : self . log . debug ( 'SSL is enabled @{}:{} ' '({})' . format ( host , port , unit_name ) ) return True elif not conf_ssl : self . log . debug ( 'SSL not enabled @{}:{} ' '({})' . format ( host , port , unit_name ) ) return False else : msg = ( 'Unknown condition when checking SSL status @{}:{} ' '({})' . format ( host , port , unit_name ) ) amulet . raise_status ( amulet . FAIL , msg )
Check a single juju rmq unit for ssl and port in the config file .
56,552
def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
    """Confirm ssl is enabled on every given rmq sentry unit.

    :param sentry_units: list of sentry units to check
    :param port: optional ssl port to check for
    :returns: error string naming the first unit with ssl disabled,
        or None when all units have ssl enabled
    """
    for unit in sentry_units:
        if self.rmq_ssl_is_enabled_on_unit(unit, port=port):
            continue
        return ('Unexpected condition: ssl is disabled on unit '
                '({})'.format(unit.info['unit_name']))
    return None
Check that ssl is enabled on rmq juju sentry units .
56,553
def validate_rmq_ssl_disabled_units(self, sentry_units):
    """Confirm ssl is disabled on every given rmq sentry unit.

    :param sentry_units: list of sentry units to check
    :returns: error string naming the first unit with ssl enabled,
        or None when all units have ssl disabled
    """
    for unit in sentry_units:
        if not self.rmq_ssl_is_enabled_on_unit(unit):
            continue
        return ('Unexpected condition: ssl is enabled on unit '
                '({})'.format(unit.info['unit_name']))
    return None
Check that ssl is enabled on listed rmq juju sentry units .
56,554
# Set the rabbitmq-server charm 'ssl' option on (plus optional ssl_port), wait for the cluster, then poll until ssl is enabled on every unit or fail the run.
def configure_rmq_ssl_on ( self , sentry_units , deployment , port = None , max_wait = 60 ) : self . log . debug ( 'Setting ssl charm config option: on' ) config = { 'ssl' : 'on' } if port : config [ 'ssl_port' ] = port deployment . d . configure ( 'rabbitmq-server' , config ) self . rmq_wait_for_cluster ( deployment ) tries = 0 ret = self . validate_rmq_ssl_enabled_units ( sentry_units , port = port ) while ret and tries < ( max_wait / 4 ) : time . sleep ( 4 ) self . log . debug ( 'Attempt {}: {}' . format ( tries , ret ) ) ret = self . validate_rmq_ssl_enabled_units ( sentry_units , port = port ) tries += 1 if ret : amulet . raise_status ( amulet . FAIL , ret )
Turn ssl charm config option on with optional non - default ssl port specification . Confirm that it is enabled on every unit .
56,555
# Set the rabbitmq-server charm 'ssl' option off, wait for the cluster, then poll until ssl is disabled on every unit or fail the run.
def configure_rmq_ssl_off ( self , sentry_units , deployment , max_wait = 60 ) : self . log . debug ( 'Setting ssl charm config option: off' ) config = { 'ssl' : 'off' } deployment . d . configure ( 'rabbitmq-server' , config ) self . rmq_wait_for_cluster ( deployment ) tries = 0 ret = self . validate_rmq_ssl_disabled_units ( sentry_units ) while ret and tries < ( max_wait / 4 ) : time . sleep ( 4 ) self . log . debug ( 'Attempt {}: {}' . format ( tries , ret ) ) ret = self . validate_rmq_ssl_disabled_units ( sentry_units ) tries += 1 if ret : amulet . raise_status ( amulet . FAIL , ret )
Turn ssl charm config option off confirm that it is disabled on every unit .
56,556
# Open and return a pika BlockingConnection to the unit's amqp service (port defaults: 5671 with ssl, 5672 without); on failure either abort the run (fatal=True) or warn and return None.
def connect_amqp_by_unit ( self , sentry_unit , ssl = False , port = None , fatal = True , username = "testuser1" , password = "changeme" ) : host = sentry_unit . info [ 'public-address' ] unit_name = sentry_unit . info [ 'unit_name' ] if ssl and not port : port = 5671 elif not ssl and not port : port = 5672 self . log . debug ( 'Connecting to amqp on {}:{} ({}) as ' '{}...' . format ( host , port , unit_name , username ) ) try : credentials = pika . PlainCredentials ( username , password ) parameters = pika . ConnectionParameters ( host = host , port = port , credentials = credentials , ssl = ssl , connection_attempts = 3 , retry_delay = 5 , socket_timeout = 1 ) connection = pika . BlockingConnection ( parameters ) assert connection . is_open is True assert connection . is_closing is False self . log . debug ( 'Connect OK' ) return connection except Exception as e : msg = ( 'amqp connection failed to {}:{} as ' '{} ({})' . format ( host , port , username , str ( e ) ) ) if fatal : amulet . raise_status ( amulet . FAIL , msg ) else : self . log . warn ( msg ) return None
Establish and return a pika amqp connection to the rabbitmq service running on a rmq juju unit .
56,557
# Publish one message to the named (durable, non-auto-delete) queue on a rmq unit, then close the channel and connection.
def publish_amqp_message_by_unit ( self , sentry_unit , message , queue = "test" , ssl = False , username = "testuser1" , password = "changeme" , port = None ) : self . log . debug ( 'Publishing message to {} queue:\n{}' . format ( queue , message ) ) connection = self . connect_amqp_by_unit ( sentry_unit , ssl = ssl , port = port , username = username , password = password ) self . log . debug ( 'Defining channel...' ) channel = connection . channel ( ) self . log . debug ( 'Declaring queue...' ) channel . queue_declare ( queue = queue , auto_delete = False , durable = True ) self . log . debug ( 'Publishing message...' ) channel . basic_publish ( exchange = '' , routing_key = queue , body = message ) self . log . debug ( 'Closing channel...' ) channel . close ( ) self . log . debug ( 'Closing connection...' ) connection . close ( )
Publish an amqp message to a rmq juju unit .
56,558
def get_amqp_message_by_unit(self, sentry_unit, queue="test",
                             username="testuser1",
                             password="changeme",
                             ssl=False, port=None):
    """Get a single amqp message from a rmq juju unit.

    :param sentry_unit: sentry unit pointer
    :param queue: message queue name, defaults to "test"
    :param username: amqp user name
    :param password: amqp user password
    :param ssl: use an ssl connection when True
    :param port: amqp port; defaults depend on ssl when None
    :returns: message body as a string; aborts the run when the queue
        is empty.
    """
    connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
                                           port=port,
                                           username=username,
                                           password=password)
    channel = connection.channel()
    method_frame, _, body = channel.basic_get(queue)

    if method_frame:
        self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
                                                                     body))
        channel.basic_ack(method_frame.delivery_tag)
        channel.close()
        connection.close()
        return body
    else:
        # Bugfix: previously the channel and connection were leaked when
        # the queue was empty; close them before failing the run.
        channel.close()
        connection.close()
        msg = 'No message retrieved.'
        amulet.raise_status(amulet.FAIL, msg)
Get an amqp message from a rmq juju unit .
56,559
# Verify memcached is running, the service config section holds the expected memcached_servers entry (default inet6:[::1]:11211), and /etc/memcached.conf listens on the release-appropriate address/port; checks are skipped for OpenStack releases before mitaka.
def validate_memcache ( self , conf , os_release , earliest_release = 5 , section = 'keystone_authtoken' , check_kvs = None ) : if os_release < earliest_release : self . log . debug ( 'Skipping memcache checks for deployment. {} <' 'mitaka' . format ( os_release ) ) return _kvs = check_kvs or { 'memcached_servers' : 'inet6:[::1]:11211' } self . log . debug ( 'Checking memcached is running' ) ret = self . validate_services_by_name ( { sentry_unit : [ 'memcached' ] } ) if ret : amulet . raise_status ( amulet . FAIL , msg = 'Memcache running check' 'failed {}' . format ( ret ) ) else : self . log . debug ( 'OK' ) self . log . debug ( 'Checking memcache url is configured in {}' . format ( conf ) ) if self . validate_config_data ( sentry_unit , conf , section , _kvs ) : message = "Memcache config error in: {}" . format ( conf ) amulet . raise_status ( amulet . FAIL , msg = message ) else : self . log . debug ( 'OK' ) self . log . debug ( 'Checking memcache configuration in ' '/etc/memcached.conf' ) contents = self . file_contents_safe ( sentry_unit , '/etc/memcached.conf' , fatal = True ) ubuntu_release , _ = self . run_cmd_unit ( sentry_unit , 'lsb_release -cs' ) if CompareHostReleases ( ubuntu_release ) <= 'trusty' : memcache_listen_addr = 'ip6-localhost' else : memcache_listen_addr = '::1' expected = { '-p' : '11211' , '-l' : memcache_listen_addr } found = [ ] for key , value in expected . items ( ) : for line in contents . split ( '\n' ) : if line . startswith ( key ) : self . log . debug ( 'Checking {} is set to {}' . format ( key , value ) ) assert value == line . split ( ) [ - 1 ] self . log . debug ( line . split ( ) [ - 1 ] ) found . append ( key ) if sorted ( found ) == sorted ( expected . keys ( ) ) : self . log . debug ( 'OK' ) else : message = "Memcache config error in: /etc/memcached.conf" amulet . raise_status ( amulet . FAIL , msg = message )
Check Memcache is running and is configured to be used
56,560
# Request (and possibly immediately obtain) the named lock for this unit, non-blocking; the leader may grant it in-hook. Returns True once granted. NOTE(review): 'self.requests.setdefault(lock, {})' keys the requests map by lock name rather than unit — looks like it should be setdefault(unit, {}); verify against the peer-data schema before changing.
def acquire ( self , lock ) : unit = hookenv . local_unit ( ) ts = self . requests [ unit ] . get ( lock ) if not ts : self . requests . setdefault ( lock , { } ) self . requests [ unit ] [ lock ] = _timestamp ( ) self . msg ( 'Requested {}' . format ( lock ) ) if self . granted ( lock ) : self . msg ( 'Acquired {}' . format ( lock ) ) return True if hookenv . is_leader ( ) : return self . grant ( lock , unit ) return False
Acquire the named lock non - blocking .
56,561
# True when the leader's grant for this unit's lock matches the timestamp of our outstanding request.
def granted ( self , lock ) : unit = hookenv . local_unit ( ) ts = self . requests [ unit ] . get ( lock ) if ts and self . grants . get ( unit , { } ) . get ( lock ) == ts : return True return False
Return True if a previously requested lock has been granted
56,562
# Return our outstanding request timestamp for the lock as a datetime, or implicitly None when no request is pending.
def request_timestamp ( self , lock ) : ts = self . requests [ hookenv . local_unit ( ) ] . get ( lock , None ) if ts is not None : return datetime . strptime ( ts , _timestamp_format )
Return the timestamp of our outstanding request for lock or None .
56,563
# Leader-only: grant the lock to the unit when it is already granted or is next in the timestamp-ordered request queue, delegating the decision to an optional grant_<lock> policy hook (default_grant otherwise).
def grant ( self , lock , unit ) : if not hookenv . is_leader ( ) : return False granted = set ( ) for u in self . grants : if lock in self . grants [ u ] : granted . add ( u ) if unit in granted : return True reqs = set ( ) for u in self . requests : if u in granted : continue for _lock , ts in self . requests [ u ] . items ( ) : if _lock == lock : reqs . add ( ( ts , u ) ) queue = [ t [ 1 ] for t in sorted ( reqs ) ] if unit not in queue : return False grant_func = getattr ( self , 'grant_{}' . format ( lock ) , self . default_grant ) if grant_func ( lock , unit , granted , queue ) : self . msg ( 'Leader grants {} to {}' . format ( lock , unit ) ) self . grants . setdefault ( unit , { } ) [ lock ] = self . requests [ unit ] [ lock ] return True return False
Maybe grant the lock to a unit .
56,564
# Leader-side hook: log that a unit released a lock and how long it was held.
def released ( self , unit , lock , timestamp ) : interval = _utcnow ( ) - timestamp self . msg ( 'Leader released {} from {}, held {}' . format ( lock , unit , interval ) )
Called on the leader when it has released a lock .
56,565
# Decorator: run the wrapped function only when the lock is already granted, or when guard_func(*guard_args, **guard_kw) passes and the lock can be acquired now; otherwise return None.
def require ( self , lock , guard_func , * guard_args , ** guard_kw ) : def decorator ( f ) : @ wraps ( f ) def wrapper ( * args , ** kw ) : if self . granted ( lock ) : self . msg ( 'Granted {}' . format ( lock ) ) return f ( * args , ** kw ) if guard_func ( * guard_args , ** guard_kw ) and self . acquire ( lock ) : return f ( * args , ** kw ) return None return wrapper return decorator
Decorate a function to be run only when a lock is acquired .
56,566
# Log a namespaced coordinator message at INFO level; override to customize log spam.
def msg ( self , msg ) : hookenv . log ( 'coordinator.{} {}' . format ( self . _name ( ) , msg ) , level = hookenv . INFO )
Emit a message . Override to customize log spam .
56,567
# Decorator factory: emit a one-time deprecation warning (optionally with a removal date) via the supplied log callable or print, keyed per function source location in the module-global __deprecated_functions, then call through.
def deprecate ( warning , date = None , log = None ) : def wrap ( f ) : @ functools . wraps ( f ) def wrapped_f ( * args , ** kwargs ) : try : module = inspect . getmodule ( f ) file = inspect . getsourcefile ( f ) lines = inspect . getsourcelines ( f ) f_name = "{}-{}-{}..{}-{}" . format ( module . __name__ , file , lines [ 0 ] , lines [ - 1 ] , f . __name__ ) except ( IOError , TypeError ) : f_name = f . __name__ if f_name not in __deprecated_functions : __deprecated_functions [ f_name ] = True s = "DEPRECATION WARNING: Function {} is being removed" . format ( f . __name__ ) if date : s = "{} on/around {}" . format ( s , date ) if warning : s = "{} : {}" . format ( s , warning ) if log : log ( s ) else : print ( s ) return f ( * args , ** kwargs ) return wrapped_f return wrap
Add a deprecation warning the first time the function is used. The date, a string in semi-ISO8601 format, indicates the year-month when the function is officially going to be removed.
56,568
# Download source to dest, honoring user:password embedded in http(s) URLs via basic auth; removes a partial dest file on write failure. NOTE(review): the urlopen response is never closed — consider contextlib.closing.
def download ( self , source , dest ) : proto , netloc , path , params , query , fragment = urlparse ( source ) if proto in ( 'http' , 'https' ) : auth , barehost = splituser ( netloc ) if auth is not None : source = urlunparse ( ( proto , barehost , path , params , query , fragment ) ) username , password = splitpasswd ( auth ) passman = HTTPPasswordMgrWithDefaultRealm ( ) passman . add_password ( None , source , username , password ) authhandler = HTTPBasicAuthHandler ( passman ) opener = build_opener ( authhandler ) install_opener ( opener ) response = urlopen ( source ) try : with open ( dest , 'wb' ) as dest_file : dest_file . write ( response . read ( ) ) except Exception as e : if os . path . isfile ( dest ) : os . unlink ( dest ) raise e
Download an archive file .
56,569
# Download an archive into $CHARM_DIR/fetched, verify any hash named in the URL fragment and/or the explicit checksum argument, then extract it to dest.
def install ( self , source , dest = None , checksum = None , hash_type = 'sha1' ) : url_parts = self . parse_url ( source ) dest_dir = os . path . join ( os . environ . get ( 'CHARM_DIR' ) , 'fetched' ) if not os . path . exists ( dest_dir ) : mkdir ( dest_dir , perms = 0o755 ) dld_file = os . path . join ( dest_dir , os . path . basename ( url_parts . path ) ) try : self . download ( source , dld_file ) except URLError as e : raise UnhandledSource ( e . reason ) except OSError as e : raise UnhandledSource ( e . strerror ) options = parse_qs ( url_parts . fragment ) for key , value in options . items ( ) : if not six . PY3 : algorithms = hashlib . algorithms else : algorithms = hashlib . algorithms_available if key in algorithms : if len ( value ) != 1 : raise TypeError ( "Expected 1 hash value, not %d" % len ( value ) ) expected = value [ 0 ] check_hash ( dld_file , expected , key ) if checksum : check_hash ( dld_file , checksum , hash_type ) return extract ( dld_file , dest )
Download and install an archive file with optional checksum validation .
56,570
# Open the debug port and start a remote rpdb session bound to addr:port in the caller's frame, registering port cleanup at exit; failures are logged rather than raised.
def set_trace ( addr = DEFAULT_ADDR , port = DEFAULT_PORT ) : atexit . register ( close_port , port ) try : log ( "Starting a remote python debugger session on %s:%s" % ( addr , port ) ) open_port ( port ) debugger = Rpdb ( addr = addr , port = port ) debugger . set_trace ( sys . _getframe ( ) . f_back ) except Exception : _error ( "Cannot start a remote debug session on %s:%s" % ( addr , port ) )
Set a trace point using the remote debugger
56,571
# Parse 'ibstat <device> -s' output into a DeviceInfo via regexes (CA type, port count, fw/hw versions, GUIDs). NOTE(review): on Python 3 check_output yields bytes, so re.search with these str patterns would raise TypeError — verify this runs under Python 2 or with decoded output.
def device_info ( device ) : status = subprocess . check_output ( [ 'ibstat' , device , '-s' ] ) . splitlines ( ) regexes = { "CA type: (.*)" : "device_type" , "Number of ports: (.*)" : "num_ports" , "Firmware version: (.*)" : "fw_ver" , "Hardware version: (.*)" : "hw_ver" , "Node GUID: (.*)" : "node_guid" , "System image GUID: (.*)" : "sys_guid" , } device = DeviceInfo ( ) for line in status : for expression , key in regexes . items ( ) : matches = re . search ( expression , line ) if matches : setattr ( device , key , matches . group ( 1 ) ) return device
Returns a DeviceInfo object with the current device settings
56,572
# Return the network interfaces whose ethtool driver is in IPOIB_DRIVERS; interfaces failing the ethtool query are skipped with an INFO log. NOTE(review): same Python 3 bytes-vs-str regex concern as device_info.
def ipoib_interfaces ( ) : interfaces = [ ] for interface in network_interfaces ( ) : try : driver = re . search ( '^driver: (.+)$' , subprocess . check_output ( [ 'ethtool' , '-i' , interface ] ) , re . M ) . group ( 1 ) if driver in IPOIB_DRIVERS : interfaces . append ( interface ) except Exception : log ( "Skipping interface %s" % interface , level = INFO ) continue return interfaces
Return a list of IPOIB capable ethernet interfaces
56,573
# Build the OS hardening audit list for /etc/login.defs (rendered root-owned, mode 0444).
def get_audits ( ) : audits = [ TemplatedFile ( '/etc/login.defs' , LoginContext ( ) , template_dir = TEMPLATES_DIR , user = 'root' , group = 'root' , mode = 0o0444 ) ] return audits
Get OS hardening login . defs audits .
56,574
def _get_defaults(modules):
    """Load the default config for the provided modules.

    :param modules: stack modules config defaults to lookup.
    :returns: modules default config dictionary.
    """
    default = os.path.join(os.path.dirname(__file__),
                           'defaults/%s.yaml' % (modules))
    # Bugfix: close the file handle instead of leaking it.
    with open(default) as fd:
        return yaml.safe_load(fd)
Load the default config for the provided modules .
56,575
def _get_schema(modules):
    """Load the config schema for the provided modules.

    :param modules: stack modules config schema to lookup.
    :returns: modules config schema dictionary.
    """
    schema = os.path.join(os.path.dirname(__file__),
                          'defaults/%s.yaml.schema' % (modules))
    # Bugfix: close the file handle instead of leaking it.
    with open(schema) as fd:
        return yaml.safe_load(fd)
Load the config schema for the provided modules .
56,576
def _get_user_provided_overrides(modules):
    """Load user-provided config overrides from the charm root dir.

    :param modules: stack modules to lookup in the overrides file.
    :returns: overrides dictionary (empty when none apply).
    """
    overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
                             'hardening.yaml')
    if os.path.exists(overrides):
        log("Found user-provided config overrides file '%s'"
            % (overrides), level=DEBUG)
        # Bugfix: close the file handle instead of leaking it.
        with open(overrides) as fd:
            settings = yaml.safe_load(fd)
        if settings and settings.get(modules):
            log("Applying '%s' overrides" % (modules), level=DEBUG)
            return settings.get(modules)

        log("No overrides found for '%s'" % (modules), level=DEBUG)
    else:
        log("No hardening config overrides file '%s' found in charm "
            "root dir" % (overrides), level=DEBUG)

    return {}
Load user - provided config overrides .
56,577
def _apply_overrides(settings, overrides, schema):
    """Get overrides config overlayed onto modules defaults.

    Recurses into nested dict schema entries; unknown override keys are
    logged and ignored.

    :param settings: modules default config.
    :param overrides: user-provided config overrides.
    :param schema: config schema describing valid keys and nesting.
    :returns: dictionary of modules config with user overrides applied.
    :raises Exception: when a schema entry has an unexpected type.
    """
    if overrides:
        for k, v in six.iteritems(overrides):
            if k in schema:
                if schema[k] is None:
                    settings[k] = v
                elif type(schema[k]) is dict:
                    settings[k] = _apply_overrides(settings[k], overrides[k],
                                                   schema[k])
                else:
                    # Bugfix: Exception() accepts no 'level' keyword, so the
                    # original raise produced a TypeError instead of the
                    # intended error.  Log at ERROR and raise cleanly.
                    log("Unexpected type found in schema '%s'" %
                        type(schema[k]), level=ERROR)
                    raise Exception("Unexpected type found in schema '%s'" %
                                    type(schema[k]))
            else:
                log("Unknown override key '%s' - ignoring" % (k), level=INFO)

    return settings
Get overrides config overlayed onto modules defaults .
56,578
# chown/chmod path to user/group/permissions and recurse into directory contents up to maxdepth (-1 = unlimited, 0 = stop); missing paths are logged and skipped.
def ensure_permissions ( path , user , group , permissions , maxdepth = - 1 ) : if not os . path . exists ( path ) : log ( "File '%s' does not exist - cannot set permissions" % ( path ) , level = WARNING ) return _user = pwd . getpwnam ( user ) os . chown ( path , _user . pw_uid , grp . getgrnam ( group ) . gr_gid ) os . chmod ( path , permissions ) if maxdepth == 0 : log ( "Max recursion depth reached - skipping further recursion" , level = DEBUG ) return elif maxdepth > 0 : maxdepth -= 1 if os . path . isdir ( path ) : contents = glob . glob ( "%s/*" % ( path ) ) for c in contents : ensure_permissions ( c , user = user , group = group , permissions = permissions , maxdepth = maxdepth )
Ensure permissions for path .
56,579
# Write the given dict (or YAML string) of key=value pairs to sysctl_file and apply them with 'sysctl -p' (adding -e when ignore=True so unknown keys do not fail); YAML parse errors are logged and abort the write.
def create ( sysctl_dict , sysctl_file , ignore = False ) : if type ( sysctl_dict ) is not dict : try : sysctl_dict_parsed = yaml . safe_load ( sysctl_dict ) except yaml . YAMLError : log ( "Error parsing YAML sysctl_dict: {}" . format ( sysctl_dict ) , level = ERROR ) return else : sysctl_dict_parsed = sysctl_dict with open ( sysctl_file , "w" ) as fd : for key , value in sysctl_dict_parsed . items ( ) : fd . write ( "{}={}\n" . format ( key , value ) ) log ( "Updating sysctl_file: {} values: {}" . format ( sysctl_file , sysctl_dict_parsed ) , level = DEBUG ) call = [ "sysctl" , "-p" , sysctl_file ] if ignore : call . append ( "-e" ) check_call ( call )
Creates a sysctl . conf file from a YAML associative array
56,580
# Build '<scheme>://<address>' for the endpoint type, bracketing IPv6 addresses.
def canonical_url ( configs , endpoint_type = PUBLIC ) : scheme = _get_scheme ( configs ) address = resolve_address ( endpoint_type ) if is_ipv6 ( address ) : address = "[{}]" . format ( address ) return '%s://%s' % ( scheme , address )
Returns the correct HTTP URL to this host given the state of HTTPS configuration hacluster and charm configuration .
56,581
# Return the user-configured address override for the endpoint type (with {service_name} interpolated), or None when unset.
def _get_address_override ( endpoint_type = PUBLIC ) : override_key = ADDRESS_MAP [ endpoint_type ] [ 'override' ] addr_override = config ( override_key ) if not addr_override : return None else : return addr_override . format ( service_name = service_name ( ) )
Returns any address overrides that the user has defined based on the endpoint type .
56,582
# Resolve the unit address for an endpoint type: explicit override first, then a VIP matching the configured/bound network when clustered, otherwise a network-config / binding / unit-get fallback address; raises ValueError when nothing resolves.
def resolve_address ( endpoint_type = PUBLIC , override = True ) : resolved_address = None if override : resolved_address = _get_address_override ( endpoint_type ) if resolved_address : return resolved_address vips = config ( 'vip' ) if vips : vips = vips . split ( ) net_type = ADDRESS_MAP [ endpoint_type ] [ 'config' ] net_addr = config ( net_type ) net_fallback = ADDRESS_MAP [ endpoint_type ] [ 'fallback' ] binding = ADDRESS_MAP [ endpoint_type ] [ 'binding' ] clustered = is_clustered ( ) if clustered and vips : if net_addr : for vip in vips : if is_address_in_network ( net_addr , vip ) : resolved_address = vip break else : try : bound_cidr = resolve_network_cidr ( network_get_primary_address ( binding ) ) for vip in vips : if is_address_in_network ( bound_cidr , vip ) : resolved_address = vip break except ( NotImplementedError , NoNetworkBinding ) : resolved_address = vips [ 0 ] else : if config ( 'prefer-ipv6' ) : fallback_addr = get_ipv6_addr ( exc_list = vips ) [ 0 ] else : fallback_addr = unit_get ( net_fallback ) if net_addr : resolved_address = get_address_in_network ( net_addr , fallback_addr ) else : try : resolved_address = network_get_primary_address ( binding ) except ( NotImplementedError , NoNetworkBinding ) : resolved_address = fallback_addr if resolved_address is None : raise ValueError ( "Unable to resolve a suitable IP address based on " "charm state and configuration. (net_type=%s, " "clustered=%s)" % ( net_type , clustered ) ) return resolved_address
Return unit address depending on net config .
56,583
# Configure hugepage support: create the group, add the user to it, write vm.* sysctls (optionally raising kernel.shmmax), replace any fstab hugetlbfs entry for mnt_point, and optionally mount it immediately.
def hugepage_support ( user , group = 'hugetlb' , nr_hugepages = 256 , max_map_count = 65536 , mnt_point = '/run/hugepages/kvm' , pagesize = '2MB' , mount = True , set_shmmax = False ) : group_info = add_group ( group ) gid = group_info . gr_gid add_user_to_group ( user , group ) if max_map_count < 2 * nr_hugepages : max_map_count = 2 * nr_hugepages sysctl_settings = { 'vm.nr_hugepages' : nr_hugepages , 'vm.max_map_count' : max_map_count , 'vm.hugetlb_shm_group' : gid , } if set_shmmax : shmmax_current = int ( check_output ( [ 'sysctl' , '-n' , 'kernel.shmmax' ] ) ) shmmax_minsize = bytes_from_string ( pagesize ) * nr_hugepages if shmmax_minsize > shmmax_current : sysctl_settings [ 'kernel.shmmax' ] = shmmax_minsize sysctl . create ( yaml . dump ( sysctl_settings ) , '/etc/sysctl.d/10-hugepage.conf' ) mkdir ( mnt_point , owner = 'root' , group = 'root' , perms = 0o755 , force = False ) lfstab = fstab . Fstab ( ) fstab_entry = lfstab . get_entry_by_attr ( 'mountpoint' , mnt_point ) if fstab_entry : lfstab . remove_entry ( fstab_entry ) entry = lfstab . Entry ( 'nodev' , mnt_point , 'hugetlbfs' , 'mode=1770,gid={},pagesize={}' . format ( gid , pagesize ) , 0 , 0 ) lfstab . add_entry ( entry ) if mount : fstab_mount ( mnt_point )
Enable hugepages on system .
56,584
# Disable any of self.modules currently loaded in Apache and restart it; no-op when all are already absent; subprocess failures are logged at ERROR.
def ensure_compliance ( self ) : if not self . modules : return try : loaded_modules = self . _get_loaded_modules ( ) non_compliant_modules = [ ] for module in self . modules : if module in loaded_modules : log ( "Module '%s' is enabled but should not be." % ( module ) , level = INFO ) non_compliant_modules . append ( module ) if len ( non_compliant_modules ) == 0 : return for module in non_compliant_modules : self . _disable_module ( module ) self . _restart_apache ( ) except subprocess . CalledProcessError as e : log ( 'Error occurred auditing apache module compliance. ' 'This may have been already reported. ' 'Output is: %s' % e . output , level = ERROR )
Ensures that the modules are not loaded .
56,585
# Return the module names currently enabled in Apache, parsed from 'apache2ctl -M' output.
def _get_loaded_modules ( ) : output = subprocess . check_output ( [ 'apache2ctl' , '-M' ] ) if six . PY3 : output = output . decode ( 'utf-8' ) modules = [ ] for line in output . splitlines ( ) : matcher = re . search ( r'^ (\S*)_module (\S*)' , line ) if matcher : modules . append ( matcher . group ( 1 ) ) return modules
Returns the modules which are enabled in Apache .
56,586
# Disable an Apache module via a2dismod, logging failures. NOTE(review): CalledProcessError from check_call carries output=None, so e.output in the log line is always None — verify intent.
def _disable_module ( module ) : try : subprocess . check_call ( [ 'a2dismod' , module ] ) except subprocess . CalledProcessError as e : log ( 'Error occurred disabling module %s. ' 'Output is: %s' % ( module , e . output ) , level = ERROR )
Disables the specified module in Apache .
56,587
def get_template_path(template_dir, path):
    """Return the template file that would be used to render *path*.

    The template shares the target file's basename and lives directly in
    *template_dir*.

    :param template_dir: directory holding the templates
    :param path: destination file path being rendered
    :returns: full path of the corresponding template file
    """
    basename = os.path.basename(path)
    return os.path.join(template_dir, basename)
Returns the template file which would be used to render the path .
56,588
# Render the jinja2 template named after path's basename from template_dir and write the stripped utf-8 result to path; a falsy render result is skipped with a warning.
def render_and_write ( template_dir , path , context ) : env = Environment ( loader = FileSystemLoader ( template_dir ) ) template_file = os . path . basename ( path ) template = env . get_template ( template_file ) log ( 'Rendering from template: %s' % template . name , level = DEBUG ) rendered_content = template . render ( context ) if not rendered_content : log ( "Render returned None - skipping '%s'" % path , level = WARNING ) return write ( path , rendered_content . encode ( 'utf-8' ) . strip ( ) ) log ( 'Wrote template %s' % path , level = DEBUG )
Renders the specified template into the file .
56,589
# Build OS hardening apt audits: forbid APT::Get::AllowUnauthenticated and, when 'packages_clean' is configured, restrict the configured security package list.
def get_audits ( ) : audits = [ AptConfig ( [ { 'key' : 'APT::Get::AllowUnauthenticated' , 'expected' : 'false' } ] ) ] settings = get_settings ( 'os' ) clean_packages = settings [ 'security' ] [ 'packages_clean' ] if clean_packages : security_packages = settings [ 'security' ] [ 'packages_list' ] if security_packages : audits . append ( RestrictedPackages ( security_packages ) ) return audits
Get OS hardening apt audits .
56,590
# Build OS hardening PAM audits: passwdqc config when enabled, and the pam tally2 config (or its removal) depending on the 'retries' setting.
def get_audits ( ) : audits = [ ] settings = utils . get_settings ( 'os' ) if settings [ 'auth' ] [ 'pam_passwdqc_enable' ] : audits . append ( PasswdqcPAM ( '/etc/passwdqc.conf' ) ) if settings [ 'auth' ] [ 'retries' ] : audits . append ( Tally2PAM ( '/usr/share/pam-configs/tally2' ) ) else : audits . append ( DeletedFile ( '/usr/share/pam-configs/tally2' ) ) return audits
Get OS hardening PAM authentication audits .
56,591
# Install the ansible package (optionally from a PPA) and write a localhost-only ansible hosts file using a local connection.
def install_ansible_support ( from_ppa = True , ppa_location = 'ppa:rquillo/ansible' ) : if from_ppa : charmhelpers . fetch . add_source ( ppa_location ) charmhelpers . fetch . apt_update ( fatal = True ) charmhelpers . fetch . apt_install ( 'ansible' ) with open ( ansible_hosts_path , 'w+' ) as hosts_file : hosts_file . write ( 'localhost ansible_connection=local ansible_remote_tmp=/root/.ansible/tmp' )
Installs the ansible package .
56,592
# Execute the named hook (via a registered action, or the parent Hooks class otherwise), then run the playbook with the hook name as the tag, passing any action-provided extra vars.
def execute ( self , args ) : hook_name = os . path . basename ( args [ 0 ] ) extra_vars = None if hook_name in self . _actions : extra_vars = self . _actions [ hook_name ] ( args [ 1 : ] ) else : super ( AnsibleHooks , self ) . execute ( args ) charmhelpers . contrib . ansible . apply_playbook ( self . playbook_path , tags = [ hook_name ] , extra_vars = extra_vars )
Execute the hook followed by the playbook using the hook as tag .
56,593
# Decorator registering the wrapped function (and a dash-separated alias when its name contains underscores) as a named action whose argv 'k=v' pairs become keyword arguments; TypeErrors gain the docstring for context.
def action ( self , * action_names ) : def action_wrapper ( decorated ) : @ functools . wraps ( decorated ) def wrapper ( argv ) : kwargs = dict ( arg . split ( '=' ) for arg in argv ) try : return decorated ( ** kwargs ) except TypeError as e : if decorated . __doc__ : e . args += ( decorated . __doc__ , ) raise self . register_action ( decorated . __name__ , wrapper ) if '_' in decorated . __name__ : self . register_action ( decorated . __name__ . replace ( '_' , '-' ) , wrapper ) return wrapper return action_wrapper
Decorator registering them as actions
56,594
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
    """Get a logger object that will log to stdout.

    :param name: logger name (loggers are singletons per name)
    :param level: logging level applied to the logger and its handler
    :returns: configured logging.Logger instance
    """
    log = logging
    logger = log.getLogger(name)
    fmt = log.Formatter("%(asctime)s %(funcName)s "
                        "%(levelname)s: %(message)s")

    # Bugfix: logging.getLogger returns the same logger for a given name,
    # so unconditionally adding a handler duplicated every log line on
    # repeat calls.  Only attach the stream handler the first time.
    if not logger.handlers:
        handler = log.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)
        logger.addHandler(handler)

    logger.setLevel(level)
    return logger
Get a logger object that will log to stdout .
56,595
# Fill in charm-store 'location' values for services lacking one: base charms are pinned to a supported series, otherwise the stable series channel or the openstack-charmers-next namespace is used.
def _determine_branch_locations ( self , other_services ) : self . log . info ( 'OpenStackAmuletDeployment: determine branch locations' ) base_charms = { 'mysql' : [ 'trusty' ] , 'mongodb' : [ 'trusty' ] , 'nrpe' : [ 'trusty' , 'xenial' ] , } for svc in other_services : if svc . get ( 'location' ) : continue if svc [ 'name' ] in base_charms : target_series = self . series if self . series not in base_charms [ svc [ 'name' ] ] : target_series = base_charms [ svc [ 'name' ] ] [ - 1 ] svc [ 'location' ] = 'cs:{}/{}' . format ( target_series , svc [ 'name' ] ) elif self . stable : svc [ 'location' ] = 'cs:{}/{}' . format ( self . series , svc [ 'name' ] ) else : svc [ 'location' ] = 'cs:~openstack-charmers-next/{}/{}' . format ( self . series , svc [ 'name' ] ) return other_services
Determine the branch locations for the other services .
56,596
def _auto_wait_for_status(self, message=None, exclude_services=None,
                          include_only=None, timeout=None):
    """Wait for every unit to report a matching extended status.

    :param message: expected status text; a plain string or a compiled
        regular expression.  Defaults to matching any status containing
        'ready' (case-insensitive).
    :param exclude_services: list of services to leave out of the wait
    :param include_only: wait only on these services (mutually
        exclusive with exclude_services)
    :param timeout: seconds to wait; defaults to $AMULET_SETUP_TIMEOUT
        or 1800
    :raises ValueError: if both exclude_services and include_only are
        given
    """
    if not timeout:
        timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
    self.log.info('Waiting for extended status on units for {}s...'
                  ''.format(timeout))

    all_services = self.d.services.keys()

    if exclude_services and include_only:
        raise ValueError('exclude_services can not be used '
                         'with include_only')

    if message:
        # A compiled pattern exposes .pattern; checking for the
        # attribute avoids re._pattern_type, which was removed in
        # Python 3.8 and raised AttributeError here.
        if hasattr(message, 'pattern'):
            match = message.pattern
        else:
            match = message
        self.log.debug('Custom extended status wait match: '
                       '{}'.format(match))
    else:
        self.log.debug('Default extended status wait match: contains '
                       'READY (case-insensitive)')
        message = re.compile('.*ready.*', re.IGNORECASE)

    if exclude_services:
        self.log.debug('Excluding services from extended status match: '
                       '{}'.format(exclude_services))
    else:
        exclude_services = []

    if include_only:
        services = include_only
    else:
        services = list(set(all_services) - set(exclude_services))

    self.log.debug('Waiting up to {}s for extended status on services: '
                   '{}'.format(timeout, services))
    service_messages = {service: message for service in services}

    # Allow the environment to settle, then check status and messages.
    self.d.sentry.wait(timeout=timeout)
    self.d.sentry.wait_for_status(self.d.juju_env, services,
                                  timeout=timeout)
    self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
    self.log.info('OK')
Wait for all units to reach a specific extended status, skipping any services listed as excluded. Unless a custom message is given, any status containing "ready" (case-insensitive) is considered a match.
56,597
def _get_openstack_release(self):
    """Return the numeric release index for this deployment.

    Exposes every entry of ``OPENSTACK_RELEASES_PAIRS`` as an integer
    attribute on the instance so releases can be compared numerically,
    then looks up the index matching (series, openstack origin).
    """
    # Make each known series/release pair addressable as a numeric
    # attribute, e.g. self.xenial_mitaka.
    for index, pair_name in enumerate(OPENSTACK_RELEASES_PAIRS):
        setattr(self, pair_name, index)

    releases = {
        ('trusty', None): self.trusty_icehouse,
        ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
        ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
        ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
        ('xenial', None): self.xenial_mitaka,
        ('xenial', 'cloud:xenial-newton'): self.xenial_newton,
        ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
        ('xenial', 'cloud:xenial-pike'): self.xenial_pike,
        ('xenial', 'cloud:xenial-queens'): self.xenial_queens,
        ('yakkety', None): self.yakkety_newton,
        ('zesty', None): self.zesty_ocata,
        ('artful', None): self.artful_pike,
        ('bionic', None): self.bionic_queens,
        ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
        ('bionic', 'cloud:bionic-stein'): self.bionic_stein,
        ('cosmic', None): self.cosmic_rocky,
        ('disco', None): self.disco_stein,
    }
    return releases[(self.series, self.openstack)]
Get openstack release .
56,598
def _get_openstack_release_string ( self ) : releases = OrderedDict ( [ ( 'trusty' , 'icehouse' ) , ( 'xenial' , 'mitaka' ) , ( 'yakkety' , 'newton' ) , ( 'zesty' , 'ocata' ) , ( 'artful' , 'pike' ) , ( 'bionic' , 'queens' ) , ( 'cosmic' , 'rocky' ) , ( 'disco' , 'stein' ) , ] ) if self . openstack : os_origin = self . openstack . split ( ':' ) [ 1 ] return os_origin . split ( '%s-' % self . series ) [ 1 ] . split ( '/' ) [ 0 ] else : return releases [ self . series ]
Get openstack release string .
56,599
def get_ceph_expected_pools(self, radosgw=False):
    """Return the ceph pools expected for a ceph+cinder+glance deploy.

    The default pool set varies with the OpenStack release; the
    radosgw-specific pools are appended when the gateway is present.

    :param radosgw: whether ceph-radosgw is part of the deployment
    :returns: list of expected pool names
    """
    release = self._get_openstack_release()
    if release == self.trusty_icehouse:
        # Icehouse still carries the legacy data/metadata pools.
        expected = ['data', 'metadata', 'rbd', 'cinder-ceph', 'glance']
    elif self.trusty_kilo <= release <= self.zesty_ocata:
        expected = ['rbd', 'cinder-ceph', 'glance']
    else:
        # Releases newer than ocata drop the default rbd pool.
        expected = ['cinder-ceph', 'glance']

    if radosgw:
        expected.extend([
            '.rgw.root',
            '.rgw.control',
            '.rgw',
            '.rgw.gc',
            '.users.uid',
        ])
    return expected
Return a list of expected ceph pools in a ceph + cinder + glance test scenario based on OpenStack release and whether ceph radosgw is flagged as present or not .