idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
56,400 | def validate_unit_process_ids ( self , expected , actual ) : self . log . debug ( 'Checking units for running processes...' ) self . log . debug ( 'Expected PIDs: {}' . format ( expected ) ) self . log . debug ( 'Actual PIDs: {}' . format ( actual ) ) if len ( actual ) != len ( expected ) : return ( 'Unit count mismatch. expected, actual: {}, ' '{} ' . format ( len ( expected ) , len ( actual ) ) ) for ( e_sentry , e_proc_names ) in six . iteritems ( expected ) : e_sentry_name = e_sentry . info [ 'unit_name' ] if e_sentry in actual . keys ( ) : a_proc_names = actual [ e_sentry ] else : return ( 'Expected sentry ({}) not found in actual dict data.' '{}' . format ( e_sentry_name , e_sentry ) ) if len ( e_proc_names . keys ( ) ) != len ( a_proc_names . keys ( ) ) : return ( 'Process name count mismatch. expected, actual: {}, ' '{}' . format ( len ( expected ) , len ( actual ) ) ) for ( e_proc_name , e_pids ) , ( a_proc_name , a_pids ) in zip ( e_proc_names . items ( ) , a_proc_names . items ( ) ) : if e_proc_name != a_proc_name : return ( 'Process name mismatch. expected, actual: {}, ' '{}' . format ( e_proc_name , a_proc_name ) ) a_pids_length = len ( a_pids ) fail_msg = ( 'PID count mismatch. {} ({}) expected, actual: ' '{}, {} ({})' . format ( e_sentry_name , e_proc_name , e_pids , a_pids_length , a_pids ) ) if isinstance ( e_pids , list ) and a_pids_length not in e_pids : return fail_msg elif not isinstance ( e_pids , bool ) and not isinstance ( e_pids , list ) and a_pids_length != e_pids : return fail_msg elif isinstance ( e_pids , bool ) and e_pids is True and a_pids_length < 1 : return fail_msg elif isinstance ( e_pids , bool ) and e_pids is False and a_pids_length != 0 : return fail_msg else : self . log . debug ( 'PID check OK: {} {} {}: ' '{}' . format ( e_sentry_name , e_proc_name , e_pids , a_pids ) ) return None | Validate process id quantities for services on units . |
56,401 | def validate_list_of_identical_dicts ( self , list_of_dicts ) : hashes = [ ] for _dict in list_of_dicts : hashes . append ( hash ( frozenset ( _dict . items ( ) ) ) ) self . log . debug ( 'Hashes: {}' . format ( hashes ) ) if len ( set ( hashes ) ) == 1 : self . log . debug ( 'Dicts within list are identical' ) else : return 'Dicts within list are not identical' return None | Check that all dicts within a list are identical . |
56,402 | def get_unit_hostnames ( self , units ) : host_names = { } for unit in units : host_names [ unit . info [ 'unit_name' ] ] = str ( unit . file_contents ( '/etc/hostname' ) . strip ( ) ) self . log . debug ( 'Unit host names: {}' . format ( host_names ) ) return host_names | Return a dict of juju unit names to hostnames . |
56,403 | def run_cmd_unit ( self , sentry_unit , cmd ) : output , code = sentry_unit . run ( cmd ) if code == 0 : self . log . debug ( '{} `{}` command returned {} ' '(OK)' . format ( sentry_unit . info [ 'unit_name' ] , cmd , code ) ) else : msg = ( '{} `{}` command returned {} ' '{}' . format ( sentry_unit . info [ 'unit_name' ] , cmd , code , output ) ) amulet . raise_status ( amulet . FAIL , msg = msg ) return str ( output ) , code | Run a command on a unit return the output and exit code . |
56,404 | def file_exists_on_unit ( self , sentry_unit , file_name ) : try : sentry_unit . file_stat ( file_name ) return True except IOError : return False except Exception as e : msg = 'Error checking file {}: {}' . format ( file_name , e ) amulet . raise_status ( amulet . FAIL , msg = msg ) | Check if a file exists on a unit . |
56,405 | def file_contents_safe ( self , sentry_unit , file_name , max_wait = 60 , fatal = False ) : unit_name = sentry_unit . info [ 'unit_name' ] file_contents = False tries = 0 while not file_contents and tries < ( max_wait / 4 ) : try : file_contents = sentry_unit . file_contents ( file_name ) except IOError : self . log . debug ( 'Attempt {} to open file {} from {} ' 'failed' . format ( tries , file_name , unit_name ) ) time . sleep ( 4 ) tries += 1 if file_contents : return file_contents elif not fatal : return None elif fatal : msg = 'Failed to get file contents from unit.' amulet . raise_status ( amulet . FAIL , msg ) | Get file contents from a sentry unit . Wrap amulet file_contents with retry logic to address races where a file checks as existing but no longer exists by the time file_contents is called . Return None if file not found . Optionally raise if fatal is True . |
56,406 | def port_knock_tcp ( self , host = "localhost" , port = 22 , timeout = 15 ) : try : connect_host = socket . gethostbyname ( host ) host_human = "{} ({})" . format ( connect_host , host ) except socket . error as e : self . log . warn ( 'Unable to resolve address: ' '{} ({}) Trying anyway!' . format ( host , e ) ) connect_host = host host_human = connect_host try : knock = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) knock . settimeout ( timeout ) knock . connect ( ( connect_host , port ) ) knock . close ( ) self . log . debug ( 'Socket connect OK for host ' '{} on port {}.' . format ( host_human , port ) ) return True except socket . error as e : self . log . debug ( 'Socket connect FAIL for' ' {} port {} ({})' . format ( host_human , port , e ) ) return False | Open a TCP socket to check for a listening sevice on a host . |
56,407 | def port_knock_units ( self , sentry_units , port = 22 , timeout = 15 , expect_success = True ) : for unit in sentry_units : host = unit . info [ 'public-address' ] connected = self . port_knock_tcp ( host , port , timeout ) if not connected and expect_success : return 'Socket connect failed.' elif connected and not expect_success : return 'Socket connected unexpectedly.' | Open a TCP socket to check for a listening sevice on each listed juju unit . |
56,408 | def wait_on_action ( self , action_id , _check_output = subprocess . check_output ) : data = amulet . actions . get_action_output ( action_id , full_output = True ) return data . get ( u"status" ) == "completed" | Wait for a given action returning if it completed or not . |
56,409 | def status_get ( self , unit ) : raw_status , return_code = unit . run ( "status-get --format=json --include-data" ) if return_code != 0 : return ( "unknown" , "" ) status = json . loads ( raw_status ) return ( status [ "status" ] , status [ "message" ] ) | Return the current service status of this unit . |
56,410 | def execute ( self , sql ) : cursor = self . connection . cursor ( ) try : cursor . execute ( sql ) finally : cursor . close ( ) | Execute arbitary SQL against the database . |
56,411 | def select ( self , sql ) : cursor = self . connection . cursor ( ) try : cursor . execute ( sql ) results = [ list ( i ) for i in cursor . fetchall ( ) ] finally : cursor . close ( ) return results | Execute arbitrary SQL select query against the database and return the results . |
56,412 | def migrate_passwords_to_leader_storage ( self , excludes = None ) : if not is_leader ( ) : log ( "Skipping password migration as not the lead unit" , level = DEBUG ) return dirname = os . path . dirname ( self . root_passwd_file_template ) path = os . path . join ( dirname , '*.passwd' ) for f in glob . glob ( path ) : if excludes and f in excludes : log ( "Excluding %s from leader storage migration" % ( f ) , level = DEBUG ) continue key = os . path . basename ( f ) with open ( f , 'r' ) as passwd : _value = passwd . read ( ) . strip ( ) try : leader_set ( settings = { key : _value } ) if self . delete_ondisk_passwd_file : os . unlink ( f ) except ValueError : pass | Migrate any passwords storage on disk to leader storage . |
56,413 | def get_mysql_password_on_disk ( self , username = None , password = None ) : if username : template = self . user_passwd_file_template passwd_file = template . format ( username ) else : passwd_file = self . root_passwd_file_template _password = None if os . path . exists ( passwd_file ) : log ( "Using existing password file '%s'" % passwd_file , level = DEBUG ) with open ( passwd_file , 'r' ) as passwd : _password = passwd . read ( ) . strip ( ) else : log ( "Generating new password file '%s'" % passwd_file , level = DEBUG ) if not os . path . isdir ( os . path . dirname ( passwd_file ) ) : mkdir ( os . path . dirname ( passwd_file ) , owner = 'root' , group = 'root' , perms = 0o770 ) os . chmod ( os . path . dirname ( passwd_file ) , 0o770 ) _password = password or pwgen ( length = 32 ) write_file ( passwd_file , _password , owner = 'root' , group = 'root' , perms = 0o660 ) return _password | Retrieve generate or store a mysql password for the provided username on disk . |
56,414 | def passwd_keys ( self , username ) : keys = [ ] if username == 'mysql' : log ( "Bad username '%s'" % ( username ) , level = WARNING ) if username : keys . append ( 'mysql-%s.passwd' % ( username ) ) keys . append ( '%s.passwd' % ( username ) ) else : keys . append ( 'mysql.passwd' ) for key in keys : yield key | Generator to return keys used to store passwords in peer store . |
56,415 | def get_mysql_password ( self , username = None , password = None ) : excludes = [ ] try : for key in self . passwd_keys ( username ) : _password = leader_get ( key ) if _password : break if _password and not username : excludes . append ( self . root_passwd_file_template ) except ValueError : _password = None if not _password : _password = self . get_mysql_password_on_disk ( username , password ) if self . migrate_passwd_to_leader_storage : self . migrate_passwords_to_leader_storage ( excludes = excludes ) return _password | Retrieve generate or store a mysql password for the provided username using peer relation cluster . |
56,416 | def set_mysql_password ( self , username , password ) : if username is None : username = 'root' rel_username = None if username == 'root' else username cur_passwd = self . get_mysql_password ( rel_username ) new_passwd = password try : self . connect ( user = username , password = cur_passwd ) cursor = self . connection . cursor ( ) except MySQLdb . OperationalError as ex : raise MySQLSetPasswordError ( ( 'Cannot connect using password in ' 'leader settings (%s)' ) % ex , ex ) try : release = CompareHostReleases ( lsb_release ( ) [ 'DISTRIB_CODENAME' ] ) if release < 'bionic' : SQL_UPDATE_PASSWD = ( "UPDATE mysql.user SET password = " "PASSWORD( %s ) WHERE user = %s;" ) else : SQL_UPDATE_PASSWD = ( "UPDATE mysql.user SET " "authentication_string = " "PASSWORD( %s ) WHERE user = %s;" ) cursor . execute ( SQL_UPDATE_PASSWD , ( new_passwd , username ) ) cursor . execute ( 'FLUSH PRIVILEGES;' ) self . connection . commit ( ) except MySQLdb . OperationalError as ex : raise MySQLSetPasswordError ( 'Cannot update password: %s' % str ( ex ) , ex ) finally : cursor . close ( ) try : self . connect ( user = username , password = new_passwd ) self . execute ( 'select 1;' ) except MySQLdb . OperationalError as ex : raise MySQLSetPasswordError ( ( 'Cannot connect using new password: ' '%s' ) % str ( ex ) , ex ) if not is_leader ( ) : log ( 'Only the leader can set a new password in the relation' , level = DEBUG ) return for key in self . passwd_keys ( rel_username ) : _password = leader_get ( key ) if _password : log ( 'Updating password for %s (%s)' % ( key , rel_username ) , level = DEBUG ) leader_set ( settings = { key : new_passwd } ) | Update a mysql password for the provided username changing the leader settings |
56,417 | def get_allowed_units ( self , database , username , relation_id = None ) : self . connect ( password = self . get_mysql_root_password ( ) ) allowed_units = set ( ) for unit in related_units ( relation_id ) : settings = relation_get ( rid = relation_id , unit = unit ) for attr in [ "%s_hostname" % ( database ) , 'hostname' ] : hosts = settings . get ( attr , None ) if hosts : break if hosts : try : hosts = json . loads ( hosts ) except ValueError : hosts = [ hosts ] else : hosts = [ settings [ 'private-address' ] ] if hosts : for host in hosts : host = self . normalize_address ( host ) if self . grant_exists ( database , username , host ) : log ( "Grant exists for host '%s' on db '%s'" % ( host , database ) , level = DEBUG ) if unit not in allowed_units : allowed_units . add ( unit ) else : log ( "Grant does NOT exist for host '%s' on db '%s'" % ( host , database ) , level = DEBUG ) else : log ( "No hosts found for grant check" , level = INFO ) return allowed_units | Get list of units with access grants for database with username . |
56,418 | def configure_db ( self , hostname , database , username , admin = False ) : self . connect ( password = self . get_mysql_root_password ( ) ) if not self . database_exists ( database ) : self . create_database ( database ) remote_ip = self . normalize_address ( hostname ) password = self . get_mysql_password ( username ) if not self . grant_exists ( database , username , remote_ip ) : if not admin : self . create_grant ( database , username , remote_ip , password ) else : self . create_admin_grant ( username , remote_ip , password ) self . flush_priviledges ( ) return password | Configure access to database for username from hostname . |
56,419 | def human_to_bytes ( self , human ) : num_re = re . compile ( '^[0-9]+$' ) if num_re . match ( human ) : return human factors = { 'K' : 1024 , 'M' : 1048576 , 'G' : 1073741824 , 'T' : 1099511627776 } modifier = human [ - 1 ] if modifier in factors : return int ( human [ : - 1 ] ) * factors [ modifier ] if modifier == '%' : total_ram = self . human_to_bytes ( self . get_mem_total ( ) ) if self . is_32bit_system ( ) and total_ram > self . sys_mem_limit ( ) : total_ram = self . sys_mem_limit ( ) factor = int ( human [ : - 1 ] ) * 0.01 pctram = total_ram * factor return int ( pctram - ( pctram % self . DEFAULT_PAGE_SIZE ) ) raise ValueError ( "Can only convert K,M,G, or T" ) | Convert human readable configuration options to bytes . |
56,420 | def sys_mem_limit ( self ) : if platform . machine ( ) in [ 'armv7l' ] : _mem_limit = self . human_to_bytes ( '2700M' ) else : _mem_limit = self . human_to_bytes ( '4G' ) return _mem_limit | Determine the default memory limit for the current service unit . |
56,421 | def get_mem_total ( self ) : with open ( '/proc/meminfo' ) as meminfo_file : for line in meminfo_file : key , mem = line . split ( ':' , 2 ) if key == 'MemTotal' : mtot , modifier = mem . strip ( ) . split ( ' ' ) return '%s%s' % ( mtot , modifier [ 0 ] . upper ( ) ) | Calculate the total memory in the current service unit . |
56,422 | def parse_config ( self ) : config = config_get ( ) mysql_config = { } if 'max-connections' in config : mysql_config [ 'max_connections' ] = config [ 'max-connections' ] if 'wait-timeout' in config : mysql_config [ 'wait_timeout' ] = config [ 'wait-timeout' ] if 'innodb-flush-log-at-trx-commit' in config : mysql_config [ 'innodb_flush_log_at_trx_commit' ] = config [ 'innodb-flush-log-at-trx-commit' ] elif 'tuning-level' in config : mysql_config [ 'innodb_flush_log_at_trx_commit' ] = self . INNODB_FLUSH_CONFIG_VALUES . get ( config [ 'tuning-level' ] , 1 ) if ( 'innodb-change-buffering' in config and config [ 'innodb-change-buffering' ] in self . INNODB_VALID_BUFFERING_VALUES ) : mysql_config [ 'innodb_change_buffering' ] = config [ 'innodb-change-buffering' ] if 'innodb-io-capacity' in config : mysql_config [ 'innodb_io_capacity' ] = config [ 'innodb-io-capacity' ] mysql_config [ 'key_buffer' ] = self . human_to_bytes ( '32M' ) total_memory = self . human_to_bytes ( self . get_mem_total ( ) ) dataset_bytes = config . get ( 'dataset-size' , None ) innodb_buffer_pool_size = config . get ( 'innodb-buffer-pool-size' , None ) if innodb_buffer_pool_size : innodb_buffer_pool_size = self . human_to_bytes ( innodb_buffer_pool_size ) elif dataset_bytes : log ( "Option 'dataset-size' has been deprecated, please use" "innodb_buffer_pool_size option instead" , level = "WARN" ) innodb_buffer_pool_size = self . human_to_bytes ( dataset_bytes ) else : innodb_buffer_pool_size = min ( int ( total_memory * self . DEFAULT_INNODB_BUFFER_FACTOR ) , self . DEFAULT_INNODB_BUFFER_SIZE_MAX ) if innodb_buffer_pool_size > total_memory : log ( "innodb_buffer_pool_size; {} is greater than system available memory:{}" . format ( innodb_buffer_pool_size , total_memory ) , level = 'WARN' ) mysql_config [ 'innodb_buffer_pool_size' ] = innodb_buffer_pool_size return mysql_config | Parse charm configuration and calculate values for config files . |
56,423 | def create_loopback ( file_path ) : file_path = os . path . abspath ( file_path ) check_call ( [ 'losetup' , '--find' , file_path ] ) for d , f in six . iteritems ( loopback_devices ( ) ) : if f == file_path : return d | Create a loopback device for a given backing file . |
56,424 | def ensure_loopback_device ( path , size ) : for d , f in six . iteritems ( loopback_devices ( ) ) : if f == path : return d if not os . path . exists ( path ) : cmd = [ 'truncate' , '--size' , size , path ] check_call ( cmd ) return create_loopback ( path ) | Ensure a loopback device exists for a given backing file path and size . If it a loopback device is not mapped to file a new one will be created . |
56,425 | def leader_get ( attribute = None , rid = None ) : migration_key = '__leader_get_migrated_settings__' if not is_leader ( ) : return _leader_get ( attribute = attribute ) settings_migrated = False leader_settings = _leader_get ( attribute = attribute ) previously_migrated = _leader_get ( attribute = migration_key ) if previously_migrated : migrated = set ( json . loads ( previously_migrated ) ) else : migrated = set ( [ ] ) try : if migration_key in leader_settings : del leader_settings [ migration_key ] except TypeError : pass if attribute : if attribute in migrated : return leader_settings if not leader_settings : peer_setting = _relation_get ( attribute = attribute , unit = local_unit ( ) , rid = rid ) if peer_setting : leader_set ( settings = { attribute : peer_setting } ) leader_settings = peer_setting if leader_settings : settings_migrated = True migrated . add ( attribute ) else : r_settings = _relation_get ( unit = local_unit ( ) , rid = rid ) if r_settings : for key in set ( r_settings . keys ( ) ) . difference ( migrated ) : if not leader_settings . get ( key ) : leader_settings [ key ] = r_settings [ key ] settings_migrated = True migrated . add ( key ) if settings_migrated : leader_set ( ** leader_settings ) if migrated and settings_migrated : migrated = json . dumps ( list ( migrated ) ) leader_set ( settings = { migration_key : migrated } ) return leader_settings | Wrapper to ensure that settings are migrated from the peer relation . |
56,426 | def relation_set ( relation_id = None , relation_settings = None , ** kwargs ) : try : if relation_id in relation_ids ( 'cluster' ) : return leader_set ( settings = relation_settings , ** kwargs ) else : raise NotImplementedError except NotImplementedError : return _relation_set ( relation_id = relation_id , relation_settings = relation_settings , ** kwargs ) | Attempt to use leader - set if supported in the current version of Juju otherwise falls back on relation - set . |
56,427 | def relation_get ( attribute = None , unit = None , rid = None ) : try : if rid in relation_ids ( 'cluster' ) : return leader_get ( attribute , rid ) else : raise NotImplementedError except NotImplementedError : return _relation_get ( attribute = attribute , rid = rid , unit = unit ) | Attempt to use leader - get if supported in the current version of Juju otherwise falls back on relation - get . |
56,428 | def peer_retrieve ( key , relation_name = 'cluster' ) : cluster_rels = relation_ids ( relation_name ) if len ( cluster_rels ) > 0 : cluster_rid = cluster_rels [ 0 ] return relation_get ( attribute = key , rid = cluster_rid , unit = local_unit ( ) ) else : raise ValueError ( 'Unable to detect' 'peer relation {}' . format ( relation_name ) ) | Retrieve a named key from peer relation relation_name . |
56,429 | def peer_echo ( includes = None , force = False ) : try : is_leader ( ) except NotImplementedError : pass else : if not force : return relation_get = _relation_get relation_set = _relation_set rdata = relation_get ( ) echo_data = { } if includes is None : echo_data = rdata . copy ( ) for ex in [ 'private-address' , 'public-address' ] : if ex in echo_data : echo_data . pop ( ex ) else : for attribute , value in six . iteritems ( rdata ) : for include in includes : if include in attribute : echo_data [ attribute ] = value if len ( echo_data ) > 0 : relation_set ( relation_settings = echo_data ) | Echo filtered attributes back onto the same relation for storage . |
56,430 | def peer_store_and_set ( relation_id = None , peer_relation_name = 'cluster' , peer_store_fatal = False , relation_settings = None , delimiter = '_' , ** kwargs ) : relation_settings = relation_settings if relation_settings else { } relation_set ( relation_id = relation_id , relation_settings = relation_settings , ** kwargs ) if is_relation_made ( peer_relation_name ) : for key , value in six . iteritems ( dict ( list ( kwargs . items ( ) ) + list ( relation_settings . items ( ) ) ) ) : key_prefix = relation_id or current_relation_id ( ) peer_store ( key_prefix + delimiter + key , value , relation_name = peer_relation_name ) else : if peer_store_fatal : raise ValueError ( 'Unable to detect ' 'peer relation {}' . format ( peer_relation_name ) ) | Store passed - in arguments both in argument relation and in peer storage . |
56,431 | def sed ( filename , before , after , flags = 'g' ) : expression = r's/{0}/{1}/{2}' . format ( before , after , flags ) return subprocess . check_call ( [ "sed" , "-i" , "-r" , "-e" , expression , os . path . expanduser ( filename ) ] ) | Search and replaces the given pattern on filename . |
56,432 | def get_listening ( self , listen = [ '0.0.0.0' ] ) : if listen == [ '0.0.0.0' ] : return listen value = [ ] for network in listen : try : ip = get_address_in_network ( network = network , fatal = True ) except ValueError : if is_ip ( network ) : ip = network else : try : ip = get_iface_addr ( iface = network , fatal = False ) [ 0 ] except IndexError : continue value . append ( ip ) if value == [ ] : return [ '0.0.0.0' ] return value | Returns a list of addresses SSH can list on |
56,433 | def get_loader ( templates_dir , os_release ) : tmpl_dirs = [ ( rel , os . path . join ( templates_dir , rel ) ) for rel in six . itervalues ( OPENSTACK_CODENAMES ) ] if not os . path . isdir ( templates_dir ) : log ( 'Templates directory not found @ %s.' % templates_dir , level = ERROR ) raise OSConfigException loaders = [ FileSystemLoader ( templates_dir ) ] helper_templates = os . path . join ( os . path . dirname ( __file__ ) , 'templates' ) if os . path . isdir ( helper_templates ) : loaders . append ( FileSystemLoader ( helper_templates ) ) for rel , tmpl_dir in tmpl_dirs : if os . path . isdir ( tmpl_dir ) : loaders . insert ( 0 , FileSystemLoader ( tmpl_dir ) ) if rel == os_release : break log ( 'Creating choice loader with dirs: %s' % [ l . searchpath for l in loaders ] , level = TRACE ) return ChoiceLoader ( loaders ) | Create a jinja2 . ChoiceLoader containing template dirs up to and including os_release . If directory template directory is missing at templates_dir it will be omitted from the loader . templates_dir is added to the bottom of the search list as a base loading dir . |
56,434 | def complete_contexts ( self ) : if self . _complete_contexts : return self . _complete_contexts self . context ( ) return self . _complete_contexts | Return a list of interfaces that have satisfied contexts . |
56,435 | def write ( self , config_file ) : if config_file not in self . templates : log ( 'Config not registered: %s' % config_file , level = ERROR ) raise OSConfigException _out = self . render ( config_file ) if six . PY3 : _out = _out . encode ( 'UTF-8' ) with open ( config_file , 'wb' ) as out : out . write ( _out ) log ( 'Wrote template %s.' % config_file , level = INFO ) | Write a single config file raises if config file is not registered . |
56,436 | def write_all ( self ) : [ self . write ( k ) for k in six . iterkeys ( self . templates ) ] | Write out all registered config files . |
56,437 | def set_release ( self , openstack_release ) : self . _tmpl_env = None self . openstack_release = openstack_release self . _get_tmpl_env ( ) | Resets the template environment and generates a new template loader based on a the new openstack release . |
56,438 | def complete_contexts ( self ) : interfaces = [ ] [ interfaces . extend ( i . complete_contexts ( ) ) for i in six . itervalues ( self . templates ) ] return interfaces | Returns a list of context interfaces that yield a complete context . |
56,439 | def is_elected_leader ( resource ) : try : return juju_is_leader ( ) except NotImplementedError : log ( 'Juju leadership election feature not enabled' ', using fallback support' , level = WARNING ) if is_clustered ( ) : if not is_crm_leader ( resource ) : log ( 'Deferring action to CRM leader.' , level = INFO ) return False else : peers = peer_units ( ) if peers and not oldest_peer ( peers ) : log ( 'Deferring action to oldest service unit.' , level = INFO ) return False return True | Returns True if the charm executing this is the elected cluster leader . |
56,440 | def is_crm_dc ( ) : cmd = [ 'crm' , 'status' ] try : status = subprocess . check_output ( cmd , stderr = subprocess . STDOUT ) if not isinstance ( status , six . text_type ) : status = six . text_type ( status , "utf-8" ) except subprocess . CalledProcessError as ex : raise CRMDCNotFound ( str ( ex ) ) current_dc = '' for line in status . split ( '\n' ) : if line . startswith ( 'Current DC' ) : current_dc = line . split ( ':' ) [ 1 ] . split ( ) [ 0 ] if current_dc == get_unit_hostname ( ) : return True elif current_dc == 'NONE' : raise CRMDCNotFound ( 'Current DC: NONE' ) return False | Determine leadership by querying the pacemaker Designated Controller |
56,441 | def is_crm_leader ( resource , retry = False ) : if resource == DC_RESOURCE_NAME : return is_crm_dc ( ) cmd = [ 'crm' , 'resource' , 'show' , resource ] try : status = subprocess . check_output ( cmd , stderr = subprocess . STDOUT ) if not isinstance ( status , six . text_type ) : status = six . text_type ( status , "utf-8" ) except subprocess . CalledProcessError : status = None if status and get_unit_hostname ( ) in status : return True if status and "resource %s is NOT running" % ( resource ) in status : raise CRMResourceNotFound ( "CRM resource %s not found" % ( resource ) ) return False | Returns True if the charm calling this is the elected corosync leader as returned by calling the external crm command . |
56,442 | def peer_ips ( peer_relation = 'cluster' , addr_key = 'private-address' ) : peers = { } for r_id in relation_ids ( peer_relation ) : for unit in relation_list ( r_id ) : peers [ unit ] = relation_get ( addr_key , rid = r_id , unit = unit ) return peers | Return a dict of peers and their private - address |
56,443 | def oldest_peer ( peers ) : local_unit_no = int ( os . getenv ( 'JUJU_UNIT_NAME' ) . split ( '/' ) [ 1 ] ) for peer in peers : remote_unit_no = int ( peer . split ( '/' ) [ 1 ] ) if remote_unit_no < local_unit_no : return False return True | Determines who the oldest peer is by comparing unit numbers . |
56,444 | def canonical_url ( configs , vip_setting = 'vip' ) : scheme = 'http' if 'https' in configs . complete_contexts ( ) : scheme = 'https' if is_clustered ( ) : addr = config_get ( vip_setting ) else : addr = unit_get ( 'private-address' ) return '%s://%s' % ( scheme , addr ) | Returns the correct HTTP URL to this host given the state of HTTPS configuration and hacluster . |
56,445 | def distributed_wait ( modulo = None , wait = None , operation_name = 'operation' ) : if modulo is None : modulo = config_get ( 'modulo-nodes' ) or 3 if wait is None : wait = config_get ( 'known-wait' ) or 30 if juju_is_leader ( ) : calculated_wait = 0 else : calculated_wait = modulo_distribution ( modulo = modulo , wait = wait , non_zero_wait = True ) msg = "Waiting {} seconds for {} ..." . format ( calculated_wait , operation_name ) log ( msg , DEBUG ) status_set ( 'maintenance' , msg ) time . sleep ( calculated_wait ) | Distribute operations by waiting based on modulo_distribution |
56,446 | def update ( fatal = False ) : cmd = [ 'yum' , '--assumeyes' , 'update' ] log ( "Update with fatal: {}" . format ( fatal ) ) _run_yum_command ( cmd , fatal ) | Update local yum cache . |
56,447 | def yum_search ( packages ) : output = { } cmd = [ 'yum' , 'search' ] if isinstance ( packages , six . string_types ) : cmd . append ( packages ) else : cmd . extend ( packages ) log ( "Searching for {}" . format ( packages ) ) result = subprocess . check_output ( cmd ) for package in list ( packages ) : output [ package ] = package in result return output | Search for a package . |
56,448 | def _run_yum_command ( cmd , fatal = False ) : env = os . environ . copy ( ) if fatal : retry_count = 0 result = None while result is None or result == YUM_NO_LOCK : try : result = subprocess . check_call ( cmd , env = env ) except subprocess . CalledProcessError as e : retry_count = retry_count + 1 if retry_count > YUM_NO_LOCK_RETRY_COUNT : raise result = e . returncode log ( "Couldn't acquire YUM lock. Will retry in {} seconds." "" . format ( YUM_NO_LOCK_RETRY_DELAY ) ) time . sleep ( YUM_NO_LOCK_RETRY_DELAY ) else : subprocess . call ( cmd , env = env ) | Run an YUM command . |
56,449 | def py ( self , output ) : import pprint pprint . pprint ( output , stream = self . outfile ) | Output data as a nicely - formatted python data structure |
56,450 | def csv ( self , output ) : import csv csvwriter = csv . writer ( self . outfile ) csvwriter . writerows ( output ) | Output data as excel - compatible CSV |
56,451 | def tab ( self , output ) : import csv csvwriter = csv . writer ( self . outfile , dialect = csv . excel_tab ) csvwriter . writerows ( output ) | Output data in excel - compatible tab - delimited format |
56,452 | def subcommand ( self , command_name = None ) : def wrapper ( decorated ) : cmd_name = command_name or decorated . __name__ subparser = self . subparsers . add_parser ( cmd_name , description = decorated . __doc__ ) for args , kwargs in describe_arguments ( decorated ) : subparser . add_argument ( * args , ** kwargs ) subparser . set_defaults ( func = decorated ) return decorated return wrapper | Decorate a function as a subcommand . Use its arguments as the command - line arguments |
56,453 | def run ( self ) : "Run cli, processing arguments and executing subcommands." arguments = self . argument_parser . parse_args ( ) argspec = inspect . getargspec ( arguments . func ) vargs = [ ] for arg in argspec . args : vargs . append ( getattr ( arguments , arg ) ) if argspec . varargs : vargs . extend ( getattr ( arguments , argspec . varargs ) ) output = arguments . func ( * vargs ) if getattr ( arguments . func , '_cli_test_command' , False ) : self . exit_code = 0 if output else 1 output = '' if getattr ( arguments . func , '_cli_no_output' , False ) : output = '' self . formatter . format_output ( output , arguments . format ) if charmhelpers . core . unitdata . _KV : charmhelpers . core . unitdata . _KV . flush ( ) | Run cli processing arguments and executing subcommands . |
56,454 | def ssh_authorized_peers ( peer_interface , user , group = None , ensure_local_user = False ) : if ensure_local_user : ensure_user ( user , group ) priv_key , pub_key = get_keypair ( user ) hook = hook_name ( ) if hook == '%s-relation-joined' % peer_interface : relation_set ( ssh_pub_key = pub_key ) elif hook == '%s-relation-changed' % peer_interface or hook == '%s-relation-departed' % peer_interface : hosts = [ ] keys = [ ] for r_id in relation_ids ( peer_interface ) : for unit in related_units ( r_id ) : ssh_pub_key = relation_get ( 'ssh_pub_key' , rid = r_id , unit = unit ) priv_addr = relation_get ( 'private-address' , rid = r_id , unit = unit ) if ssh_pub_key : keys . append ( ssh_pub_key ) hosts . append ( priv_addr ) else : log ( 'ssh_authorized_peers(): ssh_pub_key ' 'missing for unit %s, skipping.' % unit ) write_authorized_keys ( user , keys ) write_known_hosts ( user , hosts ) authed_hosts = ':' . join ( hosts ) relation_set ( ssh_authorized_hosts = authed_hosts ) | Main setup function should be called from both peer - changed and - joined hooks with the same parameters . |
56,455 | def collect_authed_hosts ( peer_interface ) : hosts = [ ] for r_id in ( relation_ids ( peer_interface ) or [ ] ) : for unit in related_units ( r_id ) : private_addr = relation_get ( 'private-address' , rid = r_id , unit = unit ) authed_hosts = relation_get ( 'ssh_authorized_hosts' , rid = r_id , unit = unit ) if not authed_hosts : log ( 'Peer %s has not authorized *any* hosts yet, skipping.' % ( unit ) , level = INFO ) continue if unit_private_ip ( ) in authed_hosts . split ( ':' ) : hosts . append ( private_addr ) else : log ( 'Peer %s has not authorized *this* host yet, skipping.' % ( unit ) , level = INFO ) return hosts | Iterate through the units on peer interface to find all that have the calling host in its authorized hosts list |
56,456 | def sync_path_to_host ( path , host , user , verbose = False , cmd = None , gid = None , fatal = False ) : cmd = cmd or copy ( BASE_CMD ) if not verbose : cmd . append ( '-silent' ) if path . endswith ( '/' ) : path = path [ : ( len ( path ) - 1 ) ] cmd = cmd + [ path , 'ssh://%s@%s/%s' % ( user , host , path ) ] try : log ( 'Syncing local path %s to %s@%s:%s' % ( path , user , host , path ) ) run_as_user ( user , cmd , gid ) except Exception : log ( 'Error syncing remote files' ) if fatal : raise | Sync path to an specific peer host |
56,457 | def sync_to_peer ( host , user , paths = None , verbose = False , cmd = None , gid = None , fatal = False ) : if paths : for p in paths : sync_path_to_host ( p , host , user , verbose , cmd , gid , fatal ) | Sync paths to an specific peer host |
56,458 | def sync_to_peers ( peer_interface , user , paths = None , verbose = False , cmd = None , gid = None , fatal = False ) : if paths : for host in collect_authed_hosts ( peer_interface ) : sync_to_peer ( host , user , paths , verbose , cmd , gid , fatal ) | Sync all hosts to an specific path |
56,459 | def parse_options ( given , available ) : for key , value in sorted ( given . items ( ) ) : if not value : continue if key in available : yield "--{0}={1}" . format ( key , value ) | Given a set of options check if available |
56,460 | def pip_install_requirements ( requirements , constraints = None , ** options ) : command = [ "install" ] available_options = ( 'proxy' , 'src' , 'log' , ) for option in parse_options ( options , available_options ) : command . append ( option ) command . append ( "-r {0}" . format ( requirements ) ) if constraints : command . append ( "-c {0}" . format ( constraints ) ) log ( "Installing from file: {} with constraints {} " "and options: {}" . format ( requirements , constraints , command ) ) else : log ( "Installing from file: {} with options: {}" . format ( requirements , command ) ) pip_execute ( command ) | Install a requirements file . |
56,461 | def pip_install ( package , fatal = False , upgrade = False , venv = None , constraints = None , ** options ) : if venv : venv_python = os . path . join ( venv , 'bin/pip' ) command = [ venv_python , "install" ] else : command = [ "install" ] available_options = ( 'proxy' , 'src' , 'log' , 'index-url' , ) for option in parse_options ( options , available_options ) : command . append ( option ) if upgrade : command . append ( '--upgrade' ) if constraints : command . extend ( [ '-c' , constraints ] ) if isinstance ( package , list ) : command . extend ( package ) else : command . append ( package ) log ( "Installing {} package with options: {}" . format ( package , command ) ) if venv : subprocess . check_call ( command ) else : pip_execute ( command ) | Install a python package |
56,462 | def pip_uninstall ( package , ** options ) : command = [ "uninstall" , "-q" , "-y" ] available_options = ( 'proxy' , 'log' , ) for option in parse_options ( options , available_options ) : command . append ( option ) if isinstance ( package , list ) : command . extend ( package ) else : command . append ( package ) log ( "Uninstalling {} package with options: {}" . format ( package , command ) ) pip_execute ( command ) | Uninstall a python package |
56,463 | def pip_create_virtualenv ( path = None ) : if six . PY2 : apt_install ( 'python-virtualenv' ) else : apt_install ( 'python3-virtualenv' ) if path : venv_path = path else : venv_path = os . path . join ( charm_dir ( ) , 'venv' ) if not os . path . exists ( venv_path ) : subprocess . check_call ( [ 'virtualenv' , venv_path ] ) | Create an isolated Python environment . |
56,464 | def configure_sources ( update = False , sources_var = 'install_sources' , keys_var = 'install_keys' ) : sources = safe_load ( ( config ( sources_var ) or '' ) . strip ( ) ) or [ ] keys = safe_load ( ( config ( keys_var ) or '' ) . strip ( ) ) or None if isinstance ( sources , six . string_types ) : sources = [ sources ] if keys is None : for source in sources : add_source ( source , None ) else : if isinstance ( keys , six . string_types ) : keys = [ keys ] if len ( sources ) != len ( keys ) : raise SourceConfigError ( 'Install sources and keys lists are different lengths' ) for source , key in zip ( sources , keys ) : add_source ( source , key ) if update : _fetch_update ( fatal = True ) | Configure multiple sources from charm configuration . |
56,465 | def install_remote ( source , * args , ** kwargs ) : handlers = [ h for h in plugins ( ) if h . can_handle ( source ) is True ] for handler in handlers : try : return handler . install ( source , * args , ** kwargs ) except UnhandledSource as e : log ( 'Install source attempt unsuccessful: {}' . format ( e ) , level = 'WARNING' ) raise UnhandledSource ( "No handler found for source {}" . format ( source ) ) | Install a file tree from a remote source . |
56,466 | def base_url ( self , url ) : parts = list ( self . parse_url ( url ) ) parts [ 4 : ] = [ '' for i in parts [ 4 : ] ] return urlunparse ( parts ) | Return url without querystring or fragment |
56,467 | def is_block_device ( path ) : if not os . path . exists ( path ) : return False return S_ISBLK ( os . stat ( path ) . st_mode ) | Confirm device at path is a valid block device node . |
56,468 | def zap_disk ( block_device ) : call ( [ 'sgdisk' , '--zap-all' , '--' , block_device ] ) call ( [ 'sgdisk' , '--clear' , '--mbrtogpt' , '--' , block_device ] ) dev_end = check_output ( [ 'blockdev' , '--getsz' , block_device ] ) . decode ( 'UTF-8' ) gpt_end = int ( dev_end . split ( ) [ 0 ] ) - 100 check_call ( [ 'dd' , 'if=/dev/zero' , 'of=%s' % ( block_device ) , 'bs=1M' , 'count=1' ] ) check_call ( [ 'dd' , 'if=/dev/zero' , 'of=%s' % ( block_device ) , 'bs=512' , 'count=100' , 'seek=%s' % ( gpt_end ) ] ) | Clear a block device of partition table . Relies on sgdisk which is installed as pat of the gdisk package in Ubuntu . |
56,469 | def is_device_mounted ( device ) : try : out = check_output ( [ 'lsblk' , '-P' , device ] ) . decode ( 'UTF-8' ) except Exception : return False return bool ( re . search ( r'MOUNTPOINT=".+"' , out ) ) | Given a device path return True if that device is mounted and False if it isn t . |
56,470 | def mkfs_xfs ( device , force = False ) : cmd = [ 'mkfs.xfs' ] if force : cmd . append ( "-f" ) cmd += [ '-i' , 'size=1024' , device ] check_call ( cmd ) | Format device with XFS filesystem . |
56,471 | def wait_for_machine ( num_machines = 1 , timeout = 300 ) : if get_machine_data ( ) [ 0 ] [ 'dns-name' ] == 'localhost' : return 1 , 0 start_time = time . time ( ) while True : machine_data = get_machine_data ( ) non_zookeeper_machines = [ machine_data [ key ] for key in list ( machine_data . keys ( ) ) [ 1 : ] ] if len ( non_zookeeper_machines ) >= num_machines : all_machines_running = True for machine in non_zookeeper_machines : if machine . get ( 'instance-state' ) != 'running' : all_machines_running = False break if all_machines_running : break if time . time ( ) - start_time >= timeout : raise RuntimeError ( 'timeout waiting for service to start' ) time . sleep ( SLEEP_AMOUNT ) return num_machines , time . time ( ) - start_time | Wait timeout seconds for num_machines machines to come up . |
56,472 | def wait_for_unit ( service_name , timeout = 480 ) : wait_for_machine ( num_machines = 1 ) start_time = time . time ( ) while True : state = unit_info ( service_name , 'agent-state' ) if 'error' in state or state == 'started' : break if time . time ( ) - start_time >= timeout : raise RuntimeError ( 'timeout waiting for service to start' ) time . sleep ( SLEEP_AMOUNT ) if state != 'started' : raise RuntimeError ( 'unit did not start, agent-state: ' + state ) | Wait timeout seconds for a given service name to come up . |
56,473 | def wait_for_relation ( service_name , relation_name , timeout = 120 ) : start_time = time . time ( ) while True : relation = unit_info ( service_name , 'relations' ) . get ( relation_name ) if relation is not None and relation [ 'state' ] == 'up' : break if time . time ( ) - start_time >= timeout : raise RuntimeError ( 'timeout waiting for relation to be up' ) time . sleep ( SLEEP_AMOUNT ) | Wait timeout seconds for a given relation to come up . |
56,474 | def service_available ( service_name ) : try : subprocess . check_output ( [ 'service' , service_name , 'status' ] , stderr = subprocess . STDOUT ) . decode ( 'UTF-8' ) except subprocess . CalledProcessError as e : return b'unrecognized service' not in e . output else : return True | Determine whether a system service is available |
56,475 | def install_salt_support ( from_ppa = True ) : if from_ppa : subprocess . check_call ( [ '/usr/bin/add-apt-repository' , '--yes' , 'ppa:saltstack/salt' , ] ) subprocess . check_call ( [ '/usr/bin/apt-get' , 'update' ] ) charmhelpers . fetch . apt_install ( 'salt-common' ) | Installs the salt - minion helper for machine state . |
56,476 | def update_machine_state ( state_path ) : charmhelpers . contrib . templating . contexts . juju_state_to_yaml ( salt_grains_path ) subprocess . check_call ( [ 'salt-call' , '--local' , 'state.template' , state_path , ] ) | Update the machine state using the provided state declaration . |
56,477 | def pool_exists ( service , name ) : try : out = check_output ( [ 'rados' , '--id' , service , 'lspools' ] ) if six . PY3 : out = out . decode ( 'UTF-8' ) except CalledProcessError : return False return name in out . split ( ) | Check to see if a RADOS pool already exists . |
56,478 | def install ( ) : ceph_dir = "/etc/ceph" if not os . path . exists ( ceph_dir ) : os . mkdir ( ceph_dir ) apt_install ( 'ceph-common' , fatal = True ) | Basic Ceph client installation . |
56,479 | def rbd_exists ( service , pool , rbd_img ) : try : out = check_output ( [ 'rbd' , 'list' , '--id' , service , '--pool' , pool ] ) if six . PY3 : out = out . decode ( 'UTF-8' ) except CalledProcessError : return False return rbd_img in out | Check to see if a RADOS block device exists . |
56,480 | def create_rbd_image ( service , pool , image , sizemb ) : cmd = [ 'rbd' , 'create' , image , '--size' , str ( sizemb ) , '--id' , service , '--pool' , pool ] check_call ( cmd ) | Create a new RADOS block device . |
56,481 | def set_app_name_for_pool ( client , pool , name ) : if cmp_pkgrevno ( 'ceph-common' , '12.0.0' ) >= 0 : cmd = [ 'ceph' , '--id' , client , 'osd' , 'pool' , 'application' , 'enable' , pool , name ] check_call ( cmd ) | Calls osd pool application enable for the specified pool name |
56,482 | def create_pool ( service , name , replicas = 3 , pg_num = None ) : if pool_exists ( service , name ) : log ( "Ceph pool {} already exists, skipping creation" . format ( name ) , level = WARNING ) return if not pg_num : osds = get_osds ( service ) if osds : pg_num = ( len ( osds ) * 100 // replicas ) else : pg_num = 200 cmd = [ 'ceph' , '--id' , service , 'osd' , 'pool' , 'create' , name , str ( pg_num ) ] check_call ( cmd ) update_pool ( service , name , settings = { 'size' : str ( replicas ) } ) | Create a new RADOS pool . |
56,483 | def add_key ( service , key ) : keyring = _keyring_path ( service ) if os . path . exists ( keyring ) : with open ( keyring , 'r' ) as ring : if key in ring . read ( ) : log ( 'Ceph keyring exists at %s and has not changed.' % keyring , level = DEBUG ) return log ( 'Updating existing keyring %s.' % keyring , level = DEBUG ) cmd = [ 'ceph-authtool' , keyring , '--create-keyring' , '--name=client.{}' . format ( service ) , '--add-key={}' . format ( key ) ] check_call ( cmd ) log ( 'Created new ceph keyring at %s.' % keyring , level = DEBUG ) | Add a key to a keyring . |
56,484 | def delete_keyring ( service ) : keyring = _keyring_path ( service ) if not os . path . exists ( keyring ) : log ( 'Keyring does not exist at %s' % keyring , level = WARNING ) return os . remove ( keyring ) log ( 'Deleted ring at %s.' % keyring , level = INFO ) | Delete an existing Ceph keyring . |
56,485 | def create_key_file ( service , key ) : keyfile = _keyfile_path ( service ) if os . path . exists ( keyfile ) : log ( 'Keyfile exists at %s.' % keyfile , level = WARNING ) return with open ( keyfile , 'w' ) as fd : fd . write ( key ) log ( 'Created new keyfile at %s.' % keyfile , level = INFO ) | Create a file containing key . |
56,486 | def get_ceph_nodes ( relation = 'ceph' ) : hosts = [ ] for r_id in relation_ids ( relation ) : for unit in related_units ( r_id ) : hosts . append ( relation_get ( 'private-address' , unit = unit , rid = r_id ) ) return hosts | Query named relation to determine current nodes . |
56,487 | def configure ( service , key , auth , use_syslog ) : add_key ( service , key ) create_key_file ( service , key ) hosts = get_ceph_nodes ( ) with open ( '/etc/ceph/ceph.conf' , 'w' ) as ceph_conf : ceph_conf . write ( CEPH_CONF . format ( auth = auth , keyring = _keyring_path ( service ) , mon_hosts = "," . join ( map ( str , hosts ) ) , use_syslog = use_syslog ) ) modprobe ( 'rbd' ) | Perform basic configuration of Ceph . |
56,488 | def image_mapped ( name ) : try : out = check_output ( [ 'rbd' , 'showmapped' ] ) if six . PY3 : out = out . decode ( 'UTF-8' ) except CalledProcessError : return False return name in out | Determine whether a RADOS block device is mapped locally . |
56,489 | def map_block_storage ( service , pool , image ) : cmd = [ 'rbd' , 'map' , '{}/{}' . format ( pool , image ) , '--user' , service , '--secret' , _keyfile_path ( service ) , ] check_call ( cmd ) | Map a RADOS block device for local use . |
56,490 | def make_filesystem ( blk_device , fstype = 'ext4' , timeout = 10 ) : count = 0 e_noent = os . errno . ENOENT while not os . path . exists ( blk_device ) : if count >= timeout : log ( 'Gave up waiting on block device %s' % blk_device , level = ERROR ) raise IOError ( e_noent , os . strerror ( e_noent ) , blk_device ) log ( 'Waiting for block device %s to appear' % blk_device , level = DEBUG ) count += 1 time . sleep ( 1 ) else : log ( 'Formatting block device %s as filesystem %s.' % ( blk_device , fstype ) , level = INFO ) check_call ( [ 'mkfs' , '-t' , fstype , blk_device ] ) | Make a new filesystem on the specified block device . |
56,491 | def place_data_on_block_device ( blk_device , data_src_dst ) : mount ( blk_device , '/mnt' ) copy_files ( data_src_dst , '/mnt' ) umount ( '/mnt' ) _dir = os . stat ( data_src_dst ) uid = _dir . st_uid gid = _dir . st_gid mount ( blk_device , data_src_dst , persist = True ) os . chown ( data_src_dst , uid , gid ) | Migrate data in data_src_dst to blk_device and then remount . |
56,492 | def ensure_ceph_keyring ( service , user = None , group = None , relation = 'ceph' , key = None ) : if not key : for rid in relation_ids ( relation ) : for unit in related_units ( rid ) : key = relation_get ( 'key' , rid = rid , unit = unit ) if key : break if not key : return False add_key ( service = service , key = key ) keyring = _keyring_path ( service ) if user and group : check_call ( [ 'chown' , '%s.%s' % ( user , group ) , keyring ] ) return True | Ensures a ceph keyring is created for a named service and optionally ensures user and group ownership . |
56,493 | def get_previous_request ( rid ) : request = None broker_req = relation_get ( attribute = 'broker_req' , rid = rid , unit = local_unit ( ) ) if broker_req : request_data = json . loads ( broker_req ) request = CephBrokerRq ( api_version = request_data [ 'api-version' ] , request_id = request_data [ 'request-id' ] ) request . set_ops ( request_data [ 'ops' ] ) return request | Return the last ceph broker request sent on a given relation |
56,494 | def get_request_states ( request , relation = 'ceph' ) : complete = [ ] requests = { } for rid in relation_ids ( relation ) : complete = False previous_request = get_previous_request ( rid ) if request == previous_request : sent = True complete = is_request_complete_for_rid ( previous_request , rid ) else : sent = False complete = False requests [ rid ] = { 'sent' : sent , 'complete' : complete , } return requests | Return a dict of requests per relation id with their corresponding completion state . |
56,495 | def is_request_sent ( request , relation = 'ceph' ) : states = get_request_states ( request , relation = relation ) for rid in states . keys ( ) : if not states [ rid ] [ 'sent' ] : return False return True | Check to see if a functionally equivalent request has already been sent |
56,496 | def is_request_complete_for_rid ( request , rid ) : broker_key = get_broker_rsp_key ( ) for unit in related_units ( rid ) : rdata = relation_get ( rid = rid , unit = unit ) if rdata . get ( broker_key ) : rsp = CephBrokerRsp ( rdata . get ( broker_key ) ) if rsp . request_id == request . request_id : if not rsp . exit_code : return True else : if rdata . get ( 'broker_rsp' ) : request_data = json . loads ( rdata [ 'broker_rsp' ] ) if request_data . get ( 'request-id' ) : log ( 'Ignoring legacy broker_rsp without unit key as remote ' 'service supports unit specific replies' , level = DEBUG ) else : log ( 'Using legacy broker_rsp as remote service does not ' 'supports unit specific replies' , level = DEBUG ) rsp = CephBrokerRsp ( rdata [ 'broker_rsp' ] ) if not rsp . exit_code : return True return False | Check if a given request has been completed on the given relation |
56,497 | def send_request_if_needed ( request , relation = 'ceph' ) : if is_request_sent ( request , relation = relation ) : log ( 'Request already sent but not complete, not sending new request' , level = DEBUG ) else : for rid in relation_ids ( relation ) : log ( 'Sending request {}' . format ( request . request_id ) , level = DEBUG ) relation_set ( relation_id = rid , broker_req = request . request ) | Send broker request if an equivalent request has not already been sent |
56,498 | def is_broker_action_done ( action , rid = None , unit = None ) : rdata = relation_get ( rid , unit ) or { } broker_rsp = rdata . get ( get_broker_rsp_key ( ) ) if not broker_rsp : return False rsp = CephBrokerRsp ( broker_rsp ) unit_name = local_unit ( ) . partition ( '/' ) [ 2 ] key = "unit_{}_ceph_broker_action.{}" . format ( unit_name , action ) kvstore = kv ( ) val = kvstore . get ( key = key ) if val and val == rsp . request_id : return True return False | Check whether broker action has completed yet . |
56,499 | def mark_broker_action_done ( action , rid = None , unit = None ) : rdata = relation_get ( rid , unit ) or { } broker_rsp = rdata . get ( get_broker_rsp_key ( ) ) if not broker_rsp : return rsp = CephBrokerRsp ( broker_rsp ) unit_name = local_unit ( ) . partition ( '/' ) [ 2 ] key = "unit_{}_ceph_broker_action.{}" . format ( unit_name , action ) kvstore = kv ( ) kvstore . set ( key = key , value = rsp . request_id ) kvstore . flush ( ) | Mark action as having been completed . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.