idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
56,700
def get_bcache_fs():
    """Return all cache sets.

    :returns: set of Bcache objects, or an empty list when the bcache
        sysfs tree does not exist.
    """
    cachesetroot = "{}/fs/bcache".format(SYSFS)
    try:
        dirs = os.listdir(cachesetroot)
    except OSError:
        log("No bcache fs found")
        return []
    # Set comprehension instead of set([list-comp]): same result without
    # building a throwaway intermediate list.  'register*' entries are
    # control files, not cache sets.
    return {Bcache('{}/{}'.format(cachesetroot, d))
            for d in dirs if not d.startswith('register')}
Return all cache sets
56,701
def get_stats_action(cachespec, interval):
    """Action for getting bcache statistics for a given cachespec.

    Cachespec can either be a device name, e.g. 'sdb', which will
    retrieve cache stats for that device, or 'global', which will
    retrieve stats for all cache sets.
    """
    if cachespec == 'global':
        caches = get_bcache_fs()
    else:
        caches = [Bcache.fromdevice(cachespec)]
    stats_by_path = {c.cachepath: c.get_stats(interval) for c in caches}
    return json.dumps(stats_by_path, indent=4, separators=(',', ': '))
Action for getting bcache statistics for a given cachespec . Cachespec can either be a device name eg . sdb which will retrieve cache stats for the given device or global which will retrieve stats for all cachesets
56,702
def get_stats(self, interval):
    """Get cache stats.

    :param interval: str. Stats interval directory suffix (e.g. 'hour',
        'day'), selecting ``stats_<interval>`` under the cache path.
    :returns: dict mapping stat file name to its stripped contents.
    """
    intervaldir = 'stats_{}'.format(interval)
    path = "{}/{}".format(self.cachepath, intervaldir)
    out = dict()
    for elem in os.listdir(path):
        # BUG FIX: the original leaked file handles by calling
        # open(...).read(); use a context manager to close promptly.
        with open('{}/{}'.format(path, elem)) as f:
            out[elem] = f.read().strip()
    return out
Get cache stats
56,703
def update_dns_ha_resource_params(resources, resource_params,
                                  relation_id=None,
                                  crm_ocf='ocf:maas:dns'):
    """Configure DNS-HA resources based on provided configuration and
    update resource dictionaries for the HA relation.

    Delegates to update_hacluster_dns_ha() to build the hostname
    resources, merges the result into the caller-supplied ``resources``
    and ``resource_params`` dicts (mutated in place), then pushes the
    hostname group to the ha relation via relation_set().
    """
    _relation_data = {'resources': {}, 'resource_params': {}}
    update_hacluster_dns_ha(charm_name(), _relation_data, crm_ocf)
    resources.update(_relation_data['resources'])
    resource_params.update(_relation_data['resource_params'])
    # update_hacluster_dns_ha() raises on an empty hostname group, so
    # 'groups' is present whenever this line is reached.
    relation_set(relation_id=relation_id, groups=_relation_data['groups'])
Configure DNS - HA resources based on provided configuration and update resource dictionaries for the HA relation .
56,704
def expect_ha():
    """Determine if the unit expects to be in HA.

    True when goal-state shows expected 'ha' relation units, otherwise
    whatever truthy value the 'vip' or 'dns-ha' config options carry.
    """
    try:
        ha_units = list(expected_related_units(reltype='ha'))
    except (NotImplementedError, KeyError):
        # goal-state unsupported, or no 'ha' relation in goal-state
        ha_units = []
    if ha_units:
        return True
    return config('vip') or config('dns-ha')
Determine if the unit expects to be in HA
56,705
def generate_ha_relation_data(service, extra_settings=None):
    """Generate relation data for ha relation.

    Builds the haproxy resource plus either DNS-HA or VIP resources
    (depending on the 'dns-ha' config option) and JSON-encodes each
    value under a ``json_``-prefixed key for the hacluster relation.

    :param service: str. Base name used in resource identifiers.
    :param extra_settings: dict. Optional extra relation data; merged
        into (and taking precedence within) matching top-level keys.
    :returns: dict of JSON-encoded relation settings.
    """
    _haproxy_res = 'res_{}_haproxy'.format(service)
    _relation_data = {
        'resources': {_haproxy_res: 'lsb:haproxy', },
        'resource_params': {_haproxy_res: 'op monitor interval="5s"'},
        'init_services': {_haproxy_res: 'haproxy'},
        'clones': {'cl_{}_haproxy'.format(service): _haproxy_res},
    }
    if extra_settings:
        for k, v in extra_settings.items():
            if _relation_data.get(k):
                _relation_data[k].update(v)
            else:
                _relation_data[k] = v
    if config('dns-ha'):
        update_hacluster_dns_ha(service, _relation_data)
    else:
        update_hacluster_vip(service, _relation_data)
    # Drop empty values and JSON-encode the rest for the relation.
    return {
        'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
        for k, v in _relation_data.items() if v}
Generate relation data for ha relation
56,706
def update_hacluster_dns_ha(service, relation_data,
                            crm_ocf='ocf:maas:dns'):
    """Configure DNS-HA resources based on provided configuration.

    For each os-*-hostname config option that is set, add a DNS resource
    for the matching endpoint type to ``relation_data`` (mutated in
    place) and collect the resource names into a hostname group.

    :param service: str. Base name used in resource identifiers.
    :param relation_data: dict. Must contain 'resources' and
        'resource_params' keys; a 'groups' key is added.
    :param crm_ocf: str. CRM OCF resource agent to use for DNS entries.
    :raises DNSHAException: if a setting name cannot be parsed, or no
        hostname settings were provided at all.
    """
    assert_charm_supports_dns_ha()
    settings = ['os-admin-hostname', 'os-internal-hostname',
                'os-public-hostname', 'os-access-hostname']
    hostname_group = []
    for setting in settings:
        hostname = config(setting)
        if hostname is None:
            log('DNS HA: Hostname setting {} is None. Ignoring.'
                ''.format(setting),
                DEBUG)
            continue
        m = re.search('os-(.+?)-hostname', setting)
        if m:
            endpoint_type = m.group(1)
            # resolve_address() expects 'int' rather than 'internal'
            if endpoint_type == 'internal':
                endpoint_type = 'int'
        else:
            msg = ('Unexpected DNS hostname setting: {}. '
                   'Cannot determine endpoint_type name'
                   ''.format(setting))
            status_set('blocked', msg)
            raise DNSHAException(msg)
        hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type)
        if hostname_key in hostname_group:
            log('DNS HA: Resource {}: {} already exists in '
                'hostname group - skipping'.format(hostname_key, hostname),
                DEBUG)
            continue
        hostname_group.append(hostname_key)
        relation_data['resources'][hostname_key] = crm_ocf
        relation_data['resource_params'][hostname_key] = (
            'params fqdn="{}" ip_address="{}"'
            .format(hostname,
                    resolve_address(endpoint_type=endpoint_type,
                                    override=False)))
    if len(hostname_group) >= 1:
        log('DNS HA: Hostname group is set with {} as members. '
            'Informing the ha relation'.format(' '.join(hostname_group)),
            DEBUG)
        relation_data['groups'] = {
            DNSHA_GROUP_NAME.format(service=service):
                ' '.join(hostname_group)}
    else:
        msg = 'DNS HA: Hostname group has no members.'
        status_set('blocked', msg)
        raise DNSHAException(msg)
Configure DNS - HA resources based on provided configuration
56,707
def get_vip_settings(vip):
    """Calculate which nic is on the correct network for the given vip.

    Falls back to the 'vip_iface' / 'vip_cidr' charm config options when
    discovery fails for either value.

    :param vip: str. The virtual IP to look up.
    :returns: (iface, netmask, fallback) tuple; ``fallback`` is True when
        either value came from charm config rather than discovery.
    """
    discovered_iface = get_iface_for_address(vip)
    discovered_netmask = get_netmask_for_address(vip)
    fallback = discovered_iface is None or discovered_netmask is None
    iface = (discovered_iface if discovered_iface is not None
             else config('vip_iface'))
    netmask = (discovered_netmask if discovered_netmask is not None
               else config('vip_cidr'))
    return iface, netmask, fallback
Calculate which nic is on the correct network for the given vip .
56,708
def update_hacluster_vip(service, relation_data):
    """Configure VIP resources based on provided configuration.

    Adds an IPaddr2 (IPv4) or IPv6addr resource per configured VIP to
    ``relation_data`` (mutated in place) and collects them into a VIP
    group.  Legacy interface-named resources are scheduled for deletion
    in favour of VIP-hash-named ones.

    :param service: str. Base name used in resource identifiers.
    :param relation_data: dict. Must contain 'resources' and
        'resource_params'; 'delete_resources' and 'groups' may be added.
    """
    cluster_config = get_hacluster_config()
    vip_group = []
    vips_to_delete = []
    for vip in cluster_config['vip'].split():
        if is_ipv6(vip):
            res_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'
        iface, netmask, fallback = get_vip_settings(vip)
        vip_monitoring = 'op monitor depth="0" timeout="20s" interval="10s"'
        if iface is not None:
            # Old-style resource names encoded the interface; mark them
            # for deletion and use a name derived from the VIP instead.
            vip_key = 'res_{}_{}_vip'.format(service, iface)
            if vip_key in vips_to_delete:
                vip_key = '{}_{}'.format(vip_key, vip_params)
            vips_to_delete.append(vip_key)
            vip_key = 'res_{}_{}_vip'.format(
                service,
                hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
            relation_data['resources'][vip_key] = res_vip
            if fallback:
                # Config-derived nic/netmask must be passed explicitly.
                relation_data['resource_params'][vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
                    'nic="{iface}" {vip_monitoring}'.format(
                        ip=vip_params,
                        vip=vip,
                        iface=iface,
                        netmask=netmask,
                        vip_monitoring=vip_monitoring))
            else:
                relation_data['resource_params'][vip_key] = (
                    'params {ip}="{vip}" {vip_monitoring}'.format(
                        ip=vip_params,
                        vip=vip,
                        vip_monitoring=vip_monitoring))
            vip_group.append(vip_key)
    if vips_to_delete:
        try:
            relation_data['delete_resources'].extend(vips_to_delete)
        except KeyError:
            relation_data['delete_resources'] = vips_to_delete
    if len(vip_group) >= 1:
        key = VIP_GROUP_NAME.format(service=service)
        try:
            relation_data['groups'][key] = ' '.join(vip_group)
        except KeyError:
            relation_data['groups'] = {key: ' '.join(vip_group)}
Configure VIP resources based on provided configuration
56,709
def _add_services(self, this_service, other_services):
    """Add services.

    Adds the current charm (must match the cwd's directory name) plus
    any other services to the deployment, defaulting units to 1.
    """
    if this_service['name'] != os.path.basename(os.getcwd()):
        s = this_service['name']
        msg = "The charm's root directory name needs to be {}".format(s)
        amulet.raise_status(amulet.FAIL, msg=msg)
    if 'units' not in this_service:
        this_service['units'] = 1
    self.d.add(this_service['name'], units=this_service['units'],
               constraints=this_service.get('constraints'),
               storage=this_service.get('storage'))
    for svc in other_services:
        if 'location' in svc:
            branch_location = svc['location']
        elif self.series:
            # BUG FIX: a stray trailing comma previously turned this
            # into a one-element tuple, which was then passed as the
            # charm URL.
            branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
        else:
            branch_location = None
        if 'units' not in svc:
            svc['units'] = 1
        self.d.add(svc['name'], charm=branch_location, units=svc['units'],
                   constraints=svc.get('constraints'),
                   storage=svc.get('storage'))
Add services .
56,710
def _add_relations(self, relations):
    """Add all of the relations for the services.

    :param relations: dict mapping one endpoint to the endpoint(s) it
        relates to; each pair is passed straight to deployer.relate().
    """
    for k, v in six.iteritems(relations):
        self.d.relate(k, v)
Add all of the relations for the services .
56,711
def _configure_services(self, configs):
    """Configure all of the services.

    :param configs: dict mapping service name to its config dict.
    """
    for service, config in six.iteritems(configs):
        self.d.configure(service, config)
Configure all of the services .
56,712
def _deploy(self):
    """Deploy environment and wait for all hooks to finish executing.

    The timeout defaults to 900s and can be overridden via the
    AMULET_SETUP_TIMEOUT environment variable.
    """
    timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
    try:
        self.d.setup(timeout=timeout)
        self.d.sentry.wait(timeout=timeout)
    except amulet.helpers.TimeoutError:
        amulet.raise_status(
            amulet.FAIL,
            msg="Deployment timed out ({}s)".format(timeout))
    # NOTE: removed a no-op ``except Exception: raise`` clause -- it
    # re-raised unconditionally and so had no effect.
Deploy environment and wait for all hooks to finish executing .
56,713
def _init_ca(self):
    """Generate the root CA's cert and key.

    Writes ca.cnf / signing.cnf from templates if absent, then creates a
    self-signed root certificate with openssl.

    :raises RuntimeError: if the CA cert or key already exists.
    """
    if not exists(path_join(self.ca_dir, 'ca.cnf')):
        with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh:
            fh.write(CA_CONF_TEMPLATE % (self.get_conf_variables()))
    if not exists(path_join(self.ca_dir, 'signing.cnf')):
        with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh:
            fh.write(SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))
    if exists(self.ca_cert) or exists(self.ca_key):
        raise RuntimeError("Initialized called when CA already exists")
    # -nodes: no passphrase on the key; -x509: emit a self-signed cert.
    cmd = ['openssl', 'req', '-config', self.ca_conf,
           '-x509', '-nodes', '-newkey', 'rsa',
           '-days', self.default_ca_expiry,
           '-keyout', self.ca_key,
           '-out', self.ca_cert, '-outform', 'PEM']
    output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    log("CA Init:\n %s" % output, level=DEBUG)
Generate the root CA's cert and key.
56,714
def format_endpoint(schema, addr, port, api_version):
    """Return a formatted keystone endpoint.

    :param schema: 'http' or 'https'
    :param addr: host of the keystone service
    :param port: port of the keystone service
    :param api_version: API version used to pick the URL suffix
    :returns: str. The formatted endpoint URL.
    """
    suffix = get_api_suffix(api_version)
    return '{}://{}:{}/{}/'.format(schema, addr, port, suffix)
Return a formatted keystone endpoint
56,715
def get_keystone_manager(endpoint, api_version, **kwargs):
    """Return a keystonemanager for the correct API version.

    :raises ValueError: when the api_version is not 2 or 3.
    """
    if api_version not in (2, 3):
        raise ValueError('No manager found for api version {}'.format(
            api_version))
    manager_cls = KeystoneManager2 if api_version == 2 else KeystoneManager3
    return manager_cls(endpoint, **kwargs)
Return a keystonemanager for the correct API version
56,716
def get_keystone_manager_from_identity_service_context():
    """Return a keystonemanager generated from an instance of
    charmhelpers.contrib.openstack.context.IdentityServiceContext.

    :raises ValueError: if the identity service context cannot be
        generated (e.g. the relation is not yet complete).
    """
    context = IdentityServiceContext()()
    if not context:
        msg = "Identity service context cannot be generated"
        log(msg, level=ERROR)
        raise ValueError(msg)
    endpoint = format_endpoint(context['service_protocol'],
                               context['service_host'],
                               context['service_port'],
                               context['api_version'])
    # Normalise the relation's api_version (may be int or "2.0" string).
    if context['api_version'] in (2, "2.0"):
        api_version = 2
    else:
        api_version = 3
    return get_keystone_manager(endpoint, api_version,
                                username=context['admin_user'],
                                password=context['admin_password'],
                                tenant_name=context['admin_tenant_name'])
Return a keystonemanager generated from an instance of charmhelpers.contrib.openstack.context.IdentityServiceContext
56,717
def resolve_service_id(self, service_name=None, service_type=None):
    """Find the service_id of a given service.

    :param service_name: str or None. Service name (case-insensitive).
    :param service_type: str or None. Service type to match.
    :returns: the id of the first matching service, or None.
    """
    services = [s._info for s in self.api.services.list()]
    # BUG FIX: service_name may legitimately be None when matching on
    # service_type alone; calling .lower() on None raised AttributeError.
    if service_name:
        service_name = service_name.lower()
    for s in services:
        name = s['name'].lower()
        if service_type and service_name:
            if (service_name == name and service_type == s['type']):
                return s['id']
        elif service_name and service_name == name:
            return s['id']
        elif service_type and service_type == s['type']:
            return s['id']
    return None
Find the service_id of a given service
56,718
def deactivate_lvm_volume_group(block_device):
    """Deactivate any volume group associated with an LVM physical volume.

    :param block_device: str. Full path to an LVM physical volume.
    """
    vg = list_lvm_volume_group(block_device)
    if not vg:
        # No VG on this device: nothing to deactivate.
        return
    check_call(['vgchange', '-an', vg])
Deactivate any volume group associated with an LVM physical volume.
56,719
def remove_lvm_physical_volume(block_device):
    """Remove LVM PV signatures from a given block device.

    :param block_device: str. Full path of block device to scrub.
    """
    p = Popen(['pvremove', '-ff', block_device], stdin=PIPE)
    # BUG FIX: without text mode, communicate() expects bytes; passing a
    # str raises TypeError on Python 3 (b'y\n' also works on Python 2).
    p.communicate(input=b'y\n')
Remove LVM PV signatures from a given block device .
56,720
def list_lvm_volume_group(block_device):
    """List LVM volume group associated with a given block device.

    Assumes the block device is a valid LVM PV.

    :param block_device: str. Full path of block device to inspect.
    :returns: str name of the associated volume group, or None.
    """
    vg = None
    for raw_line in check_output(['pvdisplay', block_device]).splitlines():
        line = raw_line.decode('UTF-8').strip()
        if line.startswith('VG Name'):
            # Value is everything after the two-word 'VG Name' label.
            vg = ' '.join(line.split()[2:])
    return vg
List LVM volume group associated with a given block device .
56,721
def list_logical_volumes(select_criteria=None, path_mode=False):
    """List logical volumes.

    :param select_criteria: str. Limit the list to volumes matching this
        criteria (see 'lvs -S' documentation).
    :param path_mode: bool. Return names in 'vg/lv' format.
    :returns: list of logical volume names.
    """
    display_attr = 'vg_name,lv_name' if path_mode else 'lv_name'
    cmd = ['lvs', '--options', display_attr, '--noheadings']
    if select_criteria:
        cmd += ['--select', select_criteria]
    volumes = []
    for line in check_output(cmd).decode('UTF-8').splitlines():
        if not line:
            continue
        cleaned = line.strip()
        volumes.append('/'.join(cleaned.split()) if path_mode else cleaned)
    return volumes
List logical volumes
56,722
def create_logical_volume(lv_name, volume_group, size=None):
    """Create a new logical volume in an existing volume group.

    :param lv_name: str. Name of the logical volume to create.
    :param volume_group: str. Volume group to create the volume in.
    :param size: str. Size to allocate (all free space if not supplied).
    :raises subprocess.CalledProcessError: if lvcreate fails.
    """
    if size:
        sizing_args = ['-L', '{}'.format(size)]
    else:
        sizing_args = ['-l', '100%FREE']
    check_call(['lvcreate', '--yes'] + sizing_args +
               ['-n', lv_name, volume_group])
Create a new logical volume in an existing volume group
56,723
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8',
           template_loader=None, config_template=None):
    """Render a template.

    Renders ``source`` (or the raw ``config_template`` string) with
    jinja2 and, unless ``target`` is None, writes the result to
    ``target`` with the given ownership and permissions.

    :param source: template file name looked up in templates_dir.
    :param target: output path, or None to only return the content.
    :param context: dict passed to the template.
    :param config_template: raw template string overriding ``source``.
    :returns: the rendered content string.
    """
    # jinja2 is installed on demand so charms need not declare it.
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        if sys.version_info.major == 2:
            apt_install('python-jinja2', fatal=True)
        else:
            apt_install('python3-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions
    if template_loader:
        template_env = Environment(loader=template_loader)
    else:
        if templates_dir is None:
            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
        template_env = Environment(loader=FileSystemLoader(templates_dir))
    if config_template is not None:
        template = template_env.from_string(config_template)
    else:
        try:
            # NOTE(review): this self-assignment is a no-op -- confirm
            # whether a transformation of ``source`` was intended here.
            source = source
            template = template_env.get_template(source)
        except exceptions.TemplateNotFound as e:
            hookenv.log('Could not load template %s from %s.' %
                        (source, templates_dir),
                        level=hookenv.ERROR)
            raise e
    content = template.render(context)
    if target is not None:
        target_dir = os.path.dirname(target)
        if not os.path.exists(target_dir):
            host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
        host.write_file(target, content.encode(encoding), owner, group,
                        perms)
    return content
Render a template .
56,724
def cached(func):
    """Cache return values for multiple executions of func + args.

    Results are memoised in the module-level ``cache`` dict, keyed on a
    JSON encoding of the function and its arguments.
    """
    @wraps(func)
    def memoised(*args, **kwargs):
        global cache
        cache_key = json.dumps((func, args, kwargs), sort_keys=True,
                               default=str)
        if cache_key not in cache:
            cache[cache_key] = func(*args, **kwargs)
        return cache[cache_key]
    # keep a handle on the undecorated function
    memoised._wrapped = func
    return memoised
Cache return values for multiple executions of func + args
56,725
def flush(key):
    """Flushes any entries from function cache where the
    key is found in the function+args."""
    stale = [entry for entry in cache if key in entry]
    for entry in stale:
        del cache[entry]
Flushes any entries from function cache where the key is found in the function + args
56,726
def log(message, level=None):
    """Write a message to the juju log.

    :param message: anything; non-strings are repr()'d before logging.
    :param level: optional log level string (e.g. 'DEBUG', 'INFO').
    """
    command = ['juju-log']
    if level:
        command += ['-l', level]
    if not isinstance(message, six.string_types):
        message = repr(message)
    # Truncate to SH_MAX_ARG to stay within argument-length limits.
    command += [message[:SH_MAX_ARG]]
    try:
        subprocess.call(command)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # juju-log unavailable (e.g. unit tests): fall back to stderr.
            if level:
                message = "{}: {}".format(level, message)
            message = "juju-log: {}".format(message)
            print(message, file=sys.stderr)
        else:
            raise
Write a message to the juju log
56,727
def execution_environment():
    """A convenient bundling of the current execution context."""
    context = {}
    context['conf'] = config()
    # Relation keys are only present when running inside a relation hook.
    if relation_id():
        context['reltype'] = relation_type()
        context['relid'] = relation_id()
        context['rel'] = relation_get()
    context['unit'] = local_unit()
    context['rels'] = relations()
    context['env'] = os.environ
    return context
A convenient bundling of the current execution context
56,728
def relation_id(relation_name=None, service_or_unit=None):
    """The relation ID for the current or a specified relation.

    With no arguments, returns the current hook's relation id from the
    environment.  With both arguments, looks up the relation id joining
    this charm to the named service.  Supplying only one argument is an
    error.
    """
    if not relation_name and not service_or_unit:
        return os.environ.get('JUJU_RELATION_ID', None)
    if relation_name and service_or_unit:
        service_name = service_or_unit.split('/')[0]
        for relid in relation_ids(relation_name):
            if remote_service_name(relid) == service_name:
                return relid
        return None
    raise ValueError('Must specify neither or both of relation_name and service_or_unit')
The relation ID for the current or a specified relation
56,729
def principal_unit():
    """Returns the principal unit of this unit, otherwise None."""
    # Newer Juju versions expose the principal directly.
    principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
    # An empty value means this unit is itself a principal.
    if principal_unit == '':
        return os.environ['JUJU_UNIT_NAME']
    elif principal_unit is not None:
        return principal_unit
    # Fallback: inspect each related unit's metadata; a unit that is not
    # a subordinate is taken to be our principal.
    for reltype in relation_types():
        for rid in relation_ids(reltype):
            for unit in related_units(rid):
                md = _metadata_unit(unit)
                if not md:
                    continue
                subordinate = md.pop('subordinate', None)
                if not subordinate:
                    return unit
    return None
Returns the principal unit of this unit otherwise None
56,730
def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information.

    :param attribute: specific setting to fetch ('-' fetches all).
    :param unit: remote unit to read from.
    :param rid: relation id to read from.
    :returns: decoded JSON value, or None when no data is available.
    """
    _args = ['relation-get', '--format=json']
    if rid:
        _args.append('-r')
        _args.append(rid)
    _args.append(attribute or '-')
    if unit:
        _args.append(unit)
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        # empty / non-JSON output
        return None
    except CalledProcessError as e:
        # exit status 2 is treated as "no such data"
        if e.returncode == 2:
            return None
        raise
Get relation information
56,731
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Set relation information for the current unit.

    Settings may be passed as a dict and/or keyword arguments; None
    values unset the corresponding key.
    """
    relation_settings = relation_settings if relation_settings else {}
    relation_cmd_line = ['relation-set']
    # Newer juju supports passing settings via --file, which avoids
    # command-line length limits.
    accepts_file = "--file" in subprocess.check_output(
        relation_cmd_line + ["--help"], universal_newlines=True)
    if relation_id is not None:
        relation_cmd_line.extend(('-r', relation_id))
    settings = relation_settings.copy()
    settings.update(kwargs)
    for key, value in settings.items():
        # stringify non-None values; None means "unset"
        if value is not None:
            settings[key] = "{}".format(value)
    if accepts_file:
        with tempfile.NamedTemporaryFile(delete=False) as settings_file:
            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
        subprocess.check_call(
            relation_cmd_line + ["--file", settings_file.name])
        os.remove(settings_file.name)
    else:
        for key, value in settings.items():
            if value is None:
                relation_cmd_line.append('{}='.format(key))
            else:
                relation_cmd_line.append('{}={}'.format(key, value))
        subprocess.check_call(relation_cmd_line)
    # Invalidate any cached relation data for the local unit.
    flush(local_unit())
Set relation information for the current unit
56,732
def relation_clear(r_id=None):
    """Clears any relation data already set on relation r_id."""
    settings = relation_get(rid=r_id, unit=local_unit())
    for setting in settings:
        # Address keys are preserved; everything else is unset (None).
        if setting not in ['public-address', 'private-address']:
            settings[setting] = None
    relation_set(relation_id=r_id, **settings)
Clears any relation data already set on relation r_id
56,733
def relation_ids(reltype=None):
    """A list of relation_ids."""
    reltype = reltype or relation_type()
    if reltype is None:
        # Outside any relation hook with no type given: nothing to list.
        return []
    cmd = ['relation-ids', '--format=json', reltype]
    return json.loads(subprocess.check_output(cmd).decode('UTF-8')) or []
A list of relation_ids
56,734
def related_units(relid=None):
    """A list of related units."""
    relid = relid or relation_id()
    cmd = ['relation-list', '--format=json']
    if relid is not None:
        cmd += ['-r', relid]
    output = subprocess.check_output(cmd).decode('UTF-8')
    return json.loads(output) or []
A list of related units
56,735
def expected_peer_units():
    """Get a generator for units we expect to join peer relation based on
    goal-state.

    :raises NotImplementedError: on juju versions without goal-state.
    """
    if not has_juju_version("2.4.0"):
        raise NotImplementedError("goal-state")
    unit_names = goal_state()['units']
    # Unit names contain '/'; exclude the application entry and ourselves.
    return (name for name in unit_names
            if '/' in name and name != local_unit())
Get a generator for units we expect to join peer relation based on goal - state .
56,736
def expected_related_units(reltype=None):
    """Get a generator for units we expect to join relation based on
    goal-state.

    :raises NotImplementedError: on juju versions without relation unit
        goal-state data.
    :raises KeyError: if goal-state has no relation of the given type.
    """
    if not has_juju_version("2.4.4"):
        raise NotImplementedError("goal-state relation unit count")
    reltype = reltype or relation_type()
    relation_entries = goal_state()['relations'][reltype]
    # Unit names contain '/'; application-level entries do not.
    return (entry for entry in relation_entries if '/' in entry)
Get a generator for units we expect to join relation based on goal - state .
56,737
def relation_for_unit(unit=None, rid=None):
    """Get the JSON representation of a unit's relation."""
    unit = unit or remote_unit()
    relation = relation_get(unit=unit, rid=rid)
    # Keys ending in '-list' are whitespace-separated lists by convention.
    for key, value in list(relation.items()):
        if key.endswith('-list'):
            relation[key] = value.split()
    relation['__unit__'] = unit
    return relation
Get the JSON representation of a unit's relation
56,738
def relations_for_id(relid=None):
    """Get relations of a specific relation ID."""
    relation_data = []
    # NOTE(review): when relid is None this falls back to relation_ids(),
    # which returns a *list* that is then passed on as a single relid to
    # related_units() -- that looks suspect; confirm intended behaviour
    # before relying on the no-argument form.
    relid = relid or relation_ids()
    for unit in related_units(relid):
        unit_data = relation_for_unit(unit, relid)
        unit_data['__relid__'] = relid
        relation_data.append(unit_data)
    return relation_data
Get relations of a specific relation ID
56,739
def relations_of_type(reltype=None):
    """Get relations of a specific type."""
    reltype = reltype or relation_type()
    result = []
    for relid in relation_ids(reltype):
        for rel in relations_for_id(relid):
            rel['__relid__'] = relid
            result.append(rel)
    return result
Get relations of a specific type
56,740
def metadata():
    """Get the current charm metadata.yaml contents as a python object."""
    metadata_path = os.path.join(charm_dir(), 'metadata.yaml')
    with open(metadata_path) as md:
        return yaml.safe_load(md)
Get the current charm metadata . yaml contents as a python object
56,741
def relation_types():
    """Get a list of relation types supported by this charm."""
    md = metadata()
    rel_types = []
    # Relations appear under these three roles in metadata.yaml.
    for role in ('provides', 'requires', 'peers'):
        rel_types.extend(md.get(role, {}).keys())
    return rel_types
Get a list of relation types supported by this charm
56,742
def peer_relation_id():
    """Get the peers relation id if a peers relation has been joined,
    else None."""
    peers = metadata().get('peers')
    if peers:
        for rel_name in peers:
            ids = relation_ids(rel_name)
            if ids:
                return ids[0]
    return None
Get the peers relation id if a peers relation has been joined else None .
56,743
def interface_to_relations(interface_name):
    """Given an interface, return a list of relation names for the
    current charm that use that interface.

    :returns: A list of relation names.
    """
    relation_names = []
    for role in ('provides', 'requires', 'peers'):
        relation_names += role_and_interface_to_relations(role,
                                                          interface_name)
    return relation_names
Given an interface return a list of relation names for the current charm that use that interface .
56,744
def relations():
    """Get a nested dictionary of relation data for all related units."""
    rels = {}
    for reltype in relation_types():
        relids = {}
        for relid in relation_ids(reltype):
            # Include our own settings alongside each remote unit's.
            units = {local_unit(): relation_get(unit=local_unit(),
                                                rid=relid)}
            for unit in related_units(relid):
                reldata = relation_get(unit=unit, rid=relid)
                units[unit] = reldata
            relids[relid] = units
        rels[reltype] = relids
    return rels
Get a nested dictionary of relation data for all related units
56,745
def _port_op(op_name, port, protocol="TCP"):
    """Open or close a service network port."""
    args = [op_name]
    is_icmp = protocol.upper() == "ICMP"
    if is_icmp:
        # ICMP is protocol-only; no port number applies.
        args.append(protocol)
    else:
        args.append('{}/{}'.format(port, protocol))
    try:
        subprocess.check_call(args)
    except subprocess.CalledProcessError:
        # Failures are tolerated for ICMP (it may be unsupported).
        if not is_icmp:
            raise
Open or close a service network port
56,746
def open_ports(start, end, protocol="TCP"):
    """Opens a range of service network ports."""
    port_range = '{}-{}/{}'.format(start, end, protocol)
    subprocess.check_call(['open-port', port_range])
Opens a range of service network ports
56,747
def unit_get(attribute):
    """Get an attribute of the local unit (e.g. public-address) via the
    unit-get hook tool."""
    cmd = ['unit-get', '--format=json', attribute]
    try:
        return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
    except ValueError:
        # empty / non-JSON output
        return None
Get an attribute of the local unit (e.g. its public-address) via the unit-get hook tool
56,748
def storage_get(attribute=None, storage_id=None):
    """Get storage attributes."""
    cmd = ['storage-get', '--format=json']
    if storage_id:
        cmd += ['-s', storage_id]
    if attribute:
        cmd.append(attribute)
    try:
        return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
    except ValueError:
        return None
Get storage attributes
56,749
def storage_list(storage_name=None):
    """List the storage IDs for the unit."""
    cmd = ['storage-list', '--format=json']
    if storage_name:
        cmd.append(storage_name)
    try:
        return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
    except ValueError:
        return None
    except OSError as e:
        import errno
        if e.errno == errno.ENOENT:
            # storage-list is not supported by this juju version
            return []
        raise
List the storage IDs for the unit
56,750
def charm_dir():
    """Return the root directory of the current charm."""
    juju_charm_dir = os.environ.get('JUJU_CHARM_DIR')
    if juju_charm_dir is None:
        # Fall back to the legacy variable name.
        return os.environ.get('CHARM_DIR')
    return juju_charm_dir
Return the root directory of the current charm
56,751
def action_set(values):
    """Sets the values to be returned after the action finishes."""
    cmd = ['action-set'] + ['{}={}'.format(k, v)
                            for k, v in list(values.items())]
    subprocess.check_call(cmd)
Sets the values to be returned after the action finishes
56,752
def status_set(workload_state, message):
    """Set the workload state with a message.

    :param workload_state: one of 'maintenance', 'blocked', 'waiting' or
        'active'.
    :param message: operator-facing message string.
    :raises ValueError: for an invalid workload_state.
    """
    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
    if workload_state not in valid_states:
        raise ValueError(
            '{!r} is not a valid workload state'.format(workload_state))
    try:
        if subprocess.call(['status-set', workload_state, message]) == 0:
            return
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    # status-set unavailable or failed: record to the juju log instead.
    log('status-set failed: {} {}'.format(workload_state, message),
        level='INFO')
Set the workload state with a message
56,753
def status_get():
    """Retrieve the previously set juju workload state and message.

    If the status-set command is not found then assume this is juju
    without workload status support and return ('unknown', "").
    """
    try:
        raw_status = subprocess.check_output(
            ['status-get', "--format=json", "--include-data"])
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        return ('unknown', "")
    status = json.loads(raw_status.decode("UTF-8"))
    return (status["status"], status["message"])
Retrieve the previously set juju workload state and message
56,754
def application_version_set(version):
    """Charm authors may trigger this command from any hook to output
    what version of the application is running (package version, build
    number, VCS revision, ...)."""
    try:
        subprocess.check_call(['application-version-set', version])
    except OSError:
        # Tool unavailable on this juju version: log instead.
        log("Application Version: {}".format(version))
Charm authors may trigger this command from any hook to output what version of the application is running . This could be a package version for instance postgres version 9 . 5 . It could also be a build number or version control revision identifier for instance git sha 6fb7ba68 .
56,755
def payload_register(ptype, klass, pid):
    """Used while a hook is running to let Juju know that a payload has
    been started."""
    subprocess.check_call(['payload-register', ptype, klass, pid])
is used while a hook is running to let Juju know that a payload has been started .
56,756
def resource_get(name):
    """Used to fetch the resource path of the given name.

    :returns: str path, or False when the name is empty or the resource
        cannot be fetched.
    """
    if not name:
        return False
    try:
        return subprocess.check_output(
            ['resource-get', name]).decode('UTF-8')
    except subprocess.CalledProcessError:
        return False
used to fetch the resource path of the given name .
56,757
def atstart(callback, *args, **kwargs):
    """Schedule a callback to run before the main hook.

    Callbacks run in the order they were added.
    """
    global _atstart
    entry = (callback, args, kwargs)
    _atstart.append(entry)
Schedule a callback to run before the main hook .
56,758
def _run_atstart ( ) : global _atstart for callback , args , kwargs in _atstart : callback ( * args , ** kwargs ) del _atstart [ : ]
Hook frameworks must invoke this before running the main hook body .
56,759
def _run_atexit ( ) : global _atexit for callback , args , kwargs in reversed ( _atexit ) : callback ( * args , ** kwargs ) del _atexit [ : ]
Hook frameworks must invoke this after the main hook body has successfully completed . Do not invoke it if the hook fails .
56,760
def network_get(endpoint, relation_id=None):
    """Retrieve the network details for a relation endpoint.

    :param endpoint: string. The name of a relation endpoint.
    :param relation_id: optional relation id to scope the query.
    :returns: dict. Parsed YAML output of the network-get query.
    :raises NotImplementedError: when the juju version is too old.
    """
    if not has_juju_version('2.2'):
        raise NotImplementedError(juju_version())
    if relation_id and not has_juju_version('2.3'):
        # the -r option needs a newer juju
        raise NotImplementedError
    cmd = ['network-get', endpoint, '--format', 'yaml']
    if relation_id:
        cmd.extend(['-r', relation_id])
    response = subprocess.check_output(
        cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
    return yaml.safe_load(response)
Retrieve the network details for a relation endpoint
56,761
def add_metric(*args, **kwargs):
    """Add metric values. Values may be expressed with keyword arguments.
    For metric names containing dashes, these may be expressed as one or
    more 'key=value' positional arguments. May only be called from the
    collect-metrics hook."""
    kvpairs = list(args) + ['{}={}'.format(k, v)
                            for k, v in kwargs.items()]
    cmd = ['add-metric'] + sorted(kvpairs)
    try:
        subprocess.check_call(cmd)
        return
    except EnvironmentError as e:
        if e.errno != errno.ENOENT:
            raise
    # add-metric unavailable: record the attempt in the juju log.
    log('add-metric failed: {}'.format(' '.join(kvpairs)), level='INFO')
Add metric values . Values may be expressed with keyword arguments . For metric names containing dashes these may be expressed as one or more key = value positional arguments . May only be called from the collect - metrics hook .
56,762
def iter_units_for_relation_name(relation_name):
    """Iterate through all units in a relation.

    Yields a named tuple with 'rid' and 'unit' fields for every unit on
    every relation id of the given relation name.
    """
    related_unit_cls = namedtuple('RelatedUnit', 'rid, unit')
    for relation_identifier in relation_ids(relation_name):
        for unit_name in related_units(relation_identifier):
            yield related_unit_cls(relation_identifier, unit_name)
Iterate through all units in a relation
56,763
def ingress_address(rid=None, unit=None):
    """Retrieve the ingress-address from a relation when available.
    Otherwise, return the private-address."""
    settings = relation_get(rid=rid, unit=unit)
    ingress = settings.get('ingress-address')
    if ingress:
        return ingress
    return settings.get('private-address')
Retrieve the ingress - address from a relation when available . Otherwise return the private - address .
56,764
def egress_subnets(rid=None, unit=None):
    """Retrieve the egress-subnets from a relation.

    Falls back to a /32 (IPv4) or /128 (IPv6) range derived from the
    ingress-address or private-address when egress-subnets is unset.
    """
    def _as_cidr(addr):
        # bare IPv4 -> /32; bare IPv6 -> /128; anything else unchanged
        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
            return addr + '/32'
        if ':' in addr and '/' not in addr:
            return addr + '/128'
        return addr

    settings = relation_get(rid=rid, unit=unit)
    if 'egress-subnets' in settings:
        return [n.strip() for n in settings['egress-subnets'].split(',')
                if n.strip()]
    for fallback_key in ('ingress-address', 'private-address'):
        if fallback_key in settings:
            return [_as_cidr(settings[fallback_key])]
    return []
Retrieve the egress - subnets from a relation .
56,765
def unit_doomed(unit=None):
    """Determines if the unit is being removed from the model.

    :raises NotImplementedError: on juju versions without goal-state.
    """
    if not has_juju_version("2.4.1"):
        raise NotImplementedError("is_doomed")
    if unit is None:
        unit = local_unit()
    units = goal_state().get('units', {})
    if unit not in units:
        # Absent from goal-state: already on its way out.
        return True
    return units[unit]['status'] in ('dying', 'dead')
Determines if the unit is being removed from the model
56,766
def env_proxy_settings(selected_settings=None):
    """Get proxy settings from process environment variables.

    Both upper- and lower-case variants of each variable are included in
    the result; JUJU_CHARM_*_PROXY variables take precedence over the
    plain process environment.

    :param selected_settings: subset of proxy settings to include
        ('http', 'https', 'no_proxy', 'ftp'); all when None.
    :returns: dict of proxy settings, or None when none are set.
    """
    SUPPORTED_SETTINGS = {
        'http': 'HTTP_PROXY',
        'https': 'HTTPS_PROXY',
        'no_proxy': 'NO_PROXY',
        'ftp': 'FTP_PROXY'
    }
    if selected_settings is None:
        selected_settings = SUPPORTED_SETTINGS
    selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
                     if k in selected_settings]
    proxy_settings = {}
    for var in selected_vars:
        # Later candidates override earlier ones (charm var wins).
        for candidate in (os.getenv(var),
                          os.getenv('JUJU_CHARM_{}'.format(var))):
            if candidate:
                proxy_settings[var] = candidate
                proxy_settings[var.lower()] = candidate
    if 'no_proxy' in proxy_settings:
        if _contains_range(proxy_settings['no_proxy']):
            log(RANGE_WARNING, level=WARNING)
    return proxy_settings if proxy_settings else None
Get proxy settings from process environment variables .
56,767
def load_previous(self, path=None):
    """Load previous copy of config from disk.

    :param path: optional file path to load from; defaults to (and
        updates) self.path.
    """
    self.path = path or self.path
    with open(self.path) as f:
        try:
            self._prev_dict = json.load(f)
        except ValueError as e:
            log('Unable to parse previous config data - {}'
                .format(str(e)), level=ERROR)
    # NOTE(review): if parsing failed above, _prev_dict keeps whatever
    # value it had before (possibly None), and deepcopy below would then
    # raise -- confirm callers guarantee a prior value.
    for k, v in copy.deepcopy(self._prev_dict).items():
        if k not in self:
            self[k] = v
Load previous copy of config from disk .
56,768
def changed(self, key):
    """Return True if the current value of ``key`` differs from its
    previous value.

    Always returns True when no previous config has been loaded.
    """
    if self._prev_dict is None:
        return True
    return self.get(key) != self.previous(key)
Return True if the current value for this key is different from the previous value .
56,769
def save(self):
    """Serialise this config to disk as JSON at ``self.path``.

    The file is chmod'ed 0600 before writing, since config may contain
    credentials.
    """
    with open(self.path, 'w') as handle:
        # Restrict permissions before any data hits the file.
        os.fchmod(handle.fileno(), 0o600)
        json.dump(self, handle)
Save this config to disk .
56,770
# Decorator factory: register the decorated function under each name in
# *hook_names*.
# NOTE(review): the for/else below contains no ``break``, so the ``else``
# branch ALWAYS executes -- the function is additionally registered under
# its own __name__ (plus a dash-separated variant when the name contains
# an underscore) even when explicit hook names were supplied.  This
# matches the code as written; confirm it is intentional before changing.
def hook ( self , * hook_names ) : def wrapper ( decorated ) : for hook_name in hook_names : self . register ( hook_name , decorated ) else : self . register ( decorated . __name__ , decorated ) if '_' in decorated . __name__ : self . register ( decorated . __name__ . replace ( '_' , '-' ) , decorated ) return decorated return wrapper
Decorator registering them as hooks
56,771
def shutdown(self):
    """Revert stdin and stdout, close the debug socket, and resume
    normal execution."""
    # Restore the original streams before tearing down the socket.
    sys.stdin = self.old_stdin
    sys.stdout = self.old_stdout
    self.skt.close()
    self.set_continue()
Revert stdin and stdout close the socket .
56,772
def start():
    """Record the action start timestamp and, when the collectd charm's
    helper script is installed, trigger a profile-data snapshot."""
    action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
    collector = '/usr/local/bin/collect-profile-data'
    if os.path.exists(collector):
        subprocess.check_output([collector])
If the collectd charm is also installed tell it to send a snapshot of the current profile data .
56,773
def get_os_codename_install_source(src):
    """Derive the OpenStack release codename from an installation source.

    Handles 'distro'/'proposed' (mapped from the Ubuntu series), 'cloud:'
    archive pockets, and deb/ppa/snap sources that embed a codename.

    :param src: installation source string, or None
    :returns: codename string; '' for a None source; None when a
        deb/ppa/snap source contains no known codename
    """
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
    if src is None:
        return ''
    if src in ['distro', 'distro-proposed', 'proposed']:
        try:
            rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
        except KeyError:
            rel = ''
            error_out('Could not derive openstack release for '
                      'this Ubuntu release: %s' % ubuntu_rel)
        return rel
    if src.startswith('cloud:'):
        # e.g. 'cloud:xenial-ocata/proposed' -> 'ocata'
        pocket = src.split(':')[1]
        return pocket.split('-')[1].split('/')[0]
    if src.startswith(('deb', 'ppa', 'snap')):
        for codename in OPENSTACK_CODENAMES.values():
            if codename in src:
                return codename
Derive OpenStack release codename from a given installation source .
56,774
def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
    """Map an OpenStack codename back to its version number string.

    :param codename: release codename, e.g. 'queens'
    :param version_map: {version: codename} mapping to search
    :returns: the matching version string; calls error_out() on no match
    """
    for version, name in six.iteritems(version_map):
        if name == codename:
            return version
    error_out('Could not derive OpenStack version for '
              'codename: %s' % codename)
Determine OpenStack version number from codename .
56,775
def get_os_version_codename_swift(codename):
    """Return the newest swift version number for an OpenStack codename.

    :param codename: OpenStack release codename
    :returns: last (most recent) swift version listed for that codename;
        calls error_out() on no match
    """
    for name, versions in six.iteritems(SWIFT_CODENAMES):
        if name == codename:
            # The final list entry is the newest point release.
            return versions[-1]
    error_out('Could not derive swift version for '
              'codename: %s' % codename)
Determine OpenStack version number of swift from codename .
56,776
# Map an installed swift *version* string back to its OpenStack codename.
# A version can be listed under more than one codename (stable point
# releases), so ties are broken by inspecting ``apt-cache policy swift``
# output for the codename or its Ubuntu series.  Falls back to a
# major.minor prefix match, and returns None when nothing matches.
# NOTE(review): ``release [ 0 ]`` assumes every candidate codename appears
# in some UBUNTU_OPENSTACK_RELEASE value -- an empty match list would
# raise IndexError here; confirm against the release tables.
def get_swift_codename ( version ) : codenames = [ k for k , v in six . iteritems ( SWIFT_CODENAMES ) if version in v ] if len ( codenames ) > 1 : for codename in reversed ( codenames ) : releases = UBUNTU_OPENSTACK_RELEASE release = [ k for k , v in six . iteritems ( releases ) if codename in v ] ret = subprocess . check_output ( [ 'apt-cache' , 'policy' , 'swift' ] ) if six . PY3 : ret = ret . decode ( 'UTF-8' ) if codename in ret or release [ 0 ] in ret : return codename elif len ( codenames ) == 1 : return codenames [ 0 ] match = re . match ( r'^(\d+)\.(\d+)' , version ) if match : major_minor_version = match . group ( 0 ) for codename , versions in six . iteritems ( SWIFT_CODENAMES ) : for release_version in versions : if release_version . startswith ( major_minor_version ) : return codename return None
Determine OpenStack codename that corresponds to swift version .
56,777
# Derive the OpenStack release codename from an installed package.
# Snap installs: parse ``snap list <package>`` output (second column is the
# version).  Deb installs: look the package up in the apt cache, take the
# upstream version, truncate to major.minor(.patch for swift), and resolve
# via PACKAGE_CODENAMES first, then SWIFT_CODENAMES / OPENSTACK_CODENAMES.
# With fatal=False every failure path returns None instead of calling
# error_out().  NOTE(review): the snap branch returns the version string,
# not a codename -- callers appear to rely on this; confirm.
def get_os_codename_package ( package , fatal = True ) : if snap_install_requested ( ) : cmd = [ 'snap' , 'list' , package ] try : out = subprocess . check_output ( cmd ) if six . PY3 : out = out . decode ( 'UTF-8' ) except subprocess . CalledProcessError : return None lines = out . split ( '\n' ) for line in lines : if package in line : return line . split ( ) [ 1 ] import apt_pkg as apt cache = apt_cache ( ) try : pkg = cache [ package ] except Exception : if not fatal : return None e = 'Could not determine version of package with no installation ' 'candidate: %s' % package error_out ( e ) if not pkg . current_ver : if not fatal : return None e = 'Could not determine version of uninstalled package: %s' % package error_out ( e ) vers = apt . upstream_version ( pkg . current_ver . ver_str ) if 'swift' in pkg . name : match = re . match ( r'^(\d+)\.(\d+)\.(\d+)' , vers ) else : match = re . match ( r'^(\d+)\.(\d+)' , vers ) if match : vers = match . group ( 0 ) major_vers = vers . split ( '.' ) [ 0 ] if ( package in PACKAGE_CODENAMES and major_vers in PACKAGE_CODENAMES [ package ] ) : return PACKAGE_CODENAMES [ package ] [ major_vers ] else : try : if 'swift' in pkg . name : return get_swift_codename ( vers ) else : return OPENSTACK_CODENAMES [ vers ] except KeyError : if not fatal : return None e = 'Could not determine OpenStack codename for version %s' % vers error_out ( e )
Derive OpenStack release codename from an installed package .
56,778
def get_os_version_package(pkg, fatal=True):
    """Derive the OpenStack version number from an installed package.

    :param pkg: package name to inspect
    :param fatal: when False, return None instead of erroring out
    :returns: version string, or None when the codename cannot be derived
    """
    codename = get_os_codename_package(pkg, fatal=fatal)
    if not codename:
        return None
    if 'swift' in pkg:
        # Swift maps codename -> list of versions; the newest wins.
        for cname, versions in six.iteritems(SWIFT_CODENAMES):
            if cname == codename:
                return versions[-1]
    else:
        for version, cname in six.iteritems(OPENSTACK_CODENAMES):
            if cname == codename:
                return version
Derive OpenStack version number from an installed package .
56,779
def os_release(package, base='essex', reset_cache=False):
    """Return the OpenStack release codename, memoised in a module global.

    Resolution order: installed package, then the configured
    'openstack-origin' source, then the supplied base release.

    :param package: package to derive the codename from
    :param base: fallback codename when nothing else resolves
    :param reset_cache: drop the cached value and recompute
    """
    global _os_rel
    if reset_cache:
        reset_os_release()
    if not _os_rel:
        _os_rel = (
            get_os_codename_package(package, fatal=False) or
            get_os_codename_install_source(config('openstack-origin')) or
            base)
    return _os_rel
Returns OpenStack release codename from a cached global .
56,780
def import_key(keyid):
    """Import an ASCII-armored or key-id GPG key, erroring out the hook
    when the import fails.

    :param keyid: key id or ascii-armored key material
    """
    try:
        return fetch_import_key(keyid)
    except GPGKeyError as err:
        error_out("Could not import key: {}".format(str(err)))
Import a key either ASCII armored or a GPG key id .
56,781
def get_source_and_pgp_key(source_and_key):
    """Look for a pgp key ID or ascii-armor key in the given input.

    :param source_and_key: a 'source|key' string, where the '|key' part
        is optional
    :returns: (source, key) tuple; key is None when absent or empty
    """
    # maxsplit must be 1: the previous maxsplit of 2 meant any input with
    # two or more '|' characters unpacked into three values, raised
    # ValueError, and silently discarded the key.
    try:
        source, key = source_and_key.split('|', 1)
        return source, key or None
    except ValueError:
        # No '|' separator: the whole string is the source.
        return source_and_key, None
Look for a pgp key ID or ascii - armor key in the given input .
56,782
def configure_installation_source(source_plus_key):
    """Configure an installation source, importing its GPG key if given.

    :param source_plus_key: 'source|key' string; the key part is optional
    """
    if source_plus_key.startswith('snap'):
        # Snap installs do not touch apt sources.
        return
    source, key = get_source_and_pgp_key(source_plus_key)
    try:
        fetch_add_source(source, key, fail_invalid=True)
    except SourceConfigError as err:
        error_out(str(err))
Configure an installation source .
56,783
def config_value_changed(option):
    """Determine whether a config option changed since the last call.

    The current value is persisted in the unit kv store, so the first
    invocation for an option always returns False.

    :param option: config option name
    :returns: True when the stored value differs from the current one
    """
    hook_data = unitdata.HookData()
    with hook_data():
        db = unitdata.kv()
        current = config(option)
        saved = db.get(option)
        db.set(option, current)
        # No prior value recorded -> treat as unchanged.
        return saved is not None and saved != current
Determine if config value changed since last call to this function .
56,784
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """Write an rc file of exported environment variables into the charm
    directory.

    Charm scripts that run outside the Juju hook environment can source
    the generated file to pick up current configuration needed for health
    checks or service changes.

    :param script_path: rc file path relative to the charm directory
    :param env_vars: name/value pairs to export ('script_path' is reserved
        and skipped)
    """
    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
    if not os.path.exists(os.path.dirname(juju_rc_path)):
        os.mkdir(os.path.dirname(juju_rc_path))
    with open(juju_rc_path, 'wt') as rc_script:
        rc_script.write("#!/bin/bash\n")
        # Plain loop instead of a side-effect list comprehension, which
        # built and discarded a list of None values.
        for name, value in six.iteritems(env_vars):
            if name != "script_path":
                rc_script.write('export %s=%s\n' % (name, value))
Write an rc file in the charm - delivered directory containing exported environment variables provided by env_vars . Any charm scripts run outside the juju hook environment can source this scriptrc to obtain updated config information necessary to perform health checks or service changes .
56,785
def openstack_upgrade_available(package):
    """Determine whether the configured installation source offers a newer
    OpenStack release than the one the given package belongs to.

    :param package: installed package used as the version reference
    :returns: True when an upgrade is available
    """
    import apt_pkg as apt

    src = config('openstack-origin')
    cur_vers = get_os_version_package(package)
    if not cur_vers:
        # Reference package not installed: nothing to compare against.
        return False
    if "swift" in package:
        codename = get_os_codename_install_source(src)
        avail_vers = get_os_version_codename_swift(codename)
    else:
        avail_vers = get_os_version_install_source(src)
    apt.init()
    return apt.version_compare(avail_vers, cur_vers) >= 1
Determines if an OpenStack upgrade is available from installation source based on version of installed package .
56,786
def ensure_block_device(block_device):
    """Confirm a block device exists, creating a loopback when necessary.

    Accepts a bare device name ('sdb'), an absolute device path
    ('/dev/sdb'), or a '/path/to/file|size' loopback specification.

    :param block_device: device specification as above
    :returns: resolved '/dev/...' path; errors out the hook on failure
    """
    if block_device in ['None', 'none', None]:
        error_out('prepare_storage(): Missing required input: '
                  'block_device=%s.' % block_device)

    if block_device.startswith('/dev/'):
        bdev = block_device
    elif block_device.startswith('/'):
        # Loopback spec: optional '|size' suffix, else the default size.
        parts = block_device.split('|')
        if len(parts) == 2:
            bdev, size = parts
        else:
            bdev, size = block_device, DEFAULT_LOOPBACK_SIZE
        bdev = ensure_loopback_device(bdev, size)
    else:
        bdev = '/dev/%s' % block_device

    if not is_block_device(bdev):
        error_out('Failed to locate valid block device at %s' % bdev)
    return bdev
Confirm block_device create as loopback if necessary .
56,787
def os_requires_version(ostack_release, pkg):
    """Decorator: refuse to run a hook on OpenStack releases older than
    *ostack_release*.

    :param ostack_release: minimum supported release codename
    :param pkg: package used to determine the installed release
    """
    def _decorator(f):
        @wraps(f)
        def _guarded(*args):
            # Codenames sort alphabetically in release order, so plain
            # string comparison is a valid ordering here.
            if os_release(pkg) < ostack_release:
                raise Exception("This hook is not supported on releases"
                                " before %s" % ostack_release)
            f(*args)
        return _guarded
    return _decorator
Decorator for hook to specify minimum supported release
56,788
def os_workload_status(configs, required_interfaces, charm_func=None):
    """Decorator: refresh the workload status after the wrapped hook runs.

    :param configs: OSConfigRenderer-style object with complete_contexts()
    :param required_interfaces: mapping of service type -> interface names
    :param charm_func: optional charm-specific status check
    """
    def _decorator(f):
        @wraps(f)
        def _wrapped(*args, **kwargs):
            f(*args, **kwargs)
            # Recompute status once the hook has made its changes.
            set_os_workload_status(configs, required_interfaces, charm_func)
        return _wrapped
    return _decorator
Decorator to set workload status based on complete contexts
56,789
def set_os_workload_status(configs, required_interfaces, charm_func=None,
                           services=None, ports=None):
    """Compute the charm's workload status and apply it via status_set.

    :param configs: OSConfigRenderer-style object
    :param required_interfaces: mapping of service type -> interface names
    :param charm_func: optional charm-specific status check
    :param services: optional list of services to verify
    :param ports: optional list of ports to verify
    """
    result = _determine_os_workload_status(configs, required_interfaces,
                                           charm_func, services, ports)
    status_set(*result)
Set the state of the workload status for the charm .
56,790
def _determine_os_workload_status(configs, required_interfaces,
                                  charm_func=None,
                                  services=None, ports=None):
    """Determine the (state, message) workload status for the charm.

    Checks are layered: paused? -> interface contexts complete? ->
    charm-specific check -> services/ports running.  The first layer that
    reports a problem wins; otherwise the unit is active/ready.
    """
    state, message = _ows_check_if_paused(services, ports)

    if state is None:
        state, message = _ows_check_generic_interfaces(
            configs, required_interfaces)

    # The charm-specific check runs even when an earlier layer already set
    # a state (it may refine the message), but never during maintenance.
    if state != 'maintenance' and charm_func:
        state, message = _ows_check_charm_func(
            state, message, lambda: charm_func(configs))

    if state is None:
        state, message = _ows_check_services_running(services, ports)

    if state is None:
        state, message = 'active', "Unit is ready"

    juju_log(message, 'INFO')
    return state, message
Determine the state of the workload status for the charm .
56,791
# Inspect every incomplete interface context and derive a workload state:
# an interface with no related units at all -> 'blocked' (missing
# relation); related but awaiting data -> 'waiting' (incomplete), unless
# the current hook is the departed/broken hook for that very interface,
# which also blocks.  Any missing relation forces the overall state to
# 'blocked'; otherwise incomplete relations yield 'waiting'.  Returns
# (None, None) when everything is complete.
def _ows_check_generic_interfaces ( configs , required_interfaces ) : incomplete_rel_data = incomplete_relation_data ( configs , required_interfaces ) state = None message = None missing_relations = set ( ) incomplete_relations = set ( ) for generic_interface , relations_states in incomplete_rel_data . items ( ) : related_interface = None missing_data = { } for interface , relation_state in relations_states . items ( ) : if relation_state . get ( 'related' ) : related_interface = interface missing_data = relation_state . get ( 'missing_data' ) break if not related_interface : juju_log ( "{} relation is missing and must be related for " "functionality. " . format ( generic_interface ) , 'WARN' ) state = 'blocked' missing_relations . add ( generic_interface ) else : if not missing_data : _hook_name = hook_name ( ) if ( ( 'departed' in _hook_name or 'broken' in _hook_name ) and related_interface in _hook_name ) : state = 'blocked' missing_relations . add ( generic_interface ) juju_log ( "{} relation's interface, {}, " "relationship is departed or broken " "and is required for functionality." "" . format ( generic_interface , related_interface ) , "WARN" ) else : juju_log ( "{} relations's interface, {}, is related but has" " no units in the relation." "" . format ( generic_interface , related_interface ) , "INFO" ) else : juju_log ( "{} relation's interface, {}, is related awaiting " "the following data from the relationship: {}. " "" . format ( generic_interface , related_interface , ", " . join ( missing_data ) ) , "INFO" ) if state != 'blocked' : state = 'waiting' if generic_interface not in missing_relations : incomplete_relations . add ( generic_interface ) if missing_relations : message = "Missing relations: {}" . format ( ", " . join ( missing_relations ) ) if incomplete_relations : message += "; incomplete relations: {}" "" . format ( ", " . join ( incomplete_relations ) ) state = 'blocked' elif incomplete_relations : message = "Incomplete relations: {}" "" .
format ( ", " . join ( incomplete_relations ) ) state = 'waiting' return state , message
Check the complete contexts to determine the workload status .
56,792
# Verify that the requested services are running and that their ports
# (plus any explicitly listed ports) have listeners.  Any failure appends
# a human-readable message and sets state 'blocked'; the messages are
# joined with '; ' into the returned status message.  Returns (None, None)
# when services and ports are all healthy or nothing was requested.
def _ows_check_services_running ( services , ports ) : messages = [ ] state = None if services is not None : services = _extract_services_list_helper ( services ) services_running , running = _check_running_services ( services ) if not all ( running ) : messages . append ( "Services not running that should be: {}" . format ( ", " . join ( _filter_tuples ( services_running , False ) ) ) ) state = 'blocked' map_not_open , ports_open = ( _check_listening_on_services_ports ( services ) ) if not all ( ports_open ) : message_parts = { service : ", " . join ( [ str ( v ) for v in open_ports ] ) for service , open_ports in map_not_open . items ( ) } message = ", " . join ( [ "{}: [{}]" . format ( s , sp ) for s , sp in message_parts . items ( ) ] ) messages . append ( "Services with ports not open that should be: {}" . format ( message ) ) state = 'blocked' if ports is not None : ports_open , ports_open_bools = _check_listening_on_ports_list ( ports ) if not all ( ports_open_bools ) : messages . append ( "Ports which should be open, but are not: {}" . format ( ", " . join ( [ str ( p ) for p , v in ports_open if not v ] ) ) ) state = 'blocked' if state is not None : message = "; " . join ( messages ) return state , message return None , None
Check that the services that should be running are actually running and that any ports specified are being listened to .
56,793
def _check_listening_on_ports_list(ports):
    """Check whether each port in *ports* has a listener on 0.0.0.0.

    :param ports: iterable of port numbers
    :returns: (zip of (port, is_open) pairs, list of is_open booleans)
    """
    listening = [port_has_listener('0.0.0.0', port) for port in ports]
    # NOTE: on py3 the first element is a single-use zip iterator, as in
    # the original implementation; callers iterate it exactly once.
    return zip(ports, listening), listening
Check that the ports list given are being listened to
56,794
def workload_state_compare(current_workload_state, workload_state):
    """Return whichever of the two workload states has higher priority.

    Unrecognised states are treated as 'unknown' (lowest priority).

    :param current_workload_state: existing state
    :param workload_state: proposed state
    :returns: the higher-priority (normalised) state name
    """
    severity = {
        'unknown': -1,
        'active': 0,
        'maintenance': 1,
        'waiting': 2,
        'blocked': 3,
    }
    current = current_workload_state if current_workload_state in severity \
        else 'unknown'
    proposed = workload_state if workload_state in severity else 'unknown'
    if severity[current] > severity[proposed]:
        return current
    return proposed
Return highest priority of two states
56,795
def incomplete_relation_data(configs, required_interfaces):
    """Return incomplete context data for interfaces lacking complete
    contexts.

    :param configs: object exposing complete_contexts() and
        get_incomplete_context_data()
    :param required_interfaces: mapping of service type -> interface names
    :returns: {service_type: incomplete-context-data} for every service
        type whose interfaces have no complete context
    """
    complete = set(configs.complete_contexts())
    missing = {}
    for svc_type, interfaces in required_interfaces.items():
        # Incomplete when none of the interfaces has a complete context.
        if complete.isdisjoint(interfaces):
            missing[svc_type] = configs.get_incomplete_context_data(interfaces)
    return missing
Check complete contexts against required_interfaces ; return a dictionary of incomplete relation data .
56,796
def do_action_openstack_upgrade(package, upgrade_callback, configs):
    """Perform an action-managed OpenStack upgrade.

    :param package: package used to detect upgrade availability
    :param upgrade_callback: callable performing the upgrade, invoked as
        upgrade_callback(configs=configs)
    :param configs: config renderer passed through to the callback
    :returns: True only when an upgrade was performed successfully
    """
    if not openstack_upgrade_available(package):
        action_set({'outcome': 'no upgrade available.'})
        return False
    if not config('action-managed-upgrade'):
        action_set({'outcome': 'action-managed-upgrade config is '
                               'False, skipped upgrade.'})
        return False
    juju_log('Upgrading OpenStack release')
    try:
        upgrade_callback(configs=configs)
    except Exception:
        action_set({'outcome': 'upgrade failed, see traceback.'})
        action_set({'traceback': traceback.format_exc()})
        action_fail('do_openstack_upgrade resulted in an '
                    'unexpected error')
        return False
    action_set({'outcome': 'success, upgrade completed.'})
    return True
Perform action - managed OpenStack upgrade .
56,797
def manage_payload_services(action, services=None, charm_func=None):
    """Run an action against all payload services.

    :param action: one of 'pause', 'resume', 'start', 'stop'
    :param services: services to act on (list of names/dicts, or None)
    :param charm_func: optional callable run afterwards; its string return
        value (if truthy) is appended to the messages, and any exception
        it raises marks the operation as failed
    :returns: (success, messages) tuple
    :raises RuntimeError: for an unrecognised action
    """
    actions = {
        'pause': service_pause,
        'resume': service_resume,
        'start': service_start,
        'stop': service_stop,
    }
    action = action.lower()
    # Membership test directly on the dict: '.keys()' was redundant.
    if action not in actions:
        raise RuntimeError(
            "action: {} must be one of: {}".format(
                action, ', '.join(actions.keys())))
    services = _extract_services_list_helper(services)
    messages = []
    success = True
    # Iterating the (possibly empty) dict removes the redundant
    # 'if services:' guard of the original.
    for service in services:
        if not actions[action](service):
            success = False
            messages.append("{} didn't {} cleanly.".format(service, action))
    if charm_func:
        try:
            message = charm_func()
            if message:
                messages.append(message)
        except Exception as e:
            success = False
            messages.append(str(e))
    return success, messages
Run an action against all services .
56,798
def pausable_restart_on_change(restart_map, stopstart=False,
                               restart_functions=None):
    """A restart_on_change decorator that is a no-op while the unit is
    paused.

    ``restart_map`` may be a dict or a callable returning one; a callable
    is evaluated at most once (on the first non-paused invocation) and the
    result is cached for the lifetime of the decorated function.
    """
    def _decorator(f):
        # One-slot cache shared across every call to the wrapped function.
        _map_cache = {'cache': None}

        @functools.wraps(f)
        def _wrapped(*args, **kwargs):
            if is_unit_paused_set():
                # Paused: run the function but skip all restart handling.
                return f(*args, **kwargs)
            if _map_cache['cache'] is None:
                _map_cache['cache'] = (restart_map() if callable(restart_map)
                                       else restart_map)
            return restart_on_change_helper(
                (lambda: f(*args, **kwargs)),
                _map_cache['cache'], stopstart, restart_functions)
        return _wrapped
    return _decorator
A restart_on_change decorator that checks to see if the unit is paused . If it is paused then the decorated function doesn t fire .
56,799
def ordered(orderme):
    """Recursively convert a dict into a key-sorted OrderedDict.

    :param orderme: the dict to order
    :returns: OrderedDict whose keys (and nested dicts' keys) are sorted
    :raises ValueError: if *orderme* is not a dict
    """
    if not isinstance(orderme, dict):
        raise ValueError('argument must be a dict type')

    result = OrderedDict()
    # items() is correct on both py2 and py3, removing the needless
    # six.iteritems indirection; the key function sorts strictly by key.
    for key, value in sorted(orderme.items(), key=lambda kv: kv[0]):
        result[key] = ordered(value) if isinstance(value, dict) else value
    return result
Converts the provided dictionary into a collections . OrderedDict .