Dataset columns: idx (int64, 0 to 63k); question (string, 61 to 4.03k characters); target (string, 6 to 1.23k characters)
50,000
def fold_columns_to_rows(df, levels_from=2):
    df = df.copy()
    df.reset_index(inplace=True, drop=True)
    df = df.T
    # Unique values for each of the first `levels_from` index levels
    a = [list(set(df.index.get_level_values(i))) for i in range(0, levels_from)]
    combinations = list(itertools.product(*a))
    names = df.index.names[:levels_from]
    concats = []
    for c in combinations:
        try:
            dfcc = df.loc[c]
        except KeyError:
            continue
        else:
            if len(dfcc.shape) == 1:
                continue
            dfcc.columns = pd.MultiIndex.from_tuples([c] * dfcc.shape[1], names=names)
            concats.append(dfcc)
    dfc = pd.concat(concats, axis=1)
    dfc.sort_index(axis=1, inplace=True)
    if dfc.index.name is None:
        dfc.index.name = df.index.names[-1]
    return dfc
Take levels from the columns and fold them down into the row index. This destroys the existing index; existing rows will appear as columns under the new column index.
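A minimal usage sketch with a hypothetical three-level column index, folding the first two levels down (assumes itertools and pandas are imported and fold_columns_to_rows is in scope):

import itertools
import pandas as pd

cols = pd.MultiIndex.from_product([['a', 'b'], ['x', 'y'], ['u', 'v']],
                                  names=['l0', 'l1', 'l2'])
df = pd.DataFrame([list(range(8)), list(range(8, 16))], columns=cols)
folded = fold_columns_to_rows(df, levels_from=2)
# folded is indexed by 'l2'; ('l0', 'l1') pairs move to the column index,
# and the two original rows appear as columns under each pair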
50,001
def args(self, args):
    self._args = args
    self._logger.log('debug', 'Args set to {}'.format(args))
Set additional arguments to be passed to the fitness function
50,002
def minimize(self, minimize):
    self._minimize = minimize
    self._logger.log('debug', 'Minimize set to {}'.format(minimize))
Configures the ABC to minimize fitness function return value or derived score
50,003
def num_employers(self, num_employers):
    if num_employers < 2:
        self._logger.log('warn', 'Two employers are needed: setting to two')
        num_employers = 2
    self._num_employers = num_employers
    self._logger.log('debug', 'Number of employers set to {}'.format(num_employers))
    self._limit = num_employers * len(self._value_ranges)
    self._logger.log('debug', 'Limit set to {}'.format(self._limit))
Sets the number of employer bees; at least two are required
50,004
def processes(self, processes):
    # Shut down any existing pool before reconfiguring
    if self._processes > 1:
        self._pool.close()
        self._pool.join()
    self._processes = processes
    if self._processes > 1:
        self._pool = multiprocessing.Pool(processes)
    else:
        self._pool = None
    self._logger.log('debug', 'Number of processes set to {}'.format(processes))
Set the number of concurrent processes the ABC will utilize for fitness function evaluation; if <= 1, a single process is used
50,005
def infer_process_count(self):
    try:
        self.processes = multiprocessing.cpu_count()
    except NotImplementedError:
        self._logger.log('error', 'Could not infer CPU count, setting number of processes back to 4')
        self.processes = 4
Infers the number of CPU cores in the current system and sets the number of concurrent processes accordingly
50,006
def create_employers(self):
    self.__verify_ready(True)
    employers = []
    for i in range(self._num_employers):
        employer = EmployerBee(self.__gen_random_values())
        if self._processes <= 1:
            # Serial evaluation: score the bee immediately
            employer.error = self._fitness_fxn(employer.values, **self._args)
            employer.score = employer.get_score()
            if np.isnan(employer.score):
                self._logger.log('warn', 'NaN bee score: {}, {}'.format(employer.id, employer.score))
            self._logger.log('debug', 'Bee number {} created'.format(i + 1))
            self.__update(employer.score, employer.values, employer.error)
        else:
            # Parallel evaluation: queue the fitness call, collect results below
            employer.error = self._pool.apply_async(self._fitness_fxn, [employer.values], self._args)
            employers.append(employer)
        self._employers.append(employer)
    for idx, employer in enumerate(employers):
        employer.error = employer.error.get()
        employer.score = employer.get_score()
        if np.isnan(employer.score):
            self._logger.log('warn', 'NaN bee score: {}, {}'.format(employer.id, employer.score))
        self._logger.log('debug', 'Bee number {} created'.format(idx + 1))
        self.__update(employer.score, employer.values, employer.error)
    self._logger.log('debug', 'Employer creation complete')
Generate employer bees. This should be called directly after the ABC is initialized.
50,007
def run_iteration(self):
    self._employer_phase()
    self._calc_probability()
    self._onlooker_phase()
    self._check_positions()
Runs a single iteration of the ABC; employer phase -> probability calculation -> onlooker phase -> check positions
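A hypothetical driver loop; the colony class, its constructor arguments, and the best_performer attribute are illustrative names, not confirmed library API:

# Sketch: create the colony, seed employers once, then iterate
abc = ABC(fitness_fxn=my_cost, value_ranges=[('float', (0.0, 1.0))] * 3)
abc.create_employers()     # must run directly after initialization (see above)
for _ in range(100):       # fixed iteration budget
    abc.run_iteration()    # employer -> probability -> onlooker -> check positions
print(abc.best_performer)  # hypothetical accessor for the best result found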
50,008
def _calc_probability(self):
    self._logger.log('debug', 'Calculating bee probabilities')
    self.__verify_ready()
    self._total_score = 0
    for employer in self._employers:
        self._total_score += employer.score
        if self.__update(employer.score, employer.values, employer.error):
            self._logger.log('info', 'Update to best performer - error: {} | score: {} | values: {}'.format(
                employer.error, employer.score, employer.values))
    for employer in self._employers:
        employer.calculate_probability(self._total_score)
Determines the probability that each bee will be chosen during the onlooker phase; also determines if a new best-performing bee is found
50,009
def _merge_bee(self, bee):
    random_dimension = randint(0, len(self._value_ranges) - 1)
    second_bee = randint(0, self._num_employers - 1)
    # Ensure the partner bee is not the bee being merged
    while bee.id == self._employers[second_bee].id:
        second_bee = randint(0, self._num_employers - 1)
    new_bee = deepcopy(bee)
    new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
        new_bee.values[random_dimension],
        self._employers[second_bee].values[random_dimension],
        self._value_ranges[random_dimension])
    fitness_score = new_bee.get_score(self._fitness_fxn(new_bee.values, **self._args))
    return (fitness_score, new_bee.values, new_bee.error)
Shifts a random value for a supplied bee in accordance with another random bee's value
50,010
def _move_bee(self, bee, new_values):
    score = np.nan_to_num(new_values[0])
    if bee.score > score:
        bee.failed_trials += 1
    else:
        bee.values = new_values[1]
        bee.score = score
        bee.error = new_values[2]
        bee.failed_trials = 0
        self._logger.log('debug', 'Bee assigned to new merged position')
Moves a bee to a new position if the new fitness score is better than the bee's current fitness score
50,011
def __update(self, score, values, error):
    if self._minimize:
        if self._best_score is None or score > self._best_score:
            self._best_score = score
            self._best_values = values.copy()
            self._best_error = error
            self._logger.log('debug', 'New best food source memorized: {}'.format(self._best_error))
            return True
    elif not self._minimize:
        if self._best_score is None or score < self._best_score:
            self._best_score = score
            self._best_values = values.copy()
            self._best_error = error
            self._logger.log('debug', 'New best food source memorized: {}'.format(self._best_error))
            return True
    return False
Update the best score and values if the given score is better than the current best score
50,012
def __gen_random_values(self):
    values = []
    if self._value_ranges is None:
        self._logger.log('crit', 'Must set the type/range of possible values')
        raise RuntimeError("Must set the type/range of possible values")
    else:
        for t in self._value_ranges:
            if t[0] == 'int':
                values.append(randint(t[1][0], t[1][1]))
            elif t[0] == 'float':
                values.append(np.random.uniform(t[1][0], t[1][1]))
            else:
                self._logger.log('crit', 'Value type must be either an `int` or a `float`')
                raise RuntimeError('Value type must be either an `int` or a `float`')
    return values
Generate random values based on supplied value ranges
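Each entry in _value_ranges is a (type, (min, max)) pair, for example:

value_ranges = [
    ('int', (0, 10)),        # drawn with randint (inclusive bounds)
    ('float', (-1.0, 1.0)),  # drawn with np.random.uniform
]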
50,013
def __verify_ready(self, creating=False):
    if len(self._value_ranges) == 0:
        self._logger.log('crit', 'Attribute value_ranges must have at least one value')
        raise RuntimeWarning('Attribute value_ranges must have at least one value')
    if len(self._employers) == 0 and creating is False:
        self._logger.log('crit', 'Need to create employers')
        raise RuntimeWarning('Need to create employers')
Sanity checks that ensure everything is set up properly, to avoid errors during execution
50,014
def import_settings(self, filename):
    if not os.path.isfile(filename):
        self._logger.log('error', 'File: {} not found, continuing with default settings'.format(filename))
    else:
        with open(filename, 'r') as jsonFile:
            data = json.load(jsonFile)
            self._value_ranges = data['valueRanges']
            self._best_values = []
            # Cast stored strings back to the declared value type
            for index, value in enumerate(data['best_values']):
                if self._value_ranges[index][0] == 'int':
                    self._best_values.append(int(value))
                else:
                    self._best_values.append(float(value))
            self.minimize = data['minimize']
            self.num_employers = data['num_employers']
            self._best_score = float(data['best_score'])
            self.limit = data['limit']
Import settings from a JSON file
50,015
def save_settings(self, filename):
    data = dict()
    data['valueRanges'] = self._value_ranges
    data['best_values'] = [str(value) for value in self._best_values]
    data['minimize'] = self._minimize
    data['num_employers'] = self._num_employers
    data['best_score'] = str(self._best_score)
    data['limit'] = self._limit
    data['best_error'] = self._best_error
    with open(filename, 'w') as outfile:
        json.dump(data, outfile, indent=4, sort_keys=True)
Save settings to a JSON file
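A sketch of the JSON these two methods round-trip (the file name and values are hypothetical, keys match the code above):

# colony.save_settings('abc_settings.json') writes, e.g.:
# {
#     "best_error": 0.02,
#     "best_score": "0.98",
#     "best_values": ["3", "0.7213"],
#     "limit": 20,
#     "minimize": true,
#     "num_employers": 10,
#     "valueRanges": [["int", [0, 10]], ["float", [0.0, 1.0]]]
# }
# colony.import_settings('abc_settings.json') restores the state,
# casting best_values back to int/float from the stored strings.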
50,016
def get_score(self, error=None):
    if error is not None:
        self.error = error
    if self.error >= 0:
        return 1 / (self.error + 1)
    else:
        return 1 + abs(self.error)
Calculate the bee's fitness score given a value returned by the fitness function
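Tracing get_score: non-negative errors map into (0, 1] and negative errors map above 1, so a lower error always yields a higher score. For any EmployerBee instance:

# bee.get_score(3.0)   ->  1 / (3.0 + 1)  == 0.25
# bee.get_score(0.0)   ->  1 / (0.0 + 1)  == 1.0
# bee.get_score(-2.0)  ->  1 + abs(-2.0)  == 3.0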
50,017
def create_elb_dns(self, regionspecific=False):
    if regionspecific:
        dns_elb = self.generated.dns()['elb_region']
    else:
        dns_elb = self.generated.dns()['elb']
    dns_elb_aws = find_elb(name=self.app_name, env=self.env, region=self.region)
    zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet)
    self.log.info('Updating Application URL: %s', dns_elb)
    dns_kwargs = {
        'dns_name': dns_elb,
        'dns_name_aws': dns_elb_aws,
        'dns_ttl': self.dns_ttl,
    }
    for zone_id in zone_ids:
        self.log.debug('zone_id: %s', zone_id)
        update_dns_zone_record(self.env, zone_id, **dns_kwargs)
    return dns_elb
Create DNS entries in Route 53.
50,018
def create_failover_dns(self, primary_region='us-east-1'):
    dns_record = self.generated.dns()['global']
    zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet)
    elb_dns_aws = find_elb(name=self.app_name, env=self.env, region=self.region)
    elb_dns_zone_id = find_elb_dns_zone_id(name=self.app_name, env=self.env, region=self.region)
    if primary_region in elb_dns_aws:
        failover_state = 'PRIMARY'
    else:
        failover_state = 'SECONDARY'
    self.log.info("%s set as %s record", elb_dns_aws, failover_state)
    self.log.info('Updating Application Failover URL: %s', dns_record)
    dns_kwargs = {
        'dns_name': dns_record,
        'elb_dns_zone_id': elb_dns_zone_id,
        'elb_aws_dns': elb_dns_aws,
        'dns_ttl': self.dns_ttl,
        'failover_state': failover_state,
    }
    for zone_id in zone_ids:
        self.log.debug('zone_id: %s', zone_id)
        update_failover_dns_record(self.env, zone_id, **dns_kwargs)
    return dns_record
Create DNS entries in Route 53 for multiregion failover setups.
50,019
def format_listeners(elb_settings=None, env='dev', region='us-east-1'):
    LOG.debug('ELB settings:\n%s', elb_settings)
    credential = get_env_credential(env=env)
    account = credential['accountId']
    listeners = []
    if 'ports' in elb_settings:
        for listener in elb_settings['ports']:
            cert_name = format_cert_name(
                env=env, region=region, account=account,
                certificate=listener.get('certificate', None))
            lb_proto, lb_port = listener['loadbalancer'].split(':')
            i_proto, i_port = listener['instance'].split(':')
            listener_policies = listener.get('policies', [])
            listener_policies += listener.get('listener_policies', [])
            backend_policies = listener.get('backend_policies', [])
            elb_data = {
                'externalPort': int(lb_port),
                'externalProtocol': lb_proto.upper(),
                'internalPort': int(i_port),
                'internalProtocol': i_proto.upper(),
                'sslCertificateId': cert_name,
                'listenerPolicies': listener_policies,
                'backendPolicies': backend_policies,
            }
            listeners.append(elb_data)
    else:
        listener_policies = elb_settings.get('policies', [])
        listener_policies += elb_settings.get('listener_policies', [])
        backend_policies = elb_settings.get('backend_policies', [])
        listeners = [{
            'externalPort': int(elb_settings['lb_port']),
            'externalProtocol': elb_settings['lb_proto'],
            'internalPort': int(elb_settings['i_port']),
            'internalProtocol': elb_settings['i_proto'],
            'sslCertificateId': elb_settings['certificate'],
            'listenerPolicies': listener_policies,
            'backendPolicies': backend_policies,
        }]
    for listener in listeners:
        LOG.info('ELB Listener:\n'
                 'loadbalancer %(externalProtocol)s:%(externalPort)d\n'
                 'instance %(internalProtocol)s:%(internalPort)d\n'
                 'certificate: %(sslCertificateId)s\n'
                 'listener_policies: %(listenerPolicies)s\n'
                 'backend_policies: %(backendPolicies)s', listener)
    return listeners
Format ELB Listeners into standard list.
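For illustration, a hypothetical 'ports'-style settings block and the listener it would format to (certificate and account values are made up):

elb_settings = {
    'ports': [
        {'loadbalancer': 'https:443', 'instance': 'http:8080',
         'certificate': 'my_cert', 'listener_policies': ['policy-a']},
    ],
}
# format_listeners(elb_settings=elb_settings, env='dev') would yield roughly:
# [{'externalPort': 443, 'externalProtocol': 'HTTPS',
#   'internalPort': 8080, 'internalProtocol': 'HTTP',
#   'sslCertificateId': 'arn:aws:iam::<account>:server-certificate/my_cert',
#   'listenerPolicies': ['policy-a'], 'backendPolicies': []}]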
50,020
def format_cert_name(env='', account='', region='', certificate=None):
    cert_name = None
    if certificate:
        if certificate.startswith('arn'):
            LOG.info("Full ARN provided...skipping lookup.")
            cert_name = certificate
        else:
            generated_cert_name = generate_custom_cert_name(env, region, account, certificate)
            if generated_cert_name:
                LOG.info("Found generated certificate %s from template", generated_cert_name)
                cert_name = generated_cert_name
            else:
                LOG.info("Using default certificate name logic")
                cert_name = ('arn:aws:iam::{account}:server-certificate/{name}'.format(
                    account=account, name=certificate))
    LOG.debug('Certificate name: %s', cert_name)
    return cert_name
Format the SSL certificate name into an ARN for the ELB.
50,021
def generate_custom_cert_name(env='', region='', account='', certificate=None):
    cert_name = None
    template_kwargs = {'account': account, 'name': certificate}
    try:
        rendered_template = get_template(
            template_file='infrastructure/iam/tlscert_naming.json.j2', **template_kwargs)
        tlscert_dict = json.loads(rendered_template)
    except ForemastTemplateNotFound:
        LOG.info('Unable to find TLS Cert Template...falling back to default logic...')
        return cert_name
    try:
        LOG.info("Attempting to find TLS Cert using TLS Cert Template v1 lookup...")
        cert_name = tlscert_dict[env][certificate]
        LOG.info("Found TLS certificate named %s under %s using TLS Cert Template v1", certificate, env)
    except KeyError:
        LOG.error("Unable to find TLS certificate named %s under %s using v1 TLS Cert Template.", certificate, env)
    tls_services = ['iam', 'acm']
    if cert_name is None and all(service in tlscert_dict for service in tls_services):
        LOG.info("Attempting to find TLS Cert using TLS Cert Template v2 lookup...")
        if certificate in tlscert_dict['iam'][env]:
            cert_name = tlscert_dict['iam'][env][certificate]
            LOG.info("Found IAM TLS certificate named %s under %s using TLS Cert Template v2", certificate, env)
        elif certificate in tlscert_dict['acm'][region][env]:
            cert_name = tlscert_dict['acm'][region][env][certificate]
            LOG.info("Found ACM TLS certificate named %s under %s in %s using TLS Cert Template v2",
                     certificate, env, region)
        else:
            LOG.error("Unable to find TLS certificate named %s under parent keys [ACM, IAM] %s "
                      "in v2 TLS Cert Template.", certificate, env)
    return cert_name
Generate a custom TLS Cert name based on a template.
50,022
def main():
    logging.basicConfig(format=LOGGING_FORMAT)
    log = logging.getLogger(__name__)
    parser = argparse.ArgumentParser()
    add_debug(parser)
    add_app(parser)
    add_env(parser)
    add_properties(parser)
    args = parser.parse_args()
    logging.getLogger(__package__.split(".")[0]).setLevel(args.debug)
    log.debug('Parsed arguments: %s', args)
    if "prod" not in args.env:
        log.info('No slack message sent, not a production environment')
    else:
        log.info("Sending slack message, production environment")
        slacknotify = SlackNotification(app=args.app, env=args.env, prop_path=args.properties)
        slacknotify.post_message()
Send Slack notification to a configured channel.
50,023
def main():
    logging.basicConfig(format=LOGGING_FORMAT)
    parser = argparse.ArgumentParser(description=main.__doc__)
    add_debug(parser)
    add_app(parser)
    args = parser.parse_args()
    if args.debug == logging.DEBUG:
        logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
    else:
        LOG.setLevel(args.debug)
    for env in ENVS:
        for region in REGIONS:
            LOG.info('DESTROY %s:%s', env, region)
            try:
                destroy_dns(app=args.app, env=env)
            except botocore.exceptions.ClientError as error:
                LOG.warning('DNS issue for %s in %s: %s', env, region, error)
            try:
                destroy_elb(app=args.app, env=env, region=region)
            except SpinnakerError:
                pass
            try:
                destroy_iam(app=args.app, env=env)
            except botocore.exceptions.ClientError as error:
                LOG.warning('IAM issue for %s in %s: %s', env, region, error)
            try:
                destroy_s3(app=args.app, env=env)
            except botocore.exceptions.ClientError as error:
                LOG.warning('S3 issue for %s in %s: %s', env, region, error)
            try:
                destroy_sg(app=args.app, env=env, region=region)
            except SpinnakerError:
                pass
            LOG.info('Destroyed %s:%s', env, region)
    LOG.info('Destruction complete.')
Attempt to fully destroy AWS Resources for a Spinnaker Application.
50,024
def check_provider_healthcheck(settings, default_provider='Discovery'):
    ProviderHealthCheck = collections.namedtuple('ProviderHealthCheck', ['providers', 'has_healthcheck'])
    eureka_enabled = settings['app']['eureka_enabled']
    providers = settings['asg']['provider_healthcheck']
    LOG.debug('Template defined Health Check Providers: %s', providers)
    health_check_providers = []
    has_healthcheck = False
    normalized_default_provider = default_provider.capitalize()
    if eureka_enabled:
        LOG.info('Eureka enabled, enabling default Provider Health Check: %s', normalized_default_provider)
        for provider, active in providers.items():
            if provider.lower() == normalized_default_provider.lower():
                providers[provider] = True
                LOG.debug('Override defined Provider Health Check: %s -> %s', active, providers[provider])
                break
        else:
            LOG.debug('Adding default Provider Health Check: %s', normalized_default_provider)
            providers[normalized_default_provider] = True
    for provider, active in providers.items():
        if active:
            health_check_providers.append(provider.capitalize())
    LOG.info('Provider healthchecks: %s', health_check_providers)
    if health_check_providers:
        has_healthcheck = True
    return ProviderHealthCheck(providers=health_check_providers, has_healthcheck=has_healthcheck)
Set Provider Health Check when specified.
50,025
def get_template_name(env, pipeline_type):
    pipeline_base = 'pipeline/pipeline'
    template_name_format = '{pipeline_base}'
    if env.startswith('prod'):
        template_name_format = template_name_format + '_{env}'
    else:
        template_name_format = template_name_format + '_stages'
    if pipeline_type != 'ec2':
        template_name_format = template_name_format + '_{pipeline_type}'
    template_name_format = template_name_format + '.json.j2'
    template_name = template_name_format.format(
        pipeline_base=pipeline_base, env=env, pipeline_type=pipeline_type)
    return template_name
Generates the correct template name based on environment and pipeline type
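Tracing the branches gives, for example:

get_template_name('prod', 'ec2')    # 'pipeline/pipeline_prod.json.j2'
get_template_name('dev', 'ec2')     # 'pipeline/pipeline_stages.json.j2'
get_template_name('dev', 'lambda')  # 'pipeline/pipeline_stages_lambda.json.j2'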
50,026
def ec2_pipeline_setup(generated=None,
                       project='',
                       settings=None,
                       env='',
                       pipeline_type='',
                       region='',
                       region_subnets=None):
    data = copy.deepcopy(settings)
    user_data = generate_encoded_user_data(
        env=env,
        region=region,
        generated=generated,
        group_name=project,
        pipeline_type=pipeline_type)
    # Assemble and dedupe the instance Security Groups
    instance_security_groups = sorted(DEFAULT_EC2_SECURITYGROUPS[env])
    instance_security_groups.append(generated.security_group_app)
    instance_security_groups.extend(settings['security_group']['instance_extras'])
    instance_security_groups = remove_duplicate_sg(instance_security_groups)
    LOG.info('Instance security groups to attach: %s', instance_security_groups)
    if settings['asg']['scaling_policy']:
        scalingpolicy = True
        LOG.info('Found scaling policy')
    else:
        scalingpolicy = False
        LOG.info('No scaling policy found')
    if settings['app']['eureka_enabled']:
        elb = []
    else:
        elb = [generated.elb_app]
    LOG.info('Attaching the following ELB: %s', elb)
    health_checks = check_provider_healthcheck(settings)
    # Use EC2 health checks in dev or when Eureka is enabled
    if env == 'dev' or settings['app']['eureka_enabled']:
        data['asg'].update({'hc_type': 'EC2'})
        LOG.info('Switching health check type to: EC2')
    hc_grace_period = data['asg'].get('hc_grace_period')
    app_grace_period = data['asg'].get('app_grace_period')
    grace_period = hc_grace_period + app_grace_period
    ssh_keypair = data['asg'].get('ssh_keypair', None)
    if not ssh_keypair:
        ssh_keypair = '{0}_{1}_default'.format(env, region)
    LOG.info('SSH keypair (%s) used', ssh_keypair)
    if settings['app']['canary']:
        canary_user_data = generate_encoded_user_data(
            env=env,
            region=region,
            generated=generated,
            group_name=project,
            canary=True)
        data['app'].update({'canary_encoded_user_data': canary_user_data})
    data['asg'].update({
        'hc_type': data['asg'].get('hc_type').upper(),
        'hc_grace_period': grace_period,
        'ssh_keypair': ssh_keypair,
        'provider_healthcheck': json.dumps(health_checks.providers),
        'enable_public_ips': json.dumps(settings['asg']['enable_public_ips']),
        'has_provider_healthcheck': health_checks.has_healthcheck,
        'asg_whitelist': ASG_WHITELIST,
    })
    data['app'].update({
        'az_dict': json.dumps(region_subnets),
        'encoded_user_data': user_data,
        'instance_security_groups': json.dumps(instance_security_groups),
        'elb': json.dumps(elb),
        'scalingpolicy': scalingpolicy,
    })
    return data
Handles ec2 pipeline data setup
50,027
def create_pipeline(self):
    pipelines = self.settings['pipeline']['pipeline_files']
    self.log.info('Uploading manual Pipelines: %s', pipelines)
    lookup = FileLookup(git_short=self.generated.gitlab()['main'], runway_dir=self.runway_dir)
    for json_file in pipelines:
        json_dict = lookup.json(filename=json_file)
        json_dict.setdefault('application', self.app_name)
        json_dict.setdefault('name', normalize_pipeline_name(name=json_file))
        json_dict.setdefault('id', get_pipeline_id(app=json_dict['application'], name=json_dict['name']))
        self.post_pipeline(json_dict)
    return True
Use JSON files to create Pipelines.
50,028
def main():
    logging.basicConfig(format=LOGGING_FORMAT)
    log = logging.getLogger(__name__)
    parser = argparse.ArgumentParser()
    add_debug(parser)
    add_app(parser)
    add_properties(parser)
    parser.add_argument('-b', '--base', help='Base AMI name to use, e.g. fedora, tomcat')
    parser.add_argument(
        "--triggerjob", help="The jenkins job to monitor for pipeline triggering", required=True)
    parser.add_argument(
        '--onetime', required=False, choices=ENVS, help='Onetime deployment environment')
    parser.add_argument(
        '-t', '--type', dest='type', required=False, default='ec2',
        help='Deployment type, e.g. ec2, lambda')
    args = parser.parse_args()
    if args.base and '"' in args.base:
        args.base = args.base.strip('"')
    logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
    log.debug('Parsed arguments: %s', args)
    if args.onetime:
        spinnakerapps = SpinnakerPipelineOnetime(
            app=args.app, onetime=args.onetime, trigger_job=args.triggerjob,
            prop_path=args.properties, base=args.base)
        spinnakerapps.create_pipeline()
    else:
        if args.type == "ec2":
            spinnakerapps = SpinnakerPipeline(
                app=args.app, trigger_job=args.triggerjob, prop_path=args.properties, base=args.base)
            spinnakerapps.create_pipeline()
        elif args.type == "lambda":
            spinnakerapps = SpinnakerPipelineLambda(
                app=args.app, trigger_job=args.triggerjob, prop_path=args.properties, base=args.base)
            spinnakerapps.create_pipeline()
        elif args.type == "s3":
            spinnakerapps = SpinnakerPipelineS3(
                app=args.app, trigger_job=args.triggerjob, prop_path=args.properties, base=args.base)
            spinnakerapps.create_pipeline()
Creates a pipeline in Spinnaker
50,029
def convert_ini(config_dict):
    config_lines = []
    for env, configs in sorted(config_dict.items()):
        for resource, app_properties in sorted(configs.items()):
            try:
                for app_property, value in sorted(app_properties.items()):
                    variable = '{env}_{resource}_{app_property}'.format(
                        env=env, resource=resource, app_property=app_property).upper()
                    if isinstance(value, (dict, DeepChainMap)):
                        safe_value = "'{0}'".format(json.dumps(dict(value)))
                    else:
                        safe_value = json.dumps(value)
                    line = "{variable}={value}".format(variable=variable, value=safe_value)
                    LOG.debug('INI line: %s', line)
                    config_lines.append(line)
            except AttributeError:
                resource = resource.upper()
                app_properties = "'{}'".format(json.dumps(app_properties))
                line = '{0}={1}'.format(resource, app_properties)
                LOG.debug('INI line: %s', line)
                config_lines.append(line)
    return config_lines
Convert _config_dict_ into a list of INI-formatted strings.
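A small worked example (hypothetical config values):

config_dict = {'dev': {'app': {'name': 'edgeforrest', 'instances': 2}}}
convert_ini(config_dict)
# -> ['DEV_APP_INSTANCES=2', 'DEV_APP_NAME="edgeforrest"']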
50,030
def write_variables(app_configs=None, out_file='', git_short=''):
    generated = gogoutils.Generator(*gogoutils.Parser(git_short).parse_url(), formats=APP_FORMATS)
    json_configs = {}
    for env, configs in app_configs.items():
        if env != 'pipeline':
            instance_profile = generated.iam()['profile']
            rendered_configs = json.loads(
                get_template(
                    'configs/configs.json.j2',
                    env=env,
                    app=generated.app_name(),
                    profile=instance_profile,
                    formats=generated))
            json_configs[env] = dict(DeepChainMap(configs, rendered_configs))
            region_list = configs.get('regions', rendered_configs['regions'])
            json_configs[env]['regions'] = region_list
            for region in region_list:
                region_config = json_configs[env][region]
                json_configs[env][region] = dict(DeepChainMap(region_config, rendered_configs))
        else:
            default_pipeline_json = json.loads(get_template('configs/pipeline.json.j2', formats=generated))
            json_configs['pipeline'] = dict(DeepChainMap(configs, default_pipeline_json))
    LOG.debug('Compiled configs:\n%s', pformat(json_configs))
    config_lines = convert_ini(json_configs)
    with open(out_file, 'at') as jenkins_vars:
        LOG.info('Appending variables to %s.', out_file)
        jenkins_vars.write('\n'.join(config_lines))
    with open(out_file + '.exports', 'wt') as export_vars:
        LOG.info('Writing sourceable variables to %s.', export_vars.name)
        export_vars.write('\n'.join('export {0}'.format(line) for line in config_lines))
    with open(out_file + '.json', 'wt') as json_handle:
        LOG.info('Writing JSON to %s.', json_handle.name)
        LOG.debug('Total JSON dict:\n%s', json_configs)
        json.dump(json_configs, json_handle)
    return json_configs
Append _application.json_ configs to _out_file_, .exports, and .json.
50,031
def get_sns_subscriptions(app_name, env, region):
    session = boto3.Session(profile_name=env, region_name=region)
    sns_client = session.client('sns')
    lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
    lambda_subscriptions = []
    subscriptions = sns_client.list_subscriptions()
    for subscription in subscriptions['Subscriptions']:
        if subscription['Protocol'] == "lambda" and subscription['Endpoint'] == lambda_alias_arn:
            lambda_subscriptions.append(subscription['SubscriptionArn'])
    if not lambda_subscriptions:
        LOG.debug('SNS subscription for function %s not found', lambda_alias_arn)
    return lambda_subscriptions
List SNS Lambda subscriptions.
50,032
def destroy_cloudwatch_log_event(app='', env='dev', region=''):
    session = boto3.Session(profile_name=env, region_name=region)
    cloudwatch_client = session.client('logs')
    # Note: the log group name is hardcoded here
    cloudwatch_client.delete_subscription_filter(logGroupName='/aws/lambda/awslimitchecker', filterName=app)
    return True
Destroy CloudWatch log event.
50,033
def get_accounts(self, provider='aws'):
    url = '{gate}/credentials'.format(gate=API_URL)
    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    assert response.ok, 'Failed to get accounts: {0}'.format(response.text)
    all_accounts = response.json()
    self.log.debug('Accounts in Spinnaker:\n%s', all_accounts)
    filtered_accounts = []
    for account in all_accounts:
        if account['type'] == provider:
            filtered_accounts.append(account)
    if not filtered_accounts:
        raise ForemastError('No Accounts matching {0}.'.format(provider))
    return filtered_accounts
Get Accounts added to Spinnaker.
50,034
def create_app(self):
    self.appinfo['accounts'] = self.get_accounts()
    self.log.debug('Pipeline Config\n%s', pformat(self.pipeline_config))
    self.log.debug('App info:\n%s', pformat(self.appinfo))
    jsondata = self.retrieve_template()
    wait_for_task(jsondata)
    self.log.info("Successfully created %s application", self.appname)
    return jsondata
Send a POST to Spinnaker to create a new application using class variables.
50,035
def retrieve_template(self):
    links = self.retrieve_instance_links()
    self.log.debug('Links is \n%s', pformat(links))
    self.pipeline_config['instance_links'].update(links)
    jsondata = get_template(
        template_file='infrastructure/app_data.json.j2',
        appinfo=self.appinfo,
        pipeline_config=self.pipeline_config,
        formats=self.generated,
        run_as_user=DEFAULT_RUN_AS_USER)
    self.log.debug('jsondata is %s', pformat(jsondata))
    return jsondata
Sets the instance links in pipeline_config and then renders the template files
50,036
def retrieve_instance_links(self):
    instance_links = {}
    self.log.debug("LINKS IS %s", LINKS)
    for key, value in LINKS.items():
        if value not in self.pipeline_config['instance_links'].values():
            instance_links[key] = value
    return instance_links
Appends to existing instance links
50,037
def get_cloudwatch_event_rule(app_name, account, region):
    session = boto3.Session(profile_name=account, region_name=region)
    cloudwatch_client = session.client('events')
    lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=account, region=region)
    rule_names = cloudwatch_client.list_rule_names_by_target(TargetArn=lambda_alias_arn)
    if rule_names['RuleNames']:
        all_rules = rule_names['RuleNames']
    else:
        LOG.debug("No event rules found")
        all_rules = []
    return all_rules
Get CloudWatch Event rule names.
50,038
def setup_pathing(self):
    self.s3_version_uri = self._path_formatter(self.version)
    self.s3_latest_uri = self._path_formatter("LATEST")
    self.s3_canary_uri = self._path_formatter("CANARY")
    self.s3_alpha_uri = self._path_formatter("ALPHA")
    self.s3_mirror_uri = self._path_formatter("MIRROR")
Format pathing for S3 deployments.
50,039
def _path_formatter(self, suffix):
    if suffix.lower() == "mirror":
        path_items = [self.bucket, self.s3path]
    else:
        path_items = [self.bucket, self.s3path, suffix]
    path = '/'.join(path_items)
    s3_format = "s3://{}"
    formatted_path = path.replace('//', '/')
    full_path = s3_format.format(formatted_path)
    return full_path
Format the S3 path properly.
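With hypothetical attributes bucket='mybucket' and s3path='apps/myapp', the formatter yields:

# self._path_formatter('1.2.3')   -> 's3://mybucket/apps/myapp/1.2.3'
# self._path_formatter('LATEST')  -> 's3://mybucket/apps/myapp/LATEST'
# self._path_formatter('MIRROR')  -> 's3://mybucket/apps/myapp'  (mirror drops the suffix)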
50,040
def upload_artifacts(self):
    deploy_strategy = self.properties["deploy_strategy"]
    mirror = False
    if deploy_strategy == "mirror":
        mirror = True
    self._upload_artifacts_to_path(mirror=mirror)
    if deploy_strategy == "highlander":
        self._sync_to_uri(self.s3_latest_uri)
    elif deploy_strategy == "canary":
        self._sync_to_uri(self.s3_canary_uri)
    elif deploy_strategy == "alpha":
        self._sync_to_uri(self.s3_alpha_uri)
    elif deploy_strategy == "mirror":
        pass
    else:
        raise NotImplementedError
Upload artifacts to S3 and copy to the correct path depending on strategy.
50,041
def promote_artifacts(self, promote_stage='latest'):
    if promote_stage.lower() == 'alpha':
        self._sync_to_uri(self.s3_canary_uri)
    elif promote_stage.lower() == 'canary':
        self._sync_to_uri(self.s3_latest_uri)
    else:
        self._sync_to_uri(self.s3_latest_uri)
Promote artifact version to destination.
50,042
def _get_upload_cmd(self, mirror=False):
    if mirror:
        dest_uri = self.s3_mirror_uri
    else:
        dest_uri = self.s3_version_uri
    cmd = 'aws s3 sync {} {} --delete --exact-timestamps --profile {}'.format(
        self.artifact_path, dest_uri, self.env)
    return cmd
Generate the S3 CLI upload command
50,043
def _upload_artifacts_to_path(self, mirror=False):
    # Check the path is set before listing it to avoid an OSError
    if not self.artifact_path or not os.listdir(self.artifact_path):
        raise S3ArtifactNotFound
    uploaded = False
    if self.s3props.get("content_metadata"):
        LOG.info("Uploading in multiple parts to set metadata")
        uploaded = self.content_metadata_uploads(mirror=mirror)
    if not uploaded:
        cmd = self._get_upload_cmd(mirror=mirror)
        result = subprocess.run(cmd, check=True, shell=True, stdout=subprocess.PIPE)
        LOG.debug("Upload Command Output: %s", result.stdout)
    LOG.info("Uploaded artifacts to %s bucket", self.bucket)
Recursively upload directory contents to S3.
50,044
def content_metadata_uploads(self, mirror=False):
    excludes_str = ''
    includes_cmds = []
    cmd_base = self._get_upload_cmd(mirror=mirror)
    for content in self.s3props.get('content_metadata'):
        full_path = os.path.join(self.artifact_path, content['path'])
        if not os.listdir(full_path):
            raise S3ArtifactNotFound
        excludes_str += '--exclude "{}/*" '.format(content['path'])
        include_cmd = '{} --exclude "*" --include "{}/*"'.format(cmd_base, content['path'])
        include_cmd += ' --content-encoding {} --metadata-directive REPLACE'.format(content['content-encoding'])
        includes_cmds.append(include_cmd)
    exclude_cmd = '{} {}'.format(cmd_base, excludes_str)
    result = subprocess.run(exclude_cmd, check=True, shell=True, stdout=subprocess.PIPE)
    LOG.info("Uploaded files without metadata with command: %s", exclude_cmd)
    LOG.debug("Upload Command Output: %s", result.stdout)
    for include_cmd in includes_cmds:
        result = subprocess.run(include_cmd, check=True, shell=True, stdout=subprocess.PIPE)
        LOG.info("Uploaded files with metadata with command: %s", include_cmd)
        LOG.debug("Upload Command Output: %s", result.stdout)
    return True
Finds all specified encoded directories and uploads them in multiple parts, setting metadata for the objects.
50,045
def _sync_to_uri(self, uri):
    cmd_cp = 'aws s3 cp {} {} --recursive --profile {}'.format(self.s3_version_uri, uri, self.env)
    cmd_sync = 'aws s3 sync {} {} --delete --exact-timestamps --profile {}'.format(
        self.s3_version_uri, uri, self.env)
    cp_result = subprocess.run(cmd_cp, check=True, shell=True, stdout=subprocess.PIPE)
    LOG.debug("Copy to %s before sync output: %s", uri, cp_result.stdout)
    LOG.info("Copied version %s to %s", self.version, uri)
    sync_result = subprocess.run(cmd_sync, check=True, shell=True, stdout=subprocess.PIPE)
    LOG.debug("Sync to %s command output: %s", uri, sync_result.stdout)
    LOG.info("Synced version %s to %s", self.version, uri)
Copy and sync versioned directory to URI in S3.
50,046
def get_vpc_id(account, region):
    url = '{0}/networks/aws'.format(API_URL)
    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    if not response.ok:
        raise SpinnakerVPCNotFound(response.text)
    vpcs = response.json()
    for vpc in vpcs:
        LOG.debug('VPC: %(name)s, %(account)s, %(region)s => %(id)s', vpc)
        if 'name' in vpc and all([vpc['name'] == 'vpc', vpc['account'] == account, vpc['region'] == region]):
            LOG.info('Found VPC ID for %s in %s: %s', account, region, vpc['id'])
            vpc_id = vpc['id']
            break
    else:
        LOG.fatal('VPC list: %s', vpcs)
        raise SpinnakerVPCIDNotFound('No VPC available for {0} [{1}].'.format(account, region))
    return vpc_id
Get VPC ID configured for account in region.
50,047
def get_subnets(target='ec2', purpose='internal', env='', region=''):
    account_az_dict = defaultdict(defaultdict)
    subnet_id_dict = defaultdict(defaultdict)
    subnet_url = '{0}/subnets/aws'.format(API_URL)
    subnet_response = requests.get(subnet_url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    if not subnet_response.ok:
        raise SpinnakerTimeout(subnet_response.text)
    subnet_list = subnet_response.json()
    for subnet in subnet_list:
        LOG.debug('Subnet: %(account)s\t%(region)s\t%(target)s\t%(vpcId)s\t%(availabilityZone)s', subnet)
        if subnet.get('target', '') == target:
            availability_zone = subnet['availabilityZone']
            account = subnet['account']
            subnet_region = subnet['region']
            subnet_id = subnet['id']
            # Collect availability zones per account and region
            try:
                if availability_zone not in account_az_dict[account][subnet_region]:
                    account_az_dict[account][subnet_region].append(availability_zone)
            except KeyError:
                account_az_dict[account][subnet_region] = [availability_zone]
            # Collect subnet IDs that match the requested purpose
            if subnet['purpose'] == purpose:
                try:
                    subnet_id_dict[account][subnet_region].append(subnet_id)
                except KeyError:
                    subnet_id_dict[account][subnet_region] = [subnet_id]
            LOG.debug('%s regions: %s', account, list(account_az_dict[account].keys()))
    if all([env, region]):
        try:
            region_dict = {region: account_az_dict[env][region]}
            region_dict['subnet_ids'] = {region: subnet_id_dict[env][region]}
            LOG.debug('Region dict: %s', region_dict)
            return region_dict
        except KeyError:
            raise SpinnakerSubnetError(env=env, region=region)
    LOG.debug('AZ dict:\n%s', pformat(dict(account_az_dict)))
    return account_az_dict
Get all availability zones for a given target.
50,048
def create_lambda_events(self):
    remove_all_lambda_permissions(app_name=self.app_name, env=self.env, region=self.region)
    triggers = self.properties['lambda_triggers']
    for trigger in triggers:
        if trigger['type'] == 'sns':
            create_sns_event(app_name=self.app_name, env=self.env, region=self.region, rules=trigger)
        if trigger['type'] == 'cloudwatch-event':
            create_cloudwatch_event(app_name=self.app_name, env=self.env, region=self.region, rules=trigger)
        if trigger['type'] == 'cloudwatch-logs':
            create_cloudwatch_log_event(app_name=self.app_name, env=self.env, region=self.region, rules=trigger)
        if trigger['type'] == 'api-gateway':
            apigateway = APIGateway(
                app=self.app_name, env=self.env, region=self.region, rules=trigger, prop_path=self.prop_path)
            apigateway.setup_lambda_api()
    # Group S3 triggers by bucket so each bucket is configured once
    s3_triggers = [x for x in triggers if x['type'] == 's3']
    bucket_triggers = dict()
    for s3_trigger in s3_triggers:
        bucket = s3_trigger.get('bucket')
        if bucket in bucket_triggers:
            bucket_triggers[bucket].append(s3_trigger)
        else:
            bucket_triggers[bucket] = [s3_trigger]
    for bucket, triggers in bucket_triggers.items():
        create_s3_event(app_name=self.app_name, env=self.env, region=self.region, bucket=bucket, triggers=triggers)
Create all defined Lambda events for a Lambda application.
50,049
def get_all_pipelines(app=''):
    url = '{host}/applications/{app}/pipelineConfigs'.format(host=API_URL, app=app)
    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    assert response.ok, 'Could not retrieve Pipelines for {0}.'.format(app)
    pipelines = response.json()
    LOG.debug('Pipelines:\n%s', pipelines)
    return pipelines
Get a list of all the Pipelines in _app_.
50,050
def get_pipeline_id(app='', name=''):
    return_id = None
    pipelines = get_all_pipelines(app=app)
    for pipeline in pipelines:
        LOG.debug('ID of %(name)s: %(id)s', pipeline)
        if pipeline['name'] == name:
            return_id = pipeline['id']
            LOG.info('Pipeline %s found, ID: %s', name, return_id)
            break
    return return_id
Get the ID for Pipeline _name_.
50,051
def normalize_pipeline_name(name=''):
    normalized_name = name
    for bad in '\\/?%#':
        normalized_name = normalized_name.replace(bad, '_')
    return normalized_name
Translate unsafe characters to underscores.
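For example:

normalize_pipeline_name(name='pipelines/edge%canary.json')
# -> 'pipelines_edge_canary.json'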
50,052
def get_all_apps():
    LOG.info('Retrieving list of all Spinnaker applications')
    url = '{}/applications'.format(API_URL)
    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    assert response.ok, 'Could not retrieve application list'
    apps = response.json()
    LOG.debug('All Applications:\n%s', apps)
    return apps
Get a list of all applications in Spinnaker.
50,053
def get_details(app='groupproject', env='dev', region='us-east-1'):
    url = '{host}/applications/{app}'.format(host=API_URL, app=app)
    request = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    if not request.ok:
        raise SpinnakerAppNotFound('"{0}" not found.'.format(app))
    app_details = request.json()
    LOG.debug('App details: %s', app_details)
    group = app_details['attributes'].get('repoProjectKey')
    project = app_details['attributes'].get('repoSlug')
    generated = gogoutils.Generator(group, project, env=env, region=region, formats=APP_FORMATS)
    LOG.debug('Application details: %s', generated)
    return generated
Extract details for Application.
50,054
def create_pipeline(self):
    clean_pipelines(app=self.app_name, settings=self.settings)
    pipeline_envs = self.environments
    self.log.debug('Envs from pipeline.json: %s', pipeline_envs)
    regions_envs = collections.defaultdict(list)
    for env in pipeline_envs:
        for region in self.settings[env]['regions']:
            regions_envs[region].append(env)
    self.log.info('Environments and Regions for Pipelines:\n%s', json.dumps(regions_envs, indent=4))
    pipelines = {}
    for region, envs in regions_envs.items():
        pipelines[region] = self.render_wrapper(region=region)
        previous_env = None
        for env in envs:
            block = construct_pipeline_block_s3(
                env=env,
                generated=self.generated,
                previous_env=previous_env,
                region=region,
                settings=self.settings[env][region],
                pipeline_data=self.settings['pipeline'])
            pipelines[region]['stages'].extend(json.loads(block))
            previous_env = env
    self.log.debug('Assembled Pipelines:\n%s', pformat(pipelines))
    for region, pipeline in pipelines.items():
        renumerate_stages(pipeline)
        self.post_pipeline(pipeline)
    return True
Main wrapper for pipeline creation. 1. Runs clean_pipelines to clean up existing ones. 2. Determines which environments the pipeline needs. 3. Renders all of the pipeline blocks as defined in configs. 4. Runs post_pipeline to create the pipeline.
50,055
def _check_lambda(self):
    exists = False
    try:
        self.lambda_client.get_function(FunctionName=self.app_name)
        exists = True
    except boto3.exceptions.botocore.exceptions.ClientError:
        pass
    return exists
Check if lambda function exists.
50,056
def _check_lambda_alias(self):
    aliases = self.lambda_client.list_aliases(FunctionName=self.app_name)
    matched_alias = False
    for alias in aliases['Aliases']:
        if alias['Name'] == self.env:
            LOG.info('Found alias %s for function %s', self.env, self.app_name)
            matched_alias = True
            break
    else:
        LOG.info('No alias %s found for function %s', self.env, self.app_name)
    return matched_alias
Check if lambda alias exists.
50,057
def _vpc_config(self):
    if self.vpc_enabled:
        subnets = get_subnets(env=self.env, region=self.region, purpose='internal')['subnet_ids'][self.region]
        security_groups = self._get_sg_ids()
        vpc_config = {'SubnetIds': subnets, 'SecurityGroupIds': security_groups}
    else:
        vpc_config = {'SubnetIds': [], 'SecurityGroupIds': []}
    LOG.debug("Lambda VPC config setup: %s", vpc_config)
    return vpc_config
Get VPC config.
50,058
def _get_sg_ids(self):
    try:
        lambda_extras = self.settings['security_groups']['lambda_extras']
    except KeyError:
        lambda_extras = []
    security_groups = [self.app_name] + lambda_extras
    sg_ids = []
    for security_group in security_groups:
        sg_id = get_security_group_id(name=security_group, env=self.env, region=self.region)
        sg_ids.append(sg_id)
    return sg_ids
Get IDs for all defined security groups.
50,059
def update_function_configuration(self, vpc_config):
    LOG.info('Updating configuration for lambda function: %s', self.app_name)
    try:
        self.lambda_client.update_function_configuration(
            Environment=self.lambda_environment,
            FunctionName=self.app_name,
            Runtime=self.runtime,
            Role=self.role_arn,
            Handler=self.handler,
            Description=self.description,
            Timeout=int(self.timeout),
            MemorySize=int(self.memory),
            VpcConfig=vpc_config)
        if self.concurrency_limit:
            self.lambda_client.put_function_concurrency(
                FunctionName=self.app_name,
                ReservedConcurrentExecutions=self.concurrency_limit)
        else:
            self.lambda_client.delete_function_concurrency(FunctionName=self.app_name)
    except boto3.exceptions.botocore.exceptions.ClientError as error:
        if 'CreateNetworkInterface' in error.response['Error']['Message']:
            message = '{0} is missing "ec2:CreateNetworkInterface"'.format(self.role_arn)
            LOG.debug(message)
            raise SystemExit(message)
        raise
    LOG.info('Updating Lambda function tags')
    lambda_arn = get_lambda_arn(self.app_name, self.env, self.region)
    self.lambda_client.tag_resource(
        Resource=lambda_arn, Tags={'app_group': self.group, 'app_name': self.app_name})
    LOG.info("Successfully updated Lambda configuration.")
Update existing Lambda function configuration.
50,060
def create_function(self, vpc_config):
    zip_file = 'lambda-holder.zip'
    # Placeholder zip so the function can be created before real code is deployed
    with zipfile.ZipFile(zip_file, mode='w') as zipped:
        zipped.writestr('index.py', 'print "Hello world"')
    contents = ''
    with open('lambda-holder.zip', 'rb') as openfile:
        contents = openfile.read()
    LOG.info('Creating lambda function: %s', self.app_name)
    try:
        self.lambda_client.create_function(
            Environment=self.lambda_environment,
            FunctionName=self.app_name,
            Runtime=self.runtime,
            Role=self.role_arn,
            Handler=self.handler,
            Code={'ZipFile': contents},
            Description=self.description,
            Timeout=int(self.timeout),
            MemorySize=int(self.memory),
            Publish=False,
            VpcConfig=vpc_config,
            Tags={'app_group': self.group, 'app_name': self.app_name})
    except boto3.exceptions.botocore.exceptions.ClientError as error:
        if 'CreateNetworkInterface' in error.response['Error']['Message']:
            message = '{0} is missing "ec2:CreateNetworkInterface"'.format(self.role_arn)
            LOG.critical(message)
            raise SystemExit(message)
        raise
    LOG.info("Successfully created Lambda function and alias")
Create the Lambda function and configure its parameters.
50,061
def create_lambda_function(self):
    vpc_config = self._vpc_config()
    if self._check_lambda():
        self.update_function_configuration(vpc_config)
    else:
        self.create_function(vpc_config)
    if self._check_lambda_alias():
        self.update_alias()
    else:
        self.create_alias()
Create or update Lambda function.
50,062
def destroy_sg(app='', env='', region='', **_):
    vpc = get_vpc_id(account=env, region=region)
    url = '{api}/securityGroups/{env}/{region}/{app}'.format(api=API_URL, env=env, region=region, app=app)
    payload = {'vpcId': vpc}
    security_group = requests.get(url, params=payload, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    if not security_group:
        LOG.info('Nothing to delete.')
    else:
        LOG.info('Found Security Group in %(region)s: %(name)s', security_group)
        destroy_request = get_template('destroy/destroy_sg.json.j2', app=app, env=env, region=region, vpc=vpc)
        wait_for_task(destroy_request)
    return True
Destroy Security Group.
50,063
def destroy_s3(app='', env='dev', **_):
    session = boto3.Session(profile_name=env)
    client = session.resource('s3')
    generated = get_details(app=app, env=env)
    archaius = generated.archaius()
    bucket = client.Bucket(archaius['bucket'])
    for item in bucket.objects.filter(Prefix=archaius['path']):
        item.Object().delete()
        LOG.info('Deleted: %s/%s', item.bucket_name, item.key)
    return True
Destroy S3 Resources for _app_ in _env_.
50,064
def main():
    parser = argparse.ArgumentParser()
    add_debug(parser)
    add_app(parser)
    parser.add_argument(
        '--email', help='Email address to associate with application',
        default='PS-DevOpsTooling@example.com')
    parser.add_argument('--project', help='Git project to associate with application', default='None')
    parser.add_argument('--repo', help='Git repo to associate with application', default='None')
    parser.add_argument('--git', help='Git URI', default=None)
    args = parser.parse_args()
    logging.basicConfig(format=LOGGING_FORMAT)
    logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
    if args.git and args.git != 'None':
        parsed = gogoutils.Parser(args.git).parse_url()
        generated = gogoutils.Generator(*parsed, formats=APP_FORMATS)
        project = generated.project
        repo = generated.repo
    else:
        project = args.project
        repo = args.repo
    spinnakerapps = SpinnakerApp(app=args.app, email=args.email, project=project, repo=repo)
    spinnakerapps.create_app()
Entry point for creating a Spinnaker application.
50,065
def destroy_s3_event(app, env, region):
    generated = get_details(app=app, env=env)
    bucket = generated.s3_app_bucket()
    session = boto3.Session(profile_name=env, region_name=region)
    s3_client = session.client('s3')
    # An empty configuration removes all notifications from the bucket
    config = {}
    s3_client.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=config)
    LOG.debug("Deleted Lambda S3 notification")
    return True
Destroy S3 event.
50,066
def destroy_iam(app='', env='dev', **_):
    session = boto3.Session(profile_name=env)
    client = session.client('iam')
    generated = get_details(env=env, app=app)
    generated_iam = generated.iam()
    app_details = collections.namedtuple('AppDetails', generated_iam.keys())
    details = app_details(**generated_iam)
    LOG.debug('Application details: %s', details)
    resource_action(
        client,
        action='remove_user_from_group',
        log_format='Removed user from group: %(UserName)s ~> %(GroupName)s',
        GroupName=details.group,
        UserName=details.user)
    resource_action(client, action='delete_user', log_format='Destroyed user: %(UserName)s', UserName=details.user)
    resource_action(client, action='delete_group', log_format='Destroyed group: %(GroupName)s', GroupName=details.group)
    resource_action(
        client,
        action='remove_role_from_instance_profile',
        log_format='Destroyed Instance Profile from Role: %(InstanceProfileName)s ~> %(RoleName)s',
        InstanceProfileName=details.profile,
        RoleName=details.role)
    resource_action(
        client,
        action='delete_instance_profile',
        log_format='Destroyed Instance Profile: %(InstanceProfileName)s',
        InstanceProfileName=details.profile)
    role_policies = []
    try:
        role_policies = resource_action(
            client,
            action='list_role_policies',
            log_format='Found Role Policies for %(RoleName)s.',
            RoleName=details.role)['PolicyNames']
    except TypeError:
        LOG.info('Role %s not found.', details.role)
    for policy in role_policies:
        resource_action(
            client,
            action='delete_role_policy',
            log_format='Removed Inline Policy from Role: %(PolicyName)s ~> %(RoleName)s',
            RoleName=details.role,
            PolicyName=policy)
    attached_role_policies = []
    try:
        attached_role_policies = resource_action(
            client,
            action='list_attached_role_policies',
            log_format='Found attached Role Polices for %(RoleName)s.',
            RoleName=details.role)['AttachedPolicies']
    except TypeError:
        LOG.info('Role %s not found.', details.role)
    for policy in attached_role_policies:
        resource_action(
            client,
            action='detach_role_policy',
            log_format='Detached Policy from Role: %(PolicyArn)s ~> %(RoleName)s',
            RoleName=details.role,
            PolicyArn=policy['PolicyArn'])
    resource_action(client, action='delete_role', log_format='Destroyed Role: %(RoleName)s', RoleName=details.role)
Destroy IAM Resources.
50,067
def get_role_arn(role_name, env, region):
    session = boto3.Session(profile_name=env, region_name=region)
    iam_client = session.client('iam')
    LOG.debug('Searching for %s.', role_name)
    role = iam_client.get_role(RoleName=role_name)
    role_arn = role['Role']['Arn']
    LOG.debug("Found role's %s ARN %s", role_name, role_arn)
    return role_arn
Get role ARN given role name.
50,068
def render_policy_template(account_number='',
                           app='coreforrest',
                           env='dev',
                           group='forrest',
                           items=None,
                           pipeline_settings=None,
                           region='us-east-1',
                           service=''):
    statements = []
    rendered_service_policy = get_template(
        'infrastructure/iam/{0}.json.j2'.format(service),
        account_number=account_number,
        app=app,
        env=env,
        group=group,
        region=region,
        items=items,
        settings=pipeline_settings)
    try:
        statement_block = json.loads(rendered_service_policy)
        statements.append(statement_block)
    except ValueError:
        LOG.debug('Need to make %s template into list.', service)
        statements = json.loads('[{0}]'.format(rendered_service_policy))
    LOG.debug('Rendered IAM Policy statements: %s', statements)
    return statements
Render IAM Policy template.
50,069
def construct_policy(app='coreforrest', env='dev', group='forrest', region='us-east-1', pipeline_settings=None):
    LOG.info('Create custom IAM Policy for %s.', app)

    policy_json = None  # stays None when no services or no statements render

    services = pipeline_settings.get('services', {})
    LOG.debug('Found requested services: %s', services)

    services = auto_service(pipeline_settings=pipeline_settings, services=services)

    if services:
        credential = get_env_credential(env=env)
        account_number = credential['accountId']

        statements = []
        for service, value in services.items():
            if value is True:
                items = []
            elif isinstance(value, str):
                items = [value]
            else:
                items = value

            rendered_statements = render_policy_template(
                account_number=account_number,
                app=app,
                env=env,
                group=group,
                items=items,
                pipeline_settings=pipeline_settings,
                region=region,
                service=service)
            statements.extend(rendered_statements)

        if statements:
            policy_json = get_template('infrastructure/iam/wrapper.json.j2', statements=json.dumps(statements))
    else:
        LOG.info('No services defined for %s.', app)

    return policy_json
Assemble IAM Policy for _app_.
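A sketch of the settings shape this consumes: each key under services names a policy template, and its value may be True, a single string, or a list (the service names below are hypothetical):

pipeline_settings = {
    'services': {
        's3': True,                          # items = []
        'sqs': 'my-queue',                   # items = ['my-queue']
        'dynamodb': ['table-a', 'table-b'],  # items passed through as-is
    },
}
policy_json = construct_policy(app='coreforrest', env='dev', group='forrest',
                               region='us-east-1', pipeline_settings=pipeline_settings)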
50,070
def validate_gate():
    try:
        credentials = get_env_credential()
        LOG.debug('Found credentials: %s', credentials)
        LOG.info('Gate working.')
    except TypeError:
        LOG.fatal('Gate connection not valid: API_URL = %s', API_URL)
Check Gate connection.
50,071
def create_s3_event(app_name, env, region, bucket, triggers):
    session = boto3.Session(profile_name=env, region_name=region)
    s3_client = session.client('s3')

    lambda_alias_arn = get_lambda_alias_arn(app_name, env, region)

    LOG.debug("Lambda ARN for lambda function %s is %s.", app_name, lambda_alias_arn)
    LOG.debug("Creating S3 events for bucket %s", bucket)

    # Permission for S3 to invoke the Lambda function
    principal = 's3.amazonaws.com'
    statement_id = "{}_s3_{}".format(app_name, bucket).replace('.', '')
    source_arn = "arn:aws:s3:::{}".format(bucket)
    add_lambda_permissions(
        function=lambda_alias_arn,
        env=env,
        region=region,
        principal=principal,
        statement_id=statement_id,
        source_arn=source_arn)

    template_kwargs = {"lambda_arn": lambda_alias_arn, "triggers": triggers}
    config = get_template(template_file='infrastructure/lambda/s3_event.json.j2', **template_kwargs)

    s3_client.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=json.loads(config))
    LOG.info("Created lambda %s S3 event on bucket %s", app_name, bucket)
Create S3 Lambda events from triggers.
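A hedged usage sketch; the exact shape of each trigger is dictated by the s3_event.json.j2 template, so the fields below are assumptions:

triggers = [{
    'events': ['s3:ObjectCreated:*'],  # hypothetical trigger fields
    'prefix': 'incoming/',
    'suffix': '.json',
}]
create_s3_event('coreforrest', 'dev', 'us-east-1', 'coreforrest-bucket', triggers)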
50,072
def generate_packer_filename(provider, region, builder):
    filename = '{0}_{1}_{2}.json'.format(provider, region, builder)
    return filename
Generate a filename to be used by Packer.
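For example:

generate_packer_filename('aws', 'us-east-1', 'ebs')
# 'aws_us-east-1_ebs.json'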
50,073
def get_template(template_file='', **kwargs):
    template = get_template_object(template_file)
    LOG.info('Rendering template %s', template.filename)

    for key, value in kwargs.items():
        LOG.debug('%s => %s', key, value)

    rendered_json = template.render(**kwargs)
    LOG.debug('Rendered JSON:\n%s', rendered_json)
    return rendered_json
Get the Jinja2 template and render it with dict _kwargs_.
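A usage sketch, assuming the template file exists in the loader's search path:

rendered = get_template('infrastructure/iam/wrapper.json.j2',
                        statements=json.dumps([{'Effect': 'Allow'}]))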
50,074
def renumerate_stages(pipeline):
    stages = pipeline['stages']

    main_index = 0
    branch_index = 0
    previous_refid = ''

    for stage in stages:
        current_refid = stage['refId'].lower()

        if current_refid == 'master':
            if main_index == 0:
                stage['requisiteStageRefIds'] = []
            else:
                stage['requisiteStageRefIds'] = [str(main_index)]
            main_index += 1
            stage['refId'] = str(main_index)
        elif current_refid == 'branch':
            if previous_refid == 'branch':
                branch_index += 1
            else:
                branch_index = 0
            stage['refId'] = str((main_index * 100) + branch_index)
            stage['requisiteStageRefIds'] = [str(main_index)]
        elif current_refid == 'merge':
            pass

        previous_refid = current_refid

        LOG.debug('step=%(name)s\trefId=%(refId)s\trequisiteStageRefIds=%(requisiteStageRefIds)s', stage)

    return pipeline
Renumber Pipeline Stage reference IDs to account for dependencies.
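A worked example of the renumbering (stage names are hypothetical): 'master' stages receive sequential refIds and depend on the previous main stage, while runs of 'branch' stages fan out from the last main stage at main_index * 100 plus an offset:

pipeline = {'stages': [
    {'name': 'bake', 'refId': 'master', 'requisiteStageRefIds': []},
    {'name': 'qe', 'refId': 'branch', 'requisiteStageRefIds': []},
    {'name': 'canary', 'refId': 'branch', 'requisiteStageRefIds': []},
    {'name': 'deploy', 'refId': 'master', 'requisiteStageRefIds': []},
]}
renumerate_stages(pipeline)
# bake -> refId '1'; qe -> '100' and canary -> '101', both requiring '1';
# deploy -> refId '2', requiring '1'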
50,075
def post_task(task_data, task_uri='/tasks'):
    url = '{}/{}'.format(API_URL, task_uri.lstrip('/'))

    if isinstance(task_data, str):
        task_json = task_data
    else:
        task_json = json.dumps(task_data)

    resp = requests.post(url, data=task_json, headers=HEADERS, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    resp_json = resp.json()
    LOG.debug(resp_json)

    assert resp.ok, 'Spinnaker communication error: {0}'.format(resp.text)
    return resp_json['ref']
Create Spinnaker Task.
50,076
def _check_task(taskid):
    try:
        taskurl = taskid.get('ref', '0000')
    except AttributeError:
        taskurl = taskid

    taskid = taskurl.split('/tasks/')[-1]
    LOG.info('Checking taskid %s', taskid)

    url = '{}/tasks/{}'.format(API_URL, taskid)
    task_response = requests.get(url, headers=HEADERS, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    LOG.debug(task_response.json())

    assert task_response.ok, 'Spinnaker communication error: {0}'.format(task_response.text)

    task_state = task_response.json()
    status = task_state['status']
    LOG.info('Current task status: %s', status)

    if status == 'SUCCEEDED':
        return status
    elif status == 'TERMINAL':
        raise SpinnakerTaskError(task_state)
    else:
        # Task still running; raise so the retry wrapper polls again
        raise ValueError
Check Spinnaker Task status.
50,077
def check_task(taskid, timeout=DEFAULT_TASK_TIMEOUT, wait=2):
    max_attempts = int(timeout / wait)

    try:
        return retry_call(
            partial(_check_task, taskid),
            max_attempts=max_attempts,
            wait=wait,
            exceptions=(AssertionError, ValueError),
        )
    except ValueError:
        raise SpinnakerTaskInconclusiveError('Task failed to complete in {0} seconds: {1}'.format(timeout, taskid))
Wrap _check_task with retries until the timeout expires.
50,078
def wait_for_task(task_data, task_uri='/tasks'):
    taskid = post_task(task_data, task_uri)

    if isinstance(task_data, str):
        json_data = json.loads(task_data)
    else:
        json_data = task_data

    # Inspect the first job to pick an environment- and type-specific timeout
    job = json_data['job'][0]
    env = job.get('credentials')
    task_type = job.get('type')

    timeout = TASK_TIMEOUTS.get(env, dict()).get(task_type, DEFAULT_TASK_TIMEOUT)
    LOG.debug("Task %s will timeout after %s", task_type, timeout)

    return check_task(taskid, timeout)
Run task and check the result.
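A sketch tying the task helpers together; the payload follows Spinnaker's task format, but the job contents here are hypothetical:

task_data = {
    'application': 'coreforrest',
    'description': 'Create Load Balancer: coreforrest',
    'job': [{'type': 'upsertLoadBalancer', 'credentials': 'dev'}],
}
# Posts the task, then polls until SUCCEEDED, TERMINAL, or timeout.
status = wait_for_task(json.dumps(task_data))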
50,079
def main():
    logging.basicConfig(format=LOGGING_FORMAT)

    parser = argparse.ArgumentParser(description=main.__doc__)
    add_debug(parser)
    add_app(parser)
    add_env(parser)
    add_properties(parser)
    add_region(parser)
    add_artifact_path(parser)
    add_artifact_version(parser)
    args = parser.parse_args()

    logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
    LOG.debug('Args: %s', vars(args))

    rendered_props = get_properties(args.properties)

    if rendered_props['pipeline']['type'] == 's3':
        s3app = S3Apps(app=args.app, env=args.env, region=args.region, prop_path=args.properties)
        s3app.create_bucket()

        s3deploy = S3Deployment(
            app=args.app,
            env=args.env,
            region=args.region,
            prop_path=args.properties,
            artifact_path=args.artifact_path,
            artifact_version=args.artifact_version)
        s3deploy.upload_artifacts()
    else:
        init_properties(**vars(args))
Create application.properties for a given application.
50,080
def init_properties(env='dev', app='unnecessary', **_):
    aws_env = boto3.session.Session(profile_name=env)
    s3client = aws_env.resource('s3')

    generated = get_details(app=app, env=env)
    archaius = generated.archaius()
    archaius_file = '{path}/application.properties'.format(path=archaius['path'])

    try:
        s3client.Object(archaius['bucket'], archaius_file).get()
        LOG.info('Found: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file})
        return True
    except boto3.exceptions.botocore.client.ClientError:
        s3client.Object(archaius['bucket'], archaius_file).put()
        LOG.info('Created: %(bucket)s/%(file)s', {'bucket': archaius['bucket'], 'file': archaius_file})
        return False
Make sure _application.properties_ file exists in S3.
50,081
def create_cloudwatch_event(app_name, env, region, rules):
    session = boto3.Session(profile_name=env, region_name=region)
    cloudwatch_client = session.client('events')

    rule_name = rules.get('rule_name')
    schedule = rules.get('schedule')
    rule_description = rules.get('rule_description')
    json_input = rules.get('json_input', {})

    if schedule is None:
        LOG.critical('Schedule is required and no schedule is defined!')
        raise InvalidEventConfiguration('Schedule is required and no schedule is defined!')

    if rule_name is None:
        LOG.critical('Rule name is required and no rule_name is defined!')
        raise InvalidEventConfiguration('Rule name is required and no rule_name is defined!')
    else:
        LOG.info('%s and %s', app_name, rule_name)
        rule_name = "{}_{}".format(app_name, rule_name.replace(' ', '_'))

    if rule_description is None:
        rule_description = "{} - {}".format(app_name, rule_name)

    lambda_arn = get_lambda_arn(app=app_name, account=env, region=region)
    account_id = get_env_credential(env=env)['accountId']

    # Allow CloudWatch Events to invoke the Lambda function
    principal = "events.amazonaws.com"
    statement_id = '{}_cloudwatch_{}'.format(app_name, rule_name)
    source_arn = 'arn:aws:events:{}:{}:rule/{}'.format(region, account_id, rule_name)
    add_lambda_permissions(
        function=lambda_arn,
        statement_id=statement_id,
        action='lambda:InvokeFunction',
        principal=principal,
        source_arn=source_arn,
        env=env,
        region=region,
    )

    cloudwatch_client.put_rule(
        Name=rule_name,
        ScheduleExpression=schedule,
        State='ENABLED',
        Description=rule_description,
    )

    targets = []
    json_payload = '{}'.format(json.dumps(json_input))
    target = {
        "Id": app_name,
        "Arn": lambda_arn,
        "Input": json_payload,
    }
    targets.append(target)

    put_targets_response = cloudwatch_client.put_targets(Rule=rule_name, Targets=targets)
    LOG.debug('Cloudwatch put targets response: %s', put_targets_response)

    LOG.info('Created Cloudwatch event "%s" with schedule: %s', rule_name, schedule)
Create CloudWatch event for Lambda from rules.
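A sketch of a rules block; rule_name and schedule are required, the rest are optional (values shown are hypothetical):

rules = {
    'rule_name': 'nightly run',        # becomes 'coreforrest_nightly_run'
    'schedule': 'rate(1 day)',
    'rule_description': 'Nightly batch trigger',
    'json_input': {'mode': 'batch'},   # hypothetical payload for the Lambda
}
create_cloudwatch_event('coreforrest', 'dev', 'us-east-1', rules)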
50,082
def find_api_id(self):
    allapis = self.client.get_rest_apis()
    api_name = self.trigger_settings['api_name']
    api_id = None

    for api in allapis['items']:
        if api['name'] == api_name:
            api_id = api['id']
            self.log.info("Found API for: %s", api_name)
            break
    else:
        api_id = self.create_api()

    return api_id
Given an API name, find the API ID.
50,083
def find_resource_ids(self):
    all_resources = self.client.get_resources(restApiId=self.api_id)
    parent_id = None
    resource_id = None

    for resource in all_resources['items']:
        if resource['path'] == "/":
            parent_id = resource['id']
        if resource['path'] == self.trigger_settings['resource']:
            resource_id = resource['id']
            self.log.info("Found Resource ID for: %s", resource['path'])

    return resource_id, parent_id
Given a resource path and API ID, find the resource ID and root parent ID.
50,084
def add_lambda_integration(self):
    lambda_uri = self.generate_uris()['lambda_uri']
    self.client.put_integration(
        restApiId=self.api_id,
        resourceId=self.resource_id,
        httpMethod=self.trigger_settings['method'],
        integrationHttpMethod='POST',
        uri=lambda_uri,
        type='AWS')
    self.add_integration_response()
    self.log.info("Successfully added Lambda integration to API")
Attach the Lambda function to the API.
50,085
def add_integration_response(self):
    self.client.put_integration_response(
        restApiId=self.api_id,
        resourceId=self.resource_id,
        httpMethod=self.trigger_settings['method'],
        statusCode='200',
        responseTemplates={'application/json': ''})
Add an integration response to the API for the Lambda integration.
50,086
def add_permission(self):
    statement_id = '{}_api_{}'.format(self.app_name, self.trigger_settings['api_name'])
    principal = 'apigateway.amazonaws.com'

    lambda_alias_arn = get_lambda_alias_arn(self.app_name, self.env, self.region)
    lambda_unqualified_arn = get_lambda_arn(self.app_name, self.env, self.region)

    resource_name = self.trigger_settings.get('resource', '')
    resource_name = resource_name.replace('/', '')

    method_api_source_arn = 'arn:aws:execute-api:{}:{}:{}/{}/{}/{}'.format(
        self.region, self.account_id, self.api_id, self.env, self.trigger_settings['method'], resource_name)
    global_api_source_arn = 'arn:aws:execute-api:{}:{}:{}/*/*/{}'.format(
        self.region, self.account_id, self.api_id, resource_name)

    add_lambda_permissions(
        function=lambda_alias_arn,
        statement_id=statement_id + self.trigger_settings['method'],
        action='lambda:InvokeFunction',
        principal=principal,
        env=self.env,
        region=self.region,
        source_arn=method_api_source_arn)
    add_lambda_permissions(
        function=lambda_alias_arn,
        statement_id=statement_id,
        action='lambda:InvokeFunction',
        principal=principal,
        env=self.env,
        region=self.region,
        source_arn=global_api_source_arn)
    add_lambda_permissions(
        function=lambda_unqualified_arn,
        statement_id=statement_id + self.trigger_settings['method'],
        action='lambda:InvokeFunction',
        principal=principal,
        env=self.env,
        region=self.region,
        source_arn=method_api_source_arn)
    add_lambda_permissions(
        function=lambda_unqualified_arn,
        statement_id=statement_id,
        action='lambda:InvokeFunction',
        principal=principal,
        env=self.env,
        region=self.region,
        source_arn=global_api_source_arn)
Add permission to Lambda for the API Trigger.
50,087
def create_api_deployment(self):
    try:
        self.client.create_deployment(restApiId=self.api_id, stageName=self.env)
        self.log.info('Created a deployment resource.')
    except botocore.exceptions.ClientError as error:
        error_code = error.response['Error']['Code']
        if error_code == 'TooManyRequestsException':
            self.log.debug('Retrying. We have hit api limit.')
        else:
            self.log.debug('Retrying. We received %s.', error_code)
Create an API deployment using the ENV name as the stage.
50,088
def create_api_key(self):
    apikeys = self.client.get_api_keys()

    for key in apikeys['items']:
        if key['name'] == self.app_name:
            self.log.info("Key %s already exists", self.app_name)
            break
    else:
        self.client.create_api_key(
            name=self.app_name,
            enabled=True,
            stageKeys=[{'restApiId': self.api_id, 'stageName': self.env}])
        self.log.info("Successfully created API Key %s. Look in the AWS console for the key", self.app_name)
Create API Key for API access.
50,089
def _format_base_path(self, api_name):
    name = self.app_name
    if self.app_name != api_name:
        name = '{0}-{1}'.format(self.app_name, api_name)
    return name
Format the base path name.
50,090
def update_api_mappings(self):
    response_provider = None
    response_action = None
    domain = self.generated.apigateway()['domain']

    try:
        response_provider = self.client.create_base_path_mapping(
            domainName=domain,
            basePath=self._format_base_path(self.trigger_settings['api_name']),
            restApiId=self.api_id,
            stage=self.env,
        )
        response_action = 'API mapping added.'
    except botocore.exceptions.ClientError as error:
        error_code = error.response['Error']['Code']
        if error_code == 'ConflictException':
            response_action = 'API mapping already exists.'
        else:
            response_action = 'Unknown error: {0}'.format(error_code)

    self.log.debug('Provider response: %s', response_provider)
    self.log.info(response_action)
    return response_provider
Create a base path mapping for the API deployment on the custom domain.
50,091
def generate_uris(self):
    lambda_arn = "arn:aws:execute-api:{0}:{1}:{2}/*/{3}/{4}".format(
        self.region, self.account_id, self.api_id, self.trigger_settings['method'],
        self.trigger_settings['resource'])

    lambda_uri = ("arn:aws:apigateway:{0}:lambda:path/{1}/functions/"
                  "arn:aws:lambda:{0}:{2}:function:{3}/invocations").format(
                      self.region, self.api_version, self.account_id, self.app_name)

    api_dns = "https://{0}.execute-api.{1}.amazonaws.com/{2}".format(self.api_id, self.region, self.env)

    uri_dict = {'lambda_arn': lambda_arn, 'lambda_uri': lambda_uri, 'api_dns': api_dns}
    return uri_dict
Generate several Lambda URIs.
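With hypothetical identifiers (account 123456789012, API ID abc123, method GET, resource endpoint) and assuming self.api_version is the API Gateway path version '2015-03-31', the returned dict would resemble:

{
    'lambda_arn': 'arn:aws:execute-api:us-east-1:123456789012:abc123/*/GET/endpoint',
    'lambda_uri': 'arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/'
                  'arn:aws:lambda:us-east-1:123456789012:function:coreforrest/invocations',
    'api_dns': 'https://abc123.execute-api.us-east-1.amazonaws.com/dev',
}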
50,092
def create_api(self):
    created_api = self.client.create_rest_api(name=self.trigger_settings.get('api_name', self.app_name))
    api_id = created_api['id']
    self.log.info("Successfully created API")
    return api_id
Create the REST API.
50,093
def create_resource(self, parent_id=""):
    resource_name = self.trigger_settings.get('resource', '')
    resource_name = resource_name.replace('/', '')

    if not self.resource_id:
        created_resource = self.client.create_resource(
            restApiId=self.api_id,
            parentId=parent_id,
            pathPart=resource_name)
        self.resource_id = created_resource['id']
        self.log.info("Successfully created resource")
    else:
        self.log.info("Resource already exists. To update resource please delete existing resource: %s",
                      resource_name)
Create the specified resource.
50,094
def attach_method(self, resource_id):
    try:
        _response = self.client.put_method(
            restApiId=self.api_id,
            resourceId=resource_id,
            httpMethod=self.trigger_settings['method'],
            authorizationType="NONE",
            apiKeyRequired=False,
        )
        self.log.debug('Response for resource (%s) push authorization: %s', resource_id, _response)

        _response = self.client.put_method_response(
            restApiId=self.api_id,
            resourceId=resource_id,
            httpMethod=self.trigger_settings['method'],
            statusCode='200')
        self.log.debug('Response for resource (%s) no authorization: %s', resource_id, _response)

        self.log.info("Successfully attached method: %s", self.trigger_settings['method'])
    except botocore.exceptions.ClientError:
        self.log.info("Method %s already exists", self.trigger_settings['method'])
Attach the defined method.
50,095
def setup_lambda_api(self):
    self.create_resource(self.parent_id)
    self.attach_method(self.resource_id)
    self.add_lambda_integration()
    self.add_permission()
    self.create_api_deployment()
    self.create_api_key()
    self.update_api_mappings()
A wrapper for all the steps needed to set up the integration.
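Judging from the keys read across the methods above, the trigger settings this wrapper consumes look roughly like the following (values hypothetical):

trigger_settings = {
    'api_name': 'coreforrest-api',  # REST API to find or create
    'resource': '/endpoint',        # resource path to expose
    'method': 'GET',                # HTTP method to attach
}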
50,096
def main():
    logging.basicConfig(format=LOGGING_FORMAT)
    log = logging.getLogger(__name__)

    parser = argparse.ArgumentParser()
    add_debug(parser)
    add_app(parser)
    add_env(parser)
    add_region(parser)
    add_properties(parser)
    parser.add_argument("--elb-subnet", help="Subnet type, e.g. external, internal", required=True)
    args = parser.parse_args()

    logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
    log.debug('Parsed arguments: %s', args)

    spinnakerapps = SpinnakerDns(
        app=args.app, env=args.env, region=args.region, prop_path=args.properties, elb_subnet=args.elb_subnet)
    spinnakerapps.create_elb_dns()
Create DNS records for the application's ELB.
50,097
def _validate_cidr(self, rule):
    try:
        network = ipaddress.IPv4Network(rule['app'])
    except (ipaddress.NetmaskValueError, ValueError) as error:
        raise SpinnakerSecurityGroupCreationFailed(error)

    self.log.debug('Validating CIDR: %s', network.exploded)
    return True
Validate the CIDR block in a rule.
50,098
def _process_rules(self, rules):
    cidr = []
    non_cidr = []

    for rule in rules:
        if '.' in rule['app']:
            self.log.debug('Custom CIDR rule: %s', rule)
            self._validate_cidr(rule)
            cidr.append(rule)
        else:
            self.log.debug('SG reference rule: %s', rule)
            non_cidr.append(rule)

    self.log.debug('Custom CIDR rules: %s', cidr)
    self.log.debug('SG reference rules: %s', non_cidr)
    return non_cidr, cidr
Process rules into CIDR and non-CIDR lists.
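A sketch of the split when called on a security group instance: any 'app' value containing a dot is treated as a CIDR block and validated, everything else as a security group reference (the port fields below are hypothetical):

rules = [
    {'app': '10.0.0.0/16', 'start_port': 443, 'end_port': 443},
    {'app': 'edgeforrest', 'start_port': 8080, 'end_port': 8080},
]
non_cidr, cidr = self._process_rules(rules)
# non_cidr -> [{'app': 'edgeforrest', ...}], cidr -> [{'app': '10.0.0.0/16', ...}]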
50,099
def add_tags(self):
    session = boto3.session.Session(profile_name=self.env, region_name=self.region)
    resource = session.resource('ec2')

    group_id = get_security_group_id(self.app_name, self.env, self.region)
    security_group = resource.SecurityGroup(group_id)

    try:
        tag = security_group.create_tags(
            DryRun=False,
            Tags=[{'Key': 'app_group', 'Value': self.group}, {'Key': 'app_name', 'Value': self.app_name}])
        self.log.debug('Security group has been tagged: %s', tag)
    except botocore.exceptions.ClientError as error:
        self.log.warning(error)

    return True
Add tags to security group.