idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
50,100
def add_cidr_rules(self, rules):
    """Add CIDR rules to the Security Group via boto.

    Args:
        rules (list): Rule dicts with 'app' (CIDR), 'protocol',
            'start_port' and 'end_port' keys.

    Returns:
        True: All rules were applied (duplicates are tolerated).

    Raises:
        SpinnakerSecurityGroupError: A non-duplicate rule was rejected.
    """
    session = boto3.session.Session(profile_name=self.env, region_name=self.region)
    client = session.client('ec2')

    group_id = get_security_group_id(self.app_name, self.env, self.region)
    for rule in rules:
        data = {
            'DryRun': False,
            'GroupId': group_id,
            'IpPermissions': [{
                'IpProtocol': rule['protocol'],
                'FromPort': rule['start_port'],
                'ToPort': rule['end_port'],
                'IpRanges': [{'CidrIp': rule['app']}],
            }],
        }
        self.log.debug('Security Group rule: %s', data)

        try:
            client.authorize_security_group_ingress(**data)
        except botocore.exceptions.ClientError as error:
            if 'InvalidPermission.Duplicate' in str(error):
                self.log.debug('Duplicate rule exist, that is OK.')
            else:
                msg = 'Unable to add cidr rules to {}'.format(rule.get('app'))
                self.log.error(msg)
                # Chain the boto error so the root cause survives in tracebacks.
                raise SpinnakerSecurityGroupError(msg) from error
    return True
Add cidr rules to security group via boto .
50,101
def update_default_rules(self):
    """Concatenate application and global Security Group rules."""
    app_ingress = self.properties['security_group']['ingress']
    merged_ingress = conservative_merger.merge(DEFAULT_SECURITYGROUP_RULES, app_ingress)
    resolved_ingress = self.resolve_self_references(merged_ingress)
    self.log.info('Updated default rules:\n%s', merged_ingress)
    return resolved_ingress
Concatenate application and global security group rules.
50,102
def _create_security_group(self, ingress):
    """Send a POST to Spinnaker to create a new Security Group.

    Args:
        ingress (list): Normalized ingress rules for the template.

    Returns:
        True: The Spinnaker task completed successfully.
    """
    template_kwargs = {
        'app': self.app_name,
        'env': self.env,
        'region': self.region,
        'vpc': get_vpc_id(self.env, self.region),
        'description': self.properties['security_group']['description'],
        'ingress': ingress,
    }

    secgroup_json = get_template(
        template_file='infrastructure/securitygroup_data.json.j2', formats=self.generated, **template_kwargs)
    wait_for_task(secgroup_json)
    return True
Send a POST to spinnaker to create a new security group .
50,103
def create_security_group(self):
    """Send a POST to Spinnaker to create or update a Security Group.

    Returns:
        True: On success.

    Raises:
        ForemastConfigurationFileError: Environment configuration missing.
    """
    # Ensure the group exists before rules are attached.
    try:
        security_id = get_security_group_id(name=self.app_name, env=self.env, region=self.region)
    except (SpinnakerSecurityGroupError, AssertionError):
        self._create_security_group([])
    else:
        self.log.debug('Security Group ID %s found for %s.', security_id, self.app_name)

    try:
        ingress = self.update_default_rules()
    except KeyError:
        msg = 'Possible missing configuration for "{0}".'.format(self.env)
        self.log.error(msg)
        raise ForemastConfigurationFileError(msg)

    # Normalize every configured rule for every target application.
    ingress_rules = [
        self.create_ingress_rule(app, rule)
        for app, rules in ingress.items()
        for rule in rules
    ]

    ingress_rules_no_cidr, ingress_rules_cidr = self._process_rules(ingress_rules)

    self._create_security_group(ingress_rules_no_cidr)
    self.add_cidr_rules(ingress_rules_cidr)
    self.add_tags()

    self.log.info('Successfully created %s security group', self.app_name)
    return True
Send a POST to spinnaker to create or update a security group .
50,104
def create_ingress_rule(self, app, rule):
    """Create a normalized ingress rule.

    Args:
        app (str): Application the rule grants access to.
        rule (dict or int): Rule definition; a bare port number is
            shorthand for TCP on that single port.

    Returns:
        dict: Normalized ingress rule.
    """
    if isinstance(rule, dict):
        start_port = rule.get('start_port')
        end_port = rule.get('end_port')
        protocol = rule.get('protocol', 'tcp')

        requested_cross_account = rule.get('env', self.env)
        if self.env == requested_cross_account:
            # Same account: no cross-account lookup required.
            cross_account_env = None
            cross_account_vpc_id = None
        else:
            cross_account_env = requested_cross_account
            cross_account_vpc_id = get_vpc_id(cross_account_env, self.region)
    else:
        # Shorthand form: a single port implies TCP, same account.
        start_port = end_port = rule
        protocol = 'tcp'
        cross_account_env = None
        cross_account_vpc_id = None

    created_rule = {
        'app': app,
        'start_port': start_port,
        'end_port': end_port,
        'protocol': protocol,
        'cross_account_env': cross_account_env,
        'cross_account_vpc_id': cross_account_vpc_id,
    }
    self.log.debug('Normalized ingress rule: %s', created_rule)
    return created_rule
Create a normalized ingress rule .
50,105
def get_lambda_arn(app, account, region):
    """Get Lambda ARN.

    Args:
        app (str): Lambda function name.
        account (str): AWS account profile name.
        region (str): AWS region.

    Returns:
        str: ARN of the matching Lambda function.

    Raises:
        LambdaFunctionDoesNotExist: No function with that name exists.
    """
    session = boto3.Session(profile_name=account, region_name=region)
    lambda_client = session.client('lambda')

    paginator = lambda_client.get_paginator('list_functions')
    for page in paginator.paginate():
        for function in page['Functions']:
            if function['FunctionName'] == app:
                lambda_arn = function['FunctionArn']
                LOG.debug("Lambda ARN for lambda function %s is %s.", app, lambda_arn)
                return lambda_arn

    LOG.fatal('Lambda function with name %s not found in %s %s', app, account, region)
    raise LambdaFunctionDoesNotExist(
        'Lambda function with name {0} not found in {1} {2}'.format(app, account, region))
Get lambda ARN .
50,106
def get_lambda_alias_arn(app, account, region):
    """Get Lambda alias ARN.

    Assumes that account name is equal to alias name.

    Raises:
        LambdaAliasDoesNotExist: No alias named after the account exists.
    """
    session = boto3.Session(profile_name=account, region_name=region)
    lambda_client = session.client('lambda')

    aliases = lambda_client.list_aliases(FunctionName=app)
    for alias in aliases['Aliases']:
        if alias['Name'] == account:
            LOG.info('Found ARN for alias %s for function %s', account, app)
            return alias['AliasArn']

    fatal_message = 'Lambda alias {0} of function {1} not found'.format(account, app)
    LOG.fatal(fatal_message)
    raise LambdaAliasDoesNotExist(fatal_message)
Get lambda alias ARN . Assumes that account name is equal to alias name .
50,107
def add_lambda_permissions(function='',
                           statement_id='',
                           action='lambda:InvokeFunction',
                           principal='',
                           source_arn='',
                           env='',
                           region='us-east-1'):
    """Add permission to Lambda for the event trigger.

    Failures from AWS are logged and swallowed (best effort); the
    statement id is always prefixed with FOREMAST_PREFIX.
    """
    session = boto3.Session(profile_name=env, region_name=region)
    lambda_client = session.client('lambda')

    prefixed_sid = FOREMAST_PREFIX + statement_id
    add_permissions_kwargs = {
        'FunctionName': function,
        'StatementId': prefixed_sid,
        'Action': action,
        'Principal': principal,
    }
    if source_arn:
        add_permissions_kwargs['SourceArn'] = source_arn

    try:
        lambda_client.add_permission(**add_permissions_kwargs)
        response_action = 'Add permission with Sid: {}'.format(prefixed_sid)
    except boto3.exceptions.botocore.exceptions.ClientError as error:
        LOG.debug('Add permission error: %s', error)
        response_action = "Did not add permissions"

    LOG.debug('Related StatementId (SID): %s', prefixed_sid)
    LOG.info(response_action)
Add permission to Lambda for the event trigger .
50,108
def resource_action(client, action='', log_format='item: %(key)s', **kwargs):
    """Call _action_ using boto3 _client_ with _kwargs_.

    AccessDenied re-raises; EntityAlreadyExists is treated as success;
    any other client error is logged and swallowed (returns None).
    """
    result = None
    try:
        result = getattr(client, action)(**kwargs)
        LOG.info(log_format, kwargs)
    except botocore.exceptions.ClientError as error:
        error_code = error.response['Error']['Code']
        if error_code == 'EntityAlreadyExists':
            LOG.info(' '.join(('Found', log_format)), kwargs)
        else:
            LOG.fatal(error)
            if error_code == 'AccessDenied':
                raise
    return result
Call _action_ using boto3 _client_ with _kwargs_ .
50,109
def main():
    """Entry point for ELB creation."""
    logging.basicConfig(format=LOGGING_FORMAT)

    parser = argparse.ArgumentParser(description='Example with non-optional arguments')
    for register_arg in (add_debug, add_app, add_env, add_region, add_properties):
        register_arg(parser)
    args = parser.parse_args()

    logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)

    elb = SpinnakerELB(app=args.app, env=args.env, region=args.region, prop_path=args.properties)
    elb.create_elb()
Entry point for ELB creation
50,110
def make_elb_json(self):
    """Render the JSON template with arguments.

    Returns:
        str: Rendered ELB template.
    """
    env = self.env
    region = self.region
    elb_settings = self.properties['elb']
    LOG.debug('Block ELB Settings:\n%s', pformat(elb_settings))

    health_settings = elb_settings['health']
    elb_subnet_purpose = elb_settings.get('subnet_purpose', 'internal')

    region_subnets = get_subnets(target='elb', purpose=elb_subnet_purpose, env=env, region=region)
    region_subnets.pop("subnet_ids", None)

    # Spinnaker expects the strings 'true'/'false', not booleans.
    if elb_subnet_purpose == 'internal':
        is_internal = 'true'
    else:
        is_internal = 'false'

    target = elb_settings.get('target', 'HTTP:80/health')
    health = splay_health(target)

    listeners = format_listeners(elb_settings=elb_settings, env=self.env, region=region)

    idle_timeout = elb_settings.get('idle_timeout', None)
    access_log = elb_settings.get('access_log', {})
    connection_draining_timeout = elb_settings.get('connection_draining_timeout', None)

    # Copy the defaults: appending directly to DEFAULT_ELB_SECURITYGROUPS[env]
    # would mutate the shared module constant and leak groups across calls.
    security_groups = list(DEFAULT_ELB_SECURITYGROUPS[env])
    security_groups.append(self.app)
    security_groups.extend(self.properties['security_group']['elb_extras'])
    security_groups = remove_duplicate_sg(security_groups)

    template_kwargs = {
        'access_log': json.dumps(access_log),
        'app_name': self.app,
        'availability_zones': json.dumps(region_subnets),
        'connection_draining_timeout': json.dumps(connection_draining_timeout),
        'env': env,
        'hc_string': target,
        'health_interval': health_settings['interval'],
        'health_path': health.path,
        'health_port': health.port,
        'health_protocol': health.proto,
        'health_timeout': health_settings['timeout'],
        'healthy_threshold': health_settings['threshold'],
        'idle_timeout': json.dumps(idle_timeout),
        'isInternal': is_internal,
        'listeners': json.dumps(listeners),
        'region_zones': json.dumps(region_subnets[region]),
        'region': region,
        'security_groups': json.dumps(security_groups),
        'subnet_type': elb_subnet_purpose,
        'unhealthy_threshold': health_settings['unhealthy_threshold'],
        'vpc_id': get_vpc_id(env, region),
    }

    rendered_template = get_template(template_file='infrastructure/elb_data.json.j2', **template_kwargs)
    return rendered_template
Render the JSON template with arguments .
50,111
def create_elb(self):
    """Create or Update the ELB after rendering JSON data from configs.

    Asserts that the ELB task was successful.
    """
    json_data = self.make_elb_json()
    LOG.debug('Block ELB JSON Data:\n%s', pformat(json_data))

    wait_for_task(json_data)

    self.add_listener_policy(json_data)
    self.add_backend_policy(json_data)
    self.configure_attributes(json_data)
Create or Update the ELB after rendering JSON data from configs . Asserts that the ELB task was successful .
50,112
def add_listener_policy(self, json_data):
    """Attach listener policies to an ELB.

    Args:
        json_data (str): Rendered ELB task JSON.
    """
    session = boto3.session.Session(profile_name=self.env, region_name=self.region)
    elbclient = session.client('elb')

    # Create stickiness policies first if any port asks for them.
    stickiness = {}
    elb_settings = self.properties['elb']
    for port_config in elb_settings.get('ports') or []:
        if port_config.get("stickiness"):
            stickiness = self.add_stickiness()
            LOG.info('Stickiness Found: %s', stickiness)
            break

    for job in json.loads(json_data)['job']:
        for listener in job['listeners']:
            policies = []
            ext_port = listener['externalPort']
            if listener['listenerPolicies']:
                policies.extend(listener['listenerPolicies'])
            sticky_policy = stickiness.get(ext_port)
            if sticky_policy:
                policies.append(sticky_policy)
            if policies:
                LOG.info('Adding listener policies: %s', policies)
                elbclient.set_load_balancer_policies_of_listener(
                    LoadBalancerName=self.app, LoadBalancerPort=ext_port, PolicyNames=policies)
Attaches listener policies to an ELB.
50,113
def add_backend_policy(self, json_data):
    """Attach backend server policies to an ELB.

    Args:
        json_data (str): Rendered ELB task JSON.
    """
    session = boto3.session.Session(profile_name=self.env, region_name=self.region)
    elbclient = session.client('elb')

    for job in json.loads(json_data)['job']:
        for listener in job['listeners']:
            instance_port = listener['internalPort']
            backend_policy_list = listener['backendPolicies']
            if backend_policy_list:
                LOG.info('Adding backend server policies: %s', backend_policy_list)
                elbclient.set_load_balancer_policies_for_backend_server(
                    LoadBalancerName=self.app, InstancePort=instance_port, PolicyNames=backend_policy_list)
Attaches backend server policies to an ELB
50,114
def add_stickiness(self):
    """Add stickiness policy to the created ELB.

    Returns:
        dict: External port mapped to the created policy name.
    """
    stickiness_dict = {}
    session = boto3.session.Session(profile_name=self.env, region_name=self.region)
    elbclient = session.client('elb')

    elb_settings = self.properties['elb']
    for listener in elb_settings.get('ports'):
        if not listener.get("stickiness"):
            continue

        sticky_type = listener['stickiness']['type'].lower()
        externalport = int(listener['loadbalancer'].split(":")[-1])
        policyname_tmp = "{0}-{1}-{2}-{3}"

        if sticky_type == 'app':
            cookiename = listener['stickiness']['cookie_name']
            # Policy names may not contain dots.
            policy_key = cookiename.replace('.', '')
            policyname = policyname_tmp.format(self.app, sticky_type, externalport, policy_key)
            elbclient.create_app_cookie_stickiness_policy(
                LoadBalancerName=self.app, PolicyName=policyname, CookieName=cookiename)
            stickiness_dict[externalport] = policyname
        elif sticky_type == 'elb':
            cookie_ttl = listener['stickiness'].get('cookie_ttl', None)
            policyname = policyname_tmp.format(self.app, sticky_type, externalport, cookie_ttl)
            if cookie_ttl:
                elbclient.create_lb_cookie_stickiness_policy(
                    LoadBalancerName=self.app, PolicyName=policyname, CookieExpirationPeriod=cookie_ttl)
            else:
                elbclient.create_lb_cookie_stickiness_policy(LoadBalancerName=self.app, PolicyName=policyname)
            stickiness_dict[externalport] = policyname

    return stickiness_dict
Adds stickiness policy to created ELB
50,115
def configure_attributes(self, json_data):
    """Configure load balancer attributes such as idle timeout, connection draining, etc.

    Args:
        json_data (str): Rendered ELB task JSON.
    """
    session = boto3.session.Session(profile_name=self.env, region_name=self.region)
    elbclient = session.client('elb')

    elb_settings = self.properties['elb']
    LOG.debug('Block ELB Settings Pre Configure Load Balancer Attributes:\n%s', pformat(elb_settings))

    # NOTE(review): attributes are applied once per job entry; the job
    # payload itself is not read.
    for job in json.loads(json_data)['job']:
        load_balancer_attributes = {
            'CrossZoneLoadBalancing': {'Enabled': True},
            'AccessLog': {'Enabled': False, },
            'ConnectionDraining': {'Enabled': False, },
            'ConnectionSettings': {'IdleTimeout': 60}
        }

        if elb_settings.get('connection_draining_timeout'):
            connection_draining_timeout = int(elb_settings['connection_draining_timeout'])
            LOG.info('Applying Custom Load Balancer Connection Draining Timeout: %d', connection_draining_timeout)
            load_balancer_attributes['ConnectionDraining'] = {
                'Enabled': True,
                'Timeout': connection_draining_timeout
            }

        if elb_settings.get('idle_timeout'):
            idle_timeout = int(elb_settings['idle_timeout'])
            LOG.info('Applying Custom Load Balancer Idle Timeout: %d', idle_timeout)
            load_balancer_attributes['ConnectionSettings'] = {'IdleTimeout': idle_timeout}

        if elb_settings.get('access_log'):
            access_log_bucket_name = elb_settings['access_log']['bucket_name']
            access_log_bucket_prefix = elb_settings['access_log']['bucket_prefix']
            access_log_emit_interval = int(elb_settings['access_log']['emit_interval'])
            LOG.info('Applying Custom Load Balancer Access Log: %s/%s every %d minutes', access_log_bucket_name,
                     access_log_bucket_prefix, access_log_emit_interval)
            load_balancer_attributes['AccessLog'] = {
                'Enabled': True,
                'S3BucketName': access_log_bucket_name,
                'EmitInterval': access_log_emit_interval,
                'S3BucketPrefix': access_log_bucket_prefix
            }

        LOG.info('Applying Load Balancer Attributes')
        LOG.debug('Load Balancer Attributes:\n%s', pformat(load_balancer_attributes))
        elbclient.modify_load_balancer_attributes(
            LoadBalancerName=self.app, LoadBalancerAttributes=load_balancer_attributes)
Configure load balancer attributes such as idle timeout, connection draining, etc.
50,116
def main():
    """Append Application Configurations to a given file in multiple formats."""
    logging.basicConfig(format=LOGGING_FORMAT)

    parser = argparse.ArgumentParser(description=main.__doc__)
    add_debug(parser)
    parser.add_argument('-o', '--output', required=True, help='Name of environment file to append to')
    parser.add_argument(
        '-g', '--git-short', metavar='GROUP/PROJECT', required=True, help='Short name for Git, e.g. forrest/core')
    parser.add_argument('-r', '--runway-dir', help='Runway directory with app.json files, requires --git-short')
    args = parser.parse_args()

    LOG.setLevel(args.debug)
    logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)

    generated = gogoutils.Generator(*gogoutils.Parser(args.git_short).parse_url(), formats=APP_FORMATS)
    git_short = generated.gitlab()['main']

    if args.runway_dir:
        configs = process_runway_configs(runway_dir=args.runway_dir)
    else:
        configs = process_git_configs(git_short=git_short)

    write_variables(app_configs=configs, out_file=args.output, git_short=git_short)
Append Application Configurations to a given file in multiple formats .
50,117
def add_infra(subparsers):
    """Infrastructure subcommands."""
    infra_parser = subparsers.add_parser('infra', help=runner.prepare_infrastructure.__doc__)
    infra_parser.set_defaults(func=runner.prepare_infrastructure)
Infrastructure subcommands .
50,118
def add_pipeline(subparsers):
    """Pipeline subcommands."""
    pipeline_parser = subparsers.add_parser(
        'pipeline', help=add_pipeline.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    pipeline_parser.set_defaults(func=pipeline_parser.print_help)

    pipeline_subparsers = pipeline_parser.add_subparsers(title='Pipelines')

    pipeline_full_parser = pipeline_subparsers.add_parser(
        'app', help=runner.prepare_app_pipeline.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    pipeline_full_parser.set_defaults(func=runner.prepare_app_pipeline)

    pipeline_onetime_parser = pipeline_subparsers.add_parser(
        'onetime', help=runner.prepare_onetime_pipeline.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    pipeline_onetime_parser.set_defaults(func=runner.prepare_onetime_pipeline)
    add_env(pipeline_onetime_parser)
Pipeline subcommands .
50,119
def add_rebuild(subparsers):
    """Rebuild Pipeline subcommands."""
    rebuild_parser = subparsers.add_parser(
        'rebuild', help=runner.rebuild_pipelines.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    rebuild_parser.set_defaults(func=runner.rebuild_pipelines)
    rebuild_parser.add_argument('-a', '--all', action='store_true', help='Rebuild all Pipelines')
    rebuild_parser.add_argument(
        'project',
        nargs='?',
        default=os.getenv('REBUILD_PROJECT'),
        help='Project to rebuild, overrides $REBUILD_PROJECT')
Rebuild Pipeline subcommands .
50,120
def add_autoscaling(subparsers):
    """Auto Scaling Group Policy subcommands."""
    autoscaling_parser = subparsers.add_parser(
        'autoscaling', help=runner.create_scaling_policy.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    autoscaling_parser.set_defaults(func=runner.create_scaling_policy)
Auto Scaling Group Policy subcommands .
50,121
def add_validate(subparsers):
    """Validate Spinnaker setup."""
    validate_parser = subparsers.add_parser(
        'validate', help=add_validate.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    validate_parser.set_defaults(func=validate_parser.print_help)

    validate_subparsers = validate_parser.add_subparsers(title='Testers')

    validate_all_parser = validate_subparsers.add_parser(
        'all', help=validate.validate_all.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    validate_all_parser.set_defaults(func=validate.validate_all)

    validate_gate_parser = validate_subparsers.add_parser(
        'gate', help=validate.validate_gate.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    validate_gate_parser.set_defaults(func=validate.validate_gate)
Validate Spinnaker setup .
50,122
def get_existing_pipelines(self):
    """Get existing pipeline configs for specific application.

    Returns:
        list: Pipeline configs from the Gate API.
    """
    url = "{0}/applications/{1}/pipelineConfigs".format(API_URL, self.app_name)
    resp = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    assert resp.ok, 'Failed to lookup pipelines for {0}: {1}'.format(self.app_name, resp.text)
    return resp.json()
Get existing pipeline configs for specific application .
50,123
def compare_with_existing(self, region='us-east-1', onetime=False):
    """Compare desired pipeline with existing pipelines.

    Args:
        region (str): Region of desired pipeline.
        onetime (bool): Look for a onetime pipeline variant.

    Returns:
        str: ID of the matching existing pipeline, None when absent.
    """
    pipelines = self.get_existing_pipelines()
    pipeline_id = None
    found = False
    for pipeline in pipelines:
        correct_app_and_region = (pipeline['application'] == self.app_name) and (region in pipeline['name'])
        if onetime:
            onetime_str = "(onetime-{})".format(self.environments[0])
            if correct_app_and_region and onetime_str in pipeline['name']:
                found = True
        elif correct_app_and_region:
            found = True

        if found:
            self.log.info('Existing pipeline found - %s', pipeline['name'])
            pipeline_id = pipeline['id']
            break
    else:
        self.log.info('No existing pipeline found')

    return pipeline_id
Compare desired pipeline with existing pipelines .
50,124
def create_pipeline(self):
    """Main wrapper for pipeline creation.

    1. Runs clean_pipelines to clean up existing ones
    2. determines which environments the pipeline needs
    3. gets all subnets for template rendering
    4. Renders all of the pipeline blocks as defined in configs
    5. Runs post_pipeline to create pipeline
    """
    clean_pipelines(app=self.app_name, settings=self.settings)

    pipeline_envs = self.environments
    self.log.debug('Envs from pipeline.json: %s', pipeline_envs)

    # Group environments by deployment region.
    regions_envs = collections.defaultdict(list)
    for env in pipeline_envs:
        for region in self.settings[env]['regions']:
            regions_envs[region].append(env)
    self.log.info('Environments and Regions for Pipelines:\n%s', json.dumps(regions_envs, indent=4))

    subnets = None  # lazily fetched once, only for EC2-type pipelines
    pipelines = {}
    for region, envs in regions_envs.items():
        self.generated.data.update({'region': region, })
        pipelines[region] = self.render_wrapper(region=region)

        # previous_env threads the promotion order through the blocks.
        previous_env = None
        for env in envs:
            self.generated.data.update({'env': env, })
            pipeline_block_data = {
                "env": env,
                "generated": self.generated,
                "previous_env": previous_env,
                "region": region,
                "settings": self.settings[env][region],
                "pipeline_data": self.settings['pipeline'],
            }
            if self.settings['pipeline']['type'] in EC2_PIPELINE_TYPES:
                if not subnets:
                    subnets = get_subnets()
                try:
                    region_subnets = {region: subnets[env][region]}
                except KeyError:
                    # Environment has no subnets in this region; skip it.
                    self.log.info('%s is not available for %s.', env, region)
                    continue
                pipeline_block_data['region_subnets'] = region_subnets

            block = construct_pipeline_block(**pipeline_block_data)
            pipelines[region]['stages'].extend(json.loads(block))

            previous_env = env

    self.log.debug('Assembled Pipelines:\n%s', pformat(pipelines))

    for region, pipeline in pipelines.items():
        renumerate_stages(pipeline)
        self.post_pipeline(pipeline)

    return True
Main wrapper for pipeline creation . 1 . Runs clean_pipelines to clean up existing ones 2 . determines which environments the pipeline needs 3 . gets all subnets for template rendering 4 . Renders all of the pipeline blocks as defined in configs 5 . Runs post_pipeline to create pipeline
50,125
def ami_lookup(region='us-east-1', name='tomcat8'):
    """Look up AMI ID.

    Resolution order: AMI_JSON_URL, then the Gitlab file (deprecated),
    otherwise _name_ is passed through unchanged.
    """
    if AMI_JSON_URL:
        ami_dict = _get_ami_dict(AMI_JSON_URL)
        ami_id = ami_dict[region][name]
    elif GITLAB_TOKEN:
        warn_user('Use AMI_JSON_URL feature instead.')
        ami_contents = _get_ami_file(region=region)
        ami_dict = json.loads(ami_contents)
        ami_id = ami_dict[name]
    else:
        ami_id = name

    LOG.info('Using AMI: %s', ami_id)
    return ami_id
Look up AMI ID .
50,126
def _get_ami_file(region='us-east-1'):
    """Get file from Gitlab.

    Args:
        region (str): AWS region used to pick the JSON file.

    Returns:
        str: Contents of the region's AMI JSON file.
    """
    LOG.info("Getting AMI from Gitlab")
    lookup = FileLookup(git_short='devops/ansible')
    filename = 'scripts/{0}.json'.format(region)
    ami_contents = lookup.remote_file(filename=filename, branch='master')
    LOG.debug('AMI file contents in %s: %s', filename, ami_contents)
    return ami_contents
Get file from Gitlab .
50,127
def _get_ami_dict(json_url):
    """Get AMI mapping from a web url.

    Args:
        json_url (str): URL serving the AMI JSON document.

    Returns:
        dict: Parsed AMI mapping.
    """
    LOG.info("Getting AMI from %s", json_url)
    response = requests.get(json_url)
    assert response.ok, "Error getting ami info from {}".format(json_url)
    ami_dict = response.json()
    LOG.debug('AMI json contents: %s', ami_dict)
    return ami_dict
Get ami from a web url .
50,128
def get_gitlab_project(self):
    """Get numerical GitLab Project ID.

    Returns:
        The GitLab project object, also cached on self.project.

    Raises:
        GitLabApiError: Project could not be retrieved.
    """
    self.server = gitlab.Gitlab(GIT_URL, private_token=GITLAB_TOKEN, api_version=4)
    project = self.server.projects.get(self.git_short)
    if not project:
        raise GitLabApiError('Could not get Project "{0}" from GitLab API.'.format(self.git_short))
    self.project = project
    return self.project
Get numerical GitLab Project ID .
50,129
def local_file(self, filename):
    """Read the local file in _self.runway_dir_.

    Args:
        filename (str): Name of file relative to the runway directory.

    Returns:
        str: File contents.

    Raises:
        FileNotFoundError: File is absent from the runway directory.
    """
    LOG.info('Retrieving "%s" from "%s".', filename, self.runway_dir)

    file_path = os.path.join(self.runway_dir, filename)
    try:
        with open(file_path, 'rt') as lookup_file:
            file_contents = lookup_file.read()
    except FileNotFoundError:
        LOG.warning('File missing "%s".', file_path)
        raise

    LOG.debug('Local file contents:\n%s', file_contents)
    return file_contents
Read the local file in _self . runway_dir_ .
50,130
def remote_file(self, branch='master', filename=''):
    """Read the remote file on Git Server.

    Args:
        branch (str): Git branch to read from.
        filename (str): Path of file within the project.

    Returns:
        str: Decoded file contents.

    Raises:
        FileNotFoundError: File is absent from the branch.
    """
    LOG.info('Retrieving "%s" from "%s".', filename, self.git_short)

    try:
        file_blob = self.project.files.get(file_path=filename, ref=branch)
    except gitlab.exceptions.GitlabGetError:
        file_blob = None
    LOG.debug('GitLab file response:\n%s', file_blob)

    if not file_blob:
        msg = 'Project "{0}" is missing file "{1}" in "{2}" branch.'.format(self.git_short, filename, branch)
        LOG.warning(msg)
        raise FileNotFoundError(msg)

    file_contents = b64decode(file_blob.content).decode()
    LOG.debug('Remote file contents:\n%s', file_contents)
    return file_contents
Read the remote file on Git Server .
50,131
def banner(text, border='=', width=80):
    """Center _text_ in a banner _width_ wide with _border_ characters."""
    horizontal_rule = border * width
    text_padding = '{0:^%d}' % (width)
    LOG.info(horizontal_rule)
    LOG.info(text_padding.format(text))
    LOG.info(horizontal_rule)
Center _text_ in a banner _width_ wide with _border_ characters .
50,132
def get_sns_topic_arn(topic_name, account, region):
    """Get SNS topic ARN.

    Args:
        topic_name (str): Topic name, or a full ARN which is returned as-is.
        account (str): AWS account profile name.
        region (str): AWS region.

    Raises:
        SNSTopicNotFound: No topic with that name exists.
    """
    # Already a fully-qualified SNS ARN; nothing to look up.
    if topic_name.count(':') == 5 and topic_name.startswith('arn:aws:sns:'):
        return topic_name

    session = boto3.Session(profile_name=account, region_name=region)
    sns_client = session.client('sns')

    for topic in sns_client.list_topics()['Topics']:
        topic_arn = topic['TopicArn']
        if topic_name == topic_arn.split(':')[-1]:
            return topic_arn

    LOG.critical("No topic with name %s found.", topic_name)
    raise SNSTopicNotFound('No topic with name {0} found'.format(topic_name))
Get SNS topic ARN .
50,133
def notify_slack_channel(self):
    """Post message to a defined Slack channel."""
    message = get_template(template_file='slack/pipeline-prepare-ran.j2', info=self.info)

    slack_channel = self.settings['pipeline']['notifications']['slack']
    if slack_channel:
        post_slack_message(message=message, channel=slack_channel, username='pipeline-bot', icon_emoji=':gear:')
Post message to a defined Slack channel .
50,134
def get_properties(properties_file='raw.properties.json', env=None, region=None):
    """Get contents of _properties_file_ for the _env_.

    Falls back to the full document when _env_ (or _region_) is absent.
    """
    with open(properties_file, 'rt') as file_handle:
        properties = json.load(file_handle)

    env_properties = properties.get(env, properties)
    contents = env_properties.get(region, env_properties)

    LOG.debug('Found properties for %s:\n%s', env, contents)
    return contents
Get contents of _properties_file_ for the _env_ .
50,135
def main():
    """Destroy any ELB related Resources."""
    logging.basicConfig(format=LOGGING_FORMAT)

    parser = argparse.ArgumentParser(description=main.__doc__)
    for register_arg in (add_debug, add_app, add_env, add_region):
        register_arg(parser)
    args = parser.parse_args()

    logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)

    assert destroy_elb(**vars(args))
Destroy any ELB related Resources .
50,136
def get_security_group_id(name='', env='', region=''):
    """Get a security group ID.

    Args:
        name (str): Security Group name.
        env (str): Deployment environment.
        region (str): AWS region.

    Raises:
        SpinnakerSecurityGroupError: Group not found in the VPC.
    """
    vpc_id = get_vpc_id(env, region)
    LOG.info('Find %s sg in %s [%s] in %s', name, env, region, vpc_id)

    url = '{0}/securityGroups/{1}/{2}/{3}?vpcId={4}'.format(API_URL, env, region, name, vpc_id)
    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    assert response.ok

    result = response.json()
    try:
        security_group_id = result['id']
    except KeyError:
        msg = 'Security group ({0}) not found'.format(name)
        raise SpinnakerSecurityGroupError(msg)

    LOG.info('Found: %s', security_group_id)
    return security_group_id
Get a security group ID .
50,137
def remove_duplicate_sg(security_groups):
    """Remove duplicate Security Groups that share the same name alias.

    Args:
        security_groups (list): Security Group names; modified in place.

    Returns:
        list: The same list with aliased duplicates removed.
    """
    for preferred_sg, alias_sg in SECURITYGROUP_REPLACEMENTS.items():
        if preferred_sg in security_groups and alias_sg in security_groups:
            LOG.info('Duplicate SG found. Removing %s in favor of %s.', alias_sg, preferred_sg)
            security_groups.remove(alias_sg)
    return security_groups
Removes duplicate Security Groups that share the same name alias.
50,138
def generate_encoded_user_data(env='dev',
                               region='us-east-1',
                               generated=None,
                               group_name='',
                               pipeline_type='',
                               canary=False, ):
    r"""Generate base64 encoded User Data.

    Args:
        env (str): Deployment environment.
        region (str): AWS region.
        generated (gogoutils.Generator): Generated naming formats.
        group_name (str): Application group name.
        pipeline_type (str): Type of the deployment pipeline.
        canary (bool): Render canary-specific User Data.

    Returns:
        str: base64 encoded User Data script.
    """
    # All prod-family environments share the same canonical trio of names.
    if env in ["prod", "prodp", "prods"]:
        env_c, env_p, env_s = "prod", "prodp", "prods"
    else:
        env_c, env_p, env_s = env, env, env

    user_data = get_template(
        template_file='infrastructure/user_data.sh.j2',
        env=env,
        env_c=env_c,
        env_p=env_p,
        env_s=env_s,
        region=region,
        app_name=generated.app_name(),
        group_name=group_name,
        pipeline_type=pipeline_type,
        canary=canary,
        formats=generated, )
    return base64.b64encode(user_data.encode()).decode()
Generate base64 encoded User Data.
50,139
def prepare_infrastructure():
    """Entry point for preparing the infrastructure in a specific env."""
    runner = ForemastRunner()
    runner.write_configs()
    runner.create_app()

    archaius = runner.configs[runner.env]['app']['archaius_enabled']
    eureka = runner.configs[runner.env]['app']['eureka_enabled']
    deploy_type = runner.configs['pipeline']['type']

    if deploy_type not in ['s3', 'datapipeline']:
        runner.create_iam()
    if archaius:
        runner.create_archaius()
    runner.create_secgroups()

    if eureka:
        LOG.info("Eureka Enabled, skipping ELB and DNS setup")
    elif deploy_type == "lambda":
        LOG.info("Lambda Enabled, skipping ELB and DNS setup")
        runner.create_awslambda()
    elif deploy_type == "s3":
        runner.create_s3app()
    elif deploy_type == 'datapipeline':
        runner.create_datapipeline()
    else:
        LOG.info("No Eureka, running ELB and DNS setup")
        runner.create_elb()
        runner.create_dns()

    runner.slack_notify()
    runner.cleanup()
Entry point for preparing the infrastructure in a specific env .
50,140
def prepare_app_pipeline():
    """Entry point for application setup and initial pipeline in Spinnaker."""
    runner = ForemastRunner()
    runner.write_configs()
    runner.create_app()
    runner.create_pipeline()
    runner.cleanup()
Entry point for application setup and initial pipeline in Spinnaker .
50,141
def prepare_onetime_pipeline():
    """Entry point for single use pipeline setup in the defined app."""
    runner = ForemastRunner()
    runner.write_configs()
    runner.create_pipeline(onetime=os.getenv('ENV'))
    runner.cleanup()
Entry point for single use pipeline setup in the defined app .
50,142
def write_configs(self):
    """Generate the configurations needed for pipes."""
    utils.banner("Generating Configs")

    if not self.runway_dir:
        app_configs = configs.process_git_configs(git_short=self.git_short)
    else:
        app_configs = configs.process_runway_configs(runway_dir=self.runway_dir)

    self.configs = configs.write_variables(
        app_configs=app_configs, out_file=self.raw_path, git_short=self.git_short)
Generate the configurations needed for pipes .
50,143
def create_app(self):
    """Create the spinnaker application."""
    utils.banner("Creating Spinnaker App")
    app_kwargs = {
        'app': self.app,
        'email': self.email,
        'project': self.group,
        'repo': self.repo,
        'pipeline_config': self.configs['pipeline'],
    }
    app.SpinnakerApp(**app_kwargs).create_app()
Create the spinnaker application .
50,144
def create_iam(self):
    """Create the IAM resources for the application."""
    utils.banner("Creating IAM")
    iam.create_iam_resources(app=self.app, env=self.env)
Create IAM resources .
50,145
def create_archaius(self):
    """Create the S3 bucket used by Archaius."""
    utils.banner("Creating S3")
    s3.init_properties(app=self.app, env=self.env)
Create S3 bucket for Archaius .
50,146
def create_s3app(self):
    """Create the S3 infrastructure for S3-deployed applications."""
    utils.banner("Creating S3 App Infrastructure")
    bucket_app = s3.S3Apps(
        app=self.app,
        env=self.env,
        region=self.region,
        prop_path=self.json_path,
        primary_region=self.configs['pipeline']['primary_region'])
    bucket_app.create_bucket()
Create S3 infra for s3 applications
50,147
def deploy_s3app(self):
    """Upload the artifact contents to the application's S3 bucket."""
    utils.banner("Deploying S3 App")
    deployment = s3.S3Deployment(
        app=self.app,
        env=self.env,
        region=self.region,
        prop_path=self.json_path,
        artifact_path=self.artifact_path,
        artifact_version=self.artifact_version,
        primary_region=self.configs['pipeline']['primary_region'])
    deployment.upload_artifacts()
Deploys artifacts contents to S3 bucket
50,148
def promote_s3app(self):
    """Promote the S3 deployment to LATEST."""
    utils.banner("Promoting S3 App")
    deployment = s3.S3Deployment(
        app=self.app,
        env=self.env,
        region=self.region,
        prop_path=self.json_path,
        artifact_path=self.artifact_path,
        artifact_version=self.artifact_version,
        primary_region=self.configs['pipeline']['primary_region'])
    deployment.promote_artifacts(promote_stage=self.promote_stage)
promotes S3 deployment to LATEST
50,149
def create_elb(self):
    """Create the ELB for the defined environment."""
    utils.banner("Creating ELB")
    elb.SpinnakerELB(
        app=self.app, env=self.env, region=self.region,
        prop_path=self.json_path).create_elb()
Create the ELB for the defined environment .
50,150
def create_dns(self):
    """Create DNS for the defined app and environment."""
    utils.banner("Creating DNS")
    env_config = self.configs[self.env]
    elb_subnet = env_config['elb']['subnet_purpose']
    regions = env_config['regions']
    failover = env_config['dns']['failover_dns']
    primary_region = self.configs['pipeline']['primary_region']
    regionspecific_dns = env_config['dns']['region_specific']

    dnsobj = dns.SpinnakerDns(
        app=self.app, env=self.env, region=self.region,
        prop_path=self.json_path, elb_subnet=elb_subnet)

    # Multi-region failover gets region records plus a failover record.
    if failover and len(regions) > 1:
        dnsobj.create_elb_dns(regionspecific=True)
        dnsobj.create_failover_dns(primary_region=primary_region)
    else:
        if regionspecific_dns:
            dnsobj.create_elb_dns(regionspecific=True)
        if self.region == primary_region:
            dnsobj.create_elb_dns(regionspecific=False)
Create DNS for the defined app and environment .
50,151
def create_autoscaling_policy(self):
    """Create the scaling policy for the app in this environment."""
    utils.banner("Creating Scaling Policy")
    autoscaling_policy.AutoScalingPolicy(
        app=self.app, env=self.env, region=self.region,
        prop_path=self.json_path).create_policy()
Create Scaling Policy for app in environment
50,152
def create_datapipeline(self):
    """Create the data pipeline and push its definition.

    Activates the pipeline afterwards when the environment config sets
    ``datapipeline.activate_on_deploy``.
    """
    utils.banner("Creating Data Pipeline")
    dpobj = datapipeline.AWSDataPipeline(
        app=self.app, env=self.env, region=self.region, prop_path=self.json_path)
    dpobj.create_datapipeline()
    dpobj.set_pipeline_definition()
    # Default to an empty dict so a missing "datapipeline" section simply
    # skips activation instead of raising AttributeError on None.
    if self.configs[self.env].get('datapipeline', {}).get('activate_on_deploy'):
        dpobj.activate_pipeline()
Creates data pipeline and adds definition
50,153
def slack_notify(self):
    """Send out a slack notification for production environments."""
    utils.banner("Sending slack notification")
    # Guard clause: only prod* environments notify.
    if not self.env.startswith("prod"):
        LOG.info("No slack message sent, not production environment")
        return
    notify = slacknotify.SlackNotification(
        app=self.app, env=self.env, prop_path=self.json_path)
    notify.post_message()
Send out a slack notification .
50,154
def add_debug(parser):
    """Add a ``-d``/``--debug`` flag to *parser* selecting DEBUG log level."""
    parser.add_argument(
        '-d',
        '--debug',
        help='Set DEBUG output',
        action='store_const',
        const=logging.DEBUG,
        default=logging.INFO)
Add a debug flag to the _parser_ .
50,155
def add_env(parser):
    """Add an ``-e``/``--env`` flag to *parser*; $ENV provides the default."""
    default_env = os.getenv('ENV', default='dev')
    parser.add_argument(
        '-e',
        '--env',
        choices=ENVS,
        default=default_env,
        help='Deploy environment, overrides $ENV')
Add an env flag to the _parser_ .
50,156
def main():
    """CLI entrypoint for scaling policy creation."""
    logging.basicConfig(format=LOGGING_FORMAT)
    log = logging.getLogger(__name__)

    parser = argparse.ArgumentParser()
    for register_arg in (add_debug, add_app, add_properties, add_env, add_region):
        register_arg(parser)
    args = parser.parse_args()

    logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
    log.debug('Parsed arguments: %s', args)

    asgpolicy = AutoScalingPolicy(
        app=args.app, prop_path=args.properties, env=args.env, region=args.region)
    asgpolicy.create_policy()
CLI entrypoint for scaling policy creation
50,157
def main():
    """Create Lambda events."""
    logging.basicConfig(format=LOGGING_FORMAT)
    log = logging.getLogger(__name__)

    parser = argparse.ArgumentParser(description=main.__doc__)
    for register_arg in (add_debug, add_app, add_env, add_properties, add_region):
        register_arg(parser)
    args = parser.parse_args()

    logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
    log.debug('Parsed arguments: %s', args)

    lambda_function = LambdaFunction(
        app=args.app, env=args.env, region=args.region, prop_path=args.properties)
    lambda_function.create_lambda_function()

    lambda_event = LambdaEvent(
        app=args.app, env=args.env, region=args.region, prop_path=args.properties)
    lambda_event.create_lambda_events()
Create Lambda events .
50,158
def post_slack_message(message=None, channel=None, username=None, icon_emoji=None):
    """Format the message and post to the appropriate slack channel.

    Args:
        message (str): Message text to post.
        channel (str): Slack channel name, e.g. ``#deployments``.
        username (str): Name the message is posted as.
        icon_emoji (str): Emoji shown as the poster's avatar.
    """
    LOG.debug('Slack Channel: %s\nSlack Message: %s', channel, message)
    slack = slacker.Slacker(SLACK_TOKEN)
    try:
        slack.chat.post_message(channel=channel, text=message, username=username, icon_emoji=icon_emoji)
        LOG.info('Message posted to %s', channel)
    except slacker.Error:
        # Log delivery failures at error level; previously this was an
        # info-level message with garbled wording, hiding real failures.
        LOG.error('Error posting message to %s', channel)
Format the message and post to the appropriate slack channel .
50,159
def destroy_dns(app='', env='dev', **_):
    """Destroy DNS records for *app* in *env*.

    Args:
        app (str): Spinnaker application name.
        env (str): Deployment environment.

    Returns:
        bool: True upon successful completion.
    """
    client = boto3.Session(profile_name=env).client('route53')

    generated = get_details(app=app, env=env)
    record = generated.dns_elb()

    zone_ids = get_dns_zone_ids(env=env, facing='external')
    for zone_id in zone_ids:
        record_sets = client.list_resource_record_sets(
            HostedZoneId=zone_id,
            StartRecordName=record,
            StartRecordType='CNAME',
            MaxItems='1')
        for found_record in record_sets['ResourceRecordSets']:
            # destroy_record always returns True; call it directly instead of
            # via ``assert`` so deletion still runs under ``python -O``.
            destroy_record(client=client, found_record=found_record, record=record, zone_id=zone_id)
    return True
Destroy DNS records .
50,160
def destroy_record(client=None, found_record=None, record='', zone_id=''):
    """Destroy an individual DNS record when its name matches *record*.

    Returns:
        bool: Always True.
    """
    LOG.debug('Found DNS record: %s', found_record)

    # Guard: only delete the record we were asked about.
    if found_record['Name'].strip('.') != record:
        LOG.info('DNS record "%s" missing from %s.', record, zone_id)
        LOG.debug('Found someone else\'s record: %s', found_record['Name'])
        return True

    dns_json = get_template(
        template_file='destroy/destroy_dns.json.j2', record=json.dumps(found_record))
    client.change_resource_record_sets(HostedZoneId=zone_id, ChangeBatch=json.loads(dns_json))
    LOG.info('Destroyed "%s" in %s', found_record['Name'], zone_id)
    return True
Destroy an individual DNS record .
50,161
def create_sns_event(app_name, env, region, rules):
    """Create an SNS-triggered Lambda subscription from *rules*."""
    session = boto3.Session(profile_name=env, region_name=region)
    sns_client = session.client('sns')

    topic_name = rules.get('topic')
    lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
    topic_arn = get_sns_topic_arn(topic_name=topic_name, account=env, region=region)

    # Grant SNS permission to invoke the Lambda alias before subscribing.
    add_lambda_permissions(
        function=lambda_alias_arn,
        statement_id='{}_sns_{}'.format(app_name, topic_name),
        action='lambda:InvokeFunction',
        principal='sns.amazonaws.com',
        source_arn=topic_arn,
        env=env,
        region=region)

    sns_client.subscribe(TopicArn=topic_arn, Protocol='lambda', Endpoint=lambda_alias_arn)

    LOG.debug("SNS Lambda event created")
    LOG.info("Created SNS event subscription on topic %s", topic_name)
Create SNS lambda event from rules .
50,162
def destroy_sns_event(app_name, env, region):
    """Remove every Lambda SNS subscription belonging to *app_name*.

    Returns:
        bool: Always True.
    """
    sns_client = boto3.Session(profile_name=env, region_name=region).client('sns')
    for subscription_arn in get_sns_subscriptions(app_name=app_name, env=env, region=region):
        sns_client.unsubscribe(SubscriptionArn=subscription_arn)
    LOG.debug("Lambda SNS event deleted")
    return True
Destroy all Lambda SNS subscriptions .
50,163
def destroy_elb(app='', env='dev', region='us-east-1', **_):
    """Destroy the ELB resources for *app* via a Spinnaker task.

    Returns:
        bool: Always True.
    """
    vpc_id = get_vpc_id(account=env, region=region)
    task_json = get_template(
        template_file='destroy/destroy_elb.json.j2',
        app=app,
        env=env,
        region=region,
        vpc=vpc_id)
    wait_for_task(task_json)
    return True
Destroy ELB Resources .
50,164
def delete_pipeline(app='', pipeline_name=''):
    """Delete *pipeline_name* from *app* in Spinnaker.

    Raises:
        SpinnakerPipelineDeletionFailed: When the gate rejects the delete
            with 405 (usually an invalid pipeline name).

    Returns:
        str: Raw response body from the delete call.
    """
    safe_pipeline_name = normalize_pipeline_name(name=pipeline_name)
    LOG.warning('Deleting Pipeline: %s', safe_pipeline_name)

    url = '{host}/pipelines/{app}/{pipeline}'.format(
        host=API_URL, app=app, pipeline=safe_pipeline_name)
    response = requests.delete(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)

    if not response.ok:
        LOG.debug('Delete response code: %d', response.status_code)
        if response.status_code == requests.status_codes.codes['method_not_allowed']:
            raise SpinnakerPipelineDeletionFailed(
                'Failed to delete "{0}" from "{1}", '
                'possibly invalid Pipeline name.'.format(safe_pipeline_name, app))
        LOG.debug('Pipeline missing, no delete required.')

    LOG.debug('Deleted "%s" Pipeline response:\n%s', safe_pipeline_name, response.text)
    return response.text
Delete _pipeline_name_ from _app_ .
50,165
def clean_pipelines(app='', settings=None):
    """Delete Pipelines for Regions not defined in application.json files.

    Returns:
        bool: Always True.
    """
    pipelines = get_all_pipelines(app=app)
    envs = settings['pipeline']['env']
    LOG.debug('Find Regions in: %s', envs)

    defined_regions = set()
    for env in envs:
        try:
            defined_regions.update(settings[env]['regions'])
        except KeyError:
            error_msg = 'Missing "{}/application-master-{}.json".'.format(RUNWAY_BASE_PATH, env)
            raise SpinnakerPipelineCreationFailed(error_msg)
    LOG.debug('Regions defined: %s', defined_regions)

    for pipeline_config in pipelines:
        pipeline_name = pipeline_config['name']
        try:
            region = check_managed_pipeline(name=pipeline_name, app_name=app)
        except ValueError:
            # Not a Foremast-managed pipeline; leave it alone.
            LOG.info('"%s" is not managed.', pipeline_name)
            continue
        LOG.debug('Check "%s" in defined Regions.', region)
        if region not in defined_regions:
            delete_pipeline(app=app, pipeline_name=pipeline_name)
    return True
Delete Pipelines for regions not defined in application . json files .
50,166
def extract_formats(config_handle):
    """Get application formats from a parsed configuration.

    Returns:
        dict: Contents of the 'formats' section, empty when absent.
    """
    return dict(dict(config_handle).get('formats', {}))
Get application formats .
50,167
def load_dynamic_config(config_file=DEFAULT_DYNAMIC_CONFIG_FILE):
    """Load and parse dynamic config from the directory of *config_file*.

    Returns:
        dict: CONFIG from the imported config.py, empty on import failure.
    """
    dynamic_configurations = {}
    # Make the config.py that sits next to *config_file* importable.
    sys.path.insert(0, path.dirname(path.abspath(config_file)))
    try:
        dynamic_configurations = __import__('config').CONFIG
    except ImportError:
        LOG.error('ImportError: Unable to load dynamic config. Check config.py file imports!')
    return dynamic_configurations
Load and parse dynamic config
50,168
def _remove_empty_entries ( entries ) : valid_entries = [ ] for entry in set ( entries ) : if entry : valid_entries . append ( entry ) return sorted ( valid_entries )
Remove empty entries in a list
50,169
def _convert_string_to_native ( value ) : result = None try : result = ast . literal_eval ( str ( value ) ) except ( SyntaxError , ValueError ) : result = value . split ( ',' ) return result
Convert a string to its native python type
50,170
def _generate_security_groups(config_key):
    """Read config file and generate security group dict by environment.

    Args:
        config_key (str): Config key holding the default security groups.

    Returns:
        dict: Environment name -> list of security groups.
    """
    raw_default_groups = validate_key_values(CONFIG, 'base', config_key, default='')
    default_groups = _convert_string_to_native(raw_default_groups)
    LOG.debug('Default security group for %s is %s', config_key, default_groups)

    entries = {env: [] for env in ENVS}

    if isinstance(default_groups, list):
        # A flat list applies the same groups to every environment.
        groups = _remove_empty_entries(default_groups)
        entries = {env: groups for env in entries}
    elif isinstance(default_groups, dict):
        entries.update(default_groups)

    LOG.debug('Generated security group: %s', entries)
    return entries
Read config file and generate security group dict by environment .
50,171
def create_datapipeline(self):
    """Create the data pipeline if it does not already exist.

    Returns:
        dict: boto3 ``create_pipeline`` response.
    """
    pipeline_tags = [{"key": "app_group", "value": self.group},
                     {"key": "app_name", "value": self.app_name}]
    create_kwargs = {
        'name': self.datapipeline_data.get('name', self.app_name),
        'uniqueId': self.app_name,
        'description': self.datapipeline_data['description'],
        'tags': pipeline_tags,
    }
    response = self.client.create_pipeline(**create_kwargs)
    self.pipeline_id = response.get('pipelineId')
    LOG.debug(response)
    LOG.info("Successfully configured Data Pipeline - %s", self.app_name)
    return response
Creates the data pipeline if it does not already exist
50,172
def set_pipeline_definition(self):
    """Translate the json definition and put it on the created pipeline.

    Raises:
        DataPipelineDefinitionError: When the definition fails to translate.

    Returns:
        dict: boto3 ``put_pipeline_definition`` response.
    """
    if not self.pipeline_id:
        self.get_pipeline_id()

    definition = self.datapipeline_data['json_definition']
    try:
        api_objects = translator.definition_to_api_objects(definition)
        api_parameters = translator.definition_to_api_parameters(definition)
        parameter_values = translator.definition_to_parameter_values(definition)
    except translator.PipelineDefinitionError as error:
        LOG.warning(error)
        raise DataPipelineDefinitionError

    response = self.client.put_pipeline_definition(
        pipelineId=self.pipeline_id,
        pipelineObjects=api_objects,
        parameterObjects=api_parameters,
        parameterValues=parameter_values)
    LOG.debug(response)
    LOG.info("Successfully applied pipeline definition")
    return response
Translates the json definition and puts it on created pipeline
50,173
def get_pipeline_id(self):
    """Find and store the pipeline ID for the configured pipeline."""
    wanted_name = self.datapipeline_data.get('name', self.app_name)

    all_pipelines = []
    paginator = self.client.get_paginator('list_pipelines')
    for page in paginator.paginate():
        all_pipelines.extend(page['pipelineIdList'])

    for pipeline in all_pipelines:
        if pipeline['name'] == wanted_name:
            self.pipeline_id = pipeline['id']
            LOG.info("Pipeline ID Found")
            return

    LOG.info("Pipeline ID Not Found for %s", self.app_name)
Finds the pipeline ID for configured pipeline
50,174
def activate_pipeline(self):
    """Activate the deployed pipeline; useful for OnDemand pipelines."""
    pipeline_id = self.pipeline_id
    self.client.activate_pipeline(pipelineId=pipeline_id)
    LOG.info("Activated Pipeline %s", pipeline_id)
Activates a deployed pipeline useful for OnDemand pipelines
50,175
def get_dns_zone_ids(env='dev', facing='internal'):
    """Get Route 53 Hosted Zone IDs for *env*.

    Args:
        env (str): Deployment environment.
        facing (str): 'external' keeps every zone; otherwise only private
            zones are kept.

    Returns:
        list: Hosted Zone IDs.
    """
    client = boto3.Session(profile_name=env).client('route53')
    zones = client.list_hosted_zones_by_name(DNSName='.'.join([env, DOMAIN]))

    zone_ids = []
    for zone in zones['HostedZones']:
        LOG.debug('Found Hosted Zone: %s', zone)
        if facing != 'external' and not zone['Config']['PrivateZone']:
            continue
        LOG.info('Using %(Id)s for "%(Name)s", %(Config)s', zone)
        zone_ids.append(zone['Id'])

    LOG.debug('Zone IDs: %s', zone_ids)
    return zone_ids
Get Route 53 Hosted Zone IDs for _env_ .
50,176
def update_dns_zone_record(env, zone_id, **kwargs):
    """Create a Route53 CNAME record in _env_ zone.

    Args:
        env (str): Deployment environment.
        zone_id (str): Route53 Hosted Zone ID.

    Keyword Args:
        dns_name (str): FQDN of the record to upsert.
        dns_name_aws (str): AWS-side DNS target the CNAME points at.
    """
    client = boto3.Session(profile_name=env).client('route53')
    response = {}

    hosted_zone_info = client.get_hosted_zone(Id=zone_id)
    zone_name = hosted_zone_info['HostedZone']['Name'].rstrip('.')
    dns_name = kwargs.get('dns_name')

    # Only upsert when the record name actually belongs to this zone.
    if dns_name and dns_name.endswith(zone_name):
        dns_name_aws = kwargs.get('dns_name_aws')
        # Template renders the Route53 UPSERT ChangeBatch payload.
        dns_json = get_template(template_file='infrastructure/dns_upsert.json.j2', **kwargs)
        LOG.info('Attempting to create DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id,
                 zone_name)
        try:
            response = client.change_resource_record_sets(
                HostedZoneId=zone_id,
                ChangeBatch=json.loads(dns_json),
            )
            LOG.info('Upserted DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name)
        except botocore.exceptions.ClientError as error:
            # Best effort: a failed upsert is logged, not raised.
            LOG.info('Error creating DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id,
                     zone_name)
            LOG.debug(error)
    else:
        LOG.info('Skipping creating DNS record %s in non-matching Hosted Zone %s (%s)', dns_name, zone_id, zone_name)

    LOG.debug('Route53 JSON Response: \n%s', pformat(response))
Create a Route53 CNAME record in _env_ zone .
50,177
def find_existing_record(env, zone_id, dns_name, check_key=None, check_value=None):
    """Check if a specific DNS record exists.

    Args:
        env (str): Deployment environment.
        zone_id (str): Route53 Hosted Zone ID to search.
        dns_name (str): Record name to look for.
        check_key (str): Optional record attribute to match, e.g. 'Type'.
        check_value: Value *check_key* must equal, e.g. 'CNAME'.

    Returns:
        dict: The matching record set, or None when not found.
    """
    client = boto3.Session(profile_name=env).client('route53')
    pager = client.get_paginator('list_resource_record_sets')
    for rset in pager.paginate(HostedZoneId=zone_id):
        for record in rset['ResourceRecordSets']:
            if check_key:
                if record['Name'].rstrip('.') == dns_name and record.get(check_key) == check_value:
                    LOG.info("Found existing record: %s", record)
                    # Return immediately; the previous ``break`` only exited
                    # the inner loop and kept paginating after a match.
                    return record
    return None
Check if a specific DNS record exists .
50,178
def delete_existing_cname(env, zone_id, dns_name):
    """Delete an existing CNAME record named *dns_name* from a zone.

    Args:
        env (str): Deployment environment.
        zone_id (str): Route53 Hosted Zone ID.
        dns_name (str): Record name to delete.
    """
    client = boto3.Session(profile_name=env).client('route53')
    # Dead store (startrecord = None) and redundant name alias removed.
    startrecord = find_existing_record(env, zone_id, dns_name, check_key='Type', check_value='CNAME')
    if startrecord:
        LOG.info("Deleting old record: %s", dns_name)
        _response = client.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={'Changes': [{'Action': 'DELETE',
                                      'ResourceRecordSet': startrecord}]})
        LOG.debug('Response from deleting %s: %s', dns_name, _response)
Delete an existing CNAME record .
50,179
def update_failover_dns_record(env, zone_id, **kwargs):
    """Create a Failover Route53 alias record in _env_ zone.

    Args:
        env (str): Deployment environment.
        zone_id (str): Route53 Hosted Zone ID.

    Keyword Args:
        dns_name (str): FQDN of the failover record.
        failover_state (str): 'primary' or 'secondary'.
        elb_aws_dns (str): ELB DNS name the alias points at.

    Raises:
        PrimaryDNSRecordNotFound: When creating a non-primary record and no
            PRIMARY failover record exists yet.
    """
    client = boto3.Session(profile_name=env).client('route53')
    response = {}

    hosted_zone_info = client.get_hosted_zone(Id=zone_id)
    zone_name = hosted_zone_info['HostedZone']['Name'].rstrip('.')
    dns_name = kwargs.get('dns_name')
    failover_state = kwargs.get('failover_state')

    # A SECONDARY record is only valid once the PRIMARY record exists.
    if failover_state.lower() != 'primary':
        primary_record = find_existing_record(env, zone_id, dns_name, check_key='Failover', check_value='PRIMARY')
        if not primary_record:
            raise PrimaryDNSRecordNotFound("Primary Failover DNS record not found: {}".format(dns_name))

    # Only upsert when the record name actually belongs to this zone.
    if dns_name and dns_name.endswith(zone_name):
        dns_json = get_template(template_file='infrastructure/dns_failover_upsert.json.j2', **kwargs)
        LOG.info('Attempting to create DNS Failover record %s (%s) in Hosted Zone %s (%s)', dns_name,
                 kwargs['elb_aws_dns'], zone_id, zone_name)
        try:
            # Route53 will not upsert an alias over a plain CNAME; drop it first.
            delete_existing_cname(env, zone_id, dns_name)
            response = client.change_resource_record_sets(
                HostedZoneId=zone_id,
                ChangeBatch=json.loads(dns_json),
            )
            LOG.info('Upserted DNS Failover record %s (%s) in Hosted Zone %s (%s)', dns_name, kwargs['elb_aws_dns'],
                     zone_id, zone_name)
        except botocore.exceptions.ClientError as error:
            # Best effort: failures are logged, not raised.
            LOG.info('Error creating DNS Failover record %s (%s) in Hosted Zone %s (%s)', dns_name,
                     kwargs['elb_aws_dns'], zone_id, zone_name)
            LOG.debug(error)
    else:
        LOG.info('Skipping creating DNS record %s in non-matching Hosted Zone %s (%s)', dns_name, zone_id, zone_name)

    LOG.debug('Route53 JSON Response: \n%s', pformat(response))
Create a Failover Route53 alias record in _env_ zone .
50,180
def create_cloudwatch_log_event(app_name, env, region, rules):
    """Create cloudwatch log event for lambda from rules.

    Args:
        app_name (str): Lambda application name.
        env (str): Deployment environment / AWS account.
        region (str): AWS region.
        rules (dict): Event settings; ``log_group``, ``filter_name`` and
            ``filter_pattern`` are all required.

    Raises:
        InvalidEventConfiguration: When a required rule setting is missing.
    """
    session = boto3.Session(profile_name=env, region_name=region)
    cloudwatch_client = session.client('logs')

    log_group = rules.get('log_group')
    filter_name = rules.get('filter_name')
    filter_pattern = rules.get('filter_pattern')

    if not log_group:
        LOG.critical('Log group is required and no "log_group" is defined!')
        raise InvalidEventConfiguration('Log group is required and no "log_group" is defined!')

    if not filter_name:
        LOG.critical('Filter name is required and no filter_name is defined!')
        raise InvalidEventConfiguration('Filter name is required and no filter_name is defined!')

    # An empty pattern ("") matches everything, so only reject None here.
    if filter_pattern is None:
        LOG.critical('Filter pattern is required and no filter_pattern is defined!')
        raise InvalidEventConfiguration('Filter pattern is required and no filter_pattern is defined!')

    lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
    statement_id = '{}_cloudwatchlog_{}'.format(app_name, filter_name.replace(" ", "_"))
    principal = 'logs.{}.amazonaws.com'.format(region)
    account_id = get_env_credential(env=env)['accountId']
    source_arn = "arn:aws:logs:{0}:{1}:log-group:{2}:*".format(region, account_id, log_group)

    # Allow CloudWatch Logs to invoke the Lambda alias before subscribing.
    add_lambda_permissions(
        function=lambda_alias_arn,
        statement_id=statement_id,
        action='lambda:InvokeFunction',
        principal=principal,
        source_arn=source_arn,
        env=env,
        region=region)

    cloudwatch_client.put_subscription_filter(
        logGroupName=log_group,
        filterName=filter_name,
        filterPattern=filter_pattern,
        destinationArn=lambda_alias_arn)

    LOG.info("Created Cloudwatch log event with filter: %s", filter_pattern)
Create cloudwatch log event for lambda from rules .
50,181
def prepare_policy_template(self, scaling_type, period_sec, server_group):
    """Renders scaling policy templates based on configs and variables.
    After rendering, POSTs the json to Spinnaker for creation.

    Args:
        scaling_type (str): ``scale_up`` or ``scale_down``.
        period_sec (int): Evaluation period in seconds.
        server_group (str): Server group the policy is applied to.
    """
    template_kwargs = {
        'app': self.app,
        'env': self.env,
        'region': self.region,
        'server_group': server_group,
        'period_sec': period_sec,
        'scaling_policy': self.settings['asg']['scaling_policy'],
    }
    if scaling_type == 'scale_up':
        template_kwargs['operation'] = 'increase'
        template_kwargs['comparisonOperator'] = 'GreaterThanThreshold'
        template_kwargs['scalingAdjustment'] = 1
    elif scaling_type == 'scale_down':
        # NOTE(review): scale-down triggers at half the configured threshold,
        # and this mutates self.settings in place -- later readers of
        # scaling_policy['threshold'] see the halved value. Confirm intended.
        cur_threshold = int(self.settings['asg']['scaling_policy']['threshold'])
        self.settings['asg']['scaling_policy']['threshold'] = floor(cur_threshold * 0.5)
        template_kwargs['operation'] = 'decrease'
        template_kwargs['comparisonOperator'] = 'LessThanThreshold'
        template_kwargs['scalingAdjustment'] = -1
    rendered_template = get_template(template_file='infrastructure/autoscaling_policy.json.j2', **template_kwargs)
    self.log.info('Creating a %s policy in %s for %s', scaling_type, self.env, self.app)
    wait_for_task(rendered_template)
    self.log.info('Successfully created a %s policy in %s for %s', scaling_type, self.env, self.app)
Renders scaling policy templates based on configs and variables . After rendering POSTs the json to Spinnaker for creation .
50,182
def create_policy(self):
    """Create scaling policies for the application's server group.

    Main driver for the scaling policy creation process: finds the server
    group, deletes existing policies (policies must be recreated, not
    upserted, for consistency), then renders and applies a scale-up policy
    and, unless disabled, a scale-down policy.
    """
    if not self.settings['asg']['scaling_policy']:
        self.log.info("No scaling policy found, skipping...")
        return

    server_group = self.get_server_group()

    # Existing policies must be removed before recreating them.
    for policy in self.get_all_existing(server_group):
        for subpolicy in policy:
            self.delete_existing_policy(subpolicy, server_group)

    # Use .get so a missing "period_minutes" key falls back to the 30
    # minute default instead of raising KeyError.
    if self.settings['asg']['scaling_policy'].get('period_minutes'):
        period_sec = int(self.settings['asg']['scaling_policy']['period_minutes']) * 60
    else:
        period_sec = 1800
    self.prepare_policy_template('scale_up', period_sec, server_group)

    if self.settings['asg']['scaling_policy'].get('scale_down', True):
        self.prepare_policy_template('scale_down', period_sec, server_group)
Wrapper function . Gets the server group sets sane defaults deletes existing policies and then runs self . prepare_policy_template for scaling up and scaling down policies . This function acts as the main driver for the scaling policy creationprocess
50,183
def get_server_group(self):
    """Return the most recently deployed server group for the application.

    This is the server group the scaling policy will be applied to.
    """
    api_url = "{0}/applications/{1}".format(API_URL, self.app)
    response = requests.get(api_url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    env_clusters = response.json()['clusters'][self.env]
    for cluster in env_clusters:
        # Newest server group is the last entry of the first cluster.
        return cluster['serverGroups'][-1]
Finds the most recently deployed server group for the application . This is the server group that the scaling policy will be applied to .
50,184
def delete_existing_policy(self, scaling_policy, server_group):
    """Delete *scaling_policy* from *server_group* via a Spinnaker task.

    Scaling policies need to be deleted instead of upserted for consistency.
    """
    policy_name = scaling_policy['policyName']
    self.log.info("Deleting policy %s on %s", policy_name, server_group)
    delete_job = {
        "policyName": policy_name,
        "serverGroupName": server_group,
        "credentials": self.env,
        "region": self.region,
        "provider": "aws",
        "type": "deleteScalingPolicy",
        "user": "foremast-autoscaling-policy"
    }
    delete_dict = {
        "application": self.app,
        "description": "Delete scaling policy",
        "job": [delete_job],
    }
    wait_for_task(json.dumps(delete_dict))
Given a scaling_policy and server_group deletes the existing scaling_policy . Scaling policies need to be deleted instead of upserted for consistency .
50,185
def get_all_existing(self, server_group):
    """Finds all existing scaling policies for an application.

    Args:
        server_group (str): Autoscaling group name to filter policies by.

    Returns:
        list: Lists of scaling policies attached to *server_group*.
    """
    self.log.info("Checking for existing scaling policy")
    url = "{0}/applications/{1}/clusters/{2}/{1}/serverGroups".format(API_URL, self.app, self.env)
    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would keep this check unconditional.
    assert response.ok, "Error looking for existing Autoscaling Policy for {0}: {1}".format(self.app, response.text)

    scalingpolicies = []
    for servergroup in response.json():
        if servergroup['scalingPolicies'] and servergroup['asg']['autoScalingGroupName'] == server_group:
            self.log.info("Found policies on %s", server_group)
            scalingpolicies.append(servergroup['scalingPolicies'])
    self.log.debug("Scaling policies: %s", scalingpolicies)
    return scalingpolicies
Finds all existing scaling policies for an application
50,186
def destroy_cloudwatch_event(app='', env='dev', region=''):
    """Destroy Cloudwatch event subscriptions for *app*.

    Returns:
        bool: Always True.
    """
    events_client = boto3.Session(profile_name=env, region_name=region).client('events')
    event_rules = get_cloudwatch_event_rule(app_name=app, account=env, region=region)
    for rule in event_rules:
        events_client.remove_targets(Rule=rule, Ids=[app])
    return True
Destroy Cloudwatch event subscription .
50,187
def create_bucket(self):
    """Create or update bucket based on app name.

    Shared buckets are only validated (never created); otherwise the bucket
    is created when needed, then all bucket sub-configurations are applied.

    Raises:
        S3SharedBucketNotFound: When a shared target bucket does not exist.
    """
    bucket_exists = self._bucket_exists()

    if self.s3props.get('shared_bucket_target'):
        if bucket_exists:
            LOG.info('App uses shared bucket - %s ', self.bucket)
        else:
            LOG.error("Shared bucket %s does not exist", self.bucket)
            raise S3SharedBucketNotFound
    else:
        if self.region == 'us-east-1':
            # us-east-1 rejects a LocationConstraint; create is idempotent.
            _response = self.s3client.create_bucket(ACL=self.s3props['bucket_acl'], Bucket=self.bucket)
        else:
            if not bucket_exists:
                _response = self.s3client.create_bucket(
                    ACL=self.s3props['bucket_acl'],
                    Bucket=self.bucket,
                    CreateBucketConfiguration={'LocationConstraint': self.region})
            else:
                _response = "bucket already exists, skipping create for non-standard region buckets."
        LOG.debug('Response creating bucket: %s', _response)

    LOG.info('%s - S3 Bucket Upserted', self.bucket)
    # Re-apply every bucket sub-configuration on each run.
    self._put_bucket_policy()
    self._put_bucket_website()
    self._put_bucket_logging()
    self._put_bucket_lifecycle()
    self._put_bucket_versioning()
    self._put_bucket_encryption()
    self._put_bucket_tagging()
Create or update bucket based on app name .
50,188
def _bucket_exists(self):
    """Return True when the app bucket already exists."""
    try:
        self.s3client.get_bucket_location(Bucket=self.bucket)
    except ClientError as error:
        LOG.error(error)
        return False
    return True
Check if the bucket exists .
50,189
def _put_bucket_policy(self):
    """Attach the configured bucket policy, or remove it when unset."""
    if self.s3props['bucket_policy']:
        _response = self.s3client.put_bucket_policy(
            Bucket=self.bucket, Policy=json.dumps(self.s3props['bucket_policy']))
    else:
        _response = self.s3client.delete_bucket_policy(Bucket=self.bucket)
    LOG.debug('Response adding bucket policy: %s', _response)
    LOG.info('S3 Bucket Policy Attached')
Attach a bucket policy to app bucket .
50,190
def _put_bucket_website(self):
    """Enable or disable static website hosting on the bucket."""
    website_settings = self.s3props['website']
    if website_settings['enabled']:
        website_config = {
            'ErrorDocument': {'Key': website_settings['error_document']},
            'IndexDocument': {'Suffix': website_settings['index_suffix']},
        }
        _response = self.s3client.put_bucket_website(Bucket=self.bucket, WebsiteConfiguration=website_config)
        self._put_bucket_cors()
        self._set_bucket_dns()
    else:
        _response = self.s3client.delete_bucket_website(Bucket=self.bucket)
        self._put_bucket_cors()
    LOG.debug('Response setting up S3 website: %s', _response)
    LOG.info('S3 website settings updated')
Configure static website on S3 bucket .
50,191
def _set_bucket_dns(self):
    """Create CNAME for S3 endpoint.

    Builds the region-appropriate S3 website endpoint, then upserts a CNAME
    for the bucket in each matching hosted zone.
    """
    # Regions whose website endpoint uses "s3-website.<region>" (dot)
    # rather than "s3-website-<region>" (dash).
    dotformat_regions = ["eu-west-2", "eu-central-1", "ap-northeast-2", "ap-south-1", "ca-central-1", "us-east-2"]
    if self.region in dotformat_regions:
        s3_endpoint = "{0}.s3-website.{1}.amazonaws.com".format(self.bucket, self.region)
    else:
        s3_endpoint = "{0}.s3-website-{1}.amazonaws.com".format(self.bucket, self.region)

    # NOTE(review): get_dns_zone_ids branches on facing == 'external';
    # passing "public" selects only private zones -- confirm intended value.
    zone_ids = get_dns_zone_ids(env=self.env, facing="public")
    dns_kwargs = {
        'dns_name': self.bucket,
        'dns_name_aws': s3_endpoint,
        'dns_ttl': self.properties['dns']['ttl'],
    }

    for zone_id in zone_ids:
        LOG.debug('zone_id: %s', zone_id)
        update_dns_zone_record(self.env, zone_id, **dns_kwargs)
    LOG.info("Created DNS %s for Bucket", self.bucket)
Create CNAME for S3 endpoint .
50,192
def _put_bucket_cors(self):
    """Apply the bucket CORS configuration, or remove it when disabled."""
    # CORS only makes sense alongside website hosting.
    if self.s3props['cors']['enabled'] and self.s3props['website']['enabled']:
        cors_rules = [{
            'AllowedHeaders': rule['cors_headers'],
            'AllowedMethods': rule['cors_methods'],
            'AllowedOrigins': rule['cors_origins'],
            'ExposeHeaders': rule['cors_expose_headers'],
            'MaxAgeSeconds': rule['cors_max_age'],
        } for rule in self.s3props['cors']['cors_rules']]
        cors_config = {'CORSRules': cors_rules}
        LOG.debug(cors_config)
        _response = self.s3client.put_bucket_cors(Bucket=self.bucket, CORSConfiguration=cors_config)
    else:
        _response = self.s3client.delete_bucket_cors(Bucket=self.bucket)
    LOG.debug('Response setting up S3 CORS: %s', _response)
    LOG.info('S3 CORS configuration updated')
Adds bucket cors configuration .
50,193
def _put_bucket_encryption(self):
    """Apply or remove server-side encryption configuration on the bucket.

    When encryption is enabled in the S3 properties, applies the configured
    encryption rules; otherwise deletes any existing encryption
    configuration from the bucket.
    """
    if self.s3props['encryption']['enabled']:
        # Build the configuration directly from the configured rules.
        # (The original placeholder assignment {'Rules': [{}]} was dead
        # code, immediately overwritten — removed.)
        encryption_config = {
            'Rules': self.s3props['encryption']['encryption_rules'],
        }
        LOG.debug(encryption_config)
        _response = self.s3client.put_bucket_encryption(
            Bucket=self.bucket,
            ServerSideEncryptionConfiguration=encryption_config)
    else:
        _response = self.s3client.delete_bucket_encryption(Bucket=self.bucket)
    LOG.debug('Response setting up S3 encryption: %s', _response)
    LOG.info('S3 encryption configuration updated')
Adds bucket encryption configuration .
50,194
def _put_bucket_lifecycle(self):
    """Apply or remove the bucket lifecycle configuration."""
    if self.s3props['lifecycle']['enabled']:
        status = 'applied'
        lifecycle_config = {
            'Rules': self.s3props['lifecycle']['lifecycle_rules'],
        }
        LOG.debug('Lifecycle Config: %s', lifecycle_config)
        _response = self.s3client.put_bucket_lifecycle_configuration(
            Bucket=self.bucket, LifecycleConfiguration=lifecycle_config)
    else:
        status = 'deleted'
        _response = self.s3client.delete_bucket_lifecycle(Bucket=self.bucket)
    LOG.debug('Response setting up S3 lifecycle: %s', _response)
    LOG.info('S3 lifecycle configuration %s', status)
Adds bucket lifecycle configuration .
50,195
def _put_bucket_logging(self):
    """Configure server access logging for the bucket.

    Sending an empty ``BucketLoggingStatus`` disables access logging,
    so the PUT call is made in both the enabled and disabled cases.
    """
    if self.s3props['logging']['enabled']:
        logging_config = {
            'LoggingEnabled': {
                'TargetBucket': self.s3props['logging']['logging_bucket'],
                'TargetGrants': self.s3props['logging']['logging_grants'],
                'TargetPrefix': self.s3props['logging']['logging_bucket_prefix'],
            }
        }
    else:
        logging_config = {}
    _response = self.s3client.put_bucket_logging(
        Bucket=self.bucket, BucketLoggingStatus=logging_config)
    LOG.debug('Response setting up S3 logging: %s', _response)
    LOG.info('S3 logging configuration updated')
Add a bucket logging policy to the bucket for S3 access requests.
50,196
def _put_bucket_tagging(self):
    """Apply configured tags, plus app group/name tags, to the bucket."""
    # Copy before updating so the shared s3props configuration dict is not
    # mutated in place (the original code modified it on every call).
    all_tags = dict(self.s3props['tagging']['tags'])
    all_tags.update({'app_group': self.group, 'app_name': self.app_name})
    tag_set = generate_s3_tags.generated_tag_data(all_tags)
    tagging_config = {'TagSet': tag_set}
    self.s3client.put_bucket_tagging(Bucket=self.bucket, Tagging=tagging_config)
    LOG.info("Adding tagging %s for Bucket", tag_set)
Add bucket tags to bucket .
50,197
def _put_bucket_versioning(self):
    """Enable or suspend object versioning on the bucket."""
    status = 'Enabled' if self.s3props['versioning']['enabled'] else 'Suspended'
    versioning_config = {
        'MFADelete': self.s3props['versioning']['mfa_delete'],
        'Status': status,
    }
    _response = self.s3client.put_bucket_versioning(
        Bucket=self.bucket, VersioningConfiguration=versioning_config)
    LOG.debug('Response setting up S3 versioning: %s', _response)
    LOG.info('S3 versioning configuration updated')
Add a bucket versioning policy to the bucket.
50,198
def process_git_configs(git_short=''):
    """Retrieve and process _application.json_ files from GitLab.

    Args:
        git_short (str): Short Git namespace, e.g. ``group/project``.

    Returns:
        dict: Processed configurations, with the master branch commit ID
        recorded under ``['pipeline']['config_commit']``.
    """
    LOG.info('Processing application.json files from GitLab "%s".', git_short)
    file_lookup = FileLookup(git_short=git_short)
    app_configs = process_configs(
        file_lookup,
        RUNWAY_BASE_PATH + '/application-master-{env}.json',
        RUNWAY_BASE_PATH + '/pipeline.json')
    # Record which commit the configuration was read from, for traceability.
    config_commit = file_lookup.project.commits.get('master').attributes['id']
    LOG.info('Commit ID used: %s', config_commit)
    app_configs['pipeline']['config_commit'] = config_commit
    return app_configs
Retrieve _application . json_ files from GitLab .
50,199
def process_runway_configs(runway_dir=''):
    """Read _application.json_ files from a local runway directory.

    Args:
        runway_dir (str): Path to the local directory of configuration files.

    Returns:
        dict: Processed configurations.
    """
    LOG.info('Processing application.json files from local directory "%s".', runway_dir)
    lookup = FileLookup(runway_dir=runway_dir)
    return process_configs(lookup, 'application-master-{env}.json', 'pipeline.json')
Read the _application . json_ files .