| idx (int64: 0-63k) | question (string: 61-4.03k chars) | target (string: 6-1.23k chars) |
|---|---|---|
| 6,700 | `def upload_file_to_s3(awsclient, bucket, key, filename): client_s3 = awsclient.get_client('s3') transfer = S3Transfer(client_s3) transfer.upload_file(filename, bucket, key) response = client_s3.head_object(Bucket=bucket, Key=key) etag = response.get('ETag') version_id = response.get('VersionId', None) return etag, version_id` | Upload a file to an AWS S3 bucket. |
| 6,701 | `def remove_file_from_s3(awsclient, bucket, key): client_s3 = awsclient.get_client('s3') response = client_s3.delete_object(Bucket=bucket, Key=key)` | Remove a file from an AWS S3 bucket. |
| 6,702 | `def ls(awsclient, bucket, prefix=None): params = {'Bucket': bucket} if prefix: params['Prefix'] = prefix client_s3 = awsclient.get_client('s3') objects = client_s3.list_objects_v2(**params) if objects['KeyCount'] > 0: keys = [k['Key'] for k in objects['Contents']] return keys` | List bucket contents. |
| 6,703 | `def TST(self, params): Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params) self.check_arguments(low_registers=(Ra, Rb)) def TST_func(): result = self.register[Ra] & self.register[Rb] self.set_NZ_flags(result) return TST_func` | TST Ra, Rb |
| 6,704 | `def render_to_mail(template, context, **kwargs): lines = iter(line.rstrip() for line in render_to_string("%s.txt" % template, context).splitlines()) subject = "" try: while True: line = next(lines) if line: subject = line break except StopIteration: pass body = "\n".join(lines).strip("\n") message = EmailMultiAlternatives(subject=subject, body=body, **kwargs) try: message.attach_alternative(render_to_string("%s.html" % template, context), "text/html") except TemplateDoesNotExist: pass return message` | Renders a mail and returns the resulting EmailMultiAlternatives instance. |
| 6,705 | `def get_confirmation_url(email, request, name="email_registration_confirm", **kwargs): return request.build_absolute_uri(reverse(name, kwargs={"code": get_confirmation_code(email, request, **kwargs)}))` | Returns the confirmation URL. |
| 6,706 | `def _retrieve_stack_host_zone_name(awsclient, default_stack_name=None): global _host_zone_name if _host_zone_name is not None: return _host_zone_name env = get_env() if env is None: print("Please set environment...") sys.exit() if default_stack_name is None: default_stack_name = 'dp-%s' % env default_stack_output = get_outputs_for_stack(awsclient, default_stack_name) if HOST_ZONE_NAME__STACK_OUTPUT_NAME not in default_stack_output: print("Please debug why default stack '{}' does not contain '{}'...".format(default_stack_name, HOST_ZONE_NAME__STACK_OUTPUT_NAME)) sys.exit() _host_zone_name = default_stack_output[HOST_ZONE_NAME__STACK_OUTPUT_NAME] + "." return _host_zone_name` | Use service discovery to get the host zone name from the default stack. |
| 6,707 | `def delete_log_group(awsclient, log_group_name): client_logs = awsclient.get_client('logs') response = client_logs.delete_log_group(logGroupName=log_group_name)` | Delete the specified log group. |
| 6,708 | `def put_retention_policy(awsclient, log_group_name, retention_in_days): try: create_log_group(awsclient, log_group_name) except GracefulExit: raise except Exception: pass client_logs = awsclient.get_client('logs') response = client_logs.put_retention_policy(logGroupName=log_group_name, retentionInDays=retention_in_days)` | Sets the retention policy of the specified log group; if the log group does not yet exist, it will be created first. |
| 6,709 | `def describe_log_group(awsclient, log_group_name): client_logs = awsclient.get_client('logs') request = {'logGroupNamePrefix': log_group_name, 'limit': 1} response = client_logs.describe_log_groups(**request) if response['logGroups']: return response['logGroups'][0] else: return` | Get info on the specified log group. |
| 6,710 | `def describe_log_stream(awsclient, log_group_name, log_stream_name): client_logs = awsclient.get_client('logs') response = client_logs.describe_log_streams(logGroupName=log_group_name, logStreamNamePrefix=log_stream_name, limit=1) if response['logStreams']: return response['logStreams'][0] else: return` | Get info on the specified log stream. |
| 6,711 | `def create_log_group(awsclient, log_group_name): client_logs = awsclient.get_client('logs') response = client_logs.create_log_group(logGroupName=log_group_name)` | Creates a log group with the specified name. |
| 6,712 | `def create_log_stream(awsclient, log_group_name, log_stream_name): client_logs = awsclient.get_client('logs') response = client_logs.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)` | Creates a log stream for the specified log group. |
| 6,713 | `def put_log_events(awsclient, log_group_name, log_stream_name, log_events, sequence_token=None): client_logs = awsclient.get_client('logs') request = {'logGroupName': log_group_name, 'logStreamName': log_stream_name, 'logEvents': log_events} if sequence_token: request['sequenceToken'] = sequence_token response = client_logs.put_log_events(**request) if 'rejectedLogEventsInfo' in response: log.warn(response['rejectedLogEventsInfo']) if 'nextSequenceToken' in response: return response['nextSequenceToken']` | Put log events for the specified log group and stream. |
| 6,714 | `def get_log_events(awsclient, log_group_name, log_stream_name, start_ts=None): client_logs = awsclient.get_client('logs') request = {'logGroupName': log_group_name, 'logStreamName': log_stream_name} if start_ts: request['startTime'] = start_ts response = client_logs.get_log_events(**request) if 'events' in response and response['events']: return [{'timestamp': e['timestamp'], 'message': e['message']} for e in response['events']]` | Get log events for the specified log group and stream. This is used in tenkai to output instance diagnostics. |
| 6,715 | `def reload(self): config = self._default_configuration() if self._file_path: config.update(self._load_config_file()) if config != self._values: self._values = config return True return False` | Reload the configuration from disk, returning True if the configuration has changed from the previous values. |
| 6,716 | `def _load_config_file(self): LOGGER.info('Loading configuration from %s', self._file_path) if self._file_path.endswith('json'): config = self._load_json_config() else: config = self._load_yaml_config() for key, value in [(k, v) for k, v in config.items()]: if key.title() != key: config[key.title()] = value del config[key] return flatdict.FlatDict(config)` | Load the configuration file into memory, returning the content. |
| 6,717 | `def _load_json_config(self): try: return json.loads(self._read_config()) except ValueError as error: raise ValueError('Could not read configuration file: {}'.format(error))` | Load the configuration file in JSON format. |
| 6,718 | `def _load_yaml_config(self): try: config = self._read_config() except OSError as error: raise ValueError('Could not read configuration file: %s' % error) try: return yaml.safe_load(config) except yaml.YAMLError as error: message = '\n'.join([' > %s' % line for line in str(error).split('\n')]) sys.stderr.write('\n\n Error in the configuration file:\n\n{}\n\n'.format(message)) sys.stderr.write(' Configuration should be a valid YAML file.\n') sys.stderr.write(' YAML format validation available at http://yamllint.com\n') raise ValueError(error)` | Loads the configuration file from a .yaml or .yml file. |
| 6,719 | `def _normalize_file_path(file_path): if not file_path: return None elif file_path.startswith('s3://') or file_path.startswith('http://') or file_path.startswith('https://'): return file_path return path.abspath(file_path)` | Normalize the file path value. |
| 6,720 | `def _read_config(self): if not self._file_path: return None elif self._file_path.startswith('s3://'): return self._read_s3_config() elif self._file_path.startswith('http://') or self._file_path.startswith('https://'): return self._read_remote_config() elif not path.exists(self._file_path): raise ValueError('Configuration file not found: {}'.format(self._file_path)) with open(self._file_path, 'r') as handle: return handle.read()` | Read the configuration from the various places it may be read from. |
| 6,721 | `def _read_remote_config(self): try: import requests except ImportError: requests = None if not requests: raise ValueError('Remote config URL specified but requests not installed') result = requests.get(self._file_path) if not result.ok: raise ValueError('Failed to retrieve remote config: {}'.format(result.status_code)) return result.text` | Read a remote config via URL. |
| 6,722 | `def _read_s3_config(self): try: import boto3 import botocore.exceptions except ImportError: boto3, botocore = None, None if not boto3: raise ValueError('s3 URL specified for configuration but boto3 not installed') parsed = parse.urlparse(self._file_path) try: response = boto3.client('s3', endpoint_url=os.environ.get('S3_ENDPOINT')).get_object(Bucket=parsed.netloc, Key=parsed.path.lstrip('/')) except botocore.exceptions.ClientError as e: raise ValueError('Failed to download configuration from S3: {}'.format(e)) return response['Body'].read().decode('utf-8')` | Read the value of the configuration file stored in Amazon S3. |
| 6,723 | `def update(self, configuration, debug=None): if self.config != dict(configuration) and debug != self.debug: self.config = dict(configuration) self.debug = debug self.configure() return True return False` | Update the internal configuration values, removing debug_only handlers if debug is False. Returns True if the configuration has changed from the previous configuration values. |
| 6,724 | `def configure(self): if self.debug is not None and not self.debug: self._remove_debug_handlers() self._remove_debug_only() logging.config.dictConfig(self.config) try: logging.captureWarnings(True) except AttributeError: pass` | Configure the Python stdlib logger. |
| 6,725 | `def _remove_debug_handlers(self): remove = list() for handler in self.config[self.HANDLERS]: if self.config[self.HANDLERS][handler].get('debug_only'): remove.append(handler) for handler in remove: del self.config[self.HANDLERS][handler] for logger in self.config[self.LOGGERS].keys(): logger = self.config[self.LOGGERS][logger] if handler in logger[self.HANDLERS]: logger[self.HANDLERS].remove(handler) self._remove_debug_only()` | Remove any handlers with an attribute of debug_only that is True, and remove the references to said handlers from any loggers that are referencing them. |
| 6,726 | `def _remove_debug_only(self): LOGGER.debug('Removing debug only from handlers') for handler in self.config[self.HANDLERS]: if self.DEBUG_ONLY in self.config[self.HANDLERS][handler]: del self.config[self.HANDLERS][handler][self.DEBUG_ONLY]` | Iterate through each handler, removing the invalid dictConfig key debug_only. |
| 6,727 | `def as_dictionary(self): return {to_camel_case(i): Serializable._convert_to_dictionary(self.__dict__[i]) for i in self.__dict__ if self.__dict__[i] is not None}` | Convert this object to a dictionary with formatting appropriate for a PIF. |
| 6,728 | `def _convert_to_dictionary(obj): if isinstance(obj, list): return [Serializable._convert_to_dictionary(i) for i in obj] elif hasattr(obj, 'as_dictionary'): return obj.as_dictionary() else: return obj` | Convert obj to a dictionary with formatting appropriate for a PIF. This function attempts to treat obj as a Pio object and otherwise returns obj. |
| 6,729 | `def _get_object(class_, obj): if isinstance(obj, list): return [Serializable._get_object(class_, i) for i in obj] elif isinstance(obj, dict): return class_(**keys_to_snake_case(obj)) else: return obj` | Helper function that returns an object, or, if it is a dictionary, initializes it from class_. |
| 6,730 | `def total_level(source_levels): sums = 0.0 for l in source_levels: if l is None: continue if l == 0: continue sums += pow(10.0, float(l) / 10.0) level = 10.0 * math.log10(sums) return level` | Calculates the total sound pressure level based on multiple source levels. |
| 6,731 | `def total_rated_level(octave_frequencies): sums = 0.0 for band in OCTAVE_BANDS.keys(): if band not in octave_frequencies: continue if octave_frequencies[band] is None: continue if octave_frequencies[band] == 0: continue sums += pow(10.0, ((float(octave_frequencies[band]) + OCTAVE_BANDS[band][1]) / 10.0)) level = 10.0 * math.log10(sums) return level` | Calculates the A-rated total sound pressure level based on octave band frequencies. |
| 6,732 | `def distant_level(reference_level, distance, reference_distance=1.0): rel_dist = float(reference_distance) / float(distance) level = float(reference_level) + 20.0 * (math.log(rel_dist) / math.log(10)) return level` | Calculates the sound pressure level as a function of distance, assuming a perfect ball-shaped source and spread. |
| 6,733 | `def distant_total_damped_rated_level(octave_frequencies, distance, temp, relhum, reference_distance=1.0): damping_distance = distance - reference_distance sums = 0.0 for band in OCTAVE_BANDS.keys(): if band not in octave_frequencies: continue if octave_frequencies[band] is None: continue distant_val = distant_level(reference_level=float(octave_frequencies[band]), distance=distance, reference_distance=reference_distance) damp_per_meter = damping(temp=temp, relhum=relhum, freq=OCTAVE_BANDS[band][0]) distant_val = distant_val - (damping_distance * damp_per_meter) distant_val += OCTAVE_BANDS[band][1] sums += pow(10.0, (distant_val / 10.0)) level = 10.0 * math.log10(sums) return level` | Calculates the damped A-rated total sound pressure level at a given distance, temperature, and relative humidity from octave frequency sound pressure levels at a reference distance. |
| 6,734 | `def getLogger(name): logger = logging.getLogger(name) logger.setLevel(logging_config['loggers']['gcdt']['level']) return logger` | This is used by gcdt plugins to get a logger with the right level. |
| 6,735 | `def keep_session_alive(self): try: self.resources() except xmlrpclib.Fault as fault: if fault.faultCode == 5: self.login() else: raise` | If the session expired, logs back in. |
| 6,736 | `def help(self): print('Resources:') print('') for name in sorted(self._resources.keys()): methods = sorted(self._resources[name]._methods.keys()) print('{}: {}'.format(bold(name), ', '.join(methods)))` | Prints discovered resources and their associated methods. Nice when noodling in the terminal to wrap your head around Magento's insanity. |
| 6,737 | `def run(self): segments = self.controller.split('.') controller_class = reduce(getattr, segments[1:], __import__('.'.join(segments[:-1]))) cmd_line = ['-f'] if self.configuration is not None: cmd_line.extend(['-c', self.configuration]) args = parser.get().parse_args(cmd_line) controller_instance = controller_class(args, platform) try: controller_instance.start() except KeyboardInterrupt: controller_instance.stop()` | Import the controller and run it. |
| 6,738 | `def info_hash_base32(self): if getattr(self, '_data', None): return b32encode(sha1(bencode(self._data['info'])).digest()) else: raise exceptions.TorrentNotGeneratedException` | Returns the base32 info hash of the torrent. Useful for generating magnet links. |
| 6,739 | `def deploy(awsclient, applicationName, deploymentGroupName, deploymentConfigName, bucket, bundlefile): etag, version = upload_file_to_s3(awsclient, bucket, _build_bundle_key(applicationName), bundlefile) client_codedeploy = awsclient.get_client('codedeploy') response = client_codedeploy.create_deployment(applicationName=applicationName, deploymentGroupName=deploymentGroupName, revision={'revisionType': 'S3', 's3Location': {'bucket': bucket, 'key': _build_bundle_key(applicationName), 'bundleType': 'tgz', 'eTag': etag, 'version': version}}, deploymentConfigName=deploymentConfigName, description='deploy with tenkai', ignoreApplicationStopFailures=True) log.info("Deployment: {} -> URL: https://{}.console.aws.amazon.com/codedeploy/home?region={}#/deployments/{}".format(Fore.MAGENTA + response['deploymentId'] + Fore.RESET, client_codedeploy.meta.region_name, client_codedeploy.meta.region_name, response['deploymentId'])) return response['deploymentId']` | Upload the bundle and deploy it to the deployment group. This includes the bundle action. |
| 6,740 | `def output_deployment_status(awsclient, deployment_id, iterations=100): counter = 0 steady_states = ['Succeeded', 'Failed', 'Stopped'] client_codedeploy = awsclient.get_client('codedeploy') while counter <= iterations: response = client_codedeploy.get_deployment(deploymentId=deployment_id) status = response['deploymentInfo']['status'] if status not in steady_states: log.info('Deployment: %s - State: %s' % (deployment_id, status)) time.sleep(10) elif status == 'Failed': log.info(colored.red('Deployment: {} failed: {}'.format(deployment_id, json.dumps(response['deploymentInfo']['errorInformation'], indent=2)))) return 1 else: log.info('Deployment: %s - State: %s' % (deployment_id, status)) break return 0` | Wait until a deployment is in a steady state and output information. |
| 6,741 | `def stop_deployment(awsclient, deployment_id): log.info('Deployment: %s - stopping active deployment.', deployment_id) client_codedeploy = awsclient.get_client('codedeploy') response = client_codedeploy.stop_deployment(deploymentId=deployment_id, autoRollbackEnabled=True)` | Stop tenkai deployment. |
| 6,742 | `def _list_deployment_instances(awsclient, deployment_id): client_codedeploy = awsclient.get_client('codedeploy') instances = [] next_token = None while True: request = {'deploymentId': deployment_id} if next_token: request['nextToken'] = next_token response = client_codedeploy.list_deployment_instances(**request) instances.extend(response['instancesList']) if 'nextToken' not in response: break next_token = response['nextToken'] return instances` | List deployment instances. |
| 6,743 | `def _get_deployment_instance_summary(awsclient, deployment_id, instance_id): client_codedeploy = awsclient.get_client('codedeploy') request = {'deploymentId': deployment_id, 'instanceId': instance_id} response = client_codedeploy.get_deployment_instance(**request) return response['instanceSummary']['status'], response['instanceSummary']['lifecycleEvents'][-1]['lifecycleEventName']` | Get the deployment instance summary (status and last lifecycle event name). |
| 6,744 | `def _get_deployment_instance_diagnostics(awsclient, deployment_id, instance_id): client_codedeploy = awsclient.get_client('codedeploy') request = {'deploymentId': deployment_id, 'instanceId': instance_id} response = client_codedeploy.get_deployment_instance(**request) for i, event in enumerate(response['instanceSummary']['lifecycleEvents']): if event['status'] == 'Failed': return event['diagnostics']['errorCode'], event['diagnostics']['scriptName'], event['diagnostics']['message'], event['diagnostics']['logTail'] return None` | Gets you the diagnostics details for the first Failed event. |
| 6,745 | `def directive_SPACE(self, label, params): params = params.strip() try: self.convert_to_integer(params) except ValueError: warnings.warn("Unknown parameters; {}".format(params)) return self.labels[label] = self.space_pointer if params in self.equates: params = self.equates[params] self.space_pointer += self.convert_to_integer(params)` | label SPACE num |
| 6,746 | `def instance_ik_model_receiver(fn): @wraps(fn) def receiver(self, sender, **kwargs): if not inspect.isclass(sender): return for src in self._source_groups: if issubclass(sender, src.model_class): fn(self, sender=sender, **kwargs) return return receiver` | A method decorator that filters out signals coming from models that don't have fields that function as ImageFieldSourceGroup sources. |
| 6,747 | `def get_source_fields(self, instance): return set(src.image_field for src in self._source_groups if isinstance(instance, src.model_class))` | Returns the source fields for the given instance. |
| 6,748 | `def check_hook_mechanism_is_intact(module): result = True if check_register_present(module): result = not result if check_deregister_present(module): result = not result return result` | Check if the hook configuration is absent or has both register AND deregister. |
| 6,749 | `def cfn_viz(template, parameters={}, outputs={}, out=sys.stdout): known_sg, open_sg = _analyze_sg(template['Resources']) (graph, edges) = _extract_graph(template.get('Description', ''), template['Resources'], known_sg, open_sg) graph['edges'].extend(edges) _handle_terminals(template, graph, 'Parameters', 'source', parameters) _handle_terminals(template, graph, 'Outputs', 'sink', outputs) graph['subgraphs'].append(_handle_pseudo_params(graph['edges'])) _render(graph, out=out)` | Render dot output for a CloudFormation template in JSON format. |
| 6,750 | `def start(controller_class): args = parser.parse() obj = controller_class(args, platform.operating_system()) if args.foreground: try: obj.start() except KeyboardInterrupt: obj.stop() else: try: with platform.Daemon(obj) as daemon: daemon.start() except (OSError, ValueError) as error: sys.stderr.write('\nError starting %s: %s\n\n' % (sys.argv[0], error)) sys.exit(1)` | Start the Helper controller, either in the foreground or as a daemon process. |
| 6,751 | `def _validate_type(self, name, obj, *args): if obj is None: return for arg in args: if isinstance(obj, arg): return raise TypeError(self.__class__.__name__ + '.' + name + ' is of type ' + type(obj).__name__ + '. Must be equal to None or one of the following types: ' + str(args))` | Helper function that checks the input object's type against each in a list of classes. This function also allows the input value to be equal to None. |
| 6,752 | `def _validate_list_type(self, name, obj, *args): if obj is None: return if isinstance(obj, list): for i in obj: self._validate_type_not_null(name, i, *args) else: self._validate_type(name, obj, *args)` | Helper function that checks the input object's type against each in a list of classes, or, if the input object is a list, each value that it contains against that list. |
| 6,753 | `def _validate_nested_list_type(self, name, obj, nested_level, *args): if nested_level <= 1: self._validate_list_type(name, obj, *args) else: if obj is None: return if not isinstance(obj, list): raise TypeError(self.__class__.__name__ + '.' + name + ' contains value of type ' + type(obj).__name__ + ' where a list is expected') for sub_obj in obj: self._validate_nested_list_type(name, sub_obj, nested_level - 1, *args)` | Helper function that checks the input object as a list, then recurses until nested_level is 1. |
| 6,754 | `def get_version(version): assert len(version) == 5 version_parts = version[:2] if version[2] == 0 else version[:3] major = '.'.join(str(x) for x in version_parts) if version[3] == 'final': return major sub = ''.join(str(x) for x in version[3:5]) if version[3] == 'dev': timestamp = get_git_changeset() sub = 'dev%s' % (timestamp if timestamp else version[4]) return '%s.%s' % (major, sub) if version[3] == 'post': return '%s.%s' % (major, sub) elif version[3] in ('a', 'b', 'rc'): return '%s%s' % (major, sub) else: raise ValueError('Invalid version: %s' % str(version))` | Returns a PEP 440-compliant version number from VERSION. |
| 6,755 | `def get_git_changeset(): repo_dir = os.path.dirname(os.path.abspath(__file__)) git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=repo_dir, universal_newlines=True) timestamp = git_log.communicate()[0] try: timestamp = datetime.datetime.utcfromtimestamp(int(timestamp)) return timestamp.strftime('%Y%m%d%H%M%S') except ValueError: return None` | Returns a numeric identifier of the latest git changeset. |
| 6,756 | `def load_cloudformation_template(path=None): if not path: path = os.path.abspath('cloudformation.py') else: path = os.path.abspath(path) if isinstance(path, six.string_types): try: sp = sys.path sys.path.append(os.path.abspath(os.path.dirname(path))) cloudformation = imp.load_source('cloudformation', path) sys.path = sp if not check_hook_mechanism_is_intact(cloudformation): log.debug('No valid hook configuration: \'%s\'. Not using hooks!', path) else: if check_register_present(cloudformation): cloudformation.register() return cloudformation, True except GracefulExit: raise except ImportError as e: print('could not find package for import: %s' % e) except Exception as e: print('could not import cloudformation.py, maybe something wrong ', 'with your code?') print(e) return None, False` | Load a cloudformation template from path. |
| 6,757 | `def get_parameter_diff(awsclient, config): client_cf = awsclient.get_client('cloudformation') try: stack_name = config['stack']['StackName'] if stack_name: response = client_cf.describe_stacks(StackName=stack_name) if response['Stacks']: stack_id = response['Stacks'][0]['StackId'] stack = response['Stacks'][0] else: return None else: print('StackName is not configured, could not create parameter diff') return None except GracefulExit: raise except Exception: return None changed = 0 table = [] table.append(['Parameter', 'Current Value', 'New Value']) if 'Parameters' in stack: for param in stack['Parameters']: try: old = str(param['ParameterValue']) new = config['parameters'][param['ParameterKey']] if old != new: if old.startswith('***'): new = old table.append([param['ParameterKey'], old, new]) changed += 1 except GracefulExit: raise except Exception: print('Did not find %s in local config file' % param['ParameterKey']) if changed > 0: print(tabulate(table, tablefmt='fancy_grid')) return changed > 0` | Get differences between the local config and the currently active config. |
| 6,758 | `def call_pre_hook(awsclient, cloudformation): if not hasattr(cloudformation, 'pre_hook'): return hook_func = getattr(cloudformation, 'pre_hook') if not hook_func.func_code.co_argcount: hook_func() else: log.error('pre_hook can not have any arguments. The pre_hook is ' + 'executed BEFORE config is read')` | Invoke the pre_hook BEFORE the config is read. |
| 6,759 | `def deploy_stack(awsclient, context, conf, cloudformation, override_stack_policy=False): stack_name = _get_stack_name(conf) parameters = _generate_parameters(conf) if stack_exists(awsclient, stack_name): exit_code = _update_stack(awsclient, context, conf, cloudformation, parameters, override_stack_policy) else: exit_code = _create_stack(awsclient, context, conf, cloudformation, parameters) context['stack_output'] = _get_stack_outputs(awsclient.get_client('cloudformation'), stack_name) _call_hook(awsclient, conf, stack_name, parameters, cloudformation, hook='post_hook', message='CloudFormation is done, now executing post hook...') return exit_code` | Deploy the stack to AWS. Either creates or updates the stack. |
| 6,760 | `def delete_stack(awsclient, conf, feedback=True): client_cf = awsclient.get_client('cloudformation') stack_name = _get_stack_name(conf) last_event = _get_stack_events_last_timestamp(awsclient, stack_name) request = {} dict_selective_merge(request, conf['stack'], ['StackName', 'RoleARN']) response = client_cf.delete_stack(**request) if feedback: return _poll_stack_events(awsclient, stack_name, last_event)` | Delete the stack from AWS. |
| 6,761 | `def list_stacks(awsclient): client_cf = awsclient.get_client('cloudformation') response = client_cf.list_stacks(StackStatusFilter=['CREATE_IN_PROGRESS', 'CREATE_COMPLETE', 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_COMPLETE', 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_IN_PROGRESS', 'UPDATE_ROLLBACK_FAILED', 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', 'UPDATE_ROLLBACK_COMPLETE']) result = {} stack_sum = 0 for summary in response['StackSummaries']: result['StackName'] = summary["StackName"] result['CreationTime'] = summary['CreationTime'] result['StackStatus'] = summary['StackStatus'] print(json2table(result)) stack_sum += 1 print('listed %s stacks' % str(stack_sum))` | Print out the list of stacks deployed on AWS. |
| 6,762 | `def describe_change_set(awsclient, change_set_name, stack_name): client = awsclient.get_client('cloudformation') status = None while status not in ['CREATE_COMPLETE', 'FAILED']: response = client.describe_change_set(ChangeSetName=change_set_name, StackName=stack_name) status = response['Status'] if status == 'FAILED': print(response['StatusReason']) elif status == 'CREATE_COMPLETE': for change in response['Changes']: print(json2table(change['ResourceChange']))` | Print the change set to the console. This requires create_change_set to be run first. |
| 6,763 | `def delete_change_set(awsclient, change_set_name, stack_name): client = awsclient.get_client('cloudformation') response = client.delete_change_set(ChangeSetName=change_set_name, StackName=stack_name)` | Delete the specified change set. Currently we only use this during automated regression testing, but we have plans, so let's locate this functionality here. |
| 6,764 | `def write_template_to_file(conf, template_body): template_file_name = _get_stack_name(conf) + '-generated-cf-template.json' with open(template_file_name, 'w') as opened_file: opened_file.write(template_body) print('wrote cf-template for %s to disk: %s' % (get_env(), template_file_name)) return template_file_name` | Writes the template to disk. |
| 6,765 | `def info(awsclient, config, format=None): if format is None: format = 'tabular' stack_name = _get_stack_name(config) client_cfn = awsclient.get_client('cloudformation') resources = all_pages(client_cfn.list_stack_resources, {'StackName': stack_name}, lambda x: [(r['ResourceType'], r['LogicalResourceId'], r['ResourceStatus']) for r in x['StackResourceSummaries']]) infos = {'stack_output': _get_stack_outputs(client_cfn, stack_name), 'stack_state': _get_stack_state(client_cfn, stack_name), 'resources': resources} if format == 'json': print(json.dumps(infos)) elif format == 'tabular': print('stack output:') print(tabulate(infos['stack_output'], tablefmt='fancy_grid')) print('\nstack_state: %s' % infos['stack_state']) print('\nresources:') print(tabulate(infos['resources'], tablefmt='fancy_grid'))` | Collect info and output it to the console. |
| 6,766 | `def get_queryset(self, request): queryset = super(MetricGroupAdmin, self).get_queryset(request) qs_values = queryset.values('id', 'name') distinct_names = {} for metric in qs_values: distinct_names[metric['name']] = metric['id'] queryset = self.model.objects.filter(id__in=distinct_names.values()) return queryset` | Shows one entry per distinct metric name. |
| 6,767 | `def save_model(self, request, obj, form, change): like_metrics = self.model.objects.filter(name=obj.name) updates = {} for key in form.changed_data: updates[key] = form.cleaned_data[key] like_metrics.update(**updates)` | Updates all metrics with the same name. |
| 6,768 | `def parse_lines(self, code): remove_comments = re.compile(r'^([^;@\n]*);?.*$', re.MULTILINE) code = '\n'.join(remove_comments.findall(code)) parser = re.compile(r'^(\S*)?[\s]*(\S*)([^\n]*)$', re.MULTILINE) res = parser.findall(code) res = [(label.upper(), instruction.upper(), parameters.strip()) for (label, instruction, parameters) in res] return res` | Return a list of the parsed code. |
| 6,769 | `def rule_low_registers(self, arg): r_num = self.check_register(arg) if r_num > 7: raise iarm.exceptions.RuleError("Register {} is not a low register".format(arg))` | Low registers are R0-R7. |
| 6,770 | `def get_two_parameters(self, regex_exp, parameters): Rx, Ry, other = self.get_parameters(regex_exp, parameters) if other is not None and other.strip(): raise iarm.exceptions.ParsingError("Extra arguments found: {}".format(other)) if Rx and Ry: return Rx.upper(), Ry.upper() elif not Rx: raise iarm.exceptions.ParsingError("Missing first positional argument") else: raise iarm.exceptions.ParsingError("Missing second positional argument")` | Get two parameters from a given regex expression. |
| 6,771 | `def rule_special_registers(self, arg): special_registers = "PSR APSR IPSR EPSR PRIMASK FAULTMASK BASEPRI CONTROL" if arg not in special_registers.split(): raise iarm.exceptions.RuleError("{} is not a special register; Must be [{}]".format(arg, special_registers))` | Raises an exception if the register is not a special register. |
| 6,772 | `def version(): log.info('gcdt version %s' % __version__) tools = get_plugin_versions('gcdttool10') if tools: log.info('gcdt tools:') for p, v in tools.items(): log.info(' * %s version %s' % (p, v)) log.info('gcdt plugins:') for p, v in get_plugin_versions().items(): log.info(' * %s version %s' % (p, v)) generators = get_plugin_versions('gcdtgen10') if generators: log.info('gcdt scaffolding generators:') for p, v in generators.items(): log.info(' * %s version %s' % (p, v))` | Output the version of gcdt tools and plugins. |
| 6,773 | `def retries(max_tries, delay=1, backoff=2, exceptions=(Exception,), hook=None): def dec(func): def f2(*args, **kwargs): mydelay = delay tries = range(max_tries - 1, -1, -1) for tries_remaining in tries: try: return func(*args, **kwargs) except GracefulExit: raise except exceptions as e: if tries_remaining > 0: if hook is not None: hook(tries_remaining, e, mydelay) sleep(mydelay) mydelay *= backoff else: raise return f2 return dec` | Function decorator implementing retrying logic. |
| 6,774 | `def get_context(awsclient, env, tool, command, arguments=None): if arguments is None: arguments = {} context = {'_awsclient': awsclient, 'env': env, 'tool': tool, 'command': command, '_arguments': arguments, 'version': __version__, 'user': _get_user(), 'plugins': get_plugin_versions().keys()} return context` | This assembles the tool context. Private members are preceded by an underscore. |
| 6,775 | `def get_command(arguments): return [k for k, v in arguments.items() if not k.startswith('-') and v is True][0]` | Extract the first argument from arguments parsed by docopt. |
| 6,776 | `def check_gcdt_update(): try: inst_version, latest_version = get_package_versions('gcdt') if inst_version < latest_version: log.warn('Please consider an update to gcdt version: %s' % latest_version) except GracefulExit: raise except Exception: log.warn('PyPi appears to be down - we currently can\'t check for newer gcdt versions')` | Check whether a newer gcdt is available and output a warning. |
| 6,777 | `def dict_selective_merge(a, b, selection, path=None): if path is None: path = [] for key in b: if key in selection: if key in a: if isinstance(a[key], dict) and isinstance(b[key], dict): dict_selective_merge(a[key], b[key], b[key].keys(), path + [str(key)]) elif a[key] != b[key]: a[key] = b[key] else: a[key] = b[key] return a` | Conditionally merges b into a if b's keys are contained in selection. |
| 6,778 | `def are_credentials_still_valid(awsclient): client = awsclient.get_client('lambda') try: client.list_functions() except GracefulExit: raise except Exception as e: log.debug(e) log.error(e) return 1 return 0` | Check whether the credentials have expired. |
| 6,779 | `def flatten(lis): new_lis = [] for item in lis: if isinstance(item, collections.Sequence) and not isinstance(item, basestring): new_lis.extend(flatten(item)) else: new_lis.append(item) return new_lis` | Given a list, possibly nested to any level, return it flattened. |
| 6,780 | `def random_string(length=6): return ''.join([random.choice(string.ascii_lowercase) for i in range(length)])` | Create a random lowercase string (six characters by default). |
| 6,781 | `def load_template(): cloudformation, found = load_cloudformation_template() if not found: print(colored.red('could not load cloudformation.py, bailing out...')) sys.exit(1) return cloudformation` | Bail out if the template is not found. |
| 6,782 | `def create_argument_parser(): parser = argparse.ArgumentParser(prog='haas') parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(haas.__version__)) verbosity = parser.add_mutually_exclusive_group() verbosity.add_argument('-v', '--verbose', action='store_const', default=1, dest='verbosity', const=2, help='Verbose output') verbosity.add_argument('-q', '--quiet', action='store_const', const=0, dest='verbosity', help='Quiet output') parser.add_argument('-f', '--failfast', action='store_true', default=False, help='Stop on first fail or error') parser.add_argument('-c', '--catch', dest='catch_interrupt', action='store_true', default=False, help='(Ignored) Catch ctrl-C and display results so far') parser.add_argument('-b', '--buffer', action='store_true', default=False, help='Buffer stdout and stderr during tests') parser.add_argument('start', nargs='*', default=[os.getcwd()], help='One or more directories or dotted package/module names from which to start searching for tests') parser.add_argument('-p', '--pattern', default='test*.py', help="Pattern to match tests ('test*.py' default)") parser.add_argument('-t', '--top-level-directory', default=None, help='Top level directory of project (defaults to start directory)') _add_log_level_option(parser) return parser` | Creates the argument parser for haas. |
| 6,783 | `def run(self, plugin_manager=None): if plugin_manager is None: plugin_manager = PluginManager() plugin_manager.add_plugin_arguments(self.parser) args = self.parser.parse_args(self.argv[1:]) environment_plugins = plugin_manager.get_enabled_hook_plugins(plugin_manager.ENVIRONMENT_HOOK, args) runner = plugin_manager.get_driver(plugin_manager.TEST_RUNNER, args) with PluginContext(environment_plugins): loader = Loader() discoverer = plugin_manager.get_driver(plugin_manager.TEST_DISCOVERY, args, loader=loader) suites = [discoverer.discover(start=start, top_level_directory=args.top_level_directory, pattern=args.pattern) for start in args.start] if len(suites) == 1: suite = suites[0] else: suite = loader.create_suite(suites) test_count = suite.countTestCases() result_handlers = plugin_manager.get_enabled_hook_plugins(plugin_manager.RESULT_HANDLERS, args, test_count=test_count) result_collector = ResultCollector(buffer=args.buffer, failfast=args.failfast) for result_handler in result_handlers: result_collector.add_result_handler(result_handler) result = runner.run(result_collector, suite) return not result.wasSuccessful()` | Run the haas test runner. |
| 6,784 | `def cache_request_user(user_cls, request, user_id): pk_field = user_cls.pk_field() user = getattr(request, '_user', None) if user is None or getattr(user, pk_field, None) != user_id: request._user = user_cls.get_item(**{pk_field: user_id})` | Helper function to cache the currently logged-in user. |
| 6,785 | `def get_authuser_by_userid(cls, request): userid = authenticated_userid(request) if userid: cache_request_user(cls, request, userid) return request._user` | Get user by ID. |
| 6,786 | `def get_authuser_by_name(cls, request): username = authenticated_userid(request) if username: return cls.get_item(username=username)` | Get user by username. |
| 6,787 | `def includeme(config): from .models import (AuthUserMixin, random_uuid, lower_strip, encrypt_password) add_proc = config.add_field_processors add_proc([random_uuid, lower_strip], model=AuthUserMixin, field='username') add_proc([lower_strip], model=AuthUserMixin, field='email') add_proc([encrypt_password], model=AuthUserMixin, field='password')` | Set up event subscribers. |
| 6,788 | `def _make_request(self, method, params): if self.blocked_until is not None and datetime.datetime.utcnow() < self.blocked_until: raise SlackError("Too many requests - wait until {0}".format(self.blocked_until)) url = "%s/%s" % (SlackClient.BASE_URL, method) params['token'] = self.token response = requests.post(url, data=params, verify=self.verify) if response.status_code == 429: retry_after = int(response.headers.get('retry-after', '1')) self.blocked_until = datetime.datetime.utcnow() + datetime.timedelta(seconds=retry_after) raise SlackError("Too many requests - retry after {0} second(s)".format(retry_after)) result = response.json() if not result['ok']: raise SlackError(result['error']) return result` | Make a request to an API endpoint. |
| 6,789 | `def channels_list(self, exclude_archived=True, **params): method = 'channels.list' params.update({'exclude_archived': exclude_archived and 1 or 0}) return self._make_request(method, params)` | channels.list |
| 6,790 | `def channel_name_to_id(self, channel_name, force_lookup=False): if force_lookup or not self.channel_name_id_map: channels = self.channels_list()['channels'] self.channel_name_id_map = {channel['name']: channel['id'] for channel in channels} channel = channel_name.startswith('#') and channel_name[1:] or channel_name return self.channel_name_id_map.get(channel)` | Helper method for getting a channel's ID from its name. |
| 6,791 | `def chat_post_message(self, channel, text, **params): method = 'chat.postMessage' params.update({'channel': channel, 'text': text}) return self._make_request(method, params)` | chat.postMessage |
| 6,792 | `def chat_update_message(self, channel, text, timestamp, **params): method = 'chat.update' if self._channel_is_name(channel): channel = self.channel_name_to_id(channel) params.update({'channel': channel, 'text': text, 'ts': timestamp}) return self._make_request(method, params)` | chat.update |
| 6,793 | `def includeme(config): root = config.get_root_resource() root.add('nef_polymorphic', '{collections:.+,.+}', view=PolymorphicESView, factory=PolymorphicACL)` | Connect the view to a route that catches all URIs like 'something,something[,...]'. |
| 6,794 | `def get_collections(self): collections = self.request.matchdict['collections'].split('/')[0] collections = [coll.strip() for coll in collections.split(',')] return set(collections)` | Get the names of collections from the request matchdict. |
| 6,795 | `def _get_least_permissions_aces(self, resources): factories = [res.view._factory for res in resources] contexts = [factory(self.request) for factory in factories] for ctx in contexts: if not self.request.has_permission('view', ctx): return else: return [(Allow, principal, 'view') for principal in self.request.effective_principals]` | Get ACEs with the least permissions that fit all resources. |
| 6,796 | `def set_collections_acl(self): acl = [(Allow, 'g:admin', ALL_PERMISSIONS)] collections = self.get_collections() resources = self.get_resources(collections) aces = self._get_least_permissions_aces(resources) if aces is not None: for ace in aces: acl.append(ace) acl.append(DENY_ALL) self.__acl__ = tuple(acl)` | Calculate and set an ACL valid for the requested collections. |
| 6,797 | `def determine_types(self): from nefertari.elasticsearch import ES collections = self.get_collections() resources = self.get_resources(collections) models = set([res.view.Model for res in resources]) es_models = [mdl for mdl in models if mdl and getattr(mdl, '_index_enabled', False)] types = [ES.src2type(mdl.__name__) for mdl in es_models] return types` | Determine ES type names from request data. |
| 6,798 | `def get_command(domain_name, command_name): def send_command(self, **kwargs): return self.ws.send_message('{0}.{1}'.format(domain_name, command_name), kwargs) return send_command` | Returns a closure function that dispatches a message to the WebSocket. |
| 6,799 | `def DomainFactory(domain_name, cmds): klass = type(str(domain_name), (BaseDomain,), {}) for c in cmds: command = get_command(domain_name, c['name']) setattr(klass, c['name'], classmethod(command)) return klass` | Dynamically create a Domain class and set its methods. |
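
The acoustics helpers in rows 6,730-6,733 all rest on the same decibel arithmetic: individual levels add energetically as L = 10 * log10(sum(10^(Li/10))), and spherical spreading attenuates a level by 20 * log10(r0/r). Below is a minimal, self-contained sketch of that math for readers who want to run the rows above; it is an illustrative reimplementation, not the dataset's original source library (which additionally handles None/zero entries and A-weighting via OCTAVE_BANDS):

```python
import math

def total_level(source_levels):
    # Energetic sum of sound pressure levels: L = 10 * log10(sum(10 ** (Li / 10))).
    # Falsy entries (None, 0) are skipped, mirroring rows 6,730-6,731 above.
    sums = sum(10.0 ** (float(l) / 10.0) for l in source_levels if l)
    return 10.0 * math.log10(sums)

def distant_level(reference_level, distance, reference_distance=1.0):
    # Spherical spreading: each doubling of distance costs about 6 dB,
    # i.e. L(r) = L(r0) + 20 * log10(r0 / r).
    return reference_level + 20.0 * math.log10(reference_distance / distance)

# Two 80 dB sources sum to ~83 dB; moving from 1 m to 2 m loses ~6 dB.
assert round(total_level([80, 80]), 1) == 83.0
assert round(distant_level(83.0, 2.0), 1) == 77.0
```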
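Likewise, the `retries` decorator in row 6,773 is easier to follow reflowed with its intended indentation. The sketch below is a standalone reconstruction under the assumption that the library-specific GracefulExit handling can be dropped; the `flaky` usage example is hypothetical:

```python
import time

def retries(max_tries, delay=1, backoff=2, exceptions=(Exception,), hook=None):
    """Retry the wrapped function up to max_tries times, multiplying the delay by backoff each time."""
    def dec(func):
        def f2(*args, **kwargs):
            mydelay = delay
            for tries_remaining in range(max_tries - 1, -1, -1):
                try:
                    return func(*args, **kwargs)
                except exceptions as e:
                    if tries_remaining > 0:
                        if hook is not None:
                            hook(tries_remaining, e, mydelay)  # notify caller before sleeping
                        time.sleep(mydelay)
                        mydelay *= backoff  # exponential backoff: 1s, 2s, 4s, ...
                    else:
                        raise  # out of retries: re-raise the last exception
        return f2
    return dec

@retries(max_tries=3, delay=0.1, hook=lambda left, e, d: print(left, 'tries left:', e))
def flaky():
    raise IOError('transient failure')  # always fails; raises after the 3rd attempt
```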