| idx | question | target |
|---|---|---|
6,600
|
def drape(raster, feature):
    coords = feature['geometry']['coordinates']
    geom_type = feature['geometry']['type']
    if geom_type == 'Point':
        xyz = sample(raster, [coords])
        result = Point(xyz[0])
    elif geom_type == 'LineString':
        xyz = sample(raster, coords)
        points = [Point(x, y, z) for x, y, z in xyz]
        result = LineString(points)
    else:
        logging.error('drape not implemented for {}'.format(geom_type))
        result = None  # avoid a NameError on the return below
    return result
|
Convert a 2D feature to a 3D feature by sampling a raster
|
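A minimal usage sketch for the row above (hedged: the GeoJSON-style feature shape and the rasterio/shapely imports are inferred from the calls drape and sample make; 'dem.tif' and the coordinates are hypothetical):

    import rasterio
    from shapely.geometry import Point, LineString  # drape builds these

    # Hypothetical single-band DEM and a 2D feature:
    feature = {'geometry': {'type': 'Point', 'coordinates': (540000.0, 5170000.0)}}
    with rasterio.open('dem.tif') as raster:
        point_3d = drape(raster, feature)  # shapely Point with z sampled from the DEM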
6,601
|
def sample(raster, coords):
    if len(coords[0]) == 3:
        logging.info('Input is a 3D geometry, z coordinate will be updated.')
        z = raster.sample([(x, y) for x, y, z in coords], indexes=raster.indexes)
    else:
        z = raster.sample(coords, indexes=raster.indexes)
    result = [(vert[0], vert[1], vert_z) for vert, vert_z in zip(coords, z)]
    return result
|
Sample a raster at given coordinates
|
6,602
|
def cli(source_f, raster_f, output, verbose):
    with fiona.open(source_f, 'r') as source:
        source_driver = source.driver
        source_crs = source.crs
        sink_schema = source.schema.copy()
        source_geom = source.schema['geometry']
        if source_geom == 'Point':
            sink_schema['geometry'] = '3D Point'
        elif source_geom == 'LineString':
            sink_schema['geometry'] = '3D LineString'
        elif source_geom in ('3D Point', '3D LineString'):
            pass
        else:
            # the original built BadParameter without raising it
            raise click.BadParameter("Source geometry type {} not implemented".format(source_geom))
        with rasterio.open(raster_f) as raster:
            if source_crs != raster.crs:
                raise click.BadParameter("Features and raster have different CRS.")
            if raster.count > 1:
                # .count is the band count (the original referenced .bands here)
                warnings.warn("Found {0} bands in {1}, expected a single band raster".format(raster.count, raster_f))
            supported = ['int16', 'int32', 'float32', 'float64']
            if raster.dtypes[0] not in supported:
                # the original closed .format() after the first argument
                warnings.warn("Found {0} type in {1}, expected one of {2}".format(raster.dtypes[0], raster_f, supported))
            with fiona.open(output, 'w', driver=source_driver, crs=source_crs, schema=sink_schema) as sink:
                for feature in source:
                    try:
                        feature_z = drapery.drape(raster, feature)
                        sink.write({
                            'geometry': mapping(feature_z),
                            'properties': feature['properties'],
                        })
                    except Exception:
                        logging.exception("Error processing feature %s:", feature['id'])
|
Converts 2D geometries to 3D using GEOS sample through fiona.
|
6,603
|
def eval(self, command):
    'Blocking call, returns the value of the execution in JS'
    event = threading.Event()
    import random
    job_id = str(random.random())
    server.EVALUATIONS[job_id] = event
    message = '?' + job_id + '=' + command
    logging.info(('message:', [message]))
    for listener in server.LISTENERS.get(self.path, []):
        logging.debug(('listener:', listener))
        listener.write_message(message)
    success = event.wait(timeout=30)
    if success:
        value_parser = server.RESULTS[job_id]
        del server.EVALUATIONS[job_id]
        del server.RESULTS[job_id]
        return value_parser()
    else:
        del server.EVALUATIONS[job_id]
        if job_id in server.RESULTS:
            del server.RESULTS[job_id]
        raise IOError('Evaluation failed.')
|
Blocking call; returns the value of the execution in JS.
|
6,604
|
def launch_exception(message):
    error_name = message['name']
    error_descr = message['description']
    mapping = {
        'ReferenceError': NameError,
    }
    if error_name in mapping:
        raise mapping[error_name](error_descr)
    else:
        raise Exception('{}: {}'.format(error_name, error_descr))
|
Launch a Python exception from an error that took place in the browser.
|
6,605
|
def unflatten_dct(obj):
    def reduce_func(accum, key_string_and_value):
        key_string = key_string_and_value[0]
        value = key_string_and_value[1]
        item_key_path = key_string_to_lens_path(key_string)
        container_key_path = init(item_key_path)
        container = unless(
            both(always(length(container_key_path)), fake_lens_path_view(container_key_path)),
            lambda x: default_to(
                if_else(lambda segment: segment.isnumeric(), always([]), always({}))(head(item_key_path))
            )(x)
        )(accum)
        return fake_lens_path_set(item_key_path, value, container)
    return compose(reduce(reduce_func, None), to_pairs)(obj)
|
Undoes the work of flatten_dict
|
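To make the point-free pipeline above concrete, here is a hedged sketch of the round trip it undoes; the dot-separated key format with numeric segments for list indexes is an assumption inferred from the helper names (key_string_to_lens_path, fake_lens_path_set):

    # Hypothetical flat input:
    flat = {'user.name': 'ada', 'user.tags.0': 'math', 'user.tags.1': 'code'}

    # unflatten_dct(flat) would then rebuild the nested structure, roughly:
    # {'user': {'name': 'ada', 'tags': ['math', 'code']}}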
6,606
|
def change_view(self, request, object_id, form_url='', extra_context=None):
    context = {'has_moderate_tool': True}
    if extra_context:
        context.update(extra_context)
    return super(AdminModeratorMixin, self).change_view(
        request=request, object_id=object_id,
        form_url=form_url, extra_context=context)
|
Override change view to add extra context enabling the moderate tool.
|
6,607
|
def get_urls(self):
    from django.conf.urls import url
    urls = super(AdminModeratorMixin, self).get_urls()
    info = self.model._meta.app_label, self.model._meta.model_name
    return [
        url(r'^(.+)/moderate/$',
            self.admin_site.admin_view(self.moderate_view),
            name='%s_%s_moderate' % info),
    ] + urls
|
Add additional moderate URL.
|
6,608
|
def operating_system():
    if platform.system() == 'Darwin':
        return 'OS X Version %s' % platform.mac_ver()[0]
    distribution = ' '.join(platform.linux_distribution()).strip()
    os_platform = platform.platform(True, True)
    if distribution:
        os_platform += ' (%s)' % distribution
    return os_platform
|
Return a string identifying the operating system the application is running on.
|
6,609
|
def start(self):
    if self._is_already_running():
        LOGGER.error('Is already running')
        sys.exit(1)
    try:
        self._daemonize()
        self.controller.start()
    except Exception as error:
        sys.stderr.write('\nERROR: Startup of %s Failed.\n' % sys.argv[0].split('/')[-1])
        exception_log = self._get_exception_log_path()
        if exception_log:
            with open(exception_log, 'a') as handle:
                timestamp = datetime.datetime.now().isoformat()
                handle.write('{:->80}\n'.format(' [START]'))
                handle.write('%s Exception [%s]\n' % (sys.argv[0], timestamp))
                handle.write('{:->80}\n'.format(' [INFO]'))
                handle.write('Interpreter: %s\n' % sys.executable)
                handle.write('CLI arguments: %s\n' % ' '.join(sys.argv))
                handle.write('Exception: %s\n' % error)
                handle.write('Traceback:\n')
                output = traceback.format_exception(*sys.exc_info())
                for line in output:  # echo the traceback to both the log and stdout
                    handle.write(line)
                    sys.stdout.write(line)
                handle.write('{:->80}\n'.format(' [END]'))
                handle.flush()
                sys.stderr.write('\nException log: %s\n\n' % exception_log)
        sys.exit(1)
|
Daemonize if the process is not already running.
|
6,610
|
def gid(self):
    if not self._gid:
        # the original checked self.controller.config but read self.config;
        # use self.config consistently, matching the uid property
        if self.config.daemon.group:
            self._gid = grp.getgrnam(self.config.daemon.group).gr_gid
        else:
            self._gid = os.getgid()
    return self._gid
|
Return the group id that the daemon will run with
|
6,611
|
def uid(self):
    if not self._uid:
        if self.config.daemon.user:
            self._uid = pwd.getpwnam(self.config.daemon.user).pw_uid
        else:
            self._uid = os.getuid()
    return self._uid
|
Return the user id that the process will run as
|
6,612
|
def _get_exception_log_path():
    app = sys.argv[0].split('/')[-1]
    for exception_log in ['/var/log/%s.errors' % app,
                          '/var/tmp/%s.errors' % app,
                          '/tmp/%s.errors' % app]:
        if os.access(path.dirname(exception_log), os.W_OK):
            return exception_log
    return None
|
Return the normalized path for the exception log, or None if no candidate location can be written to.
|
6,613
|
def _get_pidfile_path(self):
    if self.config.daemon.pidfile:
        pidfile = path.abspath(self.config.daemon.pidfile)
        if not os.access(path.dirname(pidfile), os.W_OK):
            raise ValueError('Cannot write to specified pid file path %s' % pidfile)
        return pidfile
    app = sys.argv[0].split('/')[-1]
    for pidfile in ['%s/pids/%s.pid' % (os.getcwd(), app),
                    '/var/run/%s.pid' % app,
                    '/var/run/%s/%s.pid' % (app, app),
                    '/var/tmp/%s.pid' % app,
                    '/tmp/%s.pid' % app,
                    '%s.pid' % app]:
        if os.access(path.dirname(pidfile), os.W_OK):
            return pidfile
    raise OSError('Could not find an appropriate place for a pid file')
|
Return the normalized path for the pidfile, raising an exception if it cannot be written to.
|
6,614
|
def _is_already_running(self):
    pidfile = self._get_pidfile_path()
    if os.path.exists(pidfile):
        pid = open(pidfile).read().strip()
        try:
            os.kill(int(pid), 0)
            sys.stderr.write('Process already running as pid # %s\n' % pid)
            return True
        except OSError as error:
            LOGGER.debug('Found pidfile, no process # %s', error)
            os.unlink(pidfile)
    pattern = ' '.join(sys.argv)
    pattern = '[%s]%s' % (pattern[0], pattern[1:])
    try:
        output = subprocess.check_output('ps a | grep "%s"' % pattern, shell=True)
    except AttributeError:
        # likely a fallback for Pythons without subprocess.check_output
        stdin, stdout, stderr = os.popen3('ps a | grep "%s"' % pattern)
        output = stdout.read()
    except subprocess.CalledProcessError:
        return False
    pids = [int(pid) for pid in re.findall(r'^([0-9]+)\s', output.decode('latin-1'))]
    if os.getpid() in pids:
        pids.remove(os.getpid())
    if not pids:
        return False
    if len(pids) == 1:
        pids = pids[0]
    sys.stderr.write('Process already running as pid # %s\n' % pids)
    return True
|
Check to see if the process is running, first by looking for a pidfile, then by shelling out, in either case removing the pidfile if it exists but the process is not running.
|
6,615
|
def _remove_pidfile(self):
    LOGGER.debug('Removing pidfile: %s', self.pidfile_path)
    try:
        os.unlink(self.pidfile_path)
    except OSError:
        pass
|
Remove the pid file from the filesystem
|
6,616
|
def _write_pidfile(self):
    LOGGER.debug('Writing pidfile: %s', self.pidfile_path)
    with open(self.pidfile_path, "w") as handle:
        handle.write(str(os.getpid()))
|
Write the pid file out with the process number.
|
6,617
|
def to_camel_case(snake_case_string):
    parts = snake_case_string.lstrip('_').split('_')
    return parts[0] + ''.join([i.title() for i in parts[1:]])
|
Convert a string from snake case to camel case. For example, some_var would become someVar.
|
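A quick check of the conversion, using the values from the docstring's own example:

    assert to_camel_case('some_var') == 'someVar'
    assert to_camel_case('_some_var') == 'someVar'  # leading underscores are stripped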
6,618
|
def to_capitalized_camel_case(snake_case_string):
    parts = snake_case_string.split('_')
    return ''.join([i.title() for i in parts])
|
Convert a string from snake case to camel case with the first letter capitalized. For example, some_var would become SomeVar.
|
6,619
|
def to_snake_case(camel_case_string):
    first_pass = _first_camel_case_regex.sub(r'\1_\2', camel_case_string)
    return _second_camel_case_regex.sub(r'\1_\2', first_pass).lower()
|
Convert a string from camel case to snake case. For example, someVar would become some_var.
|
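The two module-level regexes the function references are not part of this row; a plausible pair (an assumption, modeled on the common two-pass camelCase splitter) would be:

    import re

    # First pass splits an acronym run from a following capitalized word,
    # second pass splits a lowercase letter or digit from a following uppercase letter.
    _first_camel_case_regex = re.compile(r'(.)([A-Z][a-z]+)')
    _second_camel_case_regex = re.compile(r'([a-z0-9])([A-Z])')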
6,620
|
def keys_to_snake_case(camel_case_dict):
    return dict((to_snake_case(key), value) for (key, value) in camel_case_dict.items())
|
Make a copy of a dictionary with all keys converted to snake case. This just calls to_snake_case on each of the keys in the dictionary and returns a new dictionary.
|
6,621
|
def list_functions(awsclient):
    client_lambda = awsclient.get_client('lambda')
    response = client_lambda.list_functions()
    for function in response['Functions']:
        log.info(function['FunctionName'])
        log.info('\tMemory: ' + str(function['MemorySize']))
        log.info('\tTimeout: ' + str(function['Timeout']))
        log.info('\tRole: ' + str(function['Role']))
        log.info('\tCurrent Version: ' + str(function['Version']))
        log.info('\tLast Modified: ' + str(function['LastModified']))
        log.info('\tCodeSha256: ' + str(function['CodeSha256']))
        log.info('\n')
    return 0
|
List the deployed lambda functions and print configuration.
|
6,622
|
def deploy_lambda(awsclient, function_name, role, handler_filename, handler_function,
                  folders, description, timeout, memory, subnet_ids=None,
                  security_groups=None, artifact_bucket=None, zipfile=None,
                  fail_deployment_on_unsuccessful_ping=False, runtime='python2.7',
                  settings=None, environment=None, retention_in_days=None):
    if lambda_exists(awsclient, function_name):
        function_version = _update_lambda(awsclient, function_name, handler_filename,
                                          handler_function, folders, role, description,
                                          timeout, memory, subnet_ids, security_groups,
                                          artifact_bucket=artifact_bucket,
                                          zipfile=zipfile, environment=environment)
    else:
        if not zipfile:
            return 1
        log.info('buffer size: %0.2f MB' % float(len(zipfile) / 1000000.0))
        function_version = _create_lambda(awsclient, function_name, role,
                                          handler_filename, handler_function, folders,
                                          description, timeout, memory, subnet_ids,
                                          security_groups, artifact_bucket, zipfile,
                                          runtime=runtime, environment=environment)
    if retention_in_days:
        log_group_name = '/aws/lambda/%s' % function_name
        put_retention_policy(awsclient, log_group_name, retention_in_days)
    pong = ping(awsclient, function_name, version=function_version)
    if 'alive' in str(pong):
        log.info(colored.green('Great you\'re already accepting a ping ' +
                               'in your Lambda function'))
    elif fail_deployment_on_unsuccessful_ping and 'alive' not in str(pong):
        # the original tested the raw pong here; str(pong) matches the first branch
        log.info(colored.red('Pinging your lambda function failed'))
        return 1
    else:
        log.info(colored.red('Please consider adding a reaction to a ' +
                             'ping event to your lambda function'))
    _deploy_alias(awsclient, function_name, function_version)
    return 0
|
Create or update a lambda function.
|
6,623
|
def bundle_lambda(zipfile):
    if not zipfile:
        return 1
    with open('bundle.zip', 'wb') as zfile:
        zfile.write(zipfile)
    log.info('Finished - a bundle.zip is waiting for you...')
    return 0
|
Write zipfile contents to file.
|
6,624
|
def get_metrics(awsclient, name):
    metrics = ['Duration', 'Errors', 'Invocations', 'Throttles']
    client_cw = awsclient.get_client('cloudwatch')
    for metric in metrics:
        response = client_cw.get_metric_statistics(
            Namespace='AWS/Lambda',
            MetricName=metric,
            Dimensions=[{'Name': 'FunctionName', 'Value': name}],
            StartTime=maya.now().subtract(days=1).datetime(),
            EndTime=maya.now().datetime(),
            Period=3600,
            Statistics=['Sum'],
            Unit=unit(metric)
        )
        log.info('\t%s %s' % (metric, repr(aggregate_datapoints(response['Datapoints']))))
    return 0
|
Print out CloudWatch metrics for a lambda function.
|
6,625
|
def rollback(awsclient, function_name, alias_name=ALIAS_NAME, version=None):
    if version:
        log.info('rolling back to version {}'.format(version))
    else:
        log.info('rolling back to previous version')
        version = _get_previous_version(awsclient, function_name, alias_name)
        if version == '0':
            log.error('unable to find previous version of lambda function')
            return 1
    log.info('new version is %s' % str(version))
    _update_alias(awsclient, function_name, version, alias_name)
    return 0
|
Rollback a lambda function to a given version.
|
6,626
|
def delete_lambda(awsclient, function_name, events=None, delete_logs=False):
    if events is not None:
        unwire(awsclient, events, function_name, alias_name=ALIAS_NAME)
    client_lambda = awsclient.get_client('lambda')
    response = client_lambda.delete_function(FunctionName=function_name)
    if delete_logs:
        log_group_name = '/aws/lambda/%s' % function_name
        delete_log_group(awsclient, log_group_name)
    log.info(json2table(response))
    return 0
|
Delete a lambda function.
|
6,627
|
def _stop_ec2_instances(awsclient, ec2_instances, wait=True):
    if len(ec2_instances) == 0:
        return
    client_ec2 = awsclient.get_client('ec2')
    running_instances = all_pages(
        client_ec2.describe_instance_status,
        {'InstanceIds': ec2_instances,
         'Filters': [{'Name': 'instance-state-name', 'Values': ['pending', 'running']}]},
        lambda r: [i['InstanceId'] for i in r.get('InstanceStatuses', [])],
    )
    if running_instances:
        log.info('Stopping EC2 instances: %s', running_instances)
        client_ec2.stop_instances(InstanceIds=running_instances)
        if wait:
            waiter_inst_stopped = client_ec2.get_waiter('instance_stopped')
            waiter_inst_stopped.wait(InstanceIds=running_instances)
|
Helper to stop ec2 instances. By default it waits for instances to stop.
|
6,628
|
def _start_ec2_instances(awsclient, ec2_instances, wait=True):
    if len(ec2_instances) == 0:
        return
    client_ec2 = awsclient.get_client('ec2')
    stopped_instances = all_pages(
        client_ec2.describe_instance_status,
        {'InstanceIds': ec2_instances,
         'Filters': [{'Name': 'instance-state-name', 'Values': ['stopping', 'stopped']}],
         'IncludeAllInstances': True},
        lambda r: [i['InstanceId'] for i in r.get('InstanceStatuses', [])],
    )
    if stopped_instances:
        log.info('Starting EC2 instances: %s', stopped_instances)
        client_ec2.start_instances(InstanceIds=stopped_instances)
        if wait:
            waiter_inst_running = client_ec2.get_waiter('instance_running')
            waiter_inst_running.wait(InstanceIds=stopped_instances)
            waiter_status_ok = client_ec2.get_waiter('instance_status_ok')
            waiter_status_ok.wait(InstanceIds=stopped_instances)
|
Helper to start ec2 instances
|
6,629
|
def _filter_db_instances_by_status(awsclient, db_instances, status_list):
    client_rds = awsclient.get_client('rds')
    db_instances_with_status = []
    for db in db_instances:
        response = client_rds.describe_db_instances(DBInstanceIdentifier=db)
        for entry in response.get('DBInstances', []):
            if entry['DBInstanceStatus'] in status_list:
                db_instances_with_status.append(db)
    return db_instances_with_status
|
Helper to select DB instances.
|
6,630
|
def stop_stack(awsclient, stack_name, use_suspend=False):
    exit_code = 0
    if not stack_exists(awsclient, stack_name):
        log.warn('Stack \'%s\' not deployed - nothing to do!', stack_name)
    else:
        client_cfn = awsclient.get_client('cloudformation')
        client_autoscaling = awsclient.get_client('autoscaling')
        client_rds = awsclient.get_client('rds')
        client_ec2 = awsclient.get_client('ec2')
        resources = all_pages(client_cfn.list_stack_resources,
                              {'StackName': stack_name},
                              lambda r: r['StackResourceSummaries'])
        autoscaling_groups = [r for r in resources
                              if r['ResourceType'] == 'AWS::AutoScaling::AutoScalingGroup']
        response = client_autoscaling.describe_scaling_process_types()
        scaling_process_types = [t['ProcessName'] for t in response.get('Processes', [])]
        for asg in autoscaling_groups:
            ec2_instances = all_pages(
                client_autoscaling.describe_auto_scaling_instances, {},
                lambda r: [i['InstanceId'] for i in r.get('AutoScalingInstances', [])
                           if i['AutoScalingGroupName'] == asg['PhysicalResourceId']],
            )
            if use_suspend:
                log.info('Suspending all autoscaling processes for \'%s\'',
                         asg['LogicalResourceId'])
                response = client_autoscaling.suspend_processes(
                    AutoScalingGroupName=asg['PhysicalResourceId'],
                    ScalingProcesses=scaling_process_types)
                _stop_ec2_instances(awsclient, ec2_instances)
            else:
                log.info('Resize autoscaling group \'%s\' to minSize=0, maxSize=0',
                         asg['LogicalResourceId'])
                response = client_autoscaling.update_auto_scaling_group(
                    AutoScalingGroupName=asg['PhysicalResourceId'],
                    MinSize=0, MaxSize=0)
                if ec2_instances:
                    running_instances = all_pages(
                        client_ec2.describe_instance_status,
                        {'InstanceIds': ec2_instances,
                         'Filters': [{'Name': 'instance-state-name',
                                      'Values': ['pending', 'running']}]},
                        lambda r: [i['InstanceId'] for i in r.get('InstanceStatuses', [])],
                    )
                    if running_instances:
                        waiter_inst_terminated = client_ec2.get_waiter('instance_terminated')
                        waiter_inst_terminated.wait(InstanceIds=running_instances)
        services = [r for r in resources if r['ResourceType'] == 'AWS::ECS::Service']
        if services:
            template, parameters = _get_template_parameters(awsclient, stack_name)
            _stop_ecs_services(awsclient, services, template, parameters)
        instances = [r['PhysicalResourceId'] for r in resources
                     if r['ResourceType'] == 'AWS::EC2::Instance']
        _stop_ec2_instances(awsclient, instances)
        db_instances = [r['PhysicalResourceId'] for r in resources
                        if r['ResourceType'] == 'AWS::RDS::DBInstance']
        running_db_instances = _filter_db_instances_by_status(awsclient, db_instances,
                                                              ['available'])
        for db in running_db_instances:
            log.info('Stopping RDS instance \'%s\'', db)
            client_rds.stop_db_instance(DBInstanceIdentifier=db)
    return exit_code
|
Stop an existing stack on AWS cloud.
|
6,631
|
def _get_autoscaling_min_max(template, parameters, asg_name):
    params = {e['ParameterKey']: e['ParameterValue'] for e in parameters}
    asg = template.get('Resources', {}).get(asg_name, None)
    if asg:
        assert asg['Type'] == 'AWS::AutoScaling::AutoScalingGroup'
        min = asg.get('Properties', {}).get('MinSize', None)
        max = asg.get('Properties', {}).get('MaxSize', None)
        if 'Ref' in min:
            min = params.get(min['Ref'], None)
        if 'Ref' in max:
            max = params.get(max['Ref'], None)
        if min and max:
            return int(min), int(max)
|
Helper to extract the configured MinSize / MaxSize attributes from the template.
|
6,632
|
def _get_service_cluster_desired_count(template, parameters, service_name):
    params = {e['ParameterKey']: e['ParameterValue'] for e in parameters}
    service = template.get('Resources', {}).get(service_name, None)
    if service:
        assert service['Type'] == 'AWS::ECS::Service'
        cluster = service.get('Properties', {}).get('Cluster', None)
        desired_count = service.get('Properties', {}).get('DesiredCount', None)
        if 'Ref' in cluster:
            cluster = params.get(cluster['Ref'], None)
        if not isinstance(desired_count, int) and 'Ref' in desired_count:
            desired_count = params.get(desired_count['Ref'], None)
        return cluster, int(desired_count)
|
Helper to extract the configured DesiredCount attribute from the template.
|
6,633
|
def start_stack(awsclient, stack_name, use_suspend=False):
    exit_code = 0
    if not stack_exists(awsclient, stack_name):
        log.warn('Stack \'%s\' not deployed - nothing to do!', stack_name)
    else:
        client_cfn = awsclient.get_client('cloudformation')
        client_autoscaling = awsclient.get_client('autoscaling')
        client_rds = awsclient.get_client('rds')
        resources = all_pages(client_cfn.list_stack_resources,
                              {'StackName': stack_name},
                              lambda r: r['StackResourceSummaries'])
        autoscaling_groups = [r for r in resources
                              if r['ResourceType'] == 'AWS::AutoScaling::AutoScalingGroup']
        response = client_autoscaling.describe_scaling_process_types()
        scaling_process_types = [t['ProcessName'] for t in response.get('Processes', [])]
        db_instances = [r['PhysicalResourceId'] for r in resources
                        if r['ResourceType'] == 'AWS::RDS::DBInstance']
        stopped_db_instances = _filter_db_instances_by_status(awsclient, db_instances,
                                                              ['stopped'])
        for db in stopped_db_instances:
            log.info('Starting RDS instance \'%s\'', db)
            client_rds.start_db_instance(DBInstanceIdentifier=db)
        for db in stopped_db_instances:
            waiter_db_available = client_rds.get_waiter('db_instance_available')
            waiter_db_available.wait(DBInstanceIdentifier=db)
        instances = [r['PhysicalResourceId'] for r in resources
                     if r['ResourceType'] == 'AWS::EC2::Instance']
        _start_ec2_instances(awsclient, instances)
        services = [r for r in resources if r['ResourceType'] == 'AWS::ECS::Service']
        if (autoscaling_groups and not use_suspend) or services:
            template, parameters = _get_template_parameters(awsclient, stack_name)
            if services:
                _start_ecs_services(awsclient, services, template, parameters)
        for asg in autoscaling_groups:
            if use_suspend:
                instances = all_pages(
                    client_autoscaling.describe_auto_scaling_instances, {},
                    lambda r: [i['InstanceId'] for i in r.get('AutoScalingInstances', [])
                               if i['AutoScalingGroupName'] == asg['PhysicalResourceId']],
                )
                _start_ec2_instances(awsclient, instances)
                log.info('Resuming all autoscaling processes for \'%s\'',
                         asg['LogicalResourceId'])
                response = client_autoscaling.resume_processes(
                    AutoScalingGroupName=asg['PhysicalResourceId'],
                    ScalingProcesses=scaling_process_types)
            else:
                log.info('Resize autoscaling group \'%s\' back to original values',
                         asg['LogicalResourceId'])
                min, max = _get_autoscaling_min_max(template, parameters,
                                                    asg['LogicalResourceId'])
                response = client_autoscaling.update_auto_scaling_group(
                    AutoScalingGroupName=asg['PhysicalResourceId'],
                    MinSize=min, MaxSize=max)
    return exit_code
|
Start an existing stack on AWS cloud.
|
6,634
|
def is_running(self):
    return self._state in [self.STATE_ACTIVE, self.STATE_IDLE, self.STATE_INITIALIZING]
|
Property method that returns a bool specifying if the process is currently running. This will return true if the state is active, idle, or initializing.
|
6,635
|
def process_signal(self, signum):
    if signum == signal.SIGTERM:
        LOGGER.info('Received SIGTERM, initiating shutdown')
        self.stop()
    elif signum == signal.SIGHUP:
        LOGGER.info('Received SIGHUP')
        if self.config.reload():
            LOGGER.info('Configuration reloaded')
            logging.config.dictConfig(self.config.logging)
            self.on_configuration_reloaded()
    elif signum == signal.SIGUSR1:
        self.on_sigusr1()
    elif signum == signal.SIGUSR2:
        self.on_sigusr2()
|
Invoked whenever a signal is added to the stack.
|
6,636
|
def run(self):
    LOGGER.info('%s v%s started', self.APPNAME, self.VERSION)
    self.setup()
    while not any([self.is_stopping, self.is_stopped]):
        self.set_state(self.STATE_SLEEPING)
        try:
            signum = self.pending_signals.get(True, self.wake_interval)
        except queue.Empty:
            pass
        else:
            self.process_signal(signum)
            if any([self.is_stopping, self.is_stopped]):
                break
        self.set_state(self.STATE_ACTIVE)
        self.process()
|
The core method for starting the application. Will set up logging, toggle the runtime state flag, block on the run loop, then call shutdown.
|
6,637
|
def stop(self):
    LOGGER.info('Attempting to stop the process')
    self.set_state(self.STATE_STOP_REQUESTED)
    self.shutdown()
    while self.is_running and self.is_waiting_to_stop:
        LOGGER.info('Waiting for the process to finish')
        time.sleep(self.SLEEP_UNIT)
    if not self.is_stopping:
        self.set_state(self.STATE_STOPPING)
    self.on_shutdown()
    self.set_state(self.STATE_STOPPED)
|
Override to implement shutdown steps.
|
6,638
|
def _add_default_arguments(parser):
    parser.add_argument('-c', '--config', action='store', dest='config',
                        help='Path to the configuration file')
    parser.add_argument('-f', '--foreground', action='store_true', dest='foreground',
                        help='Run the application interactively')
|
Add the default arguments to the parser.
|
6,639
|
def dump(pif, fp, **kwargs):
    return json.dump(pif, fp, cls=PifEncoder, **kwargs)
|
Convert a single Physical Information Object or a list of such objects into a JSON-encoded text file.
|
6,640
|
def load(fp, class_=None, **kwargs):
    return loado(json.load(fp, **kwargs), class_=class_)
|
Convert content in a JSON-encoded text file to a Physical Information Object or a list of such objects.
|
6,641
|
def loads(s, class_=None, **kwargs):
    return loado(json.loads(s, **kwargs), class_=class_)
|
Convert content in a JSON-encoded string to a Physical Information Object or a list of such objects.
|
6,642
|
def loado(obj, class_=None):
    if isinstance(obj, list):
        return [_dict_to_pio(i, class_=class_) for i in obj]
    elif isinstance(obj, dict):
        return _dict_to_pio(obj, class_=class_)
    else:
        raise ValueError('expecting list or dictionary as outermost structure')
|
Convert a dictionary or a list of dictionaries into a single Physical Information Object or a list of such objects.
|
6,643
|
def _dict_to_pio(d, class_=None):
    d = keys_to_snake_case(d)
    if class_:
        return class_(**d)
    if 'category' not in d:
        raise ValueError('Dictionary does not contain a category field: ' + ', '.join(d.keys()))
    elif d['category'] == 'system':
        return System(**d)
    elif d['category'] == 'system.chemical':
        return ChemicalSystem(**d)
    elif d['category'] == 'system.chemical.alloy':
        return Alloy(**d)
    elif d['category'] == 'system.chemical.alloy.phase':
        return ChemicalSystem(**d)
    raise ValueError('Dictionary does not contain a valid top-level category: ' + str(d['category']))
|
Convert a single dictionary object to a Physical Information Object.
|
6,644
|
def get_command(arguments):
    cmds = list(filter(lambda k: not (k.startswith('-') or k.startswith('<')) and arguments[k],
                       arguments.keys()))
    if len(cmds) != 1:
        raise Exception('invalid command line!')
    return cmds[0]
|
Utility function to extract command from docopt arguments.
|
6,645
|
def dispatch(cls, arguments, **kwargs):
    for spec, func in cls._specs:
        args = []
        options = list(filter(lambda k: k.startswith('-') and (arguments[k] or k in spec),
                              arguments.keys()))
        cmds = list(filter(lambda k: not (k.startswith('-') or k.startswith('<')) and arguments[k],
                           arguments.keys()))
        args_spec = list(filter(lambda k: k.startswith('<'), spec))
        cmd_spec = list(filter(lambda k: not (k.startswith('-') or k.startswith('<')), spec))
        for element in spec:
            if element.startswith('-'):
                if element in options:
                    args.append(arguments.get(element, False))
                    options.remove(element)
            elif element.startswith('<') and not arguments.get(element) is False:
                args.append(arguments.get(element))
                if element in args_spec:
                    args_spec.remove(element)
            else:
                if element in cmds and element in cmd_spec:
                    cmds.remove(element)
                    cmd_spec.remove(element)
        if options:
            continue
        if cmds:
            continue
        if args_spec:
            continue
        if cmd_spec:
            continue
        exit_code = func(*args, **kwargs)
        return exit_code
    raise Exception('No implementation for spec: %s' % arguments)
|
Dispatch arguments parsed by docopt to the cmd with matching spec.
|
6,646
|
def convert_representation(self, i):
    if self.number_representation == 'unsigned':
        return i
    elif self.number_representation == 'signed':
        if i & (1 << (self.interpreter._bit_width - 1)):
            return -((~i + 1) & (2 ** self.interpreter._bit_width - 1))
        else:
            return i
    elif self.number_representation == 'hex':
        return hex(i)
|
Return the proper representation for the given integer
|
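A quick sanity check of the signed branch above (the 8-bit width is a hypothetical stand-in for interpreter._bit_width):

    # 0xFF has the sign bit set for an 8-bit width, so it reads back as -1:
    i, bit_width = 0xFF, 8
    assert -((~i + 1) & (2 ** bit_width - 1)) == -1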
6,647
|
def magic_generate_random(self, line):
    line = line.strip().lower()
    if not line or line == 'true':
        self.interpreter.generate_random = True
    elif line == 'false':
        self.interpreter.generate_random = False
    else:
        stream_content = {'name': 'stderr', 'text': "unknown value '{}'".format(line)}
        self.send_response(self.iopub_socket, 'stream', stream_content)
        return {'status': 'error',
                'execution_count': self.execution_count,
                'ename': ValueError.__name__,
                'evalue': "unknown value '{}'".format(line),
                'traceback': '???'}
|
Set the generate random flag; unset registers and memory will return a random value.
|
6,648
|
def magic_postpone_execution(self, line):
    line = line.strip().lower()
    if not line or line == 'true':
        self.interpreter.postpone_execution = True
    elif line == 'false':
        self.interpreter.postpone_execution = False
    else:
        stream_content = {'name': 'stderr', 'text': "unknown value '{}'".format(line)}
        self.send_response(self.iopub_socket, 'stream', stream_content)
        return {'status': 'error',
                'execution_count': self.execution_count,
                'ename': ValueError.__name__,
                'evalue': "unknown value '{}'".format(line),
                'traceback': '???'}
|
Postpone execution of instructions until explicitly run
|
6,649
|
def magic_register(self, line):
    message = ""
    for reg in [i.strip() for i in line.replace(',', '').split()]:
        if '-' in reg:
            r1, r2 = reg.split('-')
            n1 = re.search(self.interpreter.REGISTER_REGEX, r1).groups()[0]
            n2 = re.search(self.interpreter.REGISTER_REGEX, r2).groups()[0]
            n1 = self.interpreter.convert_to_integer(n1)
            n2 = self.interpreter.convert_to_integer(n2)
            for i in range(n1, n2 + 1):
                val = self.interpreter.register[r1[0] + str(i)]
                val = self.convert_representation(val)
                message += "{}: {}\n".format(r1[0] + str(i), val)
        else:
            val = self.interpreter.register[reg]
            val = self.convert_representation(val)
            message += "{}: {}\n".format(reg, val)
    stream_content = {'name': 'stdout', 'text': message}
    self.send_response(self.iopub_socket, 'stream', stream_content)
|
Print out the current value of a register
|
6,650
|
def magic_memory(self, line):
    message = ""
    for address in [i.strip() for i in line.replace(',', '').split()]:
        if '-' in address:
            m1, m2 = address.split('-')
            n1 = re.search(self.interpreter.IMMEDIATE_NUMBER, m1).groups()[0]
            n2 = re.search(self.interpreter.IMMEDIATE_NUMBER, m2).groups()[0]
            n1 = self.interpreter.convert_to_integer(n1)
            n2 = self.interpreter.convert_to_integer(n2)
            for i in range(n1, n2 + 1):
                val = self.interpreter.memory[i]
                val = self.convert_representation(val)
                message += "{}: {}\n".format(str(i), val)
        else:
            val = self.interpreter.memory[self.interpreter.convert_to_integer(address)]
            val = self.convert_representation(val)
            message += "{}: {}\n".format(address, val)
    stream_content = {'name': 'stdout', 'text': message}
    self.send_response(self.iopub_socket, 'stream', stream_content)
|
Print out the current value of memory
|
6,651
|
def magic_run(self, line):
    i = float('inf')
    if line.strip():
        i = int(line)
    try:
        with warnings.catch_warnings(record=True) as w:
            self.interpreter.run(i)
            for warning_message in w:
                stream_content = {'name': 'stdout',
                                  'text': 'Warning: ' + str(warning_message.message) + '\n'}
                self.send_response(self.iopub_socket, 'stream', stream_content)
    except iarm.exceptions.EndOfProgram as e:
        f_name = self.interpreter.program[self.interpreter.register['PC'] - 1].__name__
        f_name = f_name[:f_name.find('_')]
        message = "Error in {}: ".format(f_name)
        stream_content = {'name': 'stdout', 'text': message + str(e) + '\n'}
        self.send_response(self.iopub_socket, 'stream', stream_content)
    except Exception as e:
        for err in e.args:
            stream_content = {'name': 'stderr', 'text': str(err)}
            self.send_response(self.iopub_socket, 'stream', stream_content)
        return {'status': 'error', 'execution_count': self.execution_count,
                'ename': type(e).__name__, 'evalue': str(e), 'traceback': '???'}
|
Run the current program
|
6,652
|
def magic_help(self, line):
    line = line.strip()
    if not line:
        for magic in self.magics:
            stream_content = {'name': 'stdout', 'text': "%{}\n".format(magic)}
            self.send_response(self.iopub_socket, 'stream', stream_content)
    elif line in self.magics:
        stream_content = {'name': 'stdout',
                          'text': "{}\n{}".format(line, self.magics[line].__doc__)}
        self.send_response(self.iopub_socket, 'stream', stream_content)
    elif line in self.interpreter.ops:
        stream_content = {'name': 'stdout',
                          'text': "{}\n{}".format(line, self.interpreter.ops[line].__doc__)}
        self.send_response(self.iopub_socket, 'stream', stream_content)
    else:
        stream_content = {'name': 'stderr',
                          'text': "'{}' not a known magic or instruction".format(line)}
        self.send_response(self.iopub_socket, 'stream', stream_content)
|
Print out the help for magics
|
6,653
|
def list_apis(awsclient):
    client_api = awsclient.get_client('apigateway')
    apis = client_api.get_rest_apis()['items']
    for api in apis:
        print(json2table(api))
|
List APIs in account.
|
6,654
|
def deploy_api(awsclient, api_name, api_description, stage_name, api_key, lambdas,
               cache_cluster_enabled, cache_cluster_size, method_settings=None):
    if not _api_exists(awsclient, api_name):
        if os.path.isfile(SWAGGER_FILE):
            _import_from_swagger(awsclient, api_name, api_description, stage_name, lambdas)
        else:
            print('No swagger file (%s) found' % SWAGGER_FILE)
        api = _api_by_name(awsclient, api_name)
        if api is not None:
            _ensure_lambdas_permissions(awsclient, lambdas, api)
            _create_deployment(awsclient, api_name, stage_name,
                               cache_cluster_enabled, cache_cluster_size)
            _update_stage(awsclient, api['id'], stage_name, method_settings)
            _wire_api_key(awsclient, api_name, api_key, stage_name)
        else:
            print('API name unknown')
    else:
        if os.path.isfile(SWAGGER_FILE):
            _update_from_swagger(awsclient, api_name, api_description, stage_name, lambdas)
        else:
            _update_api()
        api = _api_by_name(awsclient, api_name)
        if api is not None:
            _ensure_lambdas_permissions(awsclient, lambdas, api)
            _create_deployment(awsclient, api_name, stage_name,
                               cache_cluster_enabled, cache_cluster_size)
            _update_stage(awsclient, api['id'], stage_name, method_settings)
        else:
            print('API name unknown')
|
Deploy API Gateway to AWS cloud.
|
6,655
|
def delete_api(awsclient, api_name):
    _sleep()
    client_api = awsclient.get_client('apigateway')
    print('deleting api: %s' % api_name)
    api = _api_by_name(awsclient, api_name)
    if api is not None:
        print(json2table(api))
        response = client_api.delete_rest_api(restApiId=api['id'])
        print(json2table(response))
    else:
        print('API name unknown')
|
Delete the API.
|
6,656
|
def create_api_key(awsclient, api_name, api_key_name):
    _sleep()
    client_api = awsclient.get_client('apigateway')
    print('create api key: %s' % api_key_name)
    response = client_api.create_api_key(name=api_key_name,
                                         description='Created for ' + api_name,
                                         enabled=True)
    print('Add this api key \'%s\' to your api.conf' % response['id'])
    return response['id']
|
Create a new API key as reference for api.conf.
|
6,657
|
def delete_api_key(awsclient, api_key):
    _sleep()
    client_api = awsclient.get_client('apigateway')
    print('delete api key: %s' % api_key)
    response = client_api.delete_api_key(apiKey=api_key)
    print(json2table(response))
|
Remove API key.
|
6,658
|
def list_api_keys(awsclient):
    _sleep()
    client_api = awsclient.get_client('apigateway')
    print('listing api keys')
    response = client_api.get_api_keys()['items']
    for item in response:
        print(json2table(item))
|
Print the defined API keys.
|
6,659
|
def deploy_custom_domain(awsclient, api_name, api_target_stage, api_base_path,
                         domain_name, route_53_record, cert_name, cert_arn,
                         hosted_zone_id, ensure_cname):
    api_base_path = _basepath_to_string_if_null(api_base_path)
    api = _api_by_name(awsclient, api_name)
    if not api:
        print("Api %s does not exist, aborting..." % api_name)
        return 1
    domain = _custom_domain_name_exists(awsclient, domain_name)
    if not domain:
        response = _create_custom_domain(awsclient, domain_name, cert_name, cert_arn)
    else:
        response = _update_custom_domain(awsclient, domain_name, cert_name, cert_arn)
    cloudfront_distribution = response['distributionDomainName']
    if _base_path_mapping_exists(awsclient, domain_name, api_base_path):
        _ensure_correct_base_path_mapping(awsclient, domain_name, api_base_path,
                                          api['id'], api_target_stage)
    else:
        _create_base_path_mapping(awsclient, domain_name, api_base_path,
                                  api_target_stage, api['id'])
    if ensure_cname:
        record_exists, record_correct = _record_exists_and_correct(
            awsclient, hosted_zone_id, route_53_record, cloudfront_distribution)
        if record_correct:
            # the original format strings were truncated; '%s -> %s' is an assumption
            print('Route53 record correctly set: %s -> %s'
                  % (route_53_record, cloudfront_distribution))
        else:
            _ensure_correct_route_53_record(awsclient, hosted_zone_id,
                                            record_name=route_53_record,
                                            record_value=cloudfront_distribution)
            print('Route53 record set: %s -> %s'
                  % (route_53_record, cloudfront_distribution))
    else:
        print('Skipping creating and checking DNS record')
    return 0
|
Add custom domain to your API.
|
6,660
|
def get_lambdas(awsclient, config, add_arn=False):
    if 'lambda' in config:
        client_lambda = awsclient.get_client('lambda')
        lambda_entries = config['lambda'].get('entries', [])
        lmbdas = []
        for lambda_entry in lambda_entries:
            lmbda = {
                'name': lambda_entry.get('name', None),
                'alias': lambda_entry.get('alias', None),
                'swagger_ref': lambda_entry.get('swaggerRef', None)
            }
            if add_arn:
                _sleep()
                response_lambda = client_lambda.get_function(FunctionName=lmbda['name'])
                lmbda['arn'] = response_lambda['Configuration']['FunctionArn']
            lmbdas.append(lmbda)
        return lmbdas
    else:
        return []
|
Get the list of lambda functions.
|
6,661
|
def _update_stage(awsclient, api_id, stage_name, method_settings):
    client_api = awsclient.get_client('apigateway')
    operations = _convert_method_settings_into_operations(method_settings)
    if operations:
        print('update method settings for stage')
        _sleep()
        response = client_api.update_stage(restApiId=api_id, stageName=stage_name,
                                           patchOperations=operations)
|
Helper to apply method_settings to stage
|
6,662
|
def _convert_method_settings_into_operations(method_settings=None):
    operations = []
    if method_settings:
        for method in method_settings.keys():
            for key, value in method_settings[method].items():
                if isinstance(value, bool):
                    value = 'true' if value else 'false'
                operations.append({'op': 'replace',
                                   'path': method + _resolve_key(key),
                                   'value': value})
    return operations
|
Helper to handle the conversion of method_settings to operations
|
6,663
|
def generate_settings():
    conf_file = os.path.join(os.path.dirname(base_settings.__file__), 'example', 'conf.py')
    conf_template = open(conf_file).read()
    default_url = 'http://salmon.example.com'
    site_url = raw_input("What will be the URL for Salmon? [{0}]".format(default_url))
    site_url = site_url or default_url
    secret_key = base64.b64encode(os.urandom(KEY_LENGTH))
    api_key = base64.b64encode(os.urandom(KEY_LENGTH))
    output = conf_template.format(api_key=api_key, secret_key=secret_key, site_url=site_url)
    return output
|
This command is run when default_path doesn't exist or init is run, and returns a string representing the default data to put into their settings file.
|
6,664
|
def configure_app(**kwargs):
    sys_args = sys.argv
    args, command, command_args = parse_args(sys_args[1:])
    parser = OptionParser()
    parser.add_option('--config', metavar='CONFIG')
    (options, logan_args) = parser.parse_args(args)
    config_path = options.config
    logan_configure(config_path=config_path, **kwargs)
|
Builds up the settings using the same method as logan
|
6,665
|
def _reset_changes(self):
    self._original = {}
    if self.last_updated is not None:
        self._original['last_updated'] = self.last_updated
|
Stores current values for comparison later
|
6,666
|
def whisper_filename(self):
    source_name = self.source_id and self.source.name or ''
    return get_valid_filename("{0}__{1}.wsp".format(source_name, self.name))
|
Build a file path to the Whisper database
|
6,667
|
def get_value_display(self):
    if self.display_as == 'percentage':
        return '{0}%'.format(self.latest_value)
    if self.display_as == 'boolean':
        return bool(self.latest_value)
    if self.display_as == 'byte':
        return defaultfilters.filesizeformat(self.latest_value)
    if self.display_as == 'second':
        return time.strftime('%H:%M:%S', time.gmtime(self.latest_value))
    return self.latest_value
|
Human friendly value output
|
6,668
|
def time_between_updates(self):
    if 'last_updated' not in self._original:
        return 0
    last_update = self._original['last_updated']
    this_update = self.last_updated
    return this_update - last_update
|
Time between current last_updated and previous last_updated
|
6,669
|
def do_counter_conversion(self):
    if self.is_counter:
        if self._previous_counter_value is None:
            prev_value = self.latest_value
        else:
            prev_value = self._previous_counter_value
        self._previous_counter_value = self.latest_value
        self.latest_value = self.latest_value - prev_value
|
Update latest value to the diff between it and the previous value
|
6,670
|
def replace_variable(self, variable):
    if variable == 'x':
        return self.value
    if variable == 't':
        return self.timedelta
    # '%' formatting, not a logging-style second argument as in the original
    raise ValueError("Invalid variable %s" % variable)
|
Substitute variables with numeric values
|
6,671
|
def result(self):
    return self.eval_(ast.parse(self.expr).body[0].value)
|
Evaluate expression and return result
|
6,672
|
def email_login(request, *, email, **kwargs):
    _u, created = auth.get_user_model()._default_manager.get_or_create(email=email)
    user = auth.authenticate(request, email=email)
    if user and user.is_active:
        auth.login(request, user)
        return user, created
    return None, None
|
Given a request, an email, and optionally some additional data, ensure that a user with the email address exists, and authenticate and log them in right away if the user is active.
|
6,673
|
def dashboard(request):
    sources = (models.Source.objects.all()
               .prefetch_related('metric_set').order_by('name'))
    metrics = SortedDict([(src, src.metric_set.all()) for src in sources])
    no_source_metrics = models.Metric.objects.filter(source__isnull=True)
    if no_source_metrics:
        metrics[''] = no_source_metrics
    if request.META.get('HTTP_X_PJAX', False):
        parent_template = 'pjax.html'
    else:
        parent_template = 'base.html'
    return render(request, 'metrics/dashboard.html',
                  {'source_metrics': metrics, 'parent_template': parent_template})
|
Shows the latest results for each source
|
6,674
|
def _create(self):
    if not os.path.exists(settings.SALMON_WHISPER_DB_PATH):
        os.makedirs(settings.SALMON_WHISPER_DB_PATH)
    archives = [whisper.parseRetentionDef(retentionDef)
                for retentionDef in settings.ARCHIVES.split(",")]
    whisper.create(self.path, archives, xFilesFactor=settings.XFILEFACTOR,
                   aggregationMethod=settings.AGGREGATION_METHOD)
|
Create the Whisper file on disk
|
6,675
|
def _update(self, datapoints):
    if len(datapoints) == 1:
        timestamp, value = datapoints[0]
        whisper.update(self.path, value, timestamp)
    else:
        whisper.update_many(self.path, datapoints)
|
This method stores the datapoints in the current database.
|
6,676
|
def fetch(self, from_time, until_time=None):
    until_time = until_time or datetime.now()
    time_info, values = whisper.fetch(self.path,
                                      from_time.strftime('%s'),
                                      until_time.strftime('%s'))
    start_time, end_time, step = time_info
    current = start_time
    times = []
    while current <= end_time:
        times.append(current)
        current += step
    return zip(times, values)
|
This method fetches data from the database for the given period.
|
6,677
|
def CMN(self, params):
    Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(low_registers=(Ra, Rb))
    def CMN_func():
        self.set_NZCV_flags(self.register[Ra], self.register[Rb],
                            self.register[Ra] + self.register[Rb], 'add')
    return CMN_func
|
CMN Ra, Rb
|
6,678
|
def MULS(self, params):
    Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(low_registers=(Ra, Rb, Rc))
    if Ra != Rc:
        raise iarm.exceptions.RuleError(
            "Third parameter {} is not the same as the first parameter {}".format(Rc, Ra))
    def MULS_func():
        self.register[Ra] = self.register[Rb] * self.register[Rc]
        self.set_NZ_flags(self.register[Ra])
    return MULS_func
|
MULS Ra, Rb, Ra
|
6,679
|
def initialize(template, service_name, environment='dev'):
    template.SERVICE_NAME = os.getenv('SERVICE_NAME', service_name)
    template.SERVICE_ENVIRONMENT = os.getenv('ENV', environment).lower()
    template.DEFAULT_TAGS = troposphere.Tags(**{
        'service-name': template.SERVICE_NAME,
        'environment': template.SERVICE_ENVIRONMENT
    })
    template.add_version("2010-09-09")
    template.add_description("Stack for %s microservice" % service_name)
|
Adds SERVICE_NAME, SERVICE_ENVIRONMENT, and DEFAULT_TAGS to the template.
|
6,680
|
def get_dist(dist_name, lookup_dirs=None):
    req = pkg_resources.Requirement.parse(dist_name)
    if lookup_dirs is None:
        working_set = pkg_resources.WorkingSet()
    else:
        working_set = pkg_resources.WorkingSet(lookup_dirs)
    return working_set.find(req)
|
Get dist for installed version of dist_name avoiding pkg_resources cache
|
6,681
|
def _load_hooks(path):
    module = imp.load_source(os.path.splitext(os.path.basename(path))[0], path)
    if not check_hook_mechanism_is_intact(module):
        log.debug('No valid hook configuration: \'%s\'. Not using hooks!', path)
    else:
        if check_register_present(module):
            module.register()
    return module
|
Load hook module and register signals.
|
6,682
|
def main(doc, tool, dispatch_only=None):
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    try:
        arguments = docopt(doc, sys.argv[1:])
        command = get_command(arguments)
        verbose = arguments.pop('--verbose', False)
        if verbose:
            logging_config['loggers']['gcdt']['level'] = 'DEBUG'
        dictConfig(logging_config)
        if dispatch_only is None:
            dispatch_only = ['version']
        assert tool in ['gcdt', 'kumo', 'tenkai', 'ramuda', 'yugen']
        if command in dispatch_only:
            check_gcdt_update()
            return cmd.dispatch(arguments)
        else:
            env = get_env()
            if not env:
                log.error('\'ENV\' environment variable not set!')
                return 1
            awsclient = AWSClient(botocore.session.get_session())
            return lifecycle(awsclient, env, tool, command, arguments)
    except GracefulExit as e:
        log.info('Received %s signal - exiting command \'%s %s\'', str(e), tool, command)
        return 1
|
gcdt tools parametrized main function to initiate gcdt lifecycle.
|
6,683
|
def MOV(self, params):
    Rx, Ry = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(any_registers=(Rx, Ry))
    def MOV_func():
        self.register[Rx] = self.register[Ry]
    return MOV_func
|
MOV Rx, Ry; MOV PC, Ry
|
6,684
|
def MRS(self, params):
    Rj, Rspecial = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(LR_or_general_purpose_registers=(Rj,),
                         special_registers=(Rspecial,))
    def MRS_func():
        if Rspecial == 'PSR':
            self.register[Rj] = (self.register['APSR'] |
                                 self.register['IPSR'] |
                                 self.register['EPSR'])
        else:
            self.register[Rj] = self.register[Rspecial]
    return MRS_func
|
MRS Rj, Rspecial
|
6,685
|
def MSR(self, params):
    Rspecial, Rj = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(LR_or_general_purpose_registers=(Rj,),
                         special_registers=(Rspecial,))
    def MSR_func():
        if Rspecial in ('PSR', 'APSR'):
            self.register['APSR'] = self.register[Rj]
        else:
            pass
    return MSR_func
|
MSR Rspecial, Rj
|
6,686
|
def MVNS(self, params):
    Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(low_registers=(Ra, Rb))
    def MVNS_func():
        self.register[Ra] = ~self.register[Rb]
        self.set_NZ_flags(self.register[Ra])
    return MVNS_func
|
MVNS Ra, Rb
|
6,687
|
def REV(self, params):
    Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(low_registers=(Ra, Rb))
    def REV_func():
        self.register[Ra] = (((self.register[Rb] & 0xFF000000) >> 24) |
                             ((self.register[Rb] & 0x00FF0000) >> 8) |
                             ((self.register[Rb] & 0x0000FF00) << 8) |
                             ((self.register[Rb] & 0x000000FF) << 24))
    return REV_func
|
REV Ra, Rb
|
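A small check of the byte swap above, taken directly from the expression in REV_func:

    # REV reverses byte order: 0x12345678 -> 0x78563412
    x = 0x12345678
    assert (((x & 0xFF000000) >> 24) | ((x & 0x00FF0000) >> 8) |
            ((x & 0x0000FF00) << 8) | ((x & 0x000000FF) << 24)) == 0x78563412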
6,688
|
def REV16(self, params):
    Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(low_registers=(Ra, Rb))
    def REV16_func():
        self.register[Ra] = (((self.register[Rb] & 0xFF00FF00) >> 8) |
                             ((self.register[Rb] & 0x00FF00FF) << 8))
    return REV16_func
|
REV16 Ra, Rb
|
6,689
|
def SXTB(self, params):
    Ra, Rb = self.get_two_parameters(r'\s*([^\s,]*),\s*([^\s,]*)(,\s*[^\s,]*)*\s*', params)
    self.check_arguments(low_registers=(Ra, Rb))
    def SXTB_func():
        if self.register[Rb] & (1 << 7):
            self.register[Ra] = 0xFFFFFF00 + (self.register[Rb] & 0xFF)
        else:
            self.register[Ra] = (self.register[Rb] & 0xFF)
    return SXTB_func
|
SXTB Ra, Rb
|
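A worked example of the sign extension above (the register value is hypothetical):

    # With Rb = 0x80, bit 7 is set, so SXTB extends the sign into the upper bits:
    rb = 0x80
    ra = 0xFFFFFF00 + (rb & 0xFF) if rb & (1 << 7) else rb & 0xFF
    assert ra == 0xFFFFFF80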
6,690
|
def SXTH(self, params):
    Ra, Rb = self.get_two_parameters(r'\s*([^\s,]*),\s*([^\s,]*)(,\s*[^\s,]*)*\s*', params)
    self.check_arguments(low_registers=(Ra, Rb))
    def SXTH_func():
        if self.register[Rb] & (1 << 15):
            self.register[Ra] = 0xFFFF0000 + (self.register[Rb] & 0xFFFF)
        else:
            self.register[Ra] = (self.register[Rb] & 0xFFFF)
    return SXTH_func
|
SXTH Ra, Rb
|
6,691
|
def UXTB(self, params):
    Ra, Rb = self.get_two_parameters(r'\s*([^\s,]*),\s*([^\s,]*)(,\s*[^\s,]*)*\s*', params)
    self.check_arguments(low_registers=(Ra, Rb))
    def UXTB_func():
        self.register[Ra] = (self.register[Rb] & 0xFF)
    return UXTB_func
|
UXTB Ra, Rb
|
6,692
|
def UXTH(self, params):
    Ra, Rb = self.get_two_parameters(r'\s*([^\s,]*),\s*([^\s,]*)(,\s*[^\s,]*)*\s*', params)
    self.check_arguments(low_registers=(Ra, Rb))
    def UXTH_func():
        self.register[Ra] = (self.register[Rb] & 0xFFFF)
    return UXTH_func
|
UXTH Ra, Rb
|
6,693
|
def _get_event_source_obj(awsclient, evt_source):
    event_source_map = {
        'dynamodb': event_source.dynamodb_stream.DynamoDBStreamEventSource,
        'kinesis': event_source.kinesis.KinesisEventSource,
        's3': event_source.s3.S3EventSource,
        'sns': event_source.sns.SNSEventSource,
        'events': event_source.cloudwatch.CloudWatchEventSource,
        'cloudfront': event_source.cloudfront.CloudFrontEventSource,
        'cloudwatch_logs': event_source.cloudwatch_logs.CloudWatchLogsEventSource,
    }
    evt_type = _get_event_type(evt_source)
    event_source_func = event_source_map.get(evt_type, None)
    if not event_source_func:  # the original tested the module name, not the lookup result
        raise ValueError('Unknown event source: {0}'.format(evt_source['arn']))
    return event_source_func(awsclient, evt_source)
|
Given an awsclient and an event_source dictionary item, create an event_source object of the appropriate event type to schedule this event, and return the object.
|
6,694
|
def unwire(awsclient, events, lambda_name, alias_name=ALIAS_NAME):
    if not lambda_exists(awsclient, lambda_name):
        log.error(colored.red('The function you try to wire up doesn\'t ' +
                              'exist... Bailing out...'))
        return 1
    client_lambda = awsclient.get_client('lambda')
    lambda_function = client_lambda.get_function(FunctionName=lambda_name)
    lambda_arn = client_lambda.get_alias(FunctionName=lambda_name,
                                         Name=alias_name)['AliasArn']
    log.info('UN-wiring lambda_arn %s ' % lambda_arn)
    if lambda_function is not None:
        for event in events:
            evt_source = event['event_source']
            _remove_event_source(awsclient, evt_source, lambda_arn)
    return 0
|
Unwire a list of events from an AWS Lambda function.
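A minimal usage sketch, assuming `awsclient` is already configured; the event list structure follows what the loop above reads:

# Hypothetical event list; each item must carry an 'event_source' dict.
events = [
    {'event_source': {'arn': 'arn:aws:s3:::my-bucket',
                      'events': ['s3:ObjectCreated:*']}},
]
exit_code = unwire(awsclient, events, 'my-lambda-function')  # 0 on success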
|
6,695
|
def wire_deprecated(awsclient, function_name, s3_event_sources=None,
                    time_event_sources=None, alias_name=ALIAS_NAME):
    if not lambda_exists(awsclient, function_name):
        log.error(colored.red('The function you try to wire up doesn\'t ' +
                              'exist... Bailing out...'))
        return 1
    client_lambda = awsclient.get_client('lambda')
    lambda_function = client_lambda.get_function(FunctionName=function_name)
    lambda_arn = client_lambda.get_alias(FunctionName=function_name,
                                         Name=alias_name)['AliasArn']
    log.info('wiring lambda_arn %s ...' % lambda_arn)
    if lambda_function is not None:
        s3_events_ensure_exists, s3_events_ensure_absent = \
            filter_events_ensure(s3_event_sources)
        cloudwatch_events_ensure_exists, cloudwatch_events_ensure_absent = \
            filter_events_ensure(time_event_sources)
        for s3_event_source in s3_events_ensure_absent:
            _ensure_s3_event(awsclient, s3_event_source, function_name,
                             alias_name, lambda_arn, s3_event_source['ensure'])
        for s3_event_source in s3_events_ensure_exists:
            _ensure_s3_event(awsclient, s3_event_source, function_name,
                             alias_name, lambda_arn, s3_event_source['ensure'])
        for time_event in cloudwatch_events_ensure_absent:
            _ensure_cloudwatch_event(awsclient, time_event, function_name,
                                     alias_name, lambda_arn, time_event['ensure'])
        for time_event in cloudwatch_events_ensure_exists:
            _ensure_cloudwatch_event(awsclient, time_event, function_name,
                                     alias_name, lambda_arn, time_event['ensure'])
    return 0
|
Deprecated! Please use wire!
|
6,696
|
def unwire_deprecated(awsclient, function_name, s3_event_sources=None,
                      time_event_sources=None, alias_name=ALIAS_NAME):
    if not lambda_exists(awsclient, function_name):
        log.error(colored.red('The function you try to wire up doesn\'t ' +
                              'exist... Bailing out...'))
        return 1
    client_lambda = awsclient.get_client('lambda')
    lambda_function = client_lambda.get_function(FunctionName=function_name)
    lambda_arn = client_lambda.get_alias(FunctionName=function_name,
                                         Name=alias_name)['AliasArn']
    log.info('UN-wiring lambda_arn %s ' % lambda_arn)
    policies = None
    try:
        result = client_lambda.get_policy(FunctionName=function_name,
                                          Qualifier=alias_name)
        policies = json.loads(result['Policy'])
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            log.warn('Permission policies not found')
        else:
            raise e
    if lambda_function is not None:
        # Remove S3 permissions and bucket notifications
        if policies:
            for statement in policies['Statement']:
                if statement['Principal']['Service'] == 's3.amazonaws.com':
                    source_bucket = get_bucket_from_s3_arn(
                        statement['Condition']['ArnLike']['AWS:SourceArn'])
                    log.info('\tRemoving S3 permission {} invoking {}'.format(
                        source_bucket, lambda_arn))
                    _remove_permission(awsclient, function_name,
                                       statement['Sid'], alias_name)
                    log.info('\tRemoving All S3 events {} invoking {}'.format(
                        source_bucket, lambda_arn))
                    _remove_events_from_s3_bucket(awsclient, source_bucket,
                                                  lambda_arn)
        for s3_event_source in s3_event_sources:
            bucket_name = s3_event_source.get('bucket')
            _remove_events_from_s3_bucket(awsclient, bucket_name, lambda_arn)
        # Remove CloudWatch permissions and rules
        if policies:
            for statement in policies['Statement']:
                if statement['Principal']['Service'] == 'events.amazonaws.com':
                    rule_name = get_rule_name_from_event_arn(
                        statement['Condition']['ArnLike']['AWS:SourceArn'])
                    log.info('\tRemoving Cloudwatch permission {} invoking {}'.format(
                        rule_name, lambda_arn))
                    _remove_permission(awsclient, function_name,
                                       statement['Sid'], alias_name)
                    log.info('\tRemoving Cloudwatch rule {} invoking {}'.format(
                        rule_name, lambda_arn))
                    _remove_cloudwatch_rule_event(awsclient, rule_name, lambda_arn)
        for time_event in time_event_sources:
            rule_name = time_event.get('ruleName')
            _remove_cloudwatch_rule_event(awsclient, rule_name, lambda_arn)
    return 0
|
Deprecated! Please use unwire!
|
6,697
|
def _lambda_add_s3_event_source(awsclient, arn, event, bucket, prefix, suffix):
    json_data = {
        'LambdaFunctionConfigurations': [{
            'LambdaFunctionArn': arn,
            'Id': str(uuid.uuid1()),
            'Events': [event]
        }]
    }
    filter_rules = build_filter_rules(prefix, suffix)
    json_data['LambdaFunctionConfigurations'][0].update(
        {'Filter': {'Key': {'FilterRules': filter_rules}}})
    client_s3 = awsclient.get_client('s3')
    bucket_configurations = client_s3.get_bucket_notification_configuration(
        Bucket=bucket)
    bucket_configurations.pop('ResponseMetadata')
    if 'LambdaFunctionConfigurations' in bucket_configurations:
        bucket_configurations['LambdaFunctionConfigurations'].append(
            json_data['LambdaFunctionConfigurations'][0])
    else:
        bucket_configurations['LambdaFunctionConfigurations'] = \
            json_data['LambdaFunctionConfigurations']
    response = client_s3.put_bucket_notification_configuration(
        Bucket=bucket,
        NotificationConfiguration=bucket_configurations)
    return json2table(response)
|
Use only prefix OR suffix
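A hedged call sketch; all names below are hypothetical, and per the note above only one of prefix/suffix is passed:

_lambda_add_s3_event_source(
    awsclient,
    arn='arn:aws:lambda:eu-west-1:123456789012:function:my-func:ACTIVE',
    event='s3:ObjectCreated:*',
    bucket='my-bucket',
    prefix='incoming/',  # use only prefix OR suffix, not both
    suffix=None,
)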
|
6,698
|
def find_eigen(hint=None):
    try:
        import pkgconfig
        if pkgconfig.installed('eigen3', '>3.0.0'):
            return pkgconfig.parse('eigen3')['include_dirs'][0]
    except Exception:
        # pkgconfig is unavailable or the probe failed; fall back to searching
        pass
    # Copy the hint to avoid mutating the caller's list (the original
    # appended directly to `hint` via +=).
    search_dirs = [] if hint is None else list(hint)
    search_dirs += [
        "/usr/local/include/eigen3",
        "/usr/local/homebrew/include/eigen3",
        "/opt/local/var/macports/software/eigen3",
        "/opt/local/include/eigen3",
        "/usr/include/eigen3",
        "/usr/include/local",
        "/usr/include",
    ]
    for d in search_dirs:
        path = os.path.join(d, "Eigen", "Dense")
        if os.path.exists(path):
            # Read the version out of the Eigen macros header
            vf = os.path.join(d, "Eigen", "src", "Core", "util", "Macros.h")
            if not os.path.exists(vf):
                continue
            src = open(vf, "r").read()
            v1 = re.findall("#define EIGEN_WORLD_VERSION (.+)", src)
            v2 = re.findall("#define EIGEN_MAJOR_VERSION (.+)", src)
            v3 = re.findall("#define EIGEN_MINOR_VERSION (.+)", src)
            if not len(v1) or not len(v2) or not len(v3):
                continue
            v = "{0}.{1}.{2}".format(v1[0], v2[0], v3[0])
            print("Found Eigen version {0} in: {1}".format(v, d))
            return d
    return None
|
Try to find the Eigen library. If successful, the include directory is returned.
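A usage sketch; the hint path is hypothetical:

include_dir = find_eigen(hint=['/opt/vendor/eigen3'])
if include_dir is None:
    raise RuntimeError('Eigen headers not found; install them or pass a hint')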
|
6,699
|
def check_and_format_logs_params(start, end, tail):
    def _decode_duration_type(duration_type):
        durations = {'m': 'minutes', 'h': 'hours', 'd': 'days', 'w': 'weeks'}
        return durations[duration_type]

    if not start:
        if tail:
            start_dt = maya.now().subtract(seconds=300).datetime(naive=True)
        else:
            start_dt = maya.now().subtract(days=1).datetime(naive=True)
    elif start and start[-1] in ['m', 'h', 'd', 'w']:
        value = int(start[:-1])
        start_dt = maya.now().subtract(
            **{_decode_duration_type(start[-1]): value}).datetime(naive=True)
    elif start:
        start_dt = maya.parse(start).datetime(naive=True)
    if end and end[-1] in ['m', 'h', 'd', 'w']:
        value = int(end[:-1])
        end_dt = maya.now().subtract(
            **{_decode_duration_type(end[-1]): value}).datetime(naive=True)
    elif end:
        end_dt = maya.parse(end).datetime(naive=True)
    else:
        end_dt = None
    return start_dt, end_dt
|
Helper to read the params for the logs command
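A sketch of the accepted argument forms, based on the branches above:

# Relative durations: '<n>m|h|d|w' counted back from now.
start_dt, end_dt = check_and_format_logs_params('2h', '5m', False)
# Defaults: last 300 seconds when tailing, otherwise last day; end open-ended.
start_dt, end_dt = check_and_format_logs_params(None, None, True)
assert end_dt is None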
|