idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
13,600
def setup ( level = logging . WARNING , outputs = [ output . STDERR ] , program_name = None , capture_warnings = True ) : root_logger = logging . getLogger ( None ) for handler in list ( root_logger . handlers ) : root_logger . removeHandler ( handler ) for out in outputs : if isinstance ( out , str ) : out = output . preconfigured . get ( out ) if out is None : raise RuntimeError ( "Output {} is not available" . format ( out ) ) out . add_to_logger ( root_logger ) root_logger . setLevel ( level ) program_logger = logging . getLogger ( program_name ) def logging_excepthook ( exc_type , value , tb ) : program_logger . critical ( "" . join ( traceback . format_exception ( exc_type , value , tb ) ) ) sys . excepthook = logging_excepthook if capture_warnings : logging . captureWarnings ( True )
Setup Python logging .
13,601
def set_default_log_levels(loggers_and_log_levels):
    """Set default log levels for some loggers."""
    for logger_name, log_level in loggers_and_log_levels:
        # Accept case-insensitive string levels such as "debug".
        if isinstance(log_level, str):
            log_level = log_level.upper()
        logging.getLogger(logger_name).setLevel(log_level)
Set default log levels for some loggers .
13,602
def create_swag_from_ctx(ctx):
    """Creates SWAG client from the current context."""
    # Build backend-specific options based on the context's backend type.
    if ctx.type == 'file':
        swag_opts = {
            'swag.type': 'file',
            'swag.data_dir': ctx.data_dir,
            'swag.data_file': ctx.data_file,
        }
    elif ctx.type == 's3':
        swag_opts = {
            'swag.type': 's3',
            'swag.bucket_name': ctx.bucket_name,
            'swag.data_file': ctx.data_file,
            'swag.region': ctx.region,
        }
    elif ctx.type == 'dynamodb':
        swag_opts = {
            'swag.type': 'dynamodb',
            'swag.region': ctx.region,
        }
    else:
        swag_opts = {}
    return SWAGManager(**parse_swag_config_options(swag_opts))
Creates SWAG client from the current context .
13,603
def file(ctx, data_dir, data_file):
    """Use the File SWAG Backend.

    Fills in the file-backend options on the context from the command-line
    arguments when the context does not already carry them.
    """
    # BUG FIX: the original guarded on ctx.file, an attribute that is never
    # set; mirror the s3 command and guard on ctx.data_file instead.
    if not ctx.data_file:
        ctx.data_file = data_file
    if not ctx.data_dir:
        ctx.data_dir = data_dir
    ctx.type = 'file'
Use the File SWAG Backend
13,604
def s3(ctx, bucket_name, data_file, region):
    """Use the S3 SWAG backend."""
    # Command-line values only fill in options the context does not already carry.
    defaults = (('data_file', data_file), ('bucket_name', bucket_name), ('region', region))
    for attr, fallback in defaults:
        if not getattr(ctx, attr):
            setattr(ctx, attr, fallback)
    ctx.type = 's3'
Use the S3 SWAG backend .
13,605
def list(ctx):
    """List SWAG account info."""
    # Only the accounts namespace can currently be rendered as a table.
    if ctx.namespace != 'accounts':
        click.echo(click.style('Only account data is available for listing.', fg='red'))
        return
    swag = create_swag_from_ctx(ctx)
    rows = [[account['name'], account.get('id')] for account in swag.get_all()]
    click.echo(tabulate(rows, headers=["Account Name", "Account Number"]))
List SWAG account info .
13,606
def list_service(ctx, name):
    """Retrieve accounts pertaining to named service."""
    swag = create_swag_from_ctx(ctx)
    enabled_accounts = swag.get_service_enabled(name)
    rows = [[account['name'], account.get('id')] for account in enabled_accounts]
    click.echo(tabulate(rows, headers=["Account Name", "Account Number"]))
Retrieve accounts pertaining to named service .
13,607
def migrate(ctx, start_version, end_version):
    """Transition from one SWAG schema to another.

    Reads the data file, runs the schema migration, and writes the migrated
    data back in place. Only the file backend is supported.
    """
    if ctx.type == 'file':
        if ctx.data_file:
            file_path = ctx.data_file
        else:
            # BUG FIX: the fallback path must be built from the data directory;
            # the original joined ctx.data_file, which is empty on this branch
            # (the sibling `propagate` command uses ctx.data_dir here).
            file_path = os.path.join(ctx.data_dir, ctx.namespace + '.json')
        with open(file_path, 'r') as f:
            data = json.loads(f.read())
        data = run_migration(data, start_version, end_version)
        with open(file_path, 'w') as f:
            f.write(json.dumps(data))
Transition from one SWAG schema to another .
13,608
def propagate(ctx):
    """Transfers SWAG data from one backend to another."""
    items = []
    if ctx.type == 'file':
        file_path = ctx.data_file or os.path.join(ctx.data_dir, ctx.namespace + '.json')
        with open(file_path, 'r') as f:
            items = json.loads(f.read())
    swag = SWAGManager(**parse_swag_config_options({'swag.type': 'dynamodb'}))
    for item in items:
        # Throttle writes so the DynamoDB table is not overwhelmed.
        time.sleep(2)
        swag.create(item, dry_run=ctx.dry_run)
Transfers SWAG data from one backend to another
13,609
def create(ctx, data):
    """Create a new SWAG item."""
    swag = create_swag_from_ctx(ctx)
    accounts = json.loads(data.read())
    for account in accounts:
        swag.create(account, dry_run=ctx.dry_run)
Create a new SWAG item .
13,610
def deploy_service(ctx, path, name, regions, disabled):
    """Deploys a new service JSON to multiple accounts.

    NAME is the service name you wish to deploy.
    """
    enabled = not disabled
    swag = create_swag_from_ctx(ctx)
    accounts = swag.get_all(search_filter=path)
    log.debug('Searching for accounts. Found: {} JMESPath: `{}`'.format(len(accounts), path))
    for account in accounts:
        try:
            account_filter = "[?id=='{id}']".format(id=account['id'])
            if swag.get_service(name, search_filter=account_filter):
                # The service is already present on this account.
                continue
            log.info('Found an account to update. AccountName: {name} AccountNumber: {number}'.format(name=account['name'], number=account['id']))
            status = [{'enabled': enabled, 'region': region} for region in regions]
            account['services'].append({'name': name, 'status': status})
            swag.update(account, dry_run=ctx.dry_run)
        except InvalidSWAGDataException:
            log.warning('Found a data quality issue. AccountName: {name} AccountNumber: {number}'.format(name=account['name'], number=account['id']))
    log.info('Service has been deployed to all matching accounts.')
Deploys a new service JSON to multiple accounts . NAME is the service name you wish to deploy .
13,611
def seed_aws_data(ctx, data):
    """Seeds SWAG from a list of known AWS accounts."""
    swag = create_swag_from_ctx(ctx)
    for group, info in json.loads(data.read()).items():
        for account in info['accounts']:
            account_data = {
                'description': 'This is an AWS owned account used for {}'.format(group),
                'id': account['account_id'],
                'contacts': [],
                'owner': 'aws',
                'provider': 'aws',
                'sensitive': False,
                'email': 'support@amazon.com',
                'name': group + '-' + account['region'],
            }
            click.echo(click.style('Seeded Account. AccountName: {}'.format(account_data['name']), fg='green'))
            swag.create(account_data, dry_run=ctx.dry_run)
Seeds SWAG from a list of known AWS accounts .
13,612
def seed_aws_organization(ctx, owner):
    """Seeds SWAG from an AWS organization."""
    swag = create_swag_from_ctx(ctx)
    existing_ids = [account.get('id') for account in swag.get_all()]
    client = boto3.client('organizations')
    paginator = client.get_paginator('list_accounts')
    count = 0
    for page in paginator.paginate():
        for org_account in page['Accounts']:
            account_id = org_account['Id']
            if account_id in existing_ids:
                click.echo(click.style('Ignoring Duplicate Account. AccountId: {} already exists in SWAG'.format(account_id), fg='yellow'))
                continue
            # Suspended accounts are recorded as deprecated, everything else as created.
            status = 'deprecated' if org_account['Status'] == 'SUSPENDED' else 'created'
            data = {
                'id': account_id,
                'name': org_account['Name'],
                'description': 'Account imported from AWS organization.',
                'email': org_account['Email'],
                'owner': owner,
                'provider': 'aws',
                'contacts': [],
                'sensitive': False,
                'status': [{'region': 'all', 'status': status}],
            }
            click.echo(click.style('Seeded Account. AccountName: {}'.format(data['name']), fg='green'))
            count += 1
            swag.create(data, dry_run=ctx.dry_run)
    click.echo('Seeded {} accounts to SWAG.'.format(count))
Seeds SWAG from an AWS organization .
13,613
def load_file(client, bucket, data_file):
    """Tries to load JSON data from S3."""
    logger.debug('Loading item from s3. Bucket: {bucket} Key: {key}'.format(bucket=bucket, key=data_file))
    try:
        raw = _get_from_s3(client, bucket, data_file)
    except ClientError as ce:
        # A missing key simply means there is no data yet.
        if ce.response['Error']['Code'] == 'NoSuchKey':
            return {}
        raise ce
    if sys.version_info > (3,):
        raw = raw.decode('utf-8')
    return json.loads(raw)
Tries to load JSON data from S3 .
13,614
def save_file(client, bucket, data_file, items, dry_run=None):
    """Tries to write JSON data to data file in S3."""
    logger.debug('Writing {number_items} items to s3. Bucket: {bucket} Key: {key}'.format(number_items=len(items), bucket=bucket, key=data_file))
    if dry_run:
        # Dry runs log the intent but never touch S3.
        return None
    return _put_to_s3(client, bucket, data_file, json.dumps(items))
Tries to write JSON data to data file in S3 .
13,615
def create(self, item, dry_run=None):
    """Creates a new item in file."""
    logger.debug('Creating new item. Item: {item} Path: {data_file}'.format(item=item, data_file=self.data_file))
    # Read-modify-write cycle: load, append, save the whole file.
    existing = load_file(self.client, self.bucket_name, self.data_file)
    updated = append_item(self.namespace, self.version, item, existing)
    save_file(self.client, self.bucket_name, self.data_file, updated, dry_run=dry_run)
    return item
Creates a new item in file .
13,616
def health_check(self):
    """Uses head object to make sure the file exists in S3.

    Returns:
        bool: True when the object exists, False on any S3 client error.
    """
    logger.debug('Health Check on S3 file for: {namespace}'.format(namespace=self.namespace))
    try:
        self.client.head_object(Bucket=self.bucket_name, Key=self.data_file)
        return True
    except ClientError:
        logger.debug('Error encountered with S3. Assume unhealthy')
        # BUG FIX: previously fell through and implicitly returned None;
        # return an explicit False like the DynamoDB backend's health_check.
        return False
Uses head object to make sure the file exists in S3 .
13,617
def health_check(self):
    """Gets a single item to determine if Dynamo is functioning."""
    logger.debug('Health Check on Table: {namespace}'.format(namespace=self.namespace))
    try:
        # Any successful read implies the table is reachable.
        self.get_all()
    except ClientError as e:
        logger.exception(e)
        logger.error('Error encountered with Database. Assume unhealthy')
        return False
    return True
Gets a single item to determine if Dynamo is functioning .
13,618
def parse_swag_config_options(config):
    """Ensures that options passed to the backend are valid.

    Strips the 'swag.' (and legacy 'swag.backend.') prefixes from config keys
    and validates the result against the schema for the configured backend
    type (file is the default).
    """
    backend_prefix = 'swag.backend.'
    prefix = 'swag.'
    options = {}
    for key, val in config.items():
        # BUG FIX: the original sliced key[12:], which left the leading '.'
        # from the 13-character 'swag.backend.' prefix on the option name.
        if key.startswith(backend_prefix):
            options[key[len(backend_prefix):]] = val
        if key.startswith(prefix):
            options[key[len(prefix):]] = val
    backend_type = options.get('type')
    if backend_type == 's3':
        return S3OptionsSchema(strict=True).load(options).data
    elif backend_type == 'dynamodb':
        return DynamoDBOptionsSchema(strict=True).load(options).data
    else:
        return FileOptionsSchema(strict=True).load(options).data
Ensures that options passed to the backend are valid .
13,619
def deprecated(message):
    """Deprecated function decorator."""
    def wrapper(fn):
        def deprecated_method(*args, **kwargs):
            # stacklevel 2 points the warning at the caller, not this shim.
            warnings.warn(message, DeprecationWarning, 2)
            return fn(*args, **kwargs)
        # Preserve the wrapped function's name and prepend the deprecation note.
        deprecated_method.__name__ = fn.__name__
        deprecated_method.__doc__ = "%s\n\n%s" % (message, fn.__doc__)
        return deprecated_method
    return wrapper
Deprecated function decorator .
13,620
def is_sub_dict(sub_dict, dictionary):
    """Legacy filter for determining if a given dict is present."""
    for key, expected in sub_dict.items():
        if key not in dictionary:
            return False
        # Exact-type check (not isinstance) to match the legacy behavior.
        if type(expected) is dict:
            if not is_sub_dict(expected, dictionary[key]):
                return False
        elif expected != dictionary[key]:
            return False
    return True
Legacy filter for determining if a given dict is present .
13,621
def get_by_name(account_name, bucket, region='us-west-2', json_path='accounts.json', alias=None):
    """Given an account name attempts to retrieve associated account info."""
    for account in get_all_accounts(bucket, region, json_path)['accounts']:
        # Only AWS-type accounts are considered.
        if 'aws' not in account['type']:
            continue
        if account['name'] == account_name:
            return account
        if alias and account_name in account['alias']:
            # Fall back to matching any of the account's aliases.
            return account
Given an account name attempts to retrieve associated account info .
13,622
def get_all_accounts(bucket, region='us-west-2', json_path='accounts.json', **filters):
    """Fetches all the accounts from SWAG."""
    swag_opts = {
        'swag.type': 's3',
        'swag.bucket_name': bucket,
        'swag.bucket_region': region,
        'swag.data_file': json_path,
        'swag.schema_version': 1,
    }
    swag = SWAGManager(**parse_swag_config_options(swag_opts))
    all_accounts = swag.get_all()['accounts']
    # Keep only the accounts whose fields match every supplied filter.
    matching = [account for account in all_accounts if is_sub_dict(filters, account)]
    return {'accounts': matching}
Fetches all the accounts from SWAG .
13,623
def load_file(data_file):
    """Tries to load JSON from data file."""
    try:
        with open(data_file, 'r', encoding='utf-8') as f:
            contents = f.read()
        return json.loads(contents)
    except JSONDecodeError:
        # Treat unparsable (e.g. empty) files as having no items.
        return []
Tries to load JSON from data file .
13,624
def save_file(data_file, data, dry_run=None):
    """Writes JSON data to data file."""
    if dry_run:
        # Dry runs never touch the filesystem.
        return
    serialized = json.dumps(data)
    if not sys.version_info > (3, 0):
        # Python 2 returns bytes from dumps; write text for consistency.
        serialized = serialized.decode('utf-8')
    with open(data_file, 'w', encoding='utf-8') as f:
        f.write(serialized)
Writes JSON data to data file .
13,625
def health_check(self):
    """Checks to make sure the file is there."""
    logger.debug('Health Check on file for: {namespace}'.format(namespace=self.namespace))
    # Healthy as long as the backing data file exists on disk.
    file_present = os.path.isfile(self.data_file)
    return file_present
Checks to make sure the file is there .
13,626
def configure(self, *args, **kwargs):
    """Configures a SWAG manager. Overrides existing configuration."""
    self.version = kwargs['schema_version']
    self.namespace = kwargs['namespace']
    # Resolve the backend class by its declared type and instantiate it;
    # the backend still receives schema_context, which is popped afterwards.
    backend_factory = get(kwargs['type'])
    self.backend = backend_factory(*args, **kwargs)
    self.context = kwargs.pop('schema_context', {})
Configures a SWAG manager . Overrides existing configuration .
13,627
def create(self, item, dry_run=None):
    """Create a new item in backend."""
    # Validate against the configured schema version before persisting.
    validated = validate(item, version=self.version, context=self.context)
    return self.backend.create(validated, dry_run=dry_run)
Create a new item in backend .
13,628
def delete(self, item, dry_run=None):
    """Delete an item in backend."""
    # Deletion is delegated straight to the configured backend.
    result = self.backend.delete(item, dry_run=dry_run)
    return result
Delete an item in backend .
13,629
def update(self, item, dry_run=None):
    """Update an item in backend."""
    # Validate against the configured schema version before persisting.
    validated = validate(item, version=self.version, context=self.context)
    return self.backend.update(validated, dry_run=dry_run)
Update an item in backend .
13,630
def get_all(self, search_filter=None):
    """Fetch all data from backend."""
    items = self.backend.get_all()
    if not items:
        # Preserve the legacy v1 envelope when there is nothing to return.
        return {self.namespace: []} if self.version == 1 else []
    if search_filter:
        items = jmespath.search(search_filter, items)
    return items
Fetch all data from backend .
13,631
def get_service_enabled(self, name, accounts_list=None, search_filter=None, region=None):
    """Get a list of accounts where a service has been enabled."""
    accounts = accounts_list if accounts_list else self.get_all(search_filter=search_filter)
    if self.version == 1:
        # v1 wraps the account list in an 'accounts' envelope.
        accounts = accounts['accounts']
    enabled = []
    for account in accounts:
        if self.version == 1:
            account_filter = "accounts[?id=='{id}']".format(id=account['id'])
        else:
            account_filter = "[?id=='{id}']".format(id=account['id'])
        service = self.get_service(name, search_filter=account_filter)
        if self.version == 1:
            if service:
                service = service['enabled']
        else:
            # For v2 the service counts as enabled when any (region-matching)
            # status entry is enabled.
            if region:
                service_filter = "status[?(region=='{region}' || region=='all') && enabled]".format(region=region)
            else:
                service_filter = "status[?enabled]"
            service = jmespath.search(service_filter, service)
        if service:
            enabled.append(account)
    return enabled
Get a list of accounts where a service has been enabled .
13,632
def get_service(self, name, search_filter):
    """Fetch service metadata."""
    account = self.get(search_filter)
    if self.version == 1:
        # v1 keeps services as a mapping under the 'service' key.
        return jmespath.search("service.{name}".format(name=name), account)
    matches = jmespath.search("services[?name=='{}']".format(name), account)
    return one(matches)
Fetch service metadata .
13,633
def get_service_name(self, name, search_filter):
    """Fetch account name as referenced by a particular service."""
    service_filter = "services[?name=='{}'].metadata.name".format(name)
    matches = jmespath.search(service_filter, self.get(search_filter))
    return one(matches)
Fetch account name as referenced by a particular service .
13,634
def get_by_name(self, name, alias=None):
    """Fetch all accounts with name specified, optionally including aliases."""
    search_filter = "[?name=='{}']".format(name)
    if alias:
        if self.version == 1:
            # v1 stores aliases under 'alias' inside the accounts envelope.
            search_filter = "accounts[?name=='{name}' || contains(alias, '{name}')]".format(name=name)
        elif self.version == 2:
            search_filter = "[?name=='{name}' || contains(aliases, '{name}')]".format(name=name)
    return self.get_all(search_filter)
Fetch all accounts with name specified optionally include aliases .
13,635
def run_migration(data, version_start, version_end):
    """Runs migration against a data set."""
    items = []
    if version_start == 1 and version_end == 2:
        items = [v2.upgrade(item) for item in data['accounts']]
    if version_start == 2 and version_end == 1:
        items = [v2.downgrade(item) for item in data]
        # v1 data is wrapped in an 'accounts' envelope.
        items = {'accounts': items}
    return items
Runs migration against a data set .
13,636
def validate_type(self, data):
    """Performs field validation against the schema context.

    Applies when allowed values have been provided to SWAGManager via the
    swag.schema_context config object.
    """
    for field in ('type', 'environment', 'owner'):
        value = data.get(field)
        allowed_values = self.context.get(field)
        # Only enforce when the context actually restricts this field.
        if allowed_values and value not in allowed_values:
            raise ValidationError('Must be one of {}'.format(allowed_values), field_names=field)
Performs field validation against the schema context if values have been provided to SWAGManager via the swag . schema_context config object .
13,637
def validate_account_status(self, data):
    """Performs field validation for account_status.

    If any region is not deleted, account_status cannot be deleted.
    """
    deleted = 'deleted'
    account_status = data.get('account_status')
    for region_entry in data.get('status'):
        if account_status == deleted and region_entry['status'] != deleted:
            raise ValidationError('Account Status cannot be "deleted" if a region is not "deleted"')
Performs field validation for account_status . If any region is not deleted account_status cannot be deleted
13,638
def validate_regions_schema(self, data):
    """Performs field validation for regions.

    Expects a dict with region names as keys and RegionSchema data as values.
    """
    region_schema = RegionSchema()
    for region_name, region_data in data.get('regions', {}).items():
        errors = region_schema.validate(region_data)
        if errors:
            raise ValidationError(errors)
Performs field validation for regions . This should be a dict with region names as the key and RegionSchema as the value
13,639
def coord_to_dimension(coord):
    """Converts an iris coordinate to a HoloViews dimension."""
    if coord.units.is_time_reference():
        # Time coordinates carry a date formatter rather than a unit label.
        params = {'value_format': get_date_format(coord)}
    else:
        params = {'unit': str(coord.units)}
    return Dimension(coord.name(), **params)
Converts an iris coordinate to a HoloViews dimension .
13,640
def sort_coords(coord):
    """Sort key for DimCoords.

    Ensures that dates and pressure levels appear first and that longitude
    and latitude appear last, in the correct order.
    """
    import iris
    axis_order = {'T': -2, 'Z': -1, 'X': 1, 'Y': 2}
    axis = iris.util.guess_coord_axis(coord)
    # Secondary key: the coordinate name (guarded against falsy coords).
    return (axis_order.get(axis, 0), coord and coord.name())
Sorts a list of DimCoords trying to ensure that dates and pressure levels appear first and the longitude and latitude appear last in the correct order .
13,641
def values(cls, dataset, dim, expanded=True, flat=True, compute=True):
    """Returns an array of the values along the supplied dimension."""
    dim = dataset.get_dimension(dim, strict=True)
    if dim in dataset.vdims:
        # Value dimensions come from the cube data, reordered canonically.
        coord_names = [c.name() for c in dataset.data.dim_coords]
        data = cls.canonicalize(dataset, dataset.data.copy().data, coord_names)
        return data.T.flatten() if flat else data
    if expanded:
        data = cls.coords(dataset, dim.name, expanded=True)
        return data.T.flatten() if flat else data
    return cls.coords(dataset, dim.name, ordered=True)
Returns an array of the values along the supplied dimension .
13,642
# Groups the data by one or more dimensions, returning a container indexed by
# the grouped dimensions that holds slices of the cube wrapped in group_type.
# Kept verbatim: iterates the cartesian product of the unique coordinate
# values of the grouped dims, extracts each slice with an iris.Constraint,
# tabulates columns when non-kdim dimensions must be dropped, and wraps the
# result in container_type (with sorting/item checks disabled for NdMappings).
# NOTE(review): behavior when group_type is None reaching issubclass() is
# unclear from here — presumably callers always pass a type; verify.
def groupby ( cls , dataset , dims , container_type = HoloMap , group_type = None , ** kwargs ) : import iris if not isinstance ( dims , list ) : dims = [ dims ] dims = [ dataset . get_dimension ( d , strict = True ) for d in dims ] constraints = [ d . name for d in dims ] slice_dims = [ d for d in dataset . kdims if d not in dims ] group_kwargs = { } group_type = dict if group_type == 'raw' else group_type if issubclass ( group_type , Element ) : group_kwargs . update ( util . get_param_values ( dataset ) ) group_kwargs [ 'kdims' ] = slice_dims group_kwargs . update ( kwargs ) drop_dim = any ( d not in group_kwargs [ 'kdims' ] for d in slice_dims ) unique_coords = product ( * [ cls . values ( dataset , d , expanded = False ) for d in dims ] ) data = [ ] for key in unique_coords : constraint = iris . Constraint ( ** dict ( zip ( constraints , key ) ) ) extracted = dataset . data . extract ( constraint ) if drop_dim : extracted = group_type ( extracted , kdims = slice_dims , vdims = dataset . vdims ) . columns ( ) cube = group_type ( extracted , ** group_kwargs ) data . append ( ( key , cube ) ) if issubclass ( container_type , NdMapping ) : with item_check ( False ) , sorted_context ( False ) : return container_type ( data , kdims = dims ) else : return container_type ( data )
Groups the data by one or more dimensions returning a container indexed by the grouped dimensions containing slices of the cube wrapped in the group_type . This makes it very easy to break up a high - dimensional dataset into smaller viewable chunks .
13,643
def concat_dim(cls, datasets, dim, vdims):
    """Concatenates datasets along one dimension."""
    import iris
    from iris.experimental.equalise_cubes import equalise_attributes
    cubes = []
    for coord_value, cube in datasets.items():
        cube = cube.copy()
        # Tag each cube with the coordinate it will be concatenated along.
        cube.add_aux_coord(iris.coords.DimCoord([coord_value], var_name=dim.name))
        cubes.append(cube)
    cube_list = iris.cube.CubeList(cubes)
    equalise_attributes(cube_list)
    return cube_list.merge_cube()
Concatenates datasets along one dimension
13,644
def range(cls, dataset, dimension):
    """Computes the range along a particular dimension."""
    dim_obj = dataset.get_dimension(dimension, strict=True)
    vals = dataset.dimension_values(dim_obj.name, False)
    # NaN-aware bounds so missing data does not poison the range.
    lower, upper = np.nanmin(vals), np.nanmax(vals)
    return (lower, upper)
Computes the range along a particular dimension .
13,645
def redim(cls, dataset, dimensions):
    """Rename coords on the Cube."""
    renamed = dataset.data.copy()
    for old_name, new_dim in dimensions.items():
        # Rename both the cube itself and any matching dimension coordinate.
        if old_name == renamed.name():
            renamed.rename(new_dim.name)
        for coord in renamed.dim_coords:
            if old_name == coord.name():
                coord.rename(new_dim.name)
    return renamed
Rename coords on the Cube .
13,646
def length(cls, dataset):
    """Returns the total number of samples in the dataset.

    The sample count is the product of the lengths of all dimension
    coordinates of the underlying cube.
    """
    # BUG FIX: np.product was deprecated and removed in NumPy 2.0; np.prod is
    # the supported spelling. intp avoids 32-bit overflow for very large cubes.
    return np.prod([len(d.points) for d in dataset.data.coords(dim_coords=True)], dtype=np.intp)
Returns the total number of samples in the dataset .
13,647
def add_dimension(cls, columns, dimension, dim_pos, values, vdim):
    """Adding dimensions is not supported by the iris interface.

    Adding value dimensions is not currently implemented and adding key
    dimensions is impossible on dense interfaces.
    """
    if vdim:
        # Value dimensions are recognized but unimplemented.
        raise NotImplementedError
    raise Exception("Cannot add key dimension to a dense representation.")
Adding value dimensions not currently supported by iris interface . Adding key dimensions not possible on dense interfaces .
13,648
def select_to_constraint(cls, dataset, selection):
    """Transform a selection dictionary to an iris Constraint."""
    import iris

    def interval_check(low, high):
        # Half-open interval test matching slice semantics.
        def check(cell):
            return low <= cell.point < high
        return check

    constraint_kwargs = {}
    for dim, constraint in selection.items():
        if isinstance(constraint, slice):
            constraint = (constraint.start, constraint.stop)
        if isinstance(constraint, tuple):
            if constraint == (None, None):
                # An unbounded range selects everything; skip it.
                continue
            constraint = interval_check(*constraint)
        dim = dataset.get_dimension(dim, strict=True)
        constraint_kwargs[dim.name] = constraint
    return iris.Constraint(**constraint_kwargs)
Transform a selection dictionary to an iris Constraint .
13,649
def select(cls, dataset, selection_mask=None, **selection):
    """Apply a selection to the data."""
    import iris
    constraint = cls.select_to_constraint(dataset, selection)
    pre_dim_coords = [c.name() for c in dataset.data.dim_coords]
    indexed = cls.indexed(dataset, selection)
    extracted = dataset.data.extract(constraint)
    if indexed and not extracted.dim_coords:
        # A fully indexed selection collapses to a scalar value.
        return extracted.data.item()
    post_dim_coords = [c.name() for c in extracted.dim_coords]
    # Restore any dimensions the extraction dropped as length-1 axes.
    for dropped in (c for c in pre_dim_coords if c not in post_dim_coords):
        extracted = iris.util.new_axis(extracted, dropped)
    return extracted
Apply a selection to the data .
13,650
def convert_to_geotype(element, crs=None):
    """Converts a HoloViews element type to the equivalent GeoViews element.

    Conversion happens only when a coordinate reference system is given;
    otherwise (or when no geo equivalent exists, or the element is already
    geographic) the element is returned unchanged.
    """
    if crs is None or isinstance(element, _Element):
        return element
    geotype = getattr(gv_element, type(element).__name__, None)
    if geotype is None:
        return element
    return geotype(element, crs=crs)
Converts a HoloViews element type to the equivalent GeoViews element if given a coordinate reference system .
13,651
def add_crs(op, element, **kwargs):
    """Converts elements in the input to their equivalent geotypes.

    Conversion happens only when a coordinate reference system is supplied.
    """
    crs = kwargs.get('crs')
    return element.map(lambda el: convert_to_geotype(el, crs), Element)
Converts any elements in the input to their equivalent geotypes if given a coordinate reference system .
13,652
def is_geographic(element, kdims=None):
    """Determine whether an element represents a geographic coordinate system.

    Optionally checks only a subset of its key dimensions.
    """
    if isinstance(element, (Overlay, NdOverlay)):
        # An overlay is geographic when any of its layers is.
        return any(element.traverse(is_geographic, [_Element]))
    if kdims:
        kdims = [element.get_dimension(d) for d in kdims]
    else:
        kdims = element.kdims
    if len(kdims) != 2 and not isinstance(element, (Graph, Nodes)):
        return False
    if isinstance(element.data, geographic_types) or isinstance(element, (WMTS, Feature)):
        return True
    if isinstance(element, _Element):
        return kdims == element.kdims and element.crs
    return False
Utility to determine whether the supplied element optionally a subset of its key dimensions represent a geographic coordinate system .
13,653
def geoms(self, scale=None, bounds=None, as_element=True):
    """Returns the geometries held by the Feature."""
    feature = self.data
    if scale is not None:
        feature = feature.with_scale(scale)
    # Cartopy expects extents ordered (x0, x1, y0, y1).
    extent = (bounds[0], bounds[2], bounds[1], bounds[3]) if bounds else None
    geometries = [g for g in feature.intersecting_geometries(extent) if g is not None]
    if not as_element:
        return geometries
    # Pick the element type from the first geometry's type.
    if not geometries or 'Polygon' in geometries[0].geom_type:
        return Polygons(geometries, crs=feature.crs)
    if 'Point' in geometries[0].geom_type:
        return Points(geometries, crs=feature.crs)
    return Path(geometries, crs=feature.crs)
Returns the geometries held by the Feature .
13,654
def from_shapefile(cls, shapefile, *args, **kwargs):
    """Loads a shapefile from disk and optionally merges it with a dataset.

    See from_records for the full signature.
    """
    records = Reader(shapefile).records()
    return cls.from_records(records, *args, **kwargs)
Loads a shapefile from disk and optionally merges it with a dataset . See from_records for full signature .
13,655
# Loads cartopy shapereader Records and optionally merges them with a dataset
# to build a choropleth. Kept verbatim: normalizes `on` into an attr->dim
# mapping, resolves index/value dimensions against the dataset, then for each
# record selects the matching dataset row (NaN-filling or dropping misses per
# drop_missing), attaches index keys and the record geometry, and finally
# wraps everything in `element` (or Polygons/Path inferred from the first
# geometry), coloring by `value`.
# NOTE(review): index kdims are passed in vdims (vdims=kdims + vdims) — looks
# intentional for hover/color data, but verify against callers.
def from_records ( cls , records , dataset = None , on = None , value = None , index = [ ] , drop_missing = False , element = None , ** kwargs ) : if dataset is not None and not on : raise ValueError ( 'To merge dataset with shapes mapping ' 'must define attribute(s) to merge on.' ) if util . pd and isinstance ( dataset , util . pd . DataFrame ) : dataset = Dataset ( dataset ) if not isinstance ( on , ( dict , list ) ) : on = [ on ] if on and not isinstance ( on , dict ) : on = { o : o for o in on } if not isinstance ( index , list ) : index = [ index ] kdims = [ ] for ind in index : if dataset and dataset . get_dimension ( ind ) : dim = dataset . get_dimension ( ind ) else : dim = Dimension ( ind ) kdims . append ( dim ) ddims = [ ] if dataset : if value : vdims = [ dataset . get_dimension ( value ) ] else : vdims = dataset . vdims ddims = dataset . dimensions ( ) if None in vdims : raise ValueError ( 'Value dimension %s not found ' 'in dataset dimensions %s' % ( value , ddims ) ) else : vdims = [ ] data = [ ] for i , rec in enumerate ( records ) : geom = { } if dataset : selection = { dim : rec . attributes . get ( attr , None ) for attr , dim in on . items ( ) } row = dataset . select ( ** selection ) if len ( row ) : values = { k : v [ 0 ] for k , v in row . iloc [ 0 ] . columns ( ) . items ( ) } elif drop_missing : continue else : values = { vd . name : np . nan for vd in vdims } geom . update ( values ) if index : for kdim in kdims : if kdim in ddims and len ( row ) : k = row [ kdim . name ] [ 0 ] elif kdim . name in rec . attributes : k = rec . attributes [ kdim . name ] else : k = None geom [ kdim . name ] = k geom [ 'geometry' ] = rec . geometry data . append ( geom ) if element is not None : pass elif data and data [ 0 ] : if isinstance ( data [ 0 ] [ 'geometry' ] , poly_types ) : element = Polygons else : element = Path else : element = Polygons return element ( data , vdims = kdims + vdims , ** kwargs ) . opts ( color = value )
Load data from a collection of cartopy . io . shapereader . Record objects and optionally merge it with a dataset to assign values to each polygon and form a choropleth . Supplying just records will return an NdOverlay of Shape Elements with a numeric index . If a dataset is supplied , a mapping between the attribute names in the records and the dimension names in the dataset must be supplied . The values assigned to each shapefile can then be drawn from the dataset by supplying a value , and the keys the Shapes are indexed by can be set by specifying one or more index dimensions .
13,656
def get_cb_plot(cb, plot=None):
    """Finds the subplot with the corresponding stream."""
    plot = plot or cb.plot
    if isinstance(plot, GeoOverlayPlot):
        # Recurse into subplots and pick the one whose stream is triggering.
        candidates = [get_cb_plot(cb, sp) for sp in plot.subplots.values()]
        triggering = [
            sp for sp in candidates
            if any(s in cb.streams and getattr(s, '_triggering', False) for s in sp.streams)
        ]
        if triggering:
            plot = triggering[0]
    return plot
Finds the subplot with the corresponding stream .
13,657
def skip(cb, msg, attributes):
    """Skips applying transforms if data is not geographic."""
    # Nothing to do unless the message carries all expected attributes.
    if not all(attr in msg for attr in attributes):
        return True
    plot = get_cb_plot(cb)
    geographic = getattr(plot, 'geographic', False)
    return not geographic or not hasattr(plot.current_frame, 'crs')
Skips applying transforms if data is not geographic .
13,658
def project_ranges(cb, msg, attributes):
    """Projects ranges supplied by a callback."""
    if skip(cb, msg, attributes):
        return msg
    plot = get_cb_plot(cb)
    x0, x1 = msg.get('x_range', (0, 1000))
    y0, y1 = msg.get('y_range', (0, 1000))
    # Project the extents from the plot projection into the data's crs.
    x0, y0, x1, y1 = project_extents((x0, y0, x1, y1), plot.projection, plot.current_frame.crs)
    projected = {'x_range': (x0, x1), 'y_range': (y0, y1)}
    return {k: v for k, v in projected.items() if k in attributes}
Projects ranges supplied by a callback .
13,659
def project_point(cb, msg, attributes=('x', 'y')):
    """Projects a single point supplied by a callback."""
    if skip(cb, msg, attributes):
        return msg
    plot = get_cb_plot(cb)
    x, y = msg.get('x', 0), msg.get('y', 0)
    crs = plot.current_frame.crs
    # Transform from the display projection back into the data's crs.
    projected = crs.transform_points(plot.projection, np.array([x]), np.array([y]))
    msg['x'], msg['y'] = projected[0, :2]
    return {k: v for k, v in msg.items() if k in attributes}
Projects a single point supplied by a callback
13,660
def project_drawn(cb, msg):
    """Projects a drawn element to the declared coordinate system."""
    stream = cb.streams[0]
    # Temporarily swap in the new data to materialize the drawn element.
    old_data = stream.data
    stream.update(data=msg['data'])
    element = stream.element
    stream.update(data=old_data)
    proj = cb.plot.projection
    if not isinstance(element, _Element) or element.crs == proj:
        return None
    # Reproject from the plot projection back into the element's crs.
    crs = element.crs
    element.crs = proj
    return project(element, projection=crs)
Projects a drawn element to the declared coordinate system
13,661
def clean_weight_files(cls):
    """Cleans existing weight files."""
    deleted = []
    for path in cls._files:
        try:
            os.remove(path)
        except FileNotFoundError:
            # Already gone; nothing to clean.
            continue
        deleted.append(path)
    print('Deleted %d weight files' % len(deleted))
    cls._files = []
Cleans existing weight files .
13,662
def _get_projection ( el ) : result = None if hasattr ( el , 'crs' ) : result = ( int ( el . _auxiliary_component ) , el . crs ) return result
Get coordinate reference system from non - auxiliary elements . Return value is a tuple of a precedence integer and the projection to allow non - auxiliary components to take precedence .
13,663
def get_extents(self, element, ranges, range_type='combined'):
    """Subclasses the get_extents method using the GeoAxes set_extent method.

    Projects the extents into the element's coordinate reference system.

    :param element: the element whose extents are computed
    :param ranges: the ranges passed through to the superclass
    :param range_type: 'combined', 'data', or other superclass range types
    :returns: (x0, y0, x1, y1), or four NaNs when projection fails
    """
    proj = self.projection
    if self.global_extent and range_type in ('combined', 'data'):
        # Global extent short-circuits to the projection's own limits.
        (x0, x1), (y0, y1) = proj.x_limits, proj.y_limits
        return (x0, y0, x1, y1)
    extents = super(ProjectionPlot, self).get_extents(element, ranges, range_type)
    if not getattr(element, 'crs', None) or not self.geographic:
        return extents
    if any(e is None or not np.isfinite(e) for e in extents):
        extents = None
    else:
        extents = project_extents(extents, element.crs, proj)
    # BUG FIX: np.NaN was removed in NumPy 2.0; np.nan is the supported
    # spelling (identical value on every NumPy version).
    return (np.nan,) * 4 if not extents else extents
Subclasses the get_extents method using the GeoAxes set_extent method to project the extents to the Elements coordinate reference system .
13,664
def wrap_lons(lons, base, period):
    """Wrap longitude values into the range between base and base + period."""
    as_float = lons.astype(np.float64)
    # Shift by two periods before the modulo so values below base wrap correctly.
    return ((as_float - base + period * 2) % period) + base
Wrap longitude values into the range between base and base + period .
13,665
def geom_dict_to_array_dict(geom_dict, coord_names=('Longitude', 'Latitude')):
    """Convert a geometry dictionary into x/y coordinate-array columns.

    Replaces the 'geometry' entry with coordinate arrays keyed by
    *coord_names* and, for (Multi)Polygons with interiors, adds a 'holes'
    list-of-lists of hole arrays.

    Fix: the default for ``coord_names`` was a mutable list; it is only
    unpacked, so a tuple default is equivalent and safe.
    """
    x, y = coord_names
    geom = geom_dict['geometry']
    new_dict = {k: v for k, v in geom_dict.items() if k != 'geometry'}
    array = geom_to_array(geom)
    new_dict[x] = array[:, 0]
    new_dict[y] = array[:, 1]
    if geom.geom_type == 'Polygon':
        holes = [geom_to_array(interior) for interior in geom.interiors]
        if holes:
            # single polygon -> one outer list wrapping its holes
            new_dict['holes'] = [holes]
    elif geom.geom_type == 'MultiPolygon':
        outer_holes = [[geom_to_array(interior) for interior in g.interiors]
                       for g in geom]
        if any(hs for hs in outer_holes):
            new_dict['holes'] = outer_holes
    return new_dict
Converts a dictionary containing an geometry key to a dictionary of x - and y - coordinate arrays and if present a list - of - lists of hole array .
13,666
def polygons_to_geom_dicts(polygons, skip_invalid=True):
    """Convert a Polygons element into a list of geometry dictionaries.

    Each dictionary holds a shapely geometry under 'geometry' along with all
    value dimensions of the corresponding polygon. NaN-separated coordinate
    arrays are split into sub-polygons; degenerate sub-arrays (fewer than
    three points) are skipped or, when ``skip_invalid`` is False, emitted
    individually as Point/LineString dictionaries.
    """
    interface = polygons.interface.datatype
    if interface == 'geodataframe':
        # GeoPandas rows already carry a geometry column
        return [row.to_dict() for _, row in polygons.data.iterrows()]
    elif interface == 'geom_dictionary':
        return polygons.data

    polys = []
    xdim, ydim = polygons.kdims
    has_holes = polygons.has_holes
    holes = polygons.holes() if has_holes else None
    for i, polygon in enumerate(polygons.split(datatype='columns')):
        array = np.column_stack([polygon.pop(xdim.name), polygon.pop(ydim.name)])
        # NaN rows mark breaks between separate sub-polygons
        splits = np.where(np.isnan(array[:, :2].astype('float')).sum(axis=1))[0]
        arrays = np.split(array, splits + 1) if len(splits) else [array]

        invalid = False
        subpolys = []
        subholes = None
        if has_holes:
            subholes = [[LinearRing(h) for h in hs] for hs in holes[i]]
        for j, arr in enumerate(arrays):
            if j != (len(arrays) - 1):
                arr = arr[:-1]  # drop the NaN separator row
            if len(arr) == 0:
                continue
            elif len(arr) == 1:
                if skip_invalid:
                    continue
                poly = Point(arr[0])
                invalid = True
            elif len(arr) == 2:
                if skip_invalid:
                    continue
                poly = LineString(arr)
                invalid = True
            elif not len(splits):
                # single sub-polygon: attach its holes directly
                poly = Polygon(arr, (subholes[j] if has_holes else []))
            else:
                poly = Polygon(arr)
                hs = [h for h in subholes[j]] if has_holes else []
                poly = Polygon(poly.exterior, holes=hs)
            subpolys.append(poly)

        if invalid:
            # emit each degenerate subgeometry as its own dictionary
            polys += [dict(polygon, geometry=sp) for sp in subpolys]
            continue
        elif len(subpolys) == 1:
            geom = subpolys[0]
        elif subpolys:
            geom = MultiPolygon(subpolys)
        else:
            continue
        polygon['geometry'] = geom
        polys.append(polygon)
    return polys
Converts a Polygons element into a list of geometry dictionaries preserving all value dimensions .
13,667
def path_to_geom_dicts(path, skip_invalid=True):
    """Convert a Path element into a list of geometry dictionaries.

    Each dictionary holds a shapely geometry under 'geometry' along with all
    value dimensions of the corresponding subpath. NaN-separated coordinate
    arrays are split into individual LineStrings; subpaths with fewer than
    two points are skipped or, when ``skip_invalid`` is False, emitted
    individually as Point dictionaries.

    Fixes relative to the original:
    - ``invalid`` is now reset per subpath instead of once before the loop,
      so one invalid subpath no longer poisons all following ones.
    - An explicit ``else: continue`` skips subpaths whose geometry list ends
      up empty; previously ``geom`` was left undefined (NameError) or stale
      from the prior iteration. This matches polygons_to_geom_dicts.
    - The loop variable no longer shadows the ``path`` parameter.
    """
    interface = path.interface.datatype
    if interface == 'geodataframe':
        # GeoPandas rows already carry a geometry column
        return [row.to_dict() for _, row in path.data.iterrows()]
    elif interface == 'geom_dictionary':
        return path.data

    geoms = []
    xdim, ydim = path.kdims
    for columns in path.split(datatype='columns'):
        invalid = False
        array = np.column_stack([columns.pop(xdim.name), columns.pop(ydim.name)])
        # NaN rows mark breaks between separate subpaths
        splits = np.where(np.isnan(array[:, :2].astype('float')).sum(axis=1))[0]
        arrays = np.split(array, splits + 1) if len(splits) else [array]
        subpaths = []
        for j, arr in enumerate(arrays):
            if j != (len(arrays) - 1):
                arr = arr[:-1]  # drop the NaN separator row
            if len(arr) == 0:
                continue
            elif len(arr) == 1:
                if skip_invalid:
                    continue
                g = Point(arr[0])
                invalid = True
            else:
                g = LineString(arr)
            subpaths.append(g)

        if invalid:
            # emit each degenerate subgeometry as its own dictionary
            geoms += [dict(columns, geometry=sp) for sp in subpaths]
            continue
        elif len(subpaths) == 1:
            geom = subpaths[0]
        elif subpaths:
            geom = MultiLineString(subpaths)
        else:
            # all subpaths empty/skipped: nothing to emit for this path
            continue
        columns['geometry'] = geom
        geoms.append(columns)
    return geoms
Converts a Path element into a list of geometry dictionaries preserving all value dimensions .
13,668
def to_ccw(geom):
    """Return *geom* wound counter-clockwise.

    Only Polygons with a clockwise exterior are reoriented; every other
    geometry is returned unchanged.
    """
    needs_reorient = isinstance(geom, sgeom.Polygon) and not geom.exterior.is_ccw
    return sgeom.polygon.orient(geom) if needs_reorient else geom
Reorients polygon to be wound counter - clockwise .
13,669
def geom_length(geom):
    """Return the number of coordinates in a shapely geometry.

    Points count as one coordinate; polygons are measured via their
    exterior ring; Multi* geometries are summed recursively.
    """
    if geom.geom_type == 'Point':
        return 1
    if hasattr(geom, 'exterior'):
        geom = geom.exterior
    simple = not geom.geom_type.startswith('Multi')
    if simple and hasattr(geom, 'array_interface_base'):
        # flat interleaved x/y buffer -> two entries per coordinate
        return len(geom.array_interface_base['data']) // 2
    return sum(geom_length(part) for part in geom)
Calculates the length of coordinates in a shapely geometry .
13,670
def geo_mesh(element):
    """Return (xs, ys, zs) mesh arrays for a 2D element, closing the seam
    for globally-wrapping cylindrical data.

    When the element spans exactly 360 degrees on a cylindrical projection,
    an extra longitude column is appended (first column shifted by 360) so
    the mesh wraps cleanly around the globe.
    """
    if len(element.vdims) > 1:
        # multiple value dimensions are stacked along the third axis
        xs, ys = (element.dimension_values(i, False, False) for i in range(2))
        zs = np.dstack([element.dimension_values(i, False, False)
                        for i in range(2, 2 + len(element.vdims))])
    else:
        xs, ys, zs = (element.dimension_values(i, False, False) for i in range(3))
    lon0, lon1 = element.range(0)
    if isinstance(element.crs, ccrs._CylindricalProjection) and (lon1 - lon0) == 360:
        xs = np.append(xs, xs[0:1] + 360, axis=0)
        # masked concat preserves any existing mask in the data
        zs = np.ma.concatenate([zs, zs[:, 0:1]], axis=1)
    return xs, ys, zs
Get mesh data from a 2D Element ensuring that if the data is on a cylindrical coordinate system and wraps globally that data actually wraps around .
13,671
def check_crs(crs):
    """Return a pyproj.Proj for *crs* when it is a valid projection or EPSG
    string, otherwise None.

    Accepts an existing pyproj.Proj (returned as-is), a proj4 dict or a
    string; falls back to interpreting strings as an ``init=`` spec.
    """
    import pyproj
    if isinstance(crs, pyproj.Proj):
        return crs
    if not isinstance(crs, (dict, basestring)):
        return None
    try:
        return pyproj.Proj(crs)
    except RuntimeError:
        try:
            return pyproj.Proj(init=crs)
        except RuntimeError:
            return None
Checks if the crs represents a valid grid projection or ESPG string .
13,672
def proj_to_cartopy(proj):
    """Convert a pyproj.Proj into the equivalent cartopy.crs.Projection.

    Parses the proj4 string (round-tripping through GDAL/OSR when available
    to normalize it) and maps its parameters onto the matching cartopy
    projection class.
    """
    import cartopy.crs as ccrs
    try:
        from osgeo import osr
        has_gdal = True
    except ImportError:
        has_gdal = False

    proj = check_crs(proj)

    if proj.is_latlong():
        return ccrs.PlateCarree()

    srs = proj.srs
    if has_gdal:
        # normalize the proj4 string through OSR
        s1 = osr.SpatialReference()
        s1.ImportFromProj4(proj.srs)
        srs = s1.ExportToProj4()

    # proj4 keyword -> cartopy constructor keyword
    km_proj = {'lon_0': 'central_longitude',
               'lat_0': 'central_latitude',
               'x_0': 'false_easting',
               'y_0': 'false_northing',
               'k': 'scale_factor',
               'zone': 'zone',
               }
    km_globe = {'a': 'semimajor_axis',
                'b': 'semiminor_axis',
                }
    km_std = {'lat_1': 'lat_1',
              'lat_2': 'lat_2',
              }
    kw_proj = dict()
    kw_globe = dict()
    kw_std = dict()
    for s in srs.split('+'):
        s = s.split('=')
        if len(s) != 2:
            continue
        k = s[0].strip()
        v = s[1].strip()
        try:
            v = float(v)
        except:
            # NOTE(review): bare except -- non-numeric values (e.g.
            # ellipsoid names) are intentionally left as strings, but this
            # also masks unrelated errors; confirm narrowing to ValueError.
            pass
        if k == 'proj':
            # NOTE(review): ``cl`` is only bound for tmerc/lcc/merc/utm; any
            # other projection value leaves it undefined and the final
            # constructor call raises NameError -- confirm intended inputs.
            if v == 'tmerc':
                cl = ccrs.TransverseMercator
            if v == 'lcc':
                cl = ccrs.LambertConformal
            if v == 'merc':
                cl = ccrs.Mercator
            if v == 'utm':
                cl = ccrs.UTM
        if k in km_proj:
            kw_proj[km_proj[k]] = v
        if k in km_globe:
            kw_globe[km_globe[k]] = v
        if k in km_std:
            kw_std[km_std[k]] = v

    globe = None
    if kw_globe:
        globe = ccrs.Globe(**kw_globe)
    if kw_std:
        kw_proj['standard_parallels'] = (kw_std['lat_1'], kw_std['lat_2'])

    # Mercator does not accept false easting/northing keywords
    if cl.__name__ == 'Mercator':
        kw_proj.pop('false_easting', None)
        kw_proj.pop('false_northing', None)

    return cl(globe=globe, **kw_proj)
Converts a pyproj . Proj to a cartopy . crs . Projection
13,673
def load_tiff(filename, crs=None, apply_transform=False, nan_nodata=False, **kwargs):
    """Return an RGB or Image element loaded from a geotiff file.

    Parameters are forwarded to ``from_xarray``; warnings emitted while
    opening the raster are suppressed.

    Fix: the bare ``except:`` around the xarray import also swallowed
    unrelated failures (KeyboardInterrupt, broken installs); it now only
    catches ImportError.
    """
    try:
        import xarray as xr
    except ImportError:
        raise ImportError('Loading tiffs requires xarray to be installed')
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore')
        da = xr.open_rasterio(filename)
        return from_xarray(da, crs, apply_transform, nan_nodata, **kwargs)
Returns an RGB or Image element loaded from a geotiff file .
13,674
def teardown_handles(self):
    """Remove the previous artist handle unless it is a GoogleTiles source.

    Called before new handles replace the old ones when no custom
    update_handles method is supplied.
    """
    artist = self.handles.get('artist')
    if isinstance(artist, GoogleTiles):
        return
    self.handles['artist'].remove()
If no custom update_handles method is supplied this method is called to tear down any previous handles before replacing them .
13,675
def find_geom(geom, geoms):
    """Return the index of *geom* in *geoms* using identity comparison.

    Avoids the expensive equality checks the ``in`` operator would perform
    on shapely geometries; returns None when not present.
    """
    for index, candidate in enumerate(geoms):
        if candidate is geom:
            return index
Returns the index of a geometry in a list of geometries avoiding expensive equality checks of in operator .
13,676
def compute_zoom_level(bounds, domain, levels):
    """Map the fraction of *domain* covered by *bounds* to a zoom level.

    Smaller coverage yields deeper zoom; the result is clamped to *levels*.
    """
    fraction = min(bounds.area / domain.area, 1)
    zoom = round(np.log2(1 / fraction))
    return int(min(zoom, levels))
Computes a zoom level given a bounds polygon a polygon of the overall domain and the number of zoom levels to divide the data into .
13,677
def bounds_to_poly(bounds):
    """Construct a shapely Polygon from an (x0, y0, x1, y1) bounds tuple."""
    x0, y0, x1, y1 = bounds
    corners = [(x0, y0), (x1, y0), (x1, y1), (x0, y1)]
    return Polygon(corners)
Constructs a shapely Polygon from the provided bounds tuple .
13,678
def get_assignment(self):
    """Parse the JSON plan file given on the command line into an
    assignment dict.

    Logs and re-raises IOError (file missing), ValueError (invalid JSON)
    and KeyError (unexpected plan structure).

    Fix: ``json.loads(open(path).read())`` leaked the file handle; the
    file is now opened with a context manager.
    """
    try:
        with open(self.args.plan_file_path) as plan_file:
            plan = json.load(plan_file)
        return plan_to_assignment(plan)
    except IOError:
        self.log.exception(
            'Given json file {file} not found.'.format(file=self.args.plan_file_path),
        )
        raise
    except ValueError:
        self.log.exception(
            'Given json file {file} could not be decoded.'.format(file=self.args.plan_file_path),
        )
        raise
    except KeyError:
        self.log.exception(
            'Given json file {file} could not be parsed in desired format.'.format(file=self.args.plan_file_path),
        )
        raise
Parse the given json plan in dict format .
13,679
def generate_requests(hosts, jolokia_port, jolokia_prefix):
    """Yield (host, future) pairs querying each host's under-replicated
    partition count via its Jolokia endpoint."""
    session = FuturesSession()
    template = "http://{host}:{port}/{prefix}/read/{key}"
    for host in hosts:
        url = template.format(
            host=host,
            port=jolokia_port,
            prefix=jolokia_prefix,
            key=UNDER_REPL_KEY,
        )
        yield host, session.get(url)
Return a generator of requests to fetch the under replicated partition number from the specified hosts .
13,680
def read_cluster_status(hosts, jolokia_port, jolokia_prefix):
    """Return (under_replicated, missing_brokers) totals across *hosts*.

    Queries each broker's Jolokia endpoint for the under-replicated
    partition count. Brokers that fail to respond, or that do not expose
    the key yet (still starting up), are counted as missing. Exits the
    process on any HTTP 4xx/5xx response.

    Fixes: the down-broker message was emitted as
    "...{1}.This maybe because..." (implicit string concatenation without a
    space plus a typo); the local named ``json`` shadowed the json module.
    """
    under_replicated = 0
    missing_brokers = 0
    for host, request in generate_requests(hosts, jolokia_port, jolokia_prefix):
        try:
            response = request.result()
            if 400 <= response.status_code <= 599:
                print("Got status code {0}. Exiting.".format(response.status_code))
                sys.exit(1)
            payload = response.json()
            under_replicated += payload['value']
        except RequestException as e:
            print("Broker {0} is down: {1}. "
                  "This may be because it is starting up".format(host, e),
                  file=sys.stderr)
            missing_brokers += 1
        except KeyError:
            print("Cannot find the key, Kafka is probably still starting up",
                  file=sys.stderr)
            missing_brokers += 1
    return under_replicated, missing_brokers
Read and return the number of under replicated partitions and missing brokers from the specified hosts .
13,681
def print_brokers(cluster_config, brokers):
    """Print the cluster name followed by each (id, host) broker pair that
    will be restarted."""
    header = "Will restart the following brokers in {0}:".format(cluster_config.name)
    print(header)
    for broker_id, host in brokers:
        print(" {0}: {1}".format(broker_id, host))
Print the list of brokers that will be restarted .
13,682
def ask_confirmation():
    """Prompt until the user answers yes/no; return True on yes, False on no."""
    while True:
        print("Do you want to restart these brokers? ", end="")
        answer = input().lower()
        if answer in ['yes', 'y']:
            return True
        if answer in ['no', 'n']:
            return False
        print("Please respond with 'yes' or 'no'")
Ask for confirmation to the user . Return true if the user confirmed the execution false otherwise .
13,683
def start_broker(host, connection, start_command, verbose):
    """Run the start command over *connection*; report output when verbose."""
    _, out, err = connection.sudo_command(start_command)
    if not verbose:
        return
    report_stdout(host, out)
    report_stderr(host, err)
Execute the start command over the SSH connection to start the broker on the given host, reporting stdout/stderr when verbose.
13,684
def stop_broker(host, connection, stop_command, verbose):
    """Run the stop command over *connection*; report output when verbose."""
    _, out, err = connection.sudo_command(stop_command)
    if not verbose:
        return
    report_stdout(host, out)
    report_stderr(host, err)
Execute the stop command over the SSH connection to stop the broker on the given host, reporting stdout/stderr when verbose.
13,685
def wait_for_stable_cluster(
    hosts,
    jolokia_port,
    jolokia_prefix,
    check_interval,
    check_count,
    unhealthy_time_limit,
):
    """Block until the cluster reports no under-replicated partitions and no
    missing brokers for *check_count* consecutive checks.

    Polls every *check_interval* seconds; raises WaitTimeoutException once
    the polling budget implied by *unhealthy_time_limit* is exhausted.
    """
    stable_counter = 0
    # maximum number of polls before giving up
    max_checks = int(math.ceil(unhealthy_time_limit / check_interval))
    for i in itertools.count():
        partitions, brokers = read_cluster_status(
            hosts,
            jolokia_port,
            jolokia_prefix,
        )
        if partitions or brokers:
            stable_counter = 0  # any problem resets the stability streak
        else:
            stable_counter += 1
        print(
            "Under replicated partitions: {p_count}, missing brokers: {b_count} ({stable}/{limit})".format(
                p_count=partitions,
                b_count=brokers,
                stable=stable_counter,
                limit=check_count,
            ))
        if stable_counter >= check_count:
            print("The cluster is stable")
            return
        if i >= max_checks:
            raise WaitTimeoutException()
        time.sleep(check_interval)
Block the caller until the cluster can be considered stable .
13,686
def execute_rolling_restart(
    brokers,
    jolokia_port,
    jolokia_prefix,
    check_interval,
    check_count,
    unhealthy_time_limit,
    skip,
    verbose,
    pre_stop_task,
    post_stop_task,
    start_command,
    stop_command,
    ssh_password=None
):
    """Restart the brokers one at a time, waiting for cluster stability
    between restarts.

    For every broker after the first *skip* entries: run the pre-stop tasks,
    wait for the cluster to be stable (a single check for the first broker,
    *check_count* checks thereafter), stop the broker and run post-stop
    tasks, then start it again over a fresh SSH connection. A final
    stability wait runs after the last restart. Stability is measured via
    Jolokia (see wait_for_stable_cluster).
    """
    all_hosts = [b[1] for b in brokers]
    for n, host in enumerate(all_hosts[skip:]):
        with ssh(host=host, forward_agent=True, sudoable=True,
                 max_attempts=3, max_timeout=2,
                 ssh_password=ssh_password) as connection:
            execute_task(pre_stop_task, host)
            wait_for_stable_cluster(
                all_hosts,
                jolokia_port,
                jolokia_prefix,
                check_interval,
                1 if n == 0 else check_count,  # first broker needs one stable check only
                unhealthy_time_limit,
            )
            print("Stopping {0} ({1}/{2})".format(host, n + 1, len(all_hosts) - skip))
            stop_broker(host, connection, stop_command, verbose)
            execute_task(post_stop_task, host)
        # reconnect with a fresh SSH session to start the broker
        with ssh(host=host, forward_agent=True, sudoable=True,
                 max_attempts=3, max_timeout=2,
                 ssh_password=ssh_password) as connection:
            print("Starting {0} ({1}/{2})".format(host, n + 1, len(all_hosts) - skip))
            start_broker(host, connection, start_command, verbose)
    # final stability check after the last restart
    wait_for_stable_cluster(
        all_hosts,
        jolokia_port,
        jolokia_prefix,
        check_interval,
        check_count,
        unhealthy_time_limit,
    )
Execute the rolling restart on the specified brokers . It checks the number of under replicated partitions on each broker using Jolokia .
13,687
def validate_opts(opts, brokers_num):
    """Validate command-line options; print an error and return True when
    they are invalid, False otherwise. A zero check-count only warns."""
    if not 0 <= opts.skip < brokers_num:
        print("Error: --skip must be >= 0 and < #brokers")
        return True
    if opts.check_count < 0:
        print("Error: --check-count must be >= 0")
        return True
    if opts.unhealthy_time_limit < 0:
        print("Error: --unhealthy-time-limit must be >= 0")
        return True
    if opts.check_count == 0:
        print("Warning: no check will be performed")
    if opts.check_interval < 0:
        print("Error: --check-interval must be >= 0")
        return True
    return False
Basic option validation . Returns True if the options are not valid False otherwise .
13,688
def validate_broker_ids_subset(broker_ids, subset_ids):
    """Return True when every id in *subset_ids* exists in *broker_ids*.

    Prints an error line for each unknown id so the user sees all the
    offending values at once.
    """
    known = set(broker_ids)
    valid = True
    for candidate in subset_ids:
        if candidate not in known:
            print("Error: user specified broker id {0} does not exist in cluster.".format(candidate))
            valid = False
    return valid
Validate that user specified broker ids to restart exist in the broker ids retrieved from cluster config .
13,689
def run(self, cluster_config, rg_parser, partition_measurer, cluster_balancer, args,):
    """Initialize cluster_config, args and zk, build the cluster topology
    and delegate to run_command.

    Does nothing when the cluster has no partitions; exits with status 1
    when a previous reassignment is still pending.
    """
    self.cluster_config = cluster_config
    self.args = args
    with ZK(self.cluster_config) as self.zk:
        self.log.debug(
            'Starting %s for cluster: %s and zookeeper: %s',
            self.__class__.__name__,
            self.cluster_config.name,
            self.cluster_config.zookeeper,
        )
        brokers = self.zk.get_brokers()
        assignment = self.zk.get_cluster_assignment()
        pm = partition_measurer(
            self.cluster_config,
            brokers,
            assignment,
            args,
        )
        ct = ClusterTopology(
            assignment,
            brokers,
            pm,
            rg_parser.get_replication_group,
        )
        if len(ct.partitions) == 0:
            self.log.info("The cluster is empty. No actions to perform.")
            return
        # refuse to stack a new plan on top of an in-flight one
        if self.is_reassignment_pending():
            self.log.error('Previous reassignment pending.')
            sys.exit(1)
        self.run_command(ct, cluster_balancer(ct, args))
Initialize cluster_config args and zk then call run_command .
13,690
def execute_plan(self, plan, allow_rf_change=False):
    """Send the proposed plan to zookeeper when execution is requested.

    Exits with status 1 when zookeeper rejects the plan; logs why the plan
    was skipped when execution was not requested.
    """
    if not self.should_execute():
        self.log.info('Proposed plan won\'t be executed (--apply and confirmation needed).')
        return
    if self.zk.execute_plan(plan, allow_rf_change=allow_rf_change):
        self.log.info('Plan sent to zookeeper for reassignment successfully.')
    else:
        self.log.error('Plan execution unsuccessful.')
        sys.exit(1)
Save proposed - plan and execute the same if requested .
13,691
def should_execute(self):
    """Return whether the proposed plan should actually be executed.

    Requires --apply plus either --no-confirm or interactive confirmation.
    """
    apply_requested = self.args.apply
    if not apply_requested:
        # preserve short-circuit semantics: return the falsy value itself
        return apply_requested
    return self.args.no_confirm or self.confirm_execution()
Confirm if proposed - plan should be executed .
13,692
def is_reassignment_pending(self):
    """Return True when a previous reassignment is still in progress."""
    pending = self.zk.get_pending_plan()
    if not pending:
        return False
    partitions = pending['partitions']
    self.log.info(
        'Previous re-assignment in progress for {count} partitions.'
        ' Current partitions in re-assignment queue: {partitions}'.format(
            count=len(partitions),
            partitions=partitions,
        )
    )
    return True
Return True if there are reassignment tasks pending .
13,693
def get_reduced_assignment(
    self,
    original_assignment,
    cluster_topology,
    max_partition_movements,
    max_leader_only_changes,
    max_movement_size=DEFAULT_MAX_MOVEMENT_SIZE,
    force_progress=False,
):
    """Reduce the proposed assignment to at most *max_partition_movements*
    replica moves, *max_movement_size* bytes moved and
    *max_leader_only_changes* leader-only changes.

    Returns a dict mapping (topic, partition) to the new replica list for
    the retained actions, or an empty dict when inputs are missing or any
    limit is negative. When *force_progress* is set and no partition fits
    under *max_movement_size*, the limit is raised to the smallest
    candidate partition size so at least one move can proceed.
    """
    new_assignment = cluster_topology.assignment
    if (not original_assignment or
            not new_assignment or
            max_partition_movements < 0 or
            max_leader_only_changes < 0 or
            max_movement_size < 0):
        return {}

    # same replica set in a different order => leader-only change
    leaders_changes = [
        (t_p, new_assignment[t_p])
        for t_p, replica in six.iteritems(original_assignment)
        if replica != new_assignment[t_p] and
        set(replica) == set(new_assignment[t_p])
    ]

    # replica-set changes, annotated with the number of broker moves
    partition_change_count = [
        (t_p, len(set(replica) - set(new_assignment[t_p])),)
        for t_p, replica in six.iteritems(original_assignment)
        if set(replica) != set(new_assignment[t_p])
    ]

    self.log.info(
        "Total number of actions before reduction: %s.",
        len(partition_change_count) + len(leaders_changes),
    )
    reduced_actions = self._extract_actions_unique_topics(
        partition_change_count,
        max_partition_movements,
        cluster_topology,
        max_movement_size,
    )
    if len(reduced_actions) == 0 and force_progress:
        smallest_size = min(
            [cluster_topology.partitions[t_p[0]].size for t_p in partition_change_count]
        )
        self.log.warning(
            '--max-movement-size={max_movement_size} is too small, using smallest size'
            ' in set of partitions to move, {smallest_size} instead to force progress'.format(
                max_movement_size=max_movement_size,
                smallest_size=smallest_size,
            )
        )
        max_movement_size = smallest_size
        # retry the extraction with the relaxed size limit
        reduced_actions = self._extract_actions_unique_topics(
            partition_change_count,
            max_partition_movements,
            cluster_topology,
            max_movement_size,
        )
    reduced_partition_changes = [
        (t_p, new_assignment[t_p]) for t_p in reduced_actions
    ]
    self.log.info(
        "Number of partition changes: %s."
        " Number of leader-only changes: %s",
        len(reduced_partition_changes),
        min(max_leader_only_changes, len(leaders_changes)),
    )
    # leader-only changes are capped by simple truncation
    reduced_assignment = {
        t_p: replicas
        for t_p, replicas in (
            reduced_partition_changes + leaders_changes[:max_leader_only_changes]
        )
    }
    return reduced_assignment
Reduce the assignment based on the total actions .
13,694
def _extract_actions_unique_topics(self, movement_counts, max_movements, cluster_topology, max_movement_size):
    """Select actions up to the movement-count and movement-size limits,
    round-robining across topics so the result has as few duplicate
    topics as possible.

    *movement_counts* is a list of ((topic, partition), replica_change_count)
    tuples; returns the list of selected (topic, partition) keys.
    """
    # group candidate actions by topic
    topic_actions = defaultdict(list)
    for t_p, replica_change_cnt in movement_counts:
        topic_actions[t_p[0]].append((t_p, replica_change_cnt))

    extracted_actions = []
    curr_movements = 0
    curr_size = 0
    action_available = True
    while curr_movements < max_movements and curr_size <= max_movement_size and action_available:
        action_available = False
        for topic, actions in six.iteritems(topic_actions):
            for action in actions:
                action_size = cluster_topology.partitions[action[0]].size
                if curr_movements + action[1] > max_movements or curr_size + action_size > max_movement_size:
                    # action does not fit under the limits; discard it.
                    # NOTE(review): removing while iterating skips the next
                    # element of ``actions`` for this pass; it is only
                    # reconsidered on a later while-iteration -- confirm
                    # this behavior is intended.
                    actions.remove(action)
                else:
                    action_available = True
                    extracted_actions.append(action[0])
                    curr_movements += action[1]
                    curr_size += action_size
                    actions.remove(action)
                    break  # take at most one action per topic per pass
    return extracted_actions
Extract actions limiting to given max value such that the resultant has the minimum possible number of duplicate topics .
13,695
def confirm_execution(self):
    """Interactively ask whether the proposed plan should be executed;
    return True on 'yes', False on 'no'."""
    answer = ''
    while answer.lower() not in ('yes', 'no'):
        answer = input('Execute Proposed Plan? [yes/no] ')
    return answer.lower() == 'yes'
Ask the user to confirm whether the proposed plan should be executed; returns True on 'yes', False on 'no'.
13,696
def write_json_plan(self, proposed_layout, proposed_plan_file):
    """Serialize the proposed layout to *proposed_plan_file* as JSON for
    future usage."""
    with open(proposed_plan_file, 'w') as plan_out:
        plan_out.write(json.dumps(proposed_layout))
Dump proposed json plan to given output file for future usage .
13,697
def swap_leader(self, new_leader):
    """Promote *new_leader* to the head of the replica list and return the
    previous leader. *new_leader* must already be a replica."""
    assert new_leader in self._replicas
    previous = self.leader
    pos = self._replicas.index(new_leader)
    self._replicas[0], self._replicas[pos] = self._replicas[pos], self._replicas[0]
    return previous
Change the preferred leader with one of given replicas .
13,698
def replace(self, source, dest):
    """Swap the first occurrence of *source* for *dest* in the replica
    list; a no-op when *source* is absent."""
    for pos, current in enumerate(self.replicas):
        if current == source:
            self.replicas[pos] = dest
            return
Replace source broker with destination broker in replica set if found .
13,699
def count_siblings(self, partitions):
    """Return how many entries of *partitions* share this partition's topic."""
    return sum(1 for partition in partitions if self.topic == partition.topic)
Count siblings of partition in given partition - list .