idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
13,700
def get_topic_partition_metadata ( hosts ) : kafka_client = KafkaToolClient ( hosts , timeout = 10 ) kafka_client . load_metadata_for_topics ( ) topic_partitions = kafka_client . topic_partitions resp = kafka_client . send_metadata_request ( ) for _ , topic , partitions in resp . topics : for partition_error , partitio...
Returns topic - partition metadata from Kafka broker .
13,701
def get_unavailable_brokers ( zk , partition_metadata ) : topic_data = zk . get_topics ( partition_metadata . topic ) topic = partition_metadata . topic partition = partition_metadata . partition expected_replicas = set ( topic_data [ topic ] [ 'partitions' ] [ str ( partition ) ] [ 'replicas' ] ) available_replicas = ...
Returns the set of unavailable brokers from the difference of replica set of given partition to the set of available replicas .
13,702
def get_current_consumer_offsets ( kafka_client , group , topics , raise_on_error = True , ) : topics = _verify_topics_and_partitions ( kafka_client , topics , raise_on_error ) group_offset_reqs = [ OffsetFetchRequestPayload ( topic , partition ) for topic , partitions in six . iteritems ( topics ) for partition in par...
Get current consumer offsets .
13,703
def get_topics_watermarks ( kafka_client , topics , raise_on_error = True ) : topics = _verify_topics_and_partitions ( kafka_client , topics , raise_on_error , ) highmark_offset_reqs = [ ] lowmark_offset_reqs = [ ] for topic , partitions in six . iteritems ( topics ) : for partition in partitions : highmark_offset_reqs...
Get current topic watermarks .
13,704
def set_consumer_offsets ( kafka_client , group , new_offsets , raise_on_error = True , ) : valid_new_offsets = _verify_commit_offsets_requests ( kafka_client , new_offsets , raise_on_error ) group_offset_reqs = [ OffsetCommitRequestPayload ( topic , partition , offset , metadata = '' , ) for topic , new_partition_offs...
Set consumer offsets to the specified offsets .
13,705
def nullify_offsets ( offsets ) : result = { } for topic , partition_offsets in six . iteritems ( offsets ) : result [ topic ] = _nullify_partition_offsets ( partition_offsets ) return result
Modify offsets metadata so that the partition offsets have null payloads .
13,706
def display_table ( headers , table ) : assert all ( len ( row ) == len ( headers ) for row in table ) str_headers = [ str ( header ) for header in headers ] str_table = [ [ str ( cell ) for cell in row ] for row in table ] column_lengths = [ max ( len ( header ) , * ( len ( row [ i ] ) for row in str_table ) ) for i ,...
Print a formatted table .
13,707
def display_replica_imbalance ( cluster_topologies ) : assert cluster_topologies rg_ids = list ( next ( six . itervalues ( cluster_topologies ) ) . rgs . keys ( ) ) assert all ( set ( rg_ids ) == set ( cluster_topology . rgs . keys ( ) ) for cluster_topology in six . itervalues ( cluster_topologies ) ) rg_imbalances = ...
Display replica replication - group distribution imbalance statistics .
13,708
def display_partition_imbalance ( cluster_topologies ) : broker_ids = list ( next ( six . itervalues ( cluster_topologies ) ) . brokers . keys ( ) ) assert all ( set ( broker_ids ) == set ( cluster_topology . brokers . keys ( ) ) for cluster_topology in six . itervalues ( cluster_topologies ) ) broker_partition_counts ...
Display partition count and weight imbalance statistics .
13,709
def display_leader_imbalance ( cluster_topologies ) : broker_ids = list ( next ( six . itervalues ( cluster_topologies ) ) . brokers . keys ( ) ) assert all ( set ( broker_ids ) == set ( cluster_topology . brokers . keys ( ) ) for cluster_topology in six . itervalues ( cluster_topologies ) ) broker_leader_counts = [ st...
Display leader count and weight imbalance statistics .
13,710
def display_topic_broker_imbalance ( cluster_topologies ) : broker_ids = list ( next ( six . itervalues ( cluster_topologies ) ) . brokers . keys ( ) ) assert all ( set ( broker_ids ) == set ( cluster_topology . brokers . keys ( ) ) for cluster_topology in six . itervalues ( cluster_topologies ) ) topic_names = list ( ...
Display topic broker imbalance statistics .
13,711
def display_movements_stats ( ct , base_assignment ) : movement_count , movement_size , leader_changes = stats . get_partition_movement_stats ( ct , base_assignment ) print ( 'Total partition movements: {movement_count}\n' 'Total partition movement size: {movement_size}\n' 'Total leader changes: {leader_changes}' . for...
Display the amount of movement between two assignments .
13,712
def display_assignment_changes ( plan_details , to_log = True ) : curr_plan_list , new_plan_list , total_changes = plan_details action_cnt = '\n[INFO] Total actions required {0}' . format ( total_changes ) _log_or_display ( to_log , action_cnt ) action_cnt = ( '[INFO] Total actions that will be executed {0}' . format (...
Display current and proposed changes in topic - partition to replica layout over brokers .
13,713
def get_net_imbalance ( count_per_broker ) : net_imbalance = 0 opt_count , extra_allowed = compute_optimum ( len ( count_per_broker ) , sum ( count_per_broker ) ) for count in count_per_broker : extra_cnt , extra_allowed = get_extra_element_count ( count , opt_count , extra_allowed ) net_imbalance += extra_cnt return n...
Calculate and return net imbalance based on given count of partitions or leaders per broker .
13,714
def get_extra_element_count ( curr_count , opt_count , extra_allowed_cnt ) : if curr_count > opt_count : if extra_allowed_cnt > 0 : extra_allowed_cnt -= 1 extra_cnt = curr_count - opt_count - 1 else : extra_cnt = curr_count - opt_count else : extra_cnt = 0 return extra_cnt , extra_allowed_cnt
Evaluate and return extra same element count based on given values .
13,715
def get_replication_group_imbalance_stats ( rgs , partitions ) : tot_rgs = len ( rgs ) extra_replica_cnt_per_rg = defaultdict ( int ) for partition in partitions : opt_replica_cnt , extra_replicas_allowed = compute_optimum ( tot_rgs , partition . replication_factor ) for rg in rgs : replica_cnt_rg = rg . count_replica ...
Calculate extra replica count over each replication - group and net extra - same - replica count .
13,716
def get_topic_imbalance_stats ( brokers , topics ) : extra_partition_cnt_per_broker = defaultdict ( int ) tot_brokers = len ( brokers ) sorted_brokers = sorted ( brokers , key = lambda b : b . id ) for topic in topics : total_partition_replicas = len ( topic . partitions ) * topic . replication_factor opt_partition_cnt...
Return count of topics and partitions on each broker having multiple partitions of same topic .
13,717
def _read_generated_broker_id ( meta_properties_path ) : try : with open ( meta_properties_path , 'r' ) as f : broker_id = _parse_meta_properties_file ( f ) except IOError : raise IOError ( "Cannot open meta.properties file: {path}" . format ( path = meta_properties_path ) , ) except ValueError : raise ValueError ( "Br...
Reads broker_id from the meta . properties file .
13,718
def get_broker_id ( data_path ) : META_FILE_PATH = "{data_path}/meta.properties" if not data_path : raise ValueError ( "You need to specify the data_path if broker_id == -1" ) meta_properties_path = META_FILE_PATH . format ( data_path = data_path ) return _read_generated_broker_id ( meta_properties_path )
This function will look into the data folder to get the automatically created broker_id .
13,719
def merge_offsets_metadata ( topics , * offsets_responses ) : result = dict ( ) for topic in topics : partition_offsets = [ response [ topic ] for response in offsets_responses if topic in response ] result [ topic ] = merge_partition_offsets ( * partition_offsets ) return result
Merge the offset metadata dictionaries from multiple responses .
13,720
def merge_partition_offsets ( * partition_offsets ) : output = dict ( ) for partition_offset in partition_offsets : for partition , offset in six . iteritems ( partition_offset ) : prev_offset = output . get ( partition , 0 ) output [ partition ] = max ( prev_offset , offset ) return output
Merge the partition offsets of a single topic from multiple responses .
13,721
def rebalance_replicas ( self , max_movement_count = None , max_movement_size = None , ) : movement_count = 0 movement_size = 0 for partition in six . itervalues ( self . cluster_topology . partitions ) : count , size = self . _rebalance_partition_replicas ( partition , None if not max_movement_count else max_movement_...
Balance replicas across replication - groups .
13,722
def _rebalance_partition_replicas ( self , partition , max_movement_count = None , max_movement_size = None , ) : total = partition . replication_factor over_replicated_rgs , under_replicated_rgs = separate_groups ( list ( self . cluster_topology . rgs . values ( ) ) , lambda g : g . count_replica ( partition ) , total...
Rebalance replication groups for given partition .
13,723
def _elect_source_replication_group ( self , over_replicated_rgs , partition , ) : return max ( over_replicated_rgs , key = lambda rg : rg . count_replica ( partition ) , )
Decide source replication - group based as group with highest replica count .
13,724
def _elect_dest_replication_group ( self , replica_count_source , under_replicated_rgs , partition , ) : min_replicated_rg = min ( under_replicated_rgs , key = lambda rg : rg . count_replica ( partition ) , ) if min_replicated_rg . count_replica ( partition ) < replica_count_source - 1 : return min_replicated_rg return...
Decide destination replication - group based on replica - count .
13,725
def parse_consumer_offsets ( cls , json_file ) : with open ( json_file , 'r' ) as consumer_offsets_json : try : parsed_offsets = { } parsed_offsets_data = json . load ( consumer_offsets_json ) parsed_offsets [ 'groupid' ] = parsed_offsets_data [ 'groupid' ] parsed_offsets [ 'offsets' ] = { } for topic , topic_data in s...
Parse current offsets from json - file .
13,726
def build_new_offsets ( cls , client , topics_offset_data , topic_partitions , current_offsets ) : new_offsets = defaultdict ( dict ) try : for topic , partitions in six . iteritems ( topic_partitions ) : valid_partitions = set ( ) for topic_partition_offsets in current_offsets [ topic ] : partition = topic_partition_o...
Build complete consumer offsets from parsed current consumer - offsets and lowmarks and highmarks from current - offsets .
13,727
def restore_offsets ( cls , client , parsed_consumer_offsets ) : try : consumer_group = parsed_consumer_offsets [ 'groupid' ] topics_offset_data = parsed_consumer_offsets [ 'offsets' ] topic_partitions = dict ( ( topic , [ partition for partition in offset_data . keys ( ) ] ) for topic , offset_data in six . iteritems ...
Fetch current offsets from kafka validate them against given consumer - offsets data and commit the new offsets .
13,728
def tuple_replace ( tup , * pairs ) : tuple_list = list ( tup ) for index , value in pairs : tuple_list [ index ] = value return tuple ( tuple_list )
Return a copy of a tuple with some elements replaced .
13,729
def tuple_alter ( tup , * pairs ) : tuple_list = list ( tup ) for i , f in pairs : tuple_list [ i ] = f ( tuple_list [ i ] ) return tuple ( tuple_list )
Return a copy of a tuple with some elements altered .
13,730
def tuple_remove ( tup , * items ) : tuple_list = list ( tup ) for item in items : tuple_list . remove ( item ) return tuple ( tuple_list )
Return a copy of a tuple with some items removed .
13,731
def positive_int ( string ) : error_msg = 'Positive integer required, {string} given.' . format ( string = string ) try : value = int ( string ) except ValueError : raise ArgumentTypeError ( error_msg ) if value < 0 : raise ArgumentTypeError ( error_msg ) return value
Convert string to positive integer .
13,732
def positive_nonzero_int ( string ) : error_msg = 'Positive non-zero integer required, {string} given.' . format ( string = string ) try : value = int ( string ) except ValueError : raise ArgumentTypeError ( error_msg ) if value <= 0 : raise ArgumentTypeError ( error_msg ) return value
Convert string to positive integer greater than zero .
13,733
def positive_float ( string ) : error_msg = 'Positive float required, {string} given.' . format ( string = string ) try : value = float ( string ) except ValueError : raise ArgumentTypeError ( error_msg ) if value < 0 : raise ArgumentTypeError ( error_msg ) return value
Convert string to positive float .
13,734
def dict_merge ( set1 , set2 ) : return dict ( list ( set1 . items ( ) ) + list ( set2 . items ( ) ) )
Joins two dictionaries .
13,735
def to_h ( num , suffix = 'B' ) : if num is None : return "None" for unit in [ '' , 'Ki' , 'Mi' , 'Gi' , 'Ti' , 'Pi' , 'Ei' , 'Zi' ] : if abs ( num ) < 1024.0 : return "%3.1f%s%s" % ( num , unit , suffix ) num /= 1024.0 return "%.1f%s%s" % ( num , 'Yi' , suffix )
Converts a byte value into human readable form .
13,736
def format_to_json ( data ) : if sys . stdout . isatty ( ) : return json . dumps ( data , indent = 4 , separators = ( ',' , ': ' ) ) else : return json . dumps ( data )
Converts data into json . If stdout is a tty it performs a pretty print .
13,737
def _build_brokers ( self , brokers ) : for broker_id , metadata in six . iteritems ( brokers ) : self . brokers [ broker_id ] = self . _create_broker ( broker_id , metadata )
Build broker objects using broker - ids .
13,738
def _create_broker ( self , broker_id , metadata = None ) : broker = Broker ( broker_id , metadata ) if not metadata : broker . mark_inactive ( ) rg_id = self . extract_group ( broker ) group = self . rgs . setdefault ( rg_id , ReplicationGroup ( rg_id ) ) group . add_broker ( broker ) broker . replication_group = grou...
Create a broker object and assign to a replication group . A broker object with no metadata is considered inactive . An inactive broker may or may not belong to a group .
13,739
def _build_partitions ( self , assignment ) : self . partitions = { } for partition_name , replica_ids in six . iteritems ( assignment ) : topic_id = partition_name [ 0 ] partition_id = partition_name [ 1 ] topic = self . topics . setdefault ( topic_id , Topic ( topic_id , replication_factor = len ( replica_ids ) ) ) p...
Builds all partition objects and update corresponding broker and topic objects .
13,740
def active_brokers ( self ) : return { broker for broker in six . itervalues ( self . brokers ) if not broker . inactive and not broker . decommissioned }
Set of brokers that are not inactive or decommissioned .
13,741
def replace_broker ( self , source_id , dest_id ) : try : source = self . brokers [ source_id ] dest = self . brokers [ dest_id ] for partition in source . partitions . copy ( ) : source . partitions . remove ( partition ) dest . partitions . add ( partition ) partition . replace ( source , dest ) except KeyError as e ...
Move all partitions in source broker to destination broker .
13,742
def update_cluster_topology ( self , assignment ) : try : for partition_name , replica_ids in six . iteritems ( assignment ) : try : new_replicas = [ self . brokers [ b_id ] for b_id in replica_ids ] except KeyError : self . log . error ( "Invalid replicas %s for topic-partition %s-%s." , ', ' . join ( [ str ( id ) for...
Modify the cluster - topology with given assignment .
13,743
def plan_to_assignment ( plan ) : assignment = { } for elem in plan [ 'partitions' ] : assignment [ ( elem [ 'topic' ] , elem [ 'partition' ] ) ] = elem [ 'replicas' ] return assignment
Convert the plan to the format used by cluster - topology .
13,744
def assignment_to_plan ( assignment ) : return { 'version' : 1 , 'partitions' : [ { 'topic' : t_p [ 0 ] , 'partition' : t_p [ 1 ] , 'replicas' : replica } for t_p , replica in six . iteritems ( assignment ) ] }
Convert an assignment to the format used by Kafka to describe a reassignment plan .
13,745
def validate_plan ( new_plan , base_plan = None , is_partition_subset = True , allow_rf_change = False , ) : if not _validate_plan ( new_plan ) : _log . error ( 'Invalid proposed-plan.' ) return False if base_plan : if not _validate_plan ( base_plan ) : _log . error ( 'Invalid assignment from cluster.' ) return False i...
Verify that the new plan is valid for execution .
13,746
def _validate_plan_base ( new_plan , base_plan , is_partition_subset = True , allow_rf_change = False , ) : new_partitions = set ( [ ( p_data [ 'topic' ] , p_data [ 'partition' ] ) for p_data in new_plan [ 'partitions' ] ] ) base_partitions = set ( [ ( p_data [ 'topic' ] , p_data [ 'partition' ] ) for p_data in base_pl...
Validate if given plan is valid comparing with given base - plan .
13,747
def _validate_format ( plan ) : if set ( plan . keys ( ) ) != set ( [ 'version' , 'partitions' ] ) : _log . error ( 'Invalid or incomplete keys in given plan. Expected: "version", ' '"partitions". Found:{keys}' . format ( keys = ', ' . join ( list ( plan . keys ( ) ) ) ) , ) return False if plan [ 'version' ] != 1 : _l...
Validate if the format of the plan is as expected .
13,748
def _validate_plan ( plan ) : if not _validate_format ( plan ) : return False partition_names = [ ( p_data [ 'topic' ] , p_data [ 'partition' ] ) for p_data in plan [ 'partitions' ] ] duplicate_partitions = [ partition for partition , count in six . iteritems ( Counter ( partition_names ) ) if count > 1 ] if duplicate_...
Validate if given plan is valid based on kafka - cluster - assignment protocols .
13,749
def percentage_distance ( cls , highmark , current ) : highmark = int ( highmark ) current = int ( current ) if highmark > 0 : return round ( ( highmark - current ) * 100.0 / highmark , 2 , ) else : return 0.0
Percentage of distance the current offset is behind the highmark .
13,750
def get_children ( self , path , watch = None ) : _log . debug ( "ZK: Getting children of {path}" . format ( path = path ) , ) return self . zk . get_children ( path , watch )
Returns the children of the specified node .
13,751
def get ( self , path , watch = None ) : _log . debug ( "ZK: Getting {path}" . format ( path = path ) , ) return self . zk . get ( path , watch )
Returns the data of the specified node .
13,752
def set ( self , path , value ) : _log . debug ( "ZK: Setting {path} to {value}" . format ( path = path , value = value ) ) return self . zk . set ( path , value )
Sets and returns new data for the specified node .
13,753
def get_json ( self , path , watch = None ) : data , _ = self . get ( path , watch ) return load_json ( data ) if data else None
Reads the data of the specified node and converts it to json .
13,754
def get_brokers ( self , names_only = False ) : try : broker_ids = self . get_children ( "/brokers/ids" ) except NoNodeError : _log . info ( "cluster is empty." ) return { } if names_only : return { int ( b_id ) : None for b_id in broker_ids } return { int ( b_id ) : self . get_broker_metadata ( b_id ) for b_id in brok...
Get information on all the available brokers .
13,755
def get_topic_config ( self , topic ) : try : config_data = load_json ( self . get ( "/config/topics/{topic}" . format ( topic = topic ) ) [ 0 ] ) except NoNodeError as e : topics = self . get_topics ( topic_name = topic , fetch_partition_state = False ) if len ( topics ) > 0 : _log . info ( "Configuration not availabl...
Get configuration information for specified topic .
13,756
def set_topic_config ( self , topic , value , kafka_version = ( 0 , 10 , ) ) : config_data = dump_json ( value ) try : return_value = self . set ( "/config/topics/{topic}" . format ( topic = topic ) , config_data ) version = kafka_version [ 1 ] assert version in ( 9 , 10 ) , "Feature supported with kafka 9 and kafka 10...
Set configuration information for specified topic .
13,757
def get_topics ( self , topic_name = None , names_only = False , fetch_partition_state = True , ) : try : topic_ids = [ topic_name ] if topic_name else self . get_children ( "/brokers/topics" , ) except NoNodeError : _log . error ( "Cluster is empty." ) return { } if names_only : return topic_ids topics_data = { } for ...
Get information on all the available topics .
13,758
def get_consumer_groups ( self , consumer_group_id = None , names_only = False ) : if consumer_group_id is None : group_ids = self . get_children ( "/consumers" ) else : group_ids = [ consumer_group_id ] if names_only : return { g_id : None for g_id in group_ids } consumer_offsets = { } for g_id in group_ids : consumer...
Get information on all the available consumer - groups .
13,759
def get_group_offsets ( self , group , topic = None ) : group_offsets = { } try : all_topics = self . get_my_subscribed_topics ( group ) except NoNodeError : _log . warning ( "No topics subscribed to consumer-group {group}." . format ( group = group , ) , ) return group_offsets if topic : if topic in all_topics : topic...
Fetch group offsets for the given topic and partition if given , otherwise for all topics and partitions .
13,760
def _fetch_partition_state ( self , topic_id , partition_id ) : state_path = "/brokers/topics/{topic_id}/partitions/{p_id}/state" try : partition_state = self . get ( state_path . format ( topic_id = topic_id , p_id = partition_id ) , ) return partition_state except NoNodeError : return { }
Fetch partition - state for given topic - partition .
13,761
def _fetch_partition_info ( self , topic_id , partition_id ) : info_path = "/brokers/topics/{topic_id}/partitions/{p_id}" try : _ , partition_info = self . get ( info_path . format ( topic_id = topic_id , p_id = partition_id ) , ) return partition_info except NoNodeError : return { }
Fetch partition info for given topic - partition .
13,762
def get_my_subscribed_topics ( self , groupid ) : path = "/consumers/{group_id}/offsets" . format ( group_id = groupid ) return self . get_children ( path )
Get the list of topics that a consumer is subscribed to
13,763
def get_my_subscribed_partitions ( self , groupid , topic ) : path = "/consumers/{group_id}/offsets/{topic}" . format ( group_id = groupid , topic = topic , ) return self . get_children ( path )
Get the list of partitions of a topic that a consumer is subscribed to
13,764
def get_cluster_assignment ( self ) : plan = self . get_cluster_plan ( ) assignment = { } for elem in plan [ 'partitions' ] : assignment [ ( elem [ 'topic' ] , elem [ 'partition' ] ) ] = elem [ 'replicas' ] return assignment
Fetch the cluster layout in form of assignment from zookeeper
13,765
def create ( self , path , value = '' , acl = None , ephemeral = False , sequence = False , makepath = False ) : _log . debug ( "ZK: Creating node " + path ) return self . zk . create ( path , value , acl , ephemeral , sequence , makepath )
Creates a Zookeeper node .
13,766
def delete ( self , path , recursive = False ) : _log . debug ( "ZK: Deleting node " + path ) return self . zk . delete ( path , recursive = recursive )
Deletes a Zookeeper node .
13,767
def execute_plan ( self , plan , allow_rf_change = False ) : reassignment_path = '{admin}/{reassignment_node}' . format ( admin = ADMIN_PATH , reassignment_node = REASSIGNMENT_NODE ) plan_json = dump_json ( plan ) base_plan = self . get_cluster_plan ( ) if not validate_plan ( plan , base_plan , allow_rf_change = allow_...
Submit reassignment plan for execution .
13,768
def get_cluster_plan ( self ) : _log . info ( 'Fetching current cluster-topology from Zookeeper...' ) cluster_layout = self . get_topics ( fetch_partition_state = False ) partitions = [ { 'topic' : topic_id , 'partition' : int ( p_id ) , 'replicas' : partitions_data [ 'replicas' ] } for topic_id , topic_info in six . i...
Fetch cluster plan from zookeeper .
13,769
def get_pending_plan ( self ) : reassignment_path = '{admin}/{reassignment_node}' . format ( admin = ADMIN_PATH , reassignment_node = REASSIGNMENT_NODE ) try : result = self . get ( reassignment_path ) return load_json ( result [ 0 ] ) except NoNodeError : return { }
Read the currently running plan on reassign_partitions node .
13,770
def run_command ( self ) : offline = get_topic_partition_with_error ( self . cluster_config , LEADER_NOT_AVAILABLE_ERROR , ) errcode = status_code . OK if not offline else status_code . CRITICAL out = _prepare_output ( offline , self . args . verbose ) return errcode , out
Checks the number of offline partitions
13,771
def get_kafka_groups ( cls , cluster_config ) : kafka_group_reader = KafkaGroupReader ( cluster_config ) return list ( kafka_group_reader . read_groups ( ) . keys ( ) )
Get the group_id of groups committed into Kafka .
13,772
def report_stdout ( host , stdout ) : lines = stdout . readlines ( ) if lines : print ( "STDOUT from {host}:" . format ( host = host ) ) for line in lines : print ( line . rstrip ( ) , file = sys . stdout )
Take a stdout and print its lines to output if lines are present .
13,773
def report_stderr ( host , stderr ) : lines = stderr . readlines ( ) if lines : print ( "STDERR from {host}:" . format ( host = host ) ) for line in lines : print ( line . rstrip ( ) , file = sys . stderr )
Take a stderr and print its lines to output if lines are present .
13,774
def save_offsets ( cls , consumer_offsets_metadata , topics_dict , json_file , groupid , ) : current_consumer_offsets = defaultdict ( dict ) for topic , topic_offsets in six . iteritems ( consumer_offsets_metadata ) : for partition_offset in topic_offsets : current_consumer_offsets [ topic ] [ partition_offset . partit...
Built offsets for given topic - partitions in required format from current offsets metadata and write to given json - file .
13,775
def write_offsets_to_file ( cls , json_file_name , consumer_offsets_data ) : with open ( json_file_name , "w" ) as json_file : try : json . dump ( consumer_offsets_data , json_file ) except ValueError : print ( "Error: Invalid json data {data}" . format ( data = consumer_offsets_data ) ) raise print ( "Consumer offset ...
Save built consumer - offsets data to given json file .
13,776
def decommission_brokers ( self , broker_ids ) : groups = set ( ) for b_id in broker_ids : try : broker = self . cluster_topology . brokers [ b_id ] except KeyError : self . log . error ( "Invalid broker id %s." , b_id ) raise InvalidBrokerIdError ( "Broker id {} does not exist in cluster" . format ( b_id ) , ) broker ...
Decommission a list of brokers trying to keep the replication group the brokers belong to balanced .
13,777
def _decommission_brokers_in_group ( self , group ) : try : group . rebalance_brokers ( ) except EmptyReplicationGroupError : self . log . warning ( "No active brokers left in replication group %s" , group ) for broker in group . brokers : if broker . decommissioned and not broker . empty ( ) : self . log . info ( "Bro...
Decommission the marked brokers of a group .
13,778
def rebalance_replication_groups ( self ) : if any ( b . inactive for b in six . itervalues ( self . cluster_topology . brokers ) ) : self . log . error ( "Impossible to rebalance replication groups because of inactive " "brokers." ) raise RebalanceError ( "Impossible to rebalance replication groups because of inactive...
Rebalance partitions over replication groups .
13,779
def rebalance_brokers ( self ) : for rg in six . itervalues ( self . cluster_topology . rgs ) : rg . rebalance_brokers ( )
Rebalance partition - count across brokers within each replication - group .
13,780
def revoke_leadership ( self , broker_ids ) : for b_id in broker_ids : try : broker = self . cluster_topology . brokers [ b_id ] except KeyError : self . log . error ( "Invalid broker id %s." , b_id ) raise InvalidBrokerIdError ( "Broker id {} does not exist in cluster" . format ( b_id ) , ) broker . mark_revoked_leade...
Revoke leadership for given brokers .
13,781
def _force_revoke_leadership ( self , broker ) : owned_partitions = list ( filter ( lambda p : broker is p . leader , broker . partitions , ) ) for partition in owned_partitions : if len ( partition . replicas ) == 1 : self . log . error ( "Cannot be revoked leadership for broker {b} for partition {p}. Replica count: 1...
Revoke the leadership of given broker for any remaining partitions .
13,782
def rebalance_leaders ( self ) : opt_leader_cnt = len ( self . cluster_topology . partitions ) // len ( self . cluster_topology . brokers ) self . rebalancing_non_followers ( opt_leader_cnt )
Re - order brokers in replicas such that every broker is assigned as preferred leader evenly .
13,783
def _rebalance_groups_partition_cnt ( self ) : total_elements = sum ( len ( rg . partitions ) for rg in six . itervalues ( self . cluster_topology . rgs ) ) over_loaded_rgs , under_loaded_rgs = separate_groups ( list ( self . cluster_topology . rgs . values ( ) ) , lambda rg : len ( rg . partitions ) , total_elements ,...
Re - balance partition - count across replication - groups .
13,784
def add_replica ( self , partition_name , count = 1 ) : try : partition = self . cluster_topology . partitions [ partition_name ] except KeyError : raise InvalidPartitionError ( "Partition name {name} not found" . format ( name = partition_name ) , ) if partition . replication_factor + count > len ( self . cluster_topo...
Increase the replication - factor for a partition .
13,785
def remove_replica ( self , partition_name , osr_broker_ids , count = 1 ) : try : partition = self . cluster_topology . partitions [ partition_name ] except KeyError : raise InvalidPartitionError ( "Partition name {name} not found" . format ( name = partition_name ) , ) if partition . replication_factor <= count : rais...
Remove one replica of a partition from the cluster .
13,786
def preprocess_topics ( source_groupid , source_topics , dest_groupid , topics_dest_group ) : common_topics = [ topic for topic in topics_dest_group if topic in source_topics ] if common_topics : print ( "Error: Consumer Group ID: {groupid} is already " "subscribed to following topics: {topic}.\nPlease delete this " "t...
Pre - process the topics in source and destination group for duplicates .
13,787
def create_offsets ( zk , consumer_group , offsets ) : for topic , partition_offsets in six . iteritems ( offsets ) : for partition , offset in six . iteritems ( partition_offsets ) : new_path = "/consumers/{groupid}/offsets/{topic}/{partition}" . format ( groupid = consumer_group , topic = topic , partition = partitio...
Create path with offset value for each topic - partition of given consumer group .
13,788
def fetch_offsets ( zk , consumer_group , topics ) : source_offsets = defaultdict ( dict ) for topic , partitions in six . iteritems ( topics ) : for partition in partitions : offset , _ = zk . get ( "/consumers/{groupid}/offsets/{topic}/{partition}" . format ( groupid = consumer_group , topic = topic , partition = par...
Fetch offsets for given topics of given consumer group .
13,789
def get_offset_topic_partition_count ( kafka_config ) : metadata = get_topic_partition_metadata ( kafka_config . broker_list ) if CONSUMER_OFFSET_TOPIC not in metadata : raise UnknownTopic ( "Consumer offset topic is missing." ) return len ( metadata [ CONSUMER_OFFSET_TOPIC ] )
Given a kafka cluster configuration return the number of partitions in the offset topic . It will raise an UnknownTopic exception if the topic cannot be found .
13,790
def get_group_partition ( group , partition_count ) : def java_string_hashcode ( s ) : h = 0 for c in s : h = ( 31 * h + ord ( c ) ) & 0xFFFFFFFF return ( ( h + 0x80000000 ) & 0xFFFFFFFF ) - 0x80000000 return abs ( java_string_hashcode ( group ) ) % partition_count
Given a group name return the partition number of the consumer offset topic containing the data associated to that group .
13,791
def topic_offsets_for_timestamp ( consumer , timestamp , topics ) : tp_timestamps = { } for topic in topics : topic_partitions = consumer_partitions_for_topic ( consumer , topic ) for tp in topic_partitions : tp_timestamps [ tp ] = timestamp return consumer . offsets_for_times ( tp_timestamps )
Given an initialized KafkaConsumer timestamp and list of topics looks up the offsets for the given topics by timestamp . The returned offset for each partition is the earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition .
13,792
def consumer_partitions_for_topic ( consumer , topic ) : topic_partitions = [ ] partitions = consumer . partitions_for_topic ( topic ) if partitions is not None : for partition in partitions : topic_partitions . append ( TopicPartition ( topic , partition ) ) else : logging . error ( "No partitions found for topic {}. ...
Returns a list of all TopicPartitions for a given topic .
13,793
def consumer_commit_for_times ( consumer , partition_to_offset , atomic = False ) : no_offsets = set ( ) for tp , offset in six . iteritems ( partition_to_offset ) : if offset is None : logging . error ( "No offsets found for topic-partition {tp}. Either timestamps not supported" " for the topic {tp}, or no offsets fou...
Commits offsets to Kafka using the given KafkaConsumer and offsets a mapping of TopicPartition to Unix Epoch milliseconds timestamps .
13,794
def get_cluster_config ( cluster_type , cluster_name = None , kafka_topology_base_path = None , ) : if not kafka_topology_base_path : config_dirs = get_conf_dirs ( ) else : config_dirs = [ kafka_topology_base_path ] topology = None for config_dir in config_dirs : try : topology = TopologyConfiguration ( cluster_type , ...
Return the cluster configuration . Use the local cluster if cluster_name is not specified .
13,795
def iter_configurations ( kafka_topology_base_path = None ) : if not kafka_topology_base_path : config_dirs = get_conf_dirs ( ) else : config_dirs = [ kafka_topology_base_path ] types = set ( ) for config_dir in config_dirs : new_types = [ x for x in map ( lambda x : os . path . basename ( x ) [ : - 5 ] , glob . glob (...
Cluster topology iterator . Iterate over all the topologies available in config .
13,796
def load_topology_config ( self ) : config_path = os . path . join ( self . kafka_topology_path , '{id}.yaml' . format ( id = self . cluster_type ) , ) self . log . debug ( "Loading configuration from %s" , config_path ) if os . path . isfile ( config_path ) : topology_config = load_yaml_config ( config_path ) else : r...
Load the topology configuration
13,797
def convert_to_broker_id ( string ) : error_msg = 'Positive integer or -1 required, {string} given.' . format ( string = string ) try : value = int ( string ) except ValueError : raise argparse . ArgumentTypeError ( error_msg ) if value <= 0 and value != - 1 : raise argparse . ArgumentTypeError ( error_msg ) return val...
Convert string to kafka broker_id .
13,798
def run ( ) : args = parse_args ( ) logging . basicConfig ( level = logging . WARN ) logging . getLogger ( 'kafka' ) . setLevel ( logging . CRITICAL ) if args . controller_only and args . first_broker_only : terminate ( status_code . WARNING , prepare_terminate_message ( "Only one of controller_only and first_broker_on...
Verify command - line arguments and run commands
13,799
def exception_logger ( exc_type , exc_value , exc_traceback ) : if not issubclass ( exc_type , KeyboardInterrupt ) : _log . critical ( "Uncaught exception:" , exc_info = ( exc_type , exc_value , exc_traceback ) ) sys . __excepthook__ ( exc_type , exc_value , exc_traceback )
Log unhandled exceptions