idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
13,800
def _find_topics_with_wrong_rp(topics, zk, default_min_isr):
    """Return descriptors for topics whose replication factor is too low.

    A topic is flagged when its replication factor is below min_isr + 1,
    where min_isr is read from ZooKeeper (falling back to default_min_isr).
    """
    flagged = []
    for name, partitions in topics.items():
        isr_floor = get_min_isr(zk, name) or default_min_isr
        # All partitions of a topic share one replication factor, so the
        # first partition is representative.
        rf = len(partitions[0].replicas)
        if rf < isr_floor + 1:
            flagged.append({
                'replication_factor': rf,
                'min_isr': isr_floor,
                'topic': name,
            })
    return flagged
Returns topics with wrong replication factor .
13,801
def run_command(self):
    """Compare each topic's replication factor against its min.isr and
    return an (error-code, output) pair; CRITICAL when any topic is low."""
    metadata = get_topic_partition_metadata(self.cluster_config.broker_list)
    wrong_rf = _find_topics_with_wrong_rp(
        metadata,
        self.zk,
        self.args.default_min_isr,
    )
    if wrong_rf:
        errcode = status_code.CRITICAL
    else:
        errcode = status_code.OK
    out = _prepare_output(wrong_rf, self.args.verbose)
    return errcode, out
The replication factor command checks replication factor settings and compares them with min.isr in the cluster.
13,802
def decommission_brokers(self, broker_ids):
    """Decommission the given brokers.

    Every partition hosted on a decommissioned broker is removed, then the
    replicas are re-added one by one to the remaining active brokers.

    :param broker_ids: ids of the brokers to decommission
    :raises InvalidBrokerIdError: when an id does not exist in the cluster
    :raises BrokerDecommissionError: when there are not enough active
        brokers left to host all replicas
    """
    # NOTE(review): this local shadows the method name; harmless but
    # confusing — candidate for a rename in a behavior-changing pass.
    decommission_brokers = []
    for broker_id in broker_ids:
        try:
            broker = self.cluster_topology.brokers[broker_id]
            broker.mark_decommissioned()
            decommission_brokers.append(broker)
        except KeyError:
            raise InvalidBrokerIdError(
                "No broker found with id {broker_id}".format(broker_id=broker_id)
            )
    # Count how many replicas of each partition are being evicted.
    partitions = defaultdict(int)
    for broker in decommission_brokers:
        # Copy the partition list: remove_partition mutates broker.partitions.
        broker_partitions = list(broker.partitions)
        for partition in broker_partitions:
            broker.remove_partition(partition)
            partitions[partition.name] += 1
    # Rebuild the optimizer state over only the still-active brokers.
    active_brokers = self.cluster_topology.active_brokers
    self.state = _State(self.cluster_topology, brokers=active_brokers)
    # Re-add the evicted replicas, sorted for deterministic output.
    for partition_name in sorted(six.iterkeys(partitions)):
        partition = self.cluster_topology.partitions[partition_name]
        replica_count = partitions[partition_name]
        try:
            self.add_replica(partition_name, replica_count)
        except InvalidReplicationFactorError:
            raise BrokerDecommissionError(
                "Not enough active brokers in the cluster. "
                "Partition {partition} has replication-factor {rf}, "
                "but only {brokers} active brokers remain.".format(
                    partition=partition_name,
                    rf=partition.replication_factor + replica_count,
                    brokers=len(active_brokers)
                )
            )
Decommissioning brokers is done by removing all partitions from the decommissioned brokers and adding them one - by - one back to the cluster .
13,803
def add_replica(self, partition_name, count=1):
    """Add `count` replicas of a partition, one at a time, choosing at each
    step the candidate placement with the highest fitness score.

    :param partition_name: name of the partition to replicate
    :param count: number of replicas to add
    :raises InvalidPartitionError: unknown partition name
    :raises InvalidReplicationFactorError: replication factor would exceed
        the number of active brokers
    """
    try:
        partition = self.cluster_topology.partitions[partition_name]
    except KeyError:
        raise InvalidPartitionError(
            "Partition name {name} not found.".format(name=partition_name),
        )
    active_brokers = self.cluster_topology.active_brokers
    if partition.replication_factor + count > len(active_brokers):
        raise InvalidReplicationFactorError(
            "Cannot increase replication factor from {rf} to {new_rf}."
            " There are only {brokers} active brokers.".format(
                rf=partition.replication_factor,
                new_rf=partition.replication_factor + count,
                brokers=len(active_brokers),
            )
        )
    partition_index = self.state.partition_indices[partition]
    for _ in range(count):
        # Replication groups that can still accept a replica of this
        # partition (fewer replicas than active brokers).
        non_full_rgs = [
            rg for rg in six.itervalues(self.cluster_topology.rgs)
            if rg.count_replica(partition) < len(rg.active_brokers)
        ]
        # Prefer groups below the optimal per-group replica count to keep
        # replicas balanced across replication groups.
        replica_count = sum(
            rg.count_replica(partition) for rg in non_full_rgs
        )
        opt_replicas, _ = compute_optimum(
            len(non_full_rgs),
            replica_count,
        )
        under_replicated_rgs = [
            rg for rg in non_full_rgs
            if rg.count_replica(partition) < opt_replicas
        ] or non_full_rgs
        # Try every eligible broker; also consider making the new replica
        # the leader, and keep the best-scoring resulting state.
        new_states = []
        for rg in under_replicated_rgs:
            for broker in rg.active_brokers:
                if broker not in partition.replicas:
                    broker_index = self.state.brokers.index(broker)
                    new_state = self.state.add_replica(
                        partition_index,
                        broker_index,
                    )
                    new_state_leader = new_state.move_leadership(
                        partition_index,
                        broker_index,
                    )
                    new_states.extend([new_state, new_state_leader])
        self.state = sorted(new_states, key=self._score, reverse=True)[0]
    # Commit the chosen placement back to the cluster topology.
    self.cluster_topology.update_cluster_topology(self.state.pending_assignment)
    self.state.clear_pending_assignment()
Adding a replica is done by trying to add the replica to every broker in the cluster and choosing the resulting state with the highest fitness score .
13,804
def remove_replica(self, partition_name, osr_broker_ids, count=1):
    """Remove `count` replicas of a partition, preferring out-of-sync
    replicas, choosing at each step the best-scoring resulting state.

    :param partition_name: name of the partition to shrink
    :param osr_broker_ids: broker ids whose replicas are out-of-sync
    :param count: number of replicas to remove
    :raises InvalidPartitionError: unknown partition name
    :raises InvalidReplicationFactorError: replication factor would drop
        below 1
    """
    try:
        partition = self.cluster_topology.partitions[partition_name]
    except KeyError:
        raise InvalidPartitionError(
            "Partition name {name} not found.".format(name=partition_name),
        )
    if partition.replication_factor - count < 1:
        raise InvalidReplicationFactorError(
            "Cannot decrease replication factor from {rf} to {new_rf}."
            "Replication factor must be at least 1.".format(
                rf=partition.replication_factor,
                new_rf=partition.replication_factor - count,
            )
        )
    # Brokers currently holding an out-of-sync replica of this partition.
    osr = {
        broker for broker in partition.replicas if broker.id in osr_broker_ids
    }
    state = _State(self.cluster_topology)
    partition_index = state.partitions.index(partition)
    for _ in range(count):
        # Only replication groups that actually hold a replica.
        non_empty_rgs = [
            rg for rg in six.itervalues(self.cluster_topology.rgs)
            if rg.count_replica(partition) > 0
        ]
        # Prefer groups containing an out-of-sync replica.
        rgs_with_osr = [
            rg for rg in non_empty_rgs
            if any(b in osr for b in rg.brokers)
        ]
        candidate_rgs = rgs_with_osr or non_empty_rgs
        # Among those, prefer groups above the optimal replica count so the
        # removal improves cross-group balance.
        replica_count = sum(
            rg.count_replica(partition) for rg in candidate_rgs
        )
        opt_replicas, _ = compute_optimum(
            len(candidate_rgs),
            replica_count,
        )
        over_replicated_rgs = [
            rg for rg in candidate_rgs
            if rg.count_replica(partition) > opt_replicas
        ] or candidate_rgs
        # NOTE(review): redundant — over_replicated_rgs already falls back
        # to candidate_rgs above, so this assignment never changes anything.
        candidate_rgs = over_replicated_rgs or candidate_rgs
        new_states = []
        for rg in candidate_rgs:
            # Within a group, out-of-sync brokers are tried before in-sync.
            osr_brokers = {
                broker for broker in rg.brokers if broker in osr
            }
            candidate_brokers = osr_brokers or rg.brokers
            for broker in candidate_brokers:
                if broker in partition.replicas:
                    broker_index = state.brokers.index(broker)
                    new_states.append(
                        state.remove_replica(partition_index, broker_index)
                    )
        state = sorted(new_states, key=self._score, reverse=True)[0]
        self.cluster_topology.update_cluster_topology(state.assignment)
        # Replicas may have been removed from osr by the topology update.
        osr = {b for b in osr if b in partition.replicas}
Removing a replica is done by trying to remove a replica from every broker and choosing the resulting state with the highest fitness score . Out - of - sync replicas will always be removed before in - sync replicas .
13,805
def _prune ( self , pop_candidates ) : return set ( sorted ( pop_candidates , key = self . _score , reverse = True ) [ : self . args . max_pop ] )
Choose a subset of the candidate states to continue on to the next generation .
13,806
def _score(self, state, score_movement=True):
    """Score a state's balance; higher is better.

    Each balance statistic is normalized to [0, 1] and weighted by the
    corresponding CLI argument; the result is score / max_score so the
    return value is itself in [0, 1].

    :param state: the _State to score
    :param score_movement: include movement-size / leader-change penalties
    """
    score = 0
    max_score = 0
    if state.total_weight:
        # Each coefficient of variation lies in [0, sqrt(n)] for n brokers,
        # so dividing by sqrt(len(brokers)) scales it into [0, 1].
        score += self.args.partition_weight_cv_score_weight * (
            1 - state.broker_weight_cv / sqrt(len(state.brokers))
        )
        score += self.args.leader_weight_cv_score_weight * (
            1 - state.broker_leader_weight_cv / sqrt(len(state.brokers))
        )
        score += self.args.topic_broker_imbalance_score_weight * (
            1 - state.weighted_topic_broker_imbalance
        )
        score += self.args.broker_partition_count_score_weight * (
            1 - state.broker_partition_count_cv / sqrt(len(state.brokers))
        )
        score += self.args.broker_leader_count_score_weight * (
            1 - state.broker_leader_count_cv / sqrt(len(state.brokers))
        )
        max_score += self.args.partition_weight_cv_score_weight
        max_score += self.args.leader_weight_cv_score_weight
        max_score += self.args.topic_broker_imbalance_score_weight
        max_score += self.args.broker_partition_count_score_weight
        max_score += self.args.broker_leader_count_score_weight
    if self.args.max_movement_size is not None and score_movement:
        # max(..., 1) guards against division by zero when the cap is 0.
        max_movement = max(self.args.max_movement_size, 1)
        score += self.args.movement_size_score_weight * (
            1 - state.movement_size / max_movement
        )
        max_score += self.args.movement_size_score_weight
    if self.args.max_leader_changes is not None and score_movement:
        max_leader = max(self.args.max_leader_changes, 1)
        score += self.args.leader_change_score_weight * (
            1 - state.leader_movement_count / max_leader
        )
        max_score += self.args.leader_change_score_weight
    return score / max_score
Score a state based on how balanced it is . A higher score represents a more balanced state .
13,807
def move(self, partition, source, dest):
    """Return a new state with one replica of `partition` moved from broker
    `source` to broker `dest`, with all cached statistics updated
    incrementally (the receiver is left untouched).

    :param partition: partition index
    :param source: index of the broker losing the replica
    :param dest: index of the broker gaining the replica
    """
    new_state = copy(self)
    # Replace source with dest at the same position in the replica tuple.
    source_index = self.replicas[partition].index(source)
    new_state.replicas = tuple_alter(
        self.replicas,
        (partition, lambda replicas: tuple_replace(
            replicas,
            (source_index, dest),
        )),
    )
    new_state.pending_partitions = self.pending_partitions + (partition,)
    # Shift the partition's weight and count between the two brokers.
    partition_weight = self.partition_weights[partition]
    new_state.broker_weights = tuple_alter(
        self.broker_weights,
        (source, lambda broker_weight: broker_weight - partition_weight),
        (dest, lambda broker_weight: broker_weight + partition_weight),
    )
    new_state.broker_partition_counts = tuple_alter(
        self.broker_partition_counts,
        (source, lambda partition_count: partition_count - 1),
        (dest, lambda partition_count: partition_count + 1),
    )
    # Index 0 is the leader replica: moving it also moves leadership.
    if source_index == 0:
        new_state.broker_leader_weights = tuple_alter(
            self.broker_leader_weights,
            (source, lambda lw: lw - partition_weight),
            (dest, lambda lw: lw + partition_weight),
        )
        new_state.broker_leader_counts = tuple_alter(
            self.broker_leader_counts,
            (source, lambda leader_count: leader_count - 1),
            (dest, lambda leader_count: leader_count + 1),
        )
        new_state.leader_movement_count += 1
    # Update per-topic broker counts and the topic imbalance caches.
    topic = self.partition_topic[partition]
    new_state.topic_broker_count = tuple_alter(
        self.topic_broker_count,
        (topic, lambda broker_count: tuple_alter(
            broker_count,
            (source, lambda count: count - 1),
            (dest, lambda count: count + 1),
        )),
    )
    new_state.topic_broker_imbalance = tuple_replace(
        self.topic_broker_imbalance,
        (topic, new_state._calculate_topic_imbalance(topic)),
    )
    # Incremental update: add only the delta for the affected topic.
    new_state._weighted_topic_broker_imbalance = (
        self._weighted_topic_broker_imbalance +
        self.topic_weights[topic] * (
            new_state.topic_broker_imbalance[topic] -
            self.topic_broker_imbalance[topic]
        )
    )
    # Track replica counts per replication group when the move crosses one.
    source_rg = self.broker_rg[source]
    dest_rg = self.broker_rg[dest]
    if source_rg != dest_rg:
        new_state.rg_replicas = tuple_alter(
            self.rg_replicas,
            (source_rg, lambda replica_counts: tuple_alter(
                replica_counts,
                (partition, lambda replica_count: replica_count - 1),
            )),
            (dest_rg, lambda replica_counts: tuple_alter(
                replica_counts,
                (partition, lambda replica_count: replica_count + 1),
            )),
        )
    new_state.movement_size += self.partition_sizes[partition]
    new_state.movement_count += 1
    return new_state
Return a new state that is the result of moving a single partition .
13,808
def move_leadership(self, partition, new_leader):
    """Return a new state with `new_leader` promoted to leader of
    `partition` (replica set unchanged, leader swapped into index 0).

    :param partition: partition index
    :param new_leader: broker index of the replica to promote
    """
    new_state = copy(self)
    # Current leader is the replica at index 0.
    source = new_state.replicas[partition][0]
    new_leader_index = self.replicas[partition].index(new_leader)
    # Swap positions 0 and new_leader_index in the replica tuple.
    new_state.replicas = tuple_alter(
        self.replicas,
        (partition, lambda replicas: tuple_replace(
            replicas,
            (0, replicas[new_leader_index]),
            (new_leader_index, replicas[0]),
        )),
    )
    new_state.pending_partitions = self.pending_partitions + (partition,)
    # Transfer leader count and leader weight from old to new leader.
    new_state.broker_leader_counts = tuple_alter(
        self.broker_leader_counts,
        (source, lambda leader_count: leader_count - 1),
        (new_leader, lambda leader_count: leader_count + 1),
    )
    partition_weight = self.partition_weights[partition]
    new_state.broker_leader_weights = tuple_alter(
        self.broker_leader_weights,
        (source, lambda leader_weight: leader_weight - partition_weight),
        (new_leader, lambda leader_weight: leader_weight + partition_weight),
    )
    new_state.leader_movement_count += 1
    return new_state
Return a new state that is the result of changing the leadership of a single partition .
13,809
def assignment(self):
    """Map every partition name to the ordered broker-id list of its
    replicas (leader first)."""
    result = {}
    for pid, partition in enumerate(self.partitions):
        result[partition.name] = [
            self.brokers[bid].id for bid in self.replicas[pid]
        ]
    return result
Return the partition assignment that this state represents .
13,810
def pending_assignment(self):
    """Assignment restricted to partitions touched by pending moves."""
    pending = set(self.pending_partitions)
    return {
        self.partitions[pid].name: [
            self.brokers[bid].id for bid in self.replicas[pid]
        ]
        for pid in pending
    }
Return the pending partition assignment that this state represents .
13,811
def run_command(self):
    """Count replicas unavailable for communication across all brokers and
    return an (error-code, output) pair."""
    fetch_unavailable_brokers = True
    result = get_topic_partition_with_error(
        self.cluster_config,
        REPLICA_NOT_AVAILABLE_ERROR,
        fetch_unavailable_brokers=fetch_unavailable_brokers,
    )
    if fetch_unavailable_brokers:
        replica_unavailability, unavailable_brokers = result
    else:
        replica_unavailability = result
    errcode = status_code.CRITICAL if replica_unavailability else status_code.OK
    out = _prepare_output(replica_unavailability, unavailable_brokers, self.args.verbose)
    return errcode, out
replica_unavailability command checks number of replicas not available for communication over all brokers in the Kafka cluster .
13,812
def get_min_isr(zk, topic):
    """Return the topic's min.insync.replicas setting as an int, or None
    when the topic config is missing or the setting is unset."""
    ISR_CONF_NAME = 'min.insync.replicas'
    try:
        config = zk.get_topic_config(topic)
    except NoNodeError:
        return None
    if ISR_CONF_NAME not in config['config']:
        return None
    return int(config['config'][ISR_CONF_NAME])
Return the min - isr for topic or None if not specified
13,813
def _process_metadata_response(topics, zk, default_min_isr):
    """Collect descriptors for partitions whose ISR size is below the
    topic's min-isr (topics with no effective min-isr are skipped)."""
    under_replicated = []
    for topic_name, partitions in topics.items():
        min_isr = get_min_isr(zk, topic_name) or default_min_isr
        if min_isr is None:
            continue
        for metadata in partitions.values():
            isr_size = len(metadata.isr)
            if isr_size >= min_isr:
                continue
            under_replicated.append({
                'isr': isr_size,
                'min_isr': min_isr,
                'topic': metadata.topic,
                'partition': metadata.partition,
            })
    return under_replicated
Returns not in sync partitions .
13,814
def remove_partition ( self , partition ) : if partition in self . _partitions : self . _partitions . remove ( partition ) partition . replicas . remove ( self ) else : raise ValueError ( 'Partition: {topic_id}:{partition_id} not found in broker ' '{broker_id}' . format ( topic_id = partition . topic . id , partition_id = partition . partition_id , broker_id = self . _id , ) )
Remove partition from partition list .
13,815
def add_partition ( self , partition ) : assert ( partition not in self . _partitions ) self . _partitions . add ( partition ) partition . add_replica ( self )
Add partition to partition list .
13,816
def move_partition(self, partition, broker_destination):
    """Relocate `partition` from this broker to `broker_destination`,
    keeping the partition's replica list consistent."""
    self.remove_partition(partition)
    broker_destination.add_partition(partition)
Move partition to destination broker and adjust replicas .
13,817
def count_partitions(self, topic):
    """Return how many of `topic`'s partitions are hosted on this broker."""
    return len([p for p in topic.partitions if p in self.partitions])
Return count of partitions for given topic .
13,818
def request_leadership(self, opt_count, skip_brokers, skip_partitions):
    """Try to raise this broker's leader count to opt_count by taking
    leadership of partitions it follows, recursively letting the displaced
    leaders re-balance themselves.

    :param opt_count: target preferred-leader count per broker
    :param skip_brokers: brokers already visited in this recursion (mutated)
    :param skip_partitions: partitions already swapped (mutated)
    """
    # Partitions this broker follows (is a replica of, but not the leader)
    # and that have at least one other replica to take leadership from.
    owned_partitions = list(filter(
        lambda p: self is not p.leader and len(p.replicas) > 1,
        self.partitions,
    ))
    for partition in owned_partitions:
        if partition.leader in skip_brokers or partition in skip_partitions:
            continue
        # Tentatively take leadership; may be reverted below.
        prev_leader = partition.swap_leader(self)
        skip_partitions.append(partition)
        if prev_leader.count_preferred_replica() >= opt_count or \
                prev_leader.revoked_leadership:
            # Previous leader is still satisfied; keep the swap and stop
            # once this broker has reached its own target.
            if self.count_preferred_replica() >= opt_count:
                return
            else:
                continue
        else:
            # Previous leader dropped below target: let it recursively
            # request leadership elsewhere.
            skip_brokers.append(prev_leader)
            prev_leader.request_leadership(opt_count, skip_brokers, skip_partitions)
            if prev_leader.count_preferred_replica() < opt_count:
                # Recursion failed; revert the tentative swap.
                skip_partitions.remove(partition)
                partition.swap_leader(prev_leader)
                continue
            else:
                # Recursion succeeded; commit the swap.
                skip_partitions.append(partition)
                skip_brokers.remove(prev_leader)
                if self.count_preferred_replica() >= opt_count:
                    return
                else:
                    continue
Under - balanced broker requests leadership from current leader on the pretext that it recursively can maintain its leadership count as optimal .
13,819
def donate_leadership(self, opt_count, skip_brokers, used_edges):
    """Try to lower this broker's leader count to at most opt_count + 1 (or
    to 0 when its leadership is revoked) by handing partitions to followers,
    recursively letting overloaded followers donate in turn.

    :param opt_count: target preferred-leader count per broker
    :param skip_brokers: brokers already visited in this recursion (mutated)
    :param used_edges: (partition, new_leader, old_leader) swaps already
        tried, to avoid cycling (mutated)
    """
    # Partitions this broker currently leads that have a follower to take over.
    owned_partitions = list(filter(
        lambda p: self is p.leader and len(p.replicas) > 1,
        self.partitions,
    ))
    for partition in owned_partitions:
        # Followers not already visited in this recursion.
        potential_new_leaders = list(filter(
            lambda f: f not in skip_brokers,
            partition.followers,
        ))
        for follower in potential_new_leaders:
            if (partition, self, follower) in used_edges:
                continue
            # Tentatively hand leadership to the follower.
            partition.swap_leader(follower)
            used_edges.append((partition, follower, self))
            if follower.count_preferred_replica() <= opt_count + 1:
                # Follower can absorb the leadership. Done if this broker is
                # now within bounds (or fully drained when revoked).
                if (self.count_preferred_replica() <= opt_count + 1 and not self.revoked_leadership) or (self.count_preferred_replica() == 0 and self.revoked_leadership):
                    return
                else:
                    break
            else:
                # Follower became overloaded: let it donate recursively.
                skip_brokers.append(follower)
                follower.donate_leadership(opt_count, skip_brokers, used_edges)
                if follower.count_preferred_replica() > opt_count + 1:
                    # Recursion failed; revert the tentative swap.
                    used_edges.append((partition, follower, self))
                    partition.swap_leader(self)
                    continue
                else:
                    # Recursion succeeded; commit the swap.
                    used_edges.append((partition, follower, self))
                    skip_brokers.remove(follower)
                    if (self.count_preferred_replica() <= opt_count + 1 and not self.revoked_leadership) or (self.count_preferred_replica() == 0 and self.revoked_leadership):
                        return
                    else:
                        break
Over-loaded brokers try to donate their leadership to one of their followers recursively until they become balanced.
13,820
def ssh_client(host):
    """Return a connected paramiko SSH client for `host`.

    NOTE(review): AutoAddPolicy accepts unknown host keys without
    verification — acceptable for trusted internal networks only.
    """
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host)
    return client
Start an ssh client .
13,821
def find_files_cmd(data_path, minutes, start_time, end_time):
    """Build the shell command listing log files by modification time.

    Preference order: last N minutes, then [start, end] range, then
    everything since start. Returns None when neither minutes nor
    start_time is given (options are validated by the caller).
    """
    if minutes:
        return FIND_MINUTES_COMMAND.format(
            data_path=data_path,
            minutes=minutes,
        )
    if start_time and end_time:
        return FIND_RANGE_COMMAND.format(
            data_path=data_path,
            start_time=start_time,
            end_time=end_time,
        )
    if start_time:
        return FIND_START_COMMAND.format(
            data_path=data_path,
            start_time=start_time,
        )
Find the log files depending on their modification time .
13,822
def check_corrupted_files_cmd(java_home, files):
    """Build the remote command that dumps `files` with the Kafka dump
    tool and pipes the output through the reducing filter."""
    check_command = CHECK_COMMAND.format(
        ionice=IONICE,
        java_home=java_home,
        files=",".join(files),
    )
    return "{check_command} | {reduce_output}".format(
        check_command=check_command,
        reduce_output=REDUCE_OUTPUT,
    )
Check the file corruption of the specified files .
13,823
def get_output_lines_from_command(host, command):
    """Run `command` on `host` over SSH, report its stderr, and return the
    stdout split into lines."""
    with closing(ssh_client(host)) as ssh:
        _, stdout, stderr = ssh.exec_command(command)
        output = stdout.read().splitlines()
        report_stderr(host, stderr)
    return output
Execute a command on the specified host returning a list of output lines .
13,824
def find_files(data_path, brokers, minutes, start_time, end_time):
    """Find the Kafka log files modified in the requested time window on
    every broker, querying all brokers in parallel.

    :param data_path: path of the Kafka data directory on each broker
    :param brokers: list of (broker_id, host) pairs
    :returns: list of (broker_id, host, files) tuples
    """
    command = find_files_cmd(data_path, minutes, start_time, end_time)
    pool = Pool(len(brokers))
    try:
        result = pool.map(
            partial(get_output_lines_from_command, command=command),
            [host for broker, host in brokers],
        )
    finally:
        # BUG FIX: the pool was previously leaked; release its worker
        # processes deterministically.
        pool.close()
        pool.join()
    return [
        (broker, host, files)
        for (broker, host), files in zip(brokers, result)
    ]
Find all the Kafka log files on the broker that have been modified in the specified time range.
13,825
def parse_output(host, output):
    """Scan dump-tool output line by line, printing corrupt messages as
    ERROR and anything unrecognized as UNEXPECTED OUTPUT."""
    current_file = None
    for line in output.readlines():
        path_match = FILE_PATH_REGEX.search(line)
        if path_match:
            # Remember which file the following messages belong to.
            current_file = path_match.group(1)
            continue
        if INVALID_MESSAGE_REGEX.match(line) or INVALID_BYTES_REGEX.match(line):
            print_line(host, current_file, line, "ERROR")
        elif not VALID_MESSAGE_REGEX.match(line) and \
                not line.startswith('Starting offset:'):
            print_line(host, current_file, line, "UNEXPECTED OUTPUT")
Parse the output of the dump tool and print warnings or error messages accordingly .
13,826
def print_line(host, path, line, line_type):
    """Echo one dump-tool line to stdout, tagged with its severity."""
    header = "{ltype} Host: {host}, File: {path}".format(
        ltype=line_type,
        host=host,
        path=path,
    )
    body = "{ltype} Output: {line}".format(ltype=line_type, line=line)
    print(header)
    print(body)
Print a dump tool line to stdout .
13,827
def check_files_on_host(java_home, host, files, batch_size):
    """Check `files` on `host` for corruption, in sequential batches of
    batch_size, over a single SSH connection.

    :param java_home: JAVA_HOME to use for the remote dump tool
    :param host: hostname to connect to
    :param files: list of file paths to check
    :param batch_size: number of files per dump-tool invocation
    """
    with closing(ssh_client(host)) as ssh:
        for i, batch in enumerate(chunks(files, batch_size)):
            command = check_corrupted_files_cmd(java_home, batch)
            _, stdout, stderr = ssh.exec_command(command)
            report_stderr(host, stderr)
            # BUG FIX: progress was computed with the module-wide
            # DEFAULT_BATCH_SIZE even when a custom batch_size was passed,
            # so the reported file index was wrong.
            print(
                " {host}: file {n_file} of {total}".format(
                    host=host,
                    n_file=(i * batch_size),
                    total=len(files),
                )
            )
            parse_output(host, stdout)
Check the files on the host . Files are grouped together in groups of batch_size files . The dump class will be executed on each batch sequentially .
13,828
def get_partition_leaders(cluster_config):
    """Return a {"topic-partition": leader_broker_id} map for every
    partition in the cluster."""
    client = KafkaClient(cluster_config.broker_list)
    leaders = {}
    for topic, topic_data in six.iteritems(client.topic_partitions):
        for partition, p_data in six.iteritems(topic_data):
            leaders[topic + "-" + str(partition)] = p_data.leader
    return leaders
Return the current leaders of all partitions . Partitions are returned as a topic - partition string .
13,829
def get_tp_from_file(file_path):
    """Extract the topic-partition name from a log file path.

    Exits the process with status 1 when the path does not match the
    expected layout.
    """
    match = TP_FROM_FILE_REGEX.match(file_path)
    if match:
        return match.group(1)
    print("File path is not valid: " + file_path)
    sys.exit(1)
Return the name of the topic - partition given the path to the file .
13,830
def filter_leader_files(cluster_config, broker_files):
    """Keep only files whose broker leads the corresponding partition;
    files for partitions with no known leader are kept as well."""
    print("Filtering leaders")
    leader_of = get_partition_leaders(cluster_config)
    result = []
    for broker, host, files in broker_files:
        filtered = []
        for file_path in files:
            tp = get_tp_from_file(file_path)
            # Unknown partitions default to this broker, i.e. are kept.
            if leader_of.get(tp, broker) == broker:
                filtered.append(file_path)
        result.append((broker, host, filtered))
        print(
            "Broker: {broker}, leader of {l_count} over {f_count} files".format(
                broker=broker,
                l_count=len(filtered),
                f_count=len(files),
            )
        )
    return result
Given a list of broker files filters out all the files that are in the replicas .
13,831
def check_cluster(
    cluster_config,
    data_path,
    java_home,
    check_replicas,
    batch_size,
    minutes,
    start_time,
    end_time,
):
    """Check the integrity of the Kafka log files across a whole cluster.

    Finds the files to inspect on every broker, optionally restricts them
    to partition leaders, then spawns one checker process per broker and
    waits for all of them. Ctrl-C terminates every child and exits 1.
    """
    brokers = get_broker_list(cluster_config)
    broker_files = find_files(data_path, brokers, minutes, start_time, end_time)
    if not check_replicas:
        # Skip replica copies: each file is checked on its leader only.
        broker_files = filter_leader_files(cluster_config, broker_files)
    processes = []
    print("Starting {n} parallel processes".format(n=len(broker_files)))
    try:
        for broker, host, files in broker_files:
            print(
                " Broker: {host}, {n} files to check".format(
                    host=host,
                    n=len(files),
                ),
            )
            p = Process(
                name="dump_process_" + host,
                target=check_files_on_host,
                args=(java_home, host, files, batch_size),
            )
            p.start()
            processes.append(p)
        print("Processes running:")
        for process in processes:
            process.join()
    except KeyboardInterrupt:
        # Best-effort cleanup on Ctrl-C: kill all workers, then exit.
        print("Terminating all processes")
        for process in processes:
            process.terminate()
            process.join()
        print("All processes terminated")
        sys.exit(1)
Check the integrity of the Kafka log files in a cluster .
13,832
def validate_args(args):
    """Validate the CLI options; print an error and return False when any
    option is invalid, True otherwise."""
    if not args.minutes and not args.start_time:
        print("Error: missing --minutes or --start-time")
        return False
    if args.minutes and args.start_time:
        print("Error: --minutes shouldn't be specified if --start-time is used")
        return False
    if args.end_time and not args.start_time:
        print("Error: --end-time can't be used without --start-time")
        return False
    if args.minutes and args.minutes <= 0:
        print("Error: --minutes must be > 0")
        return False
    # Both timestamps share the same expected format.
    for flag, value in (("--start-time", args.start_time),
                        ("--end-time", args.end_time)):
        if value and not TIME_FORMAT_REGEX.match(value):
            print("Error: {0} format is not valid".format(flag))
            print("Example format: '2015-11-26 11:00:00'")
            return False
    if args.batch_size <= 0:
        print("Error: --batch-size must be > 0")
        return False
    return True
Basic option validation. Returns False if the options are not valid, True otherwise.
13,833
def separate_groups(groups, key, total):
    """Split `groups` into (over-loaded, under-loaded) by `key`.

    When `total` does not divide evenly, groups sitting exactly at or just
    above the optimum may need to absorb or shed the remainder, so they are
    added to the respective candidate lists.
    """
    optimum, extra = compute_optimum(len(groups), total)
    over_loaded, under_loaded, optimal = _smart_separate_groups(groups, key, total)
    if not extra:
        return over_loaded, under_loaded
    revised_over = over_loaded + [g for g in optimal if key(g) > optimum]
    revised_under = under_loaded + [g for g in optimal if key(g) == optimum]
    return (
        sorted(revised_over, key=key, reverse=True),
        sorted(revised_under, key=key),
    )
Separate the group into overloaded and under - loaded groups .
13,834
def active_brokers ( self ) : return { broker for broker in self . _brokers if not broker . inactive and not broker . decommissioned }
Return set of brokers that are not inactive or decommissioned .
13,835
def add_broker(self, broker):
    """Add `broker` to this replication group; log a warning and do
    nothing when it is already present."""
    if broker in self._brokers:
        self.log.warning(
            'Broker {broker_id} already present in '
            'replication-group {rg_id}'.format(
                broker_id=broker.id,
                rg_id=self._id,
            )
        )
        return
    self._brokers.add(broker)
Add broker to current broker - list .
13,836
def count_replica ( self , partition ) : return sum ( 1 for b in partition . replicas if b in self . brokers )
Return count of replicas of given partition .
13,837
def acquire_partition(self, partition, source_broker):
    """Move `partition` from `source_broker` into the best eligible broker
    of this replication group.

    Raises NotEligibleGroupError when no broker here can accept it.
    """
    destination = self._elect_dest_broker(partition)
    if not destination:
        raise NotEligibleGroupError(
            "No eligible brokers to accept partition {p}".format(p=partition),
        )
    source_broker.move_partition(partition, destination)
Move a partition from a broker to any of the eligible brokers of the replication group .
13,838
def _select_broker_pair ( self , rg_destination , victim_partition ) : broker_source = self . _elect_source_broker ( victim_partition ) broker_destination = rg_destination . _elect_dest_broker ( victim_partition ) return broker_source , broker_destination
Select best - fit source and destination brokers based on partition count and presence of partition over the broker .
13,839
def _elect_source_broker ( self , victim_partition , broker_subset = None ) : broker_subset = broker_subset or self . _brokers over_loaded_brokers = sorted ( [ broker for broker in broker_subset if victim_partition in broker . partitions and not broker . inactive ] , key = lambda b : len ( b . partitions ) , reverse = True , ) if not over_loaded_brokers : return None broker_topic_partition_cnt = [ ( broker , broker . count_partitions ( victim_partition . topic ) ) for broker in over_loaded_brokers ] max_count_pair = max ( broker_topic_partition_cnt , key = lambda ele : ele [ 1 ] , ) return max_count_pair [ 0 ]
Select first over loaded broker having victim_partition .
13,840
def _elect_dest_broker ( self , victim_partition ) : under_loaded_brokers = sorted ( [ broker for broker in self . _brokers if ( victim_partition not in broker . partitions and not broker . inactive and not broker . decommissioned ) ] , key = lambda b : len ( b . partitions ) ) if not under_loaded_brokers : return None broker_topic_partition_cnt = [ ( broker , broker . count_partitions ( victim_partition . topic ) ) for broker in under_loaded_brokers if victim_partition not in broker . partitions ] min_count_pair = min ( broker_topic_partition_cnt , key = lambda ele : ele [ 1 ] , ) return min_count_pair [ 0 ]
Select first under loaded brokers preferring not having partition of same topic as victim partition .
13,841
def rebalance_brokers(self):
    """Rebalance partition count across the brokers of this replication
    group, also draining decommissioned brokers.

    Repeatedly moves the best-fit partition from an over-loaded broker to
    an under-loaded one until balanced or no valid move remains.

    :raises EmptyReplicationGroupError: when no broker is active
    """
    total_partitions = sum(len(b.partitions) for b in self.brokers)
    # Decommissioned brokers must end up empty, so they are handled as a
    # separate always-over-loaded set.
    blacklist = set(b for b in self.brokers if b.decommissioned)
    active_brokers = self.get_active_brokers() - blacklist
    if not active_brokers:
        raise EmptyReplicationGroupError("No active brokers in %s", self._id)
    over_loaded_brokers, under_loaded_brokers = separate_groups(
        active_brokers,
        lambda b: len(b.partitions),
        total_partitions,
    )
    # Non-empty decommissioned brokers always count as over-loaded.
    over_loaded_brokers += [b for b in blacklist if not b.empty()]
    if not over_loaded_brokers and not under_loaded_brokers:
        self.log.info(
            'Brokers of replication-group: %s already balanced for '
            'partition-count.',
            self._id,
        )
        return
    sibling_distance = self.generate_sibling_distance()
    while under_loaded_brokers and over_loaded_brokers:
        broker_source, broker_destination, victim_partition = self._get_target_brokers(
            over_loaded_brokers,
            under_loaded_brokers,
            sibling_distance,
        )
        if broker_source and broker_destination:
            self.log.debug(
                'Moving partition {p_name} from broker {broker_source} to '
                'broker {broker_destination}'.format(
                    p_name=victim_partition.name,
                    broker_source=broker_source.id,
                    broker_destination=broker_destination.id,
                ),
            )
            broker_source.move_partition(victim_partition, broker_destination)
            # Only distances for the moved topic need refreshing.
            sibling_distance = self.update_sibling_distance(
                sibling_distance,
                broker_destination,
                victim_partition.topic,
            )
        else:
            # No valid move left; stop to avoid looping forever.
            break
        # Re-derive the loaded sets after the move.
        over_loaded_brokers, under_loaded_brokers = separate_groups(
            active_brokers,
            lambda b: len(b.partitions),
            total_partitions,
        )
        over_loaded_brokers += [b for b in blacklist if not b.empty()]
Rebalance partition - count across brokers .
13,842
def _get_target_brokers(self, over_loaded_brokers, under_loaded_brokers, sibling_distance):
    """Pick the (source, destination, partition) triple that best balances
    partition count, preferring moves that minimize topic-sibling distance.

    Returns (None, None, None) when no valid move exists.
    """
    # Most-loaded sources and least-loaded destinations first.
    over_loaded_brokers = sorted(
        over_loaded_brokers,
        key=lambda b: len(b.partitions),
        reverse=True,
    )
    under_loaded_brokers = sorted(
        under_loaded_brokers,
        key=lambda b: len(b.partitions),
    )
    target = (None, None, None)
    min_distance = sys.maxsize
    best_partition = None
    for source in over_loaded_brokers:
        for dest in under_loaded_brokers:
            # A move must strictly improve the pair's balance, unless the
            # source is being drained (decommissioned).
            if (len(source.partitions) - len(dest.partitions) > 1 or
                    source.decommissioned):
                best_partition = source.get_preferred_partition(
                    dest,
                    sibling_distance[dest][source],
                )
                if best_partition is None:
                    continue
                distance = sibling_distance[dest][source][best_partition.topic]
                if distance < min_distance:
                    min_distance = distance
                    target = (source, dest, best_partition)
            else:
                # Destinations are sorted ascending: once one is too close
                # in load, all the following are too.
                break
    return target
Pick best - suitable source - broker destination - broker and partition to balance partition - count over brokers in given replication - group .
13,843
def generate_sibling_distance ( self ) : sibling_distance = defaultdict ( lambda : defaultdict ( dict ) ) topics = { p . topic for p in self . partitions } for source in self . brokers : for dest in self . brokers : if source != dest : for topic in topics : sibling_distance [ dest ] [ source ] [ topic ] = dest . count_partitions ( topic ) - source . count_partitions ( topic ) return sibling_distance
Generate a dict containing the distance computed as difference in in number of partitions of each topic from under_loaded_brokers to over_loaded_brokers .
13,844
def update_sibling_distance(self, sibling_distance, dest, topic):
    """Refresh the distance entries of *topic* for destination broker
    *dest* against every known source broker.

    Returns the (mutated) sibling_distance mapping. Replaces the
    ``six.iterkeys`` Python-2 compat shim with direct dict iteration
    (this codebase is Python-3 only) and hoists the invariant
    destination count out of the loop.
    """
    dest_count = dest.count_partitions(topic)
    for source in sibling_distance[dest]:
        sibling_distance[dest][source][topic] = (
            dest_count - source.count_partitions(topic)
        )
    return sibling_distance
Update the sibling distance for topic and destination broker .
13,845
def move_partition_replica(self, under_loaded_rg, eligible_partition):
    """Move one replica of *eligible_partition* into the under-loaded
    replication-group when an eligible broker pair exists."""
    donor, recipient = self._get_eligible_broker_pair(
        under_loaded_rg,
        eligible_partition,
    )
    if not (donor and recipient):
        return
    self.log.debug(
        'Moving partition {p_name} from broker {source_broker} to '
        'replication-group:broker {rg_dest}:{dest_broker}'.format(
            p_name=eligible_partition.name,
            source_broker=donor.id,
            dest_broker=recipient.id,
            rg_dest=under_loaded_rg.id,
        ),
    )
    donor.move_partition(eligible_partition, recipient)
Move partition to under - loaded replication - group if possible .
13,846
def _get_eligible_broker_pair ( self , under_loaded_rg , eligible_partition ) : under_brokers = list ( filter ( lambda b : eligible_partition not in b . partitions , under_loaded_rg . brokers , ) ) over_brokers = list ( filter ( lambda b : eligible_partition in b . partitions , self . brokers , ) ) source_broker , dest_broker = None , None if over_brokers : source_broker = max ( over_brokers , key = lambda broker : len ( broker . partitions ) , ) if under_brokers : dest_broker = min ( under_brokers , key = lambda broker : len ( broker . partitions ) , ) return ( source_broker , dest_broker )
Evaluate and return source and destination broker - pair from over - loaded and under - loaded replication - group if possible return None otherwise .
13,847
def merge_result(res):
    """Merge all values of *res* (a dict of iterables) into a single
    de-duplicated list.

    :raises ValueError: if *res* is not a dict.
    """
    if not isinstance(res, dict):
        raise ValueError('Value should be of dict type')
    # set.update replaces the hand-rolled nested add() loop; the legacy
    # set([]) literal is also dropped.
    merged = set()
    for values in res.values():
        merged.update(values)
    return list(merged)
Merge all items in res into a list .
13,848
def first_key(res):
    """Return the value of the single entry in *res*.

    :raises ValueError: if *res* is not a dict.
    :raises RedisClusterException: if *res* holds more than one entry.
    """
    if not isinstance(res, dict):
        raise ValueError('Value should be of dict type')
    if len(res) != 1:
        raise RedisClusterException("More then 1 result from command")
    return next(iter(res.values()))
Returns the first result for the given command .
13,849
def clusterdown_wrapper(func):
    """Retry *func* up to three times while the cluster reports
    CLUSTERDOWN, re-raising ClusterDownError if it never recovers."""
    @wraps(func)
    async def inner(*args, **kwargs):
        attempts_left = 3
        while attempts_left > 0:
            try:
                return await func(*args, **kwargs)
            except ClusterDownError:
                # Cluster is rebuilding its slot table; try again.
                attempts_left -= 1
        raise ClusterDownError("CLUSTERDOWN error. Unable to rebuild the cluster")
    return inner
Wrapper for CLUSTERDOWN error handling .
13,850
def parse_debug_object(response):
    """Parse the output of Redis's DEBUG OBJECT command into a dict."""
    # The reply is a space-delimited k:v list whose first field lacks its
    # "type" key; restore it before splitting.
    raw = 'type:' + nativestr(response)
    parsed = dict(pair.split(':') for pair in raw.split())
    # Cast the known numeric fields to int.
    for field in ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle'):
        if field in parsed:
            parsed[field] = int(parsed[field])
    return parsed
Parse the results of Redis s DEBUG OBJECT command into a Python dict
13,851
def parse_info(response):
    """Parse the result of Redis's INFO command into a Python dict."""
    info = {}
    response = nativestr(response)

    def get_value(value):
        # Scalar field: coerce to float/int when possible.
        if ',' not in value or '=' not in value:
            try:
                if '.' in value:
                    return float(value)
                else:
                    return int(value)
            except ValueError:
                return value
        else:
            # Composite field, e.g. "keys=15,expires=3": parse into a
            # sub-dict, recursing for each value.
            sub_dict = {}
            for item in value.split(','):
                k, v = item.rsplit('=', 1)
                sub_dict[k] = get_value(v)
            return sub_dict

    for line in response.splitlines():
        # Skip section headers ("# Server") and blank lines.
        if line and not line.startswith('#'):
            if line.find(':') != -1:
                key, value = line.split(':', 1)
                info[key] = get_value(value)
            else:
                # Preserve unparseable lines for the caller's inspection.
                info.setdefault('__raw__', []).append(line)
    return info
Parse the result of Redis s INFO command into a Python dict
13,852
async def slowlog_get(self, num=None):
    """Return entries from the slowlog; at most *num* newest items when
    *num* is given."""
    pieces = ['SLOWLOG GET']
    if num is not None:
        pieces.append(num)
    return await self.execute_command(*pieces)
Get the entries from the slowlog . If num is specified get the most recent num items .
13,853
def cache(self, name, cache_class=Cache, identity_generator_class=IdentityGenerator, compressor_class=Compressor, serializer_class=Serializer, *args, **kwargs):
    """Return a cache object for app *name* bound to this client.

    Uses the default identity generator, serializer and compressor
    classes unless alternatives are supplied.
    """
    return cache_class(
        self,
        app=name,
        identity_generator_class=identity_generator_class,
        compressor_class=compressor_class,
        serializer_class=serializer_class,
        *args,
        **kwargs
    )
Return a cache object using default identity generator serializer and compressor .
13,854
async def hincrby(self, name, key, amount=1):
    """Increment the value of ``key`` in hash ``name`` by ``amount``."""
    command = ('HINCRBY', name, key, amount)
    return await self.execute_command(*command)
Increment the value of key in hash name by amount
13,855
async def hincrbyfloat(self, name, key, amount=1.0):
    """Increment the value of ``key`` in hash ``name`` by the floating
    point ``amount``."""
    command = ('HINCRBYFLOAT', name, key, amount)
    return await self.execute_command(*command)
Increment the value of key in hash name by floating amount
13,856
async def hset(self, name, key, value):
    """Set ``key`` to ``value`` within hash ``name``.

    Returns 1 if HSET created a new field, otherwise 0.
    """
    command = ('HSET', name, key, value)
    return await self.execute_command(*command)
Set key to value within hash name Returns 1 if HSET created a new field otherwise 0
13,857
async def hsetnx(self, name, key, value):
    """Set ``key`` to ``value`` within hash ``name`` only when ``key``
    does not yet exist.

    Returns 1 if HSETNX created a field, otherwise 0.
    """
    command = ('HSETNX', name, key, value)
    return await self.execute_command(*command)
Set key to value within hash name if key does not exist . Returns 1 if HSETNX created a field otherwise 0 .
13,858
async def hmset(self, name, mapping):
    """Set each key/value pair from the dict *mapping* within hash *name*.

    :raises DataError: if *mapping* is empty.

    Replaces the ``iteritems`` Python-2 compat shim with ``dict.items()``
    (this codebase is Python-3 only).
    """
    if not mapping:
        raise DataError("'hmset' with 'mapping' of length 0")
    # Flatten {k: v, ...} into [k1, v1, k2, v2, ...] for the wire protocol.
    items = [i for pair in mapping.items() for i in pair]
    return await self.execute_command('HMSET', name, *items)
Set key to value within hash name for each corresponding key and value from the mapping dict .
13,859
async def transaction(self, func, *watches, **kwargs):
    """Execute the callable *func* as a transaction while watching all
    keys specified in *watches*.

    *func* should expect a single argument which is a Pipeline object.
    Retries forever on WatchError, optionally sleeping ``watch_delay``
    seconds between attempts. Returns *func*'s return value when
    ``value_from_callable`` is set, otherwise the EXEC result.
    """
    shard_hint = kwargs.pop('shard_hint', None)
    value_from_callable = kwargs.pop('value_from_callable', False)
    watch_delay = kwargs.pop('watch_delay', None)
    async with await self.pipeline(True, shard_hint) as pipe:
        while True:
            try:
                if watches:
                    await pipe.watch(*watches)
                # Caller queues its commands on the pipe.
                func_value = await func(pipe)
                exec_value = await pipe.execute()
                return func_value if value_from_callable else exec_value
            except WatchError:
                # A watched key changed; optionally back off, then retry.
                if watch_delay is not None and watch_delay > 0:
                    await asyncio.sleep(watch_delay, loop=self.connection_pool.loop)
                continue
Convenience method for executing the callable func as a transaction while watching all keys specified in watches . The func callable should expect a single argument which is a Pipeline object .
13,860
async def initialize(self):
    """Initialize the slots cache by asking the startup nodes for the
    current cluster configuration (via CLUSTER SLOTS).

    Populates ``self.slots`` (slot -> [master, slaves...]) and
    ``self.nodes``, raising when no startup node is reachable or when
    reachable nodes disagree on the slot layout.
    """
    nodes_cache = {}
    tmp_slots = {}
    all_slots_covered = False
    disagreements = []
    startup_nodes_reachable = False
    nodes = self.orig_startup_nodes
    # When following the cluster, reinitialize from the most recently
    # discovered node set rather than the original seeds.
    if self.nodemanager_follow_cluster:
        nodes = self.startup_nodes
    for node in nodes:
        try:
            r = self.get_redis_link(host=node['host'], port=node['port'])
            cluster_slots = await r.cluster_slots()
            startup_nodes_reachable = True
        except ConnectionError:
            # Unreachable seed; try the next one.
            continue
        except Exception:
            raise RedisClusterException('ERROR sending "cluster slots" command to redis server: {0}'.format(node))
        all_slots_covered = True
        # A single-node cluster may report an empty host; fill it in
        # from the (only) startup node.
        if len(cluster_slots) == 1 and len(self.startup_nodes) == 1:
            single_node_slots = cluster_slots.get((0, self.RedisClusterHashSlots - 1))[0]
            if len(single_node_slots['host']) == 0:
                single_node_slots['host'] = self.startup_nodes[0]['host']
                single_node_slots['server_type'] = 'master'
        for min_slot, max_slot in cluster_slots:
            nodes = cluster_slots.get((min_slot, max_slot))
            master_node, slave_nodes = nodes[0], nodes[1:]
            if master_node['host'] == '':
                master_node['host'] = node['host']
            self.set_node_name(master_node)
            nodes_cache[master_node['name']] = master_node
            for i in range(min_slot, max_slot + 1):
                if i not in tmp_slots:
                    # First claim on this slot: record master then slaves.
                    tmp_slots[i] = [master_node]
                    for slave_node in slave_nodes:
                        self.set_node_name(slave_node)
                        nodes_cache[slave_node['name']] = slave_node
                        tmp_slots[i].append(slave_node)
                else:
                    # Another seed already claimed this slot; record any
                    # disagreement about ownership.
                    if tmp_slots[i][0]['name'] != node['name']:
                        disagreements.append(
                            '{0} vs {1} on slot: {2}'.format(tmp_slots[i][0]['name'], node['name'], i),
                        )
                        if len(disagreements) > 5:
                            raise RedisClusterException('startup_nodes could not agree on a valid slots cache. {0}'.format(', '.join(disagreements)))
        self.populate_startup_nodes()
        self.refresh_table_asap = False
        if self._skip_full_coverage_check:
            need_full_slots_coverage = False
        else:
            need_full_slots_coverage = await self.cluster_require_full_coverage(nodes_cache)
        # Verify every hash slot is assigned (when coverage is required).
        for i in range(0, self.RedisClusterHashSlots):
            if i not in tmp_slots and need_full_slots_coverage:
                all_slots_covered = False
        if all_slots_covered:
            # A complete, consistent view was built from this seed.
            break
    if not startup_nodes_reachable:
        raise RedisClusterException('Redis Cluster cannot be connected. Please provide at least one reachable node.')
    if not all_slots_covered:
        raise RedisClusterException('Not all slots are covered after query all startup_nodes. {0} of {1} covered...'.format(len(tmp_slots), self.RedisClusterHashSlots))
    # Everything validated: publish the freshly built caches.
    self.slots = tmp_slots
    self.nodes = nodes_cache
    self.reinitialize_counter = 0
Init the slots cache by asking all startup nodes what the current cluster configuration is
13,861
async def cluster_require_full_coverage(self, nodes_cache):
    """Return True when any node has cluster-require-full-coverage=yes.

    When no node requires full coverage the cluster can still respond
    even though some slots are unassigned.
    """
    candidates = nodes_cache or self.nodes

    async def requires_full_coverage(node):
        link = self.get_redis_link(host=node['host'], port=node['port'])
        config = await link.config_get('cluster-require-full-coverage')
        return 'yes' in config.values()

    for node in candidates.values():
        if await requires_full_coverage(node):
            return True
    return False
if exists cluster - require - full - coverage no config on redis servers then even all slots are not covered cluster still will be able to respond
13,862
def set_node(self, host, port, server_type=None):
    """Create or refresh the cached entry for the node at host:port and
    return it."""
    name = "{0}:{1}".format(host, port)
    entry = {
        'host': host,
        'port': port,
        'name': name,
        'server_type': server_type,
    }
    self.nodes[name] = entry
    return entry
Update data for a node .
13,863
def populate_startup_nodes(self):
    """Name every startup node, merge in all currently known nodes and
    drop duplicate entries."""
    for entry in self.startup_nodes:
        self.set_node_name(entry)
    for known in self.nodes.values():
        if known not in self.startup_nodes:
            self.startup_nodes.append(known)
    # De-duplicate by comparing full key/value sets; note this does not
    # preserve the original ordering.
    unique_nodes = {frozenset(n.items()) for n in self.startup_nodes}
    self.startup_nodes = [dict(n) for n in unique_nodes]
Assign a name to every startup node, merge in all other known nodes, and filter out any duplicates.
13,864
def reset(self):
    """Reset the connection pool back to a clean, uninitialized state."""
    self.pid = os.getpid()
    self.initialized = False
    # Connection accounting starts from scratch.
    self._created_connections = 0
    self._created_connections_per_node = {}
    self._available_connections = {}
    self._in_use_connections = {}
    self._check_lock = threading.Lock()
Resets the connection pool back to a clean state .
13,865
def disconnect(self):
    """Close every connection in the pool, both idle and in-use."""
    everything = chain(
        self._available_connections.values(),
        self._in_use_connections.values(),
    )
    for bucket in everything:
        for conn in bucket:
            conn.disconnect()
Nothing that requires any overwrite .
13,866
def get_random_connection(self):
    """Return a connection to a random node, reusing an idle one when
    possible and opening a new one otherwise."""
    if self._available_connections:
        # Try to reuse an idle connection from a randomly picked node.
        picked = random.choice(list(self._available_connections.keys()))
        idle = self._available_connections[picked]
        if idle:
            return idle.pop()
    # Fall back to opening a connection against any reachable startup node.
    for candidate in self.nodes.random_startup_node_iter():
        conn = self.get_connection_by_node(candidate)
        if conn:
            return conn
    raise Exception("Cant reach a single startup node.")
Open new connection to random redis server .
13,867
def get_connection_by_slot(self, slot):
    """Return a connection to the node serving *slot*, falling back to a
    random node when the slot is not (yet) mapped."""
    self._checkpid()
    try:
        owner = self.get_node_by_slot(slot)
        return self.get_connection_by_node(owner)
    except KeyError:
        # Slot table incomplete; any node will redirect us.
        return self.get_random_connection()
Determine what server a specific slot belongs to and return a redis object that is connected
13,868
def get_connection_by_node(self, node):
    """Return a connection to *node*, reusing an idle one when possible."""
    self._checkpid()
    self.nodes.set_node_name(node)
    idle = self._available_connections.get(node["name"], [])
    try:
        connection = idle.pop()
    except IndexError:
        # No idle connection for this node; open a fresh one.
        connection = self.make_connection(node)
    self._in_use_connections.setdefault(node["name"], set()).add(connection)
    return connection
get a connection by node
13,869
def encode(self, value):
    """Normalize *value* to match what will be read off the connection:
    decode bytes when decode_responses is set, otherwise encode str."""
    if self.decode_responses and isinstance(value, bytes):
        return value.decode(self.encoding)
    if not self.decode_responses and isinstance(value, str):
        return value.encode(self.encoding)
    return value
Encode the value so that it s identical to what we ll read off the connection
13,870
async def punsubscribe(self, *args):
    """Unsubscribe from the given patterns, or from all patterns when
    none are supplied."""
    patterns = list_or_args(args[0], args[1:]) if args else args
    return await self.execute_command('PUNSUBSCRIBE', *patterns)
Unsubscribe from the supplied patterns. If empty, unsubscribe from all patterns.
13,871
async def listen(self):
    """Listen for messages on channels this client has been subscribed
    to; returns None when not subscribed."""
    if not self.subscribed:
        return None
    response = await self.parse_response(block=True)
    return self.handle_message(response)
Listen for messages on channels this client has been subscribed to
13,872
async def get_message(self, ignore_subscribe_messages=False, timeout=0):
    """Return the next available message, or None when nothing is
    pending within *timeout*."""
    response = await self.parse_response(block=False, timeout=timeout)
    if not response:
        return None
    return self.handle_message(response, ignore_subscribe_messages)
Get the next message if one is available otherwise None .
13,873
def _gen_identity ( self , key , param = None ) : if self . identity_generator and param is not None : if self . serializer : param = self . serializer . serialize ( param ) if self . compressor : param = self . compressor . compress ( param ) identity = self . identity_generator . generate ( key , param ) else : identity = key return identity
generate identity according to key and param given
13,874
def _pack ( self , content ) : if self . serializer : content = self . serializer . serialize ( content ) if self . compressor : content = self . compressor . compress ( content ) return content
pack the content using serializer and compressor
13,875
def _unpack(self, content):
    """Decompress, then deserialize cached *content*.

    Decompression errors are deliberately swallowed so that entries
    written without compression can still be read.
    """
    if self.compressor:
        try:
            content = self.compressor.decompress(content)
        except CompressError:
            # Entry was stored uncompressed; use it as-is.
            pass
    if self.serializer:
        content = self.serializer.deserialize(content)
    return content
unpack cache using serializer and compressor
13,876
async def delete(self, key, param=None):
    """Delete the cache entry whose identity derives from *key*/*param*."""
    target = self._gen_identity(key, param)
    return await self.client.delete(target)
delete cache corresponding to identity generated from key and param
13,877
async def delete_pattern(self, pattern, count=None):
    """Delete all keys matching *pattern*, scanning *count* keys per
    SCAN batch, and return the number of keys deleted.

    Fix: SCAN may legitimately return an empty batch of keys while the
    cursor is still non-zero; calling DEL with zero keys is a Redis
    error, so empty batches are now skipped.
    """
    cursor = '0'
    deleted = 0
    while cursor != 0:
        cursor, identities = await self.client.scan(
            cursor=cursor, match=pattern, count=count
        )
        if identities:
            deleted += await self.client.delete(*identities)
    return deleted
delete cache according to pattern in redis delete count keys each time
13,878
async def exist(self, key, param=None):
    """Return whether a cache entry exists for *key*/*param*."""
    return await self.client.exists(self._gen_identity(key, param))
see if specific identity exists
13,879
async def ttl(self, key, param=None):
    """Return the time-to-live of the entry identified by *key*/*param*."""
    return await self.client.ttl(self._gen_identity(key, param))
get time to live of a specific identity
13,880
async def set(self, key, value, param=None, expire_time=None, herd_timeout=None):
    """Cache *value* under the identity derived from *key*/*param*.

    The stored payload embeds an expected-expiry timestamp equal to
    now + expire_time (when given) + herd timeout, while the Redis key
    itself expires after *expire_time* seconds.
    """
    identity = self._gen_identity(key, param)
    expected_expired_ts = int(time.time())
    if expire_time:
        expected_expired_ts += expire_time
    # The herd window extends the embedded timestamp past the key TTL.
    expected_expired_ts += herd_timeout or self.default_herd_timeout
    payload = self._pack([value, expected_expired_ts])
    return await self.client.set(identity, payload, ex=expire_time)
Use key and param to generate identity and pack the content expire the key within real_timeout if expire_time is given . real_timeout is equal to the sum of expire_time and herd_time . The content is cached with expire_time .
13,881
async def xrange(self, name: str, start='-', end='+', count=None) -> list:
    """Read stream entries of *name* within [start, end], optionally
    limited to *count* items.

    :raises RedisError: when *count* is not a positive integer.
    """
    pieces = [start, end]
    if count is not None:
        if not isinstance(count, int) or count < 1:
            raise RedisError("XRANGE count must be a positive integer")
        pieces += ["COUNT", str(count)]
    return await self.execute_command('XRANGE', name, *pieces)
Read stream values within an interval .
13,882
async def ltrim(self, name, start, end):
    """Trim list ``name``, removing all values outside the slice between
    ``start`` and ``end``."""
    command = ('LTRIM', name, start, end)
    return await self.execute_command(*command)
Trim the list name removing all values not within the slice between start and end
13,883
def block_pipeline_command(func):
    """Wrap *func* so that calling it raises: some commands must not be
    pipelined when running redis in cluster mode."""
    def inner(*args, **kwargs):
        raise RedisClusterException(
            "ERROR: Calling pipelined function {0} is blocked when running redis in cluster mode...".format(func.__name__)
        )
    return inner
Raises an error because some pipelined commands must be blocked when running redis in cluster-mode.
13,884
async def immediate_execute_command(self, *args, **options):
    """Execute a command immediately, but don't auto-retry on a
    ConnectionError if we're already WATCHing a variable.

    Used when issuing WATCH or subsequent commands retrieving their
    values but before MULTI is called.
    """
    command_name = args[0]
    conn = self.connection
    # Lazily acquire a connection on first use.
    if not conn:
        conn = self.connection_pool.get_connection()
        self.connection = conn
    try:
        await conn.send_command(*args)
        return await self.parse_response(conn, command_name, **options)
    except (ConnectionError, TimeoutError) as e:
        conn.disconnect()
        # Timeouts are only retried when the connection allows it.
        if not conn.retry_on_timeout and isinstance(e, TimeoutError):
            raise
        try:
            # Retry once, but never while WATCHing: the watch state was
            # lost with the connection, so retrying would be unsafe.
            if not self.watching:
                await conn.send_command(*args)
                return await self.parse_response(conn, command_name, **options)
        except ConnectionError:
            # The retry failed: give up, drop state and re-raise.
            conn.disconnect()
            await self.reset()
            raise
Execute a command immediately but don t auto - retry on a ConnectionError if we re already WATCHing a variable . Used when issuing WATCH or subsequent commands retrieving their values but before MULTI is called .
13,885
def _determine_slot ( self , * args ) : if len ( args ) <= 1 : raise RedisClusterException ( "No way to dispatch this command to Redis Cluster. Missing key." ) command = args [ 0 ] if command in [ 'EVAL' , 'EVALSHA' ] : numkeys = args [ 2 ] keys = args [ 3 : 3 + numkeys ] slots = { self . connection_pool . nodes . keyslot ( key ) for key in keys } if len ( slots ) != 1 : raise RedisClusterException ( "{0} - all keys must map to the same key slot" . format ( command ) ) return slots . pop ( ) key = args [ 1 ] return self . connection_pool . nodes . keyslot ( key )
figure out what slot based on command and args
13,886
def reset(self):
    """Reset this pipeline back to an empty state."""
    self.command_stack = []
    self.scripts = set()
    # Drop all transaction/WATCH bookkeeping.
    self.watching = False
    self.watches = []
    self.explicit_transaction = False
Reset back to empty pipeline .
13,887
async def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=True):
    """Send the buffered pipeline commands to the redis cluster.

    Commands are grouped per owning node, written and read in bulk, and
    any command that failed with a retryable cluster error is re-issued
    one-by-one (following redirections) before results are returned in
    the original submission order.
    """
    # Commands keep their submission position so results can be re-ordered.
    attempt = sorted(stack, key=lambda x: x.position)
    # Group commands by the node that owns their slot.
    nodes = {}
    for c in attempt:
        slot = self._determine_slot(*c.args)
        node = self.connection_pool.get_node_by_slot(slot)
        self.connection_pool.nodes.set_node_name(node)
        node_name = node['name']
        if node_name not in nodes:
            nodes[node_name] = NodeCommands(self.parse_response, self.connection_pool.get_connection_by_node(node))
        nodes[node_name].append(c)
    # Write everything out first, then read all replies.
    node_commands = nodes.values()
    for n in node_commands:
        await n.write()
    for n in node_commands:
        await n.read()
    # Return connections to the pool before the (slow) retry phase.
    for n in nodes.values():
        self.connection_pool.release(n.connection)
    # Collect, in order, every command that failed with a retryable error.
    attempt = sorted([c for c in attempt if isinstance(c.result, ERRORS_ALLOW_RETRY)], key=lambda x: x.position)
    if attempt and allow_redirections:
        await self.connection_pool.nodes.increment_reinitialize_counter(len(attempt))
        for c in attempt:
            try:
                # Single-command path follows MOVED/ASK redirections.
                c.result = await super(StrictClusterPipeline, self).execute_command(*c.args, **c.options)
            except RedisError as e:
                c.result = e
    # Results come back in the caller's original order.
    response = [c.result for c in sorted(stack, key=lambda x: x.position)]
    if raise_on_error:
        self.raise_first_error(stack)
    return response
Send a bunch of cluster commands to the redis cluster .
13,888
async def _watch(self, node, conn, names):
    """Watch the values at keys ``names`` on the given node/connection.

    All keys must hash to the node already associated with this
    transaction; WATCH is rejected once MULTI has been issued.
    """
    for name in names:
        slot = self._determine_slot('WATCH', name)
        dist_node = self.connection_pool.get_node_by_slot(slot)
        if node.get('name') != dist_node['name']:
            # A cluster transaction can only span keys on a single node.
            if len(node) > 0:
                raise ClusterTransactionError("Keys in request don't hash to the same node")
    if self.explicit_transaction:
        raise RedisError('Cannot issue a WATCH after a MULTI')
    await conn.send_command('WATCH', *names)
    return await conn.read_response()
Watches the values at keys names
13,889
async def _unwatch ( self , conn ) : "Unwatches all previously specified keys" await conn . send_command ( 'UNWATCH' ) res = await conn . read_response ( ) return self . watching and res or True
Unwatches all previously specified keys
13,890
async def write(self):
    """Flush all buffered commands over this node's connection.

    Borrowed from StrictRedis so that connection errors are recorded on
    every queued command instead of raising.
    """
    connection = self.connection
    commands = self.commands
    # Each command starts with no result; errors are stored per-command.
    for command in commands:
        command.result = None
    try:
        packed = connection.pack_commands([c.args for c in commands])
        await connection.send_packed_command(packed)
    except (ConnectionError, TimeoutError) as e:
        for command in commands:
            command.result = e
Code borrowed from StrictRedis so it can be fixed
13,891
def set(self, type, offset, value):
    """Queue a SET subcommand for this bit field; returns self so calls
    can be chained."""
    self._command_stack += ['SET', type, offset, value]
    return self
Set the specified bit field and returns its old value .
13,892
def get(self, type, offset):
    """Queue a GET subcommand for this bit field; returns self so calls
    can be chained."""
    self._command_stack += ['GET', type, offset]
    return self
Returns the specified bit field .
13,893
async def zrange(self, name, start, end, desc=False, withscores=False, score_cast_func=float):
    """Return a range of values from sorted set *name* between *start*
    and *end*, ascending unless *desc* is set."""
    if desc:
        return await self.zrevrange(name, start, end, withscores, score_cast_func)
    options = {'withscores': withscores, 'score_cast_func': score_cast_func}
    command = ['ZRANGE', name, start, end]
    if withscores:
        command.append(b('WITHSCORES'))
    return await self.execute_command(*command, **options)
Return a range of values from sorted set name between start and end sorted in ascending order .
13,894
async def zremrangebyscore(self, name, min, max):
    """Remove all elements in sorted set ``name`` with scores between
    ``min`` and ``max``; returns the number of elements removed."""
    command = ('ZREMRANGEBYSCORE', name, min, max)
    return await self.execute_command(*command)
Remove all elements in the sorted set name with scores between min and max . Returns the number of elements removed .
13,895
async def expire(self, name, time):
    """Set an expiry of *time* seconds (int or datetime.timedelta) on
    key *name*."""
    if isinstance(time, datetime.timedelta):
        # Whole seconds only; sub-second precision is dropped.
        time = time.days * 86400 + time.seconds
    return await self.execute_command('EXPIRE', name, time)
Set an expire flag on key name for time seconds . time can be represented by an integer or a Python timedelta object .
13,896
async def delete(self, *names):
    """Delete the given keys one at a time (cluster-safe, since the keys
    may hash to different slots) and return the total removed."""
    total = 0
    for key in names:
        total += await self.execute_command('DEL', key)
    return total
Delete one or more keys specified by names
13,897
async def geoadd(self, name, *values):
    """Add geospatial items to key *name*; *values* is a flat sequence
    of (longitude, latitude, member) triples.

    :raises RedisError: when *values* is not a whole number of triples.
    """
    if len(values) % 3:
        raise RedisError("GEOADD requires places with lon, lat and name values")
    return await self.execute_command('GEOADD', name, *values)
Add the specified geospatial items to the specified key identified by the name argument . The Geospatial items are given as ordered members of the values argument each item or place is formed by the triad latitude longitude and name .
13,898
async def georadius(self, name, longitude, latitude, radius, unit=None,
                    withdist=False, withcoord=False, withhash=False,
                    count=None, sort=None, store=None, store_dist=None):
    """Return members of *name* within *radius* of the given
    longitude/latitude point; all options are forwarded verbatim."""
    return await self._georadiusgeneric(
        'GEORADIUS', name, longitude, latitude, radius,
        unit=unit, withdist=withdist, withcoord=withcoord,
        withhash=withhash, count=count, sort=sort,
        store=store, store_dist=store_dist,
    )
Return the members of the specified key identified by the name argument which are within the borders of the area specified with the latitude and longitude location and the maximum distance from the center specified by the radius value .
13,899
async def georadiusbymember(self, name, member, radius, unit=None,
                            withdist=False, withcoord=False, withhash=False,
                            count=None, sort=None, store=None, store_dist=None):
    """Like georadius, but centers the query on an existing *member* of
    the geospatial index instead of an explicit lon/lat point."""
    return await self._georadiusgeneric(
        'GEORADIUSBYMEMBER', name, member, radius,
        unit=unit, withdist=withdist, withcoord=withcoord,
        withhash=withhash, count=count, sort=sort,
        store=store, store_dist=store_dist,
    )
This command is exactly like georadius with the sole difference that instead of taking as the center of the area to query a longitude and latitude value it takes the name of a member already existing inside the geospatial index represented by the sorted set .