idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
13,800 | def _find_topics_with_wrong_rp ( topics , zk , default_min_isr ) : topics_with_wrong_rf = [ ] for topic_name , partitions in topics . items ( ) : min_isr = get_min_isr ( zk , topic_name ) or default_min_isr replication_factor = len ( partitions [ 0 ] . replicas ) if replication_factor >= min_isr + 1 : continue topics_w... | Returns topics with wrong replication factor . |
13,801 | def run_command ( self ) : topics = get_topic_partition_metadata ( self . cluster_config . broker_list ) topics_with_wrong_rf = _find_topics_with_wrong_rp ( topics , self . zk , self . args . default_min_isr , ) errcode = status_code . OK if not topics_with_wrong_rf else status_code . CRITICAL out = _prepare_output ( t... | Replication factor command checks replication factor settings and compare it with min . isr in the cluster . |
13,802 | def decommission_brokers ( self , broker_ids ) : decommission_brokers = [ ] for broker_id in broker_ids : try : broker = self . cluster_topology . brokers [ broker_id ] broker . mark_decommissioned ( ) decommission_brokers . append ( broker ) except KeyError : raise InvalidBrokerIdError ( "No broker found with id {brok... | Decommissioning brokers is done by removing all partitions from the decommissioned brokers and adding them one - by - one back to the cluster . |
13,803 | def add_replica ( self , partition_name , count = 1 ) : try : partition = self . cluster_topology . partitions [ partition_name ] except KeyError : raise InvalidPartitionError ( "Partition name {name} not found." . format ( name = partition_name ) , ) active_brokers = self . cluster_topology . active_brokers if partiti... | Adding a replica is done by trying to add the replica to every broker in the cluster and choosing the resulting state with the highest fitness score . |
13,804 | def remove_replica ( self , partition_name , osr_broker_ids , count = 1 ) : try : partition = self . cluster_topology . partitions [ partition_name ] except KeyError : raise InvalidPartitionError ( "Partition name {name} not found." . format ( name = partition_name ) , ) if partition . replication_factor - count < 1 : ... | Removing a replica is done by trying to remove a replica from every broker and choosing the resulting state with the highest fitness score . Out - of - sync replicas will always be removed before in - sync replicas . |
13,805 | def _prune ( self , pop_candidates ) : return set ( sorted ( pop_candidates , key = self . _score , reverse = True ) [ : self . args . max_pop ] ) | Choose a subset of the candidate states to continue on to the next generation . |
13,806 | def _score ( self , state , score_movement = True ) : score = 0 max_score = 0 if state . total_weight : score += self . args . partition_weight_cv_score_weight * ( 1 - state . broker_weight_cv / sqrt ( len ( state . brokers ) ) ) score += self . args . leader_weight_cv_score_weight * ( 1 - state . broker_leader_weight_... | Score a state based on how balanced it is . A higher score represents a more balanced state . |
13,807 | def move ( self , partition , source , dest ) : new_state = copy ( self ) source_index = self . replicas [ partition ] . index ( source ) new_state . replicas = tuple_alter ( self . replicas , ( partition , lambda replicas : tuple_replace ( replicas , ( source_index , dest ) , ) ) , ) new_state . pending_partitions = s... | Return a new state that is the result of moving a single partition . |
13,808 | def move_leadership ( self , partition , new_leader ) : new_state = copy ( self ) source = new_state . replicas [ partition ] [ 0 ] new_leader_index = self . replicas [ partition ] . index ( new_leader ) new_state . replicas = tuple_alter ( self . replicas , ( partition , lambda replicas : tuple_replace ( replicas , ( ... | Return a new state that is the result of changing the leadership of a single partition . |
13,809 | def assignment ( self ) : return { partition . name : [ self . brokers [ bid ] . id for bid in self . replicas [ pid ] ] for pid , partition in enumerate ( self . partitions ) } | Return the partition assignment that this state represents . |
13,810 | def pending_assignment ( self ) : return { self . partitions [ pid ] . name : [ self . brokers [ bid ] . id for bid in self . replicas [ pid ] ] for pid in set ( self . pending_partitions ) } | Return the pending partition assignment that this state represents . |
13,811 | def run_command ( self ) : fetch_unavailable_brokers = True result = get_topic_partition_with_error ( self . cluster_config , REPLICA_NOT_AVAILABLE_ERROR , fetch_unavailable_brokers = fetch_unavailable_brokers , ) if fetch_unavailable_brokers : replica_unavailability , unavailable_brokers = result else : replica_unavai... | replica_unavailability command checks number of replicas not available for communication over all brokers in the Kafka cluster . |
13,812 | def get_min_isr ( zk , topic ) : ISR_CONF_NAME = 'min.insync.replicas' try : config = zk . get_topic_config ( topic ) except NoNodeError : return None if ISR_CONF_NAME in config [ 'config' ] : return int ( config [ 'config' ] [ ISR_CONF_NAME ] ) else : return None | Return the min - isr for topic or None if not specified |
13,813 | def _process_metadata_response ( topics , zk , default_min_isr ) : not_in_sync_partitions = [ ] for topic_name , partitions in topics . items ( ) : min_isr = get_min_isr ( zk , topic_name ) or default_min_isr if min_isr is None : continue for metadata in partitions . values ( ) : cur_isr = len ( metadata . isr ) if cur... | Returns not in sync partitions . |
13,814 | def remove_partition ( self , partition ) : if partition in self . _partitions : self . _partitions . remove ( partition ) partition . replicas . remove ( self ) else : raise ValueError ( 'Partition: {topic_id}:{partition_id} not found in broker ' '{broker_id}' . format ( topic_id = partition . topic . id , partition_i... | Remove partition from partition list . |
13,815 | def add_partition ( self , partition ) : assert ( partition not in self . _partitions ) self . _partitions . add ( partition ) partition . add_replica ( self ) | Add partition to partition list . |
13,816 | def move_partition ( self , partition , broker_destination ) : self . remove_partition ( partition ) broker_destination . add_partition ( partition ) | Move partition to destination broker and adjust replicas . |
13,817 | def count_partitions ( self , topic ) : return sum ( 1 for p in topic . partitions if p in self . partitions ) | Return count of partitions for given topic . |
13,818 | def request_leadership ( self , opt_count , skip_brokers , skip_partitions ) : owned_partitions = list ( filter ( lambda p : self is not p . leader and len ( p . replicas ) > 1 , self . partitions , ) ) for partition in owned_partitions : if partition . leader in skip_brokers or partition in skip_partitions : continue ... | Under - balanced broker requests leadership from current leader on the pretext that it recursively can maintain its leadership count as optimal . |
13,819 | def donate_leadership ( self , opt_count , skip_brokers , used_edges ) : owned_partitions = list ( filter ( lambda p : self is p . leader and len ( p . replicas ) > 1 , self . partitions , ) ) for partition in owned_partitions : potential_new_leaders = list ( filter ( lambda f : f not in skip_brokers , partition . foll... | Over - loaded brokers tries to donate their leadership to one of their followers recursively until they become balanced . |
13,820 | def ssh_client ( host ) : ssh = paramiko . SSHClient ( ) ssh . set_missing_host_key_policy ( paramiko . AutoAddPolicy ( ) ) ssh . connect ( host ) return ssh | Start an ssh client . |
13,821 | def find_files_cmd ( data_path , minutes , start_time , end_time ) : if minutes : return FIND_MINUTES_COMMAND . format ( data_path = data_path , minutes = minutes , ) if start_time : if end_time : return FIND_RANGE_COMMAND . format ( data_path = data_path , start_time = start_time , end_time = end_time , ) else : retur... | Find the log files depending on their modification time . |
13,822 | def check_corrupted_files_cmd ( java_home , files ) : files_str = "," . join ( files ) check_command = CHECK_COMMAND . format ( ionice = IONICE , java_home = java_home , files = files_str , ) command = "{check_command} | {reduce_output}" . format ( check_command = check_command , reduce_output = REDUCE_OUTPUT , ) retur... | Check the file corruption of the specified files . |
13,823 | def get_output_lines_from_command ( host , command ) : with closing ( ssh_client ( host ) ) as ssh : _ , stdout , stderr = ssh . exec_command ( command ) lines = stdout . read ( ) . splitlines ( ) report_stderr ( host , stderr ) return lines | Execute a command on the specified host returning a list of output lines . |
13,824 | def find_files ( data_path , brokers , minutes , start_time , end_time ) : command = find_files_cmd ( data_path , minutes , start_time , end_time ) pool = Pool ( len ( brokers ) ) result = pool . map ( partial ( get_output_lines_from_command , command = command ) , [ host for broker , host in brokers ] ) return [ ( bro... | Find all the Kafka log files on the broker that have been modified in the specified time range . |
13,825 | def parse_output ( host , output ) : current_file = None for line in output . readlines ( ) : file_name_search = FILE_PATH_REGEX . search ( line ) if file_name_search : current_file = file_name_search . group ( 1 ) continue if INVALID_MESSAGE_REGEX . match ( line ) or INVALID_BYTES_REGEX . match ( line ) : print_line (... | Parse the output of the dump tool and print warnings or error messages accordingly . |
13,826 | def print_line ( host , path , line , line_type ) : print ( "{ltype} Host: {host}, File: {path}" . format ( ltype = line_type , host = host , path = path , ) ) print ( "{ltype} Output: {line}" . format ( ltype = line_type , line = line ) ) | Print a dump tool line to stdout . |
13,827 | def check_files_on_host ( java_home , host , files , batch_size ) : with closing ( ssh_client ( host ) ) as ssh : for i , batch in enumerate ( chunks ( files , batch_size ) ) : command = check_corrupted_files_cmd ( java_home , batch ) _ , stdout , stderr = ssh . exec_command ( command ) report_stderr ( host , stderr ) ... | Check the files on the host . Files are grouped together in groups of batch_size files . The dump class will be executed on each batch sequentially . |
13,828 | def get_partition_leaders ( cluster_config ) : client = KafkaClient ( cluster_config . broker_list ) result = { } for topic , topic_data in six . iteritems ( client . topic_partitions ) : for partition , p_data in six . iteritems ( topic_data ) : topic_partition = topic + "-" + str ( partition ) result [ topic_partitio... | Return the current leaders of all partitions . Partitions are returned as a topic - partition string . |
13,829 | def get_tp_from_file ( file_path ) : match = TP_FROM_FILE_REGEX . match ( file_path ) if not match : print ( "File path is not valid: " + file_path ) sys . exit ( 1 ) return match . group ( 1 ) | Return the name of the topic - partition given the path to the file . |
13,830 | def filter_leader_files ( cluster_config , broker_files ) : print ( "Filtering leaders" ) leader_of = get_partition_leaders ( cluster_config ) result = [ ] for broker , host , files in broker_files : filtered = [ ] for file_path in files : tp = get_tp_from_file ( file_path ) if tp not in leader_of or leader_of [ tp ] =... | Given a list of broker files filters out all the files that are in the replicas . |
13,831 | def check_cluster ( cluster_config , data_path , java_home , check_replicas , batch_size , minutes , start_time , end_time , ) : brokers = get_broker_list ( cluster_config ) broker_files = find_files ( data_path , brokers , minutes , start_time , end_time ) if not check_replicas : broker_files = filter_leader_files ( c... | Check the integrity of the Kafka log files in a cluster . |
13,832 | def validate_args ( args ) : if not args . minutes and not args . start_time : print ( "Error: missing --minutes or --start-time" ) return False if args . minutes and args . start_time : print ( "Error: --minutes shouldn't be specified if --start-time is used" ) return False if args . end_time and not args . start_time... | Basic option validation . Returns False if the options are not valid True otherwise . |
13,833 | def separate_groups ( groups , key , total ) : optimum , extra = compute_optimum ( len ( groups ) , total ) over_loaded , under_loaded , optimal = _smart_separate_groups ( groups , key , total ) if not extra : return over_loaded , under_loaded potential_under_loaded = [ group for group in optimal if key ( group ) == op... | Separate the group into overloaded and under - loaded groups . |
13,834 | def active_brokers ( self ) : return { broker for broker in self . _brokers if not broker . inactive and not broker . decommissioned } | Return set of brokers that are not inactive or decommissioned . |
13,835 | def add_broker ( self , broker ) : if broker not in self . _brokers : self . _brokers . add ( broker ) else : self . log . warning ( 'Broker {broker_id} already present in ' 'replication-group {rg_id}' . format ( broker_id = broker . id , rg_id = self . _id , ) ) | Add broker to current broker - list . |
13,836 | def count_replica ( self , partition ) : return sum ( 1 for b in partition . replicas if b in self . brokers ) | Return count of replicas of given partition . |
13,837 | def acquire_partition ( self , partition , source_broker ) : broker_dest = self . _elect_dest_broker ( partition ) if not broker_dest : raise NotEligibleGroupError ( "No eligible brokers to accept partition {p}" . format ( p = partition ) , ) source_broker . move_partition ( partition , broker_dest ) | Move a partition from a broker to any of the eligible brokers of the replication group . |
13,838 | def _select_broker_pair ( self , rg_destination , victim_partition ) : broker_source = self . _elect_source_broker ( victim_partition ) broker_destination = rg_destination . _elect_dest_broker ( victim_partition ) return broker_source , broker_destination | Select best - fit source and destination brokers based on partition count and presence of partition over the broker . |
13,839 | def _elect_source_broker ( self , victim_partition , broker_subset = None ) : broker_subset = broker_subset or self . _brokers over_loaded_brokers = sorted ( [ broker for broker in broker_subset if victim_partition in broker . partitions and not broker . inactive ] , key = lambda b : len ( b . partitions ) , reverse = ... | Select first over loaded broker having victim_partition . |
13,840 | def _elect_dest_broker ( self , victim_partition ) : under_loaded_brokers = sorted ( [ broker for broker in self . _brokers if ( victim_partition not in broker . partitions and not broker . inactive and not broker . decommissioned ) ] , key = lambda b : len ( b . partitions ) ) if not under_loaded_brokers : return None... | Select first under loaded brokers preferring not having partition of same topic as victim partition . |
13,841 | def rebalance_brokers ( self ) : total_partitions = sum ( len ( b . partitions ) for b in self . brokers ) blacklist = set ( b for b in self . brokers if b . decommissioned ) active_brokers = self . get_active_brokers ( ) - blacklist if not active_brokers : raise EmptyReplicationGroupError ( "No active brokers in %s" ,... | Rebalance partition - count across brokers . |
13,842 | def _get_target_brokers ( self , over_loaded_brokers , under_loaded_brokers , sibling_distance ) : over_loaded_brokers = sorted ( over_loaded_brokers , key = lambda b : len ( b . partitions ) , reverse = True , ) under_loaded_brokers = sorted ( under_loaded_brokers , key = lambda b : len ( b . partitions ) , ) target =... | Pick best - suitable source - broker destination - broker and partition to balance partition - count over brokers in given replication - group . |
13,843 | def generate_sibling_distance ( self ) : sibling_distance = defaultdict ( lambda : defaultdict ( dict ) ) topics = { p . topic for p in self . partitions } for source in self . brokers : for dest in self . brokers : if source != dest : for topic in topics : sibling_distance [ dest ] [ source ] [ topic ] = dest . count_... | Generate a dict containing the distance computed as difference in in number of partitions of each topic from under_loaded_brokers to over_loaded_brokers . |
13,844 | def update_sibling_distance ( self , sibling_distance , dest , topic ) : for source in six . iterkeys ( sibling_distance [ dest ] ) : sibling_distance [ dest ] [ source ] [ topic ] = dest . count_partitions ( topic ) - source . count_partitions ( topic ) return sibling_distance | Update the sibling distance for topic and destination broker . |
13,845 | def move_partition_replica ( self , under_loaded_rg , eligible_partition ) : source_broker , dest_broker = self . _get_eligible_broker_pair ( under_loaded_rg , eligible_partition , ) if source_broker and dest_broker : self . log . debug ( 'Moving partition {p_name} from broker {source_broker} to ' 'replication-group:br... | Move partition to under - loaded replication - group if possible . |
13,846 | def _get_eligible_broker_pair ( self , under_loaded_rg , eligible_partition ) : under_brokers = list ( filter ( lambda b : eligible_partition not in b . partitions , under_loaded_rg . brokers , ) ) over_brokers = list ( filter ( lambda b : eligible_partition in b . partitions , self . brokers , ) ) source_broker , dest... | Evaluate and return source and destination broker - pair from over - loaded and under - loaded replication - group if possible return None otherwise . |
13,847 | def merge_result ( res ) : if not isinstance ( res , dict ) : raise ValueError ( 'Value should be of dict type' ) result = set ( [ ] ) for _ , v in res . items ( ) : for value in v : result . add ( value ) return list ( result ) | Merge all items in res into a list . |
13,848 | def first_key ( res ) : if not isinstance ( res , dict ) : raise ValueError ( 'Value should be of dict type' ) if len ( res . keys ( ) ) != 1 : raise RedisClusterException ( "More then 1 result from command" ) return list ( res . values ( ) ) [ 0 ] | Returns the first result for the given command . |
13,849 | def clusterdown_wrapper ( func ) : @ wraps ( func ) async def inner ( * args , ** kwargs ) : for _ in range ( 0 , 3 ) : try : return await func ( * args , ** kwargs ) except ClusterDownError : pass raise ClusterDownError ( "CLUSTERDOWN error. Unable to rebuild the cluster" ) return inner | Wrapper for CLUSTERDOWN error handling . |
13,850 | def parse_debug_object ( response ) : "Parse the results of Redis's DEBUG OBJECT command into a Python dict" response = nativestr ( response ) response = 'type:' + response response = dict ( [ kv . split ( ':' ) for kv in response . split ( ) ] ) int_fields = ( 'refcount' , 'serializedlength' , 'lru' , 'lru_seconds_idl... | Parse the results of Redis s DEBUG OBJECT command into a Python dict |
13,851 | def parse_info ( response ) : "Parse the result of Redis's INFO command into a Python dict" info = { } response = nativestr ( response ) def get_value ( value ) : if ',' not in value or '=' not in value : try : if '.' in value : return float ( value ) else : return int ( value ) except ValueError : return value else : ... | Parse the result of Redis s INFO command into a Python dict |
13,852 | async def slowlog_get ( self , num = None ) : args = [ 'SLOWLOG GET' ] if num is not None : args . append ( num ) return await self . execute_command ( * args ) | Get the entries from the slowlog . If num is specified get the most recent num items . |
13,853 | def cache ( self , name , cache_class = Cache , identity_generator_class = IdentityGenerator , compressor_class = Compressor , serializer_class = Serializer , * args , ** kwargs ) : return cache_class ( self , app = name , identity_generator_class = identity_generator_class , compressor_class = compressor_class , seria... | Return a cache object using default identity generator serializer and compressor . |
13,854 | async def hincrby ( self , name , key , amount = 1 ) : "Increment the value of ``key`` in hash ``name`` by ``amount``" return await self . execute_command ( 'HINCRBY' , name , key , amount ) | Increment the value of key in hash name by amount |
13,855 | async def hincrbyfloat ( self , name , key , amount = 1.0 ) : return await self . execute_command ( 'HINCRBYFLOAT' , name , key , amount ) | Increment the value of key in hash name by floating amount |
13,856 | async def hset ( self , name , key , value ) : return await self . execute_command ( 'HSET' , name , key , value ) | Set key to value within hash name Returns 1 if HSET created a new field otherwise 0 |
13,857 | async def hsetnx ( self , name , key , value ) : return await self . execute_command ( 'HSETNX' , name , key , value ) | Set key to value within hash name if key does not exist . Returns 1 if HSETNX created a field otherwise 0 . |
13,858 | async def hmset ( self , name , mapping ) : if not mapping : raise DataError ( "'hmset' with 'mapping' of length 0" ) items = [ ] for pair in iteritems ( mapping ) : items . extend ( pair ) return await self . execute_command ( 'HMSET' , name , * items ) | Set key to value within hash name for each corresponding key and value from the mapping dict . |
13,859 | async def transaction ( self , func , * watches , ** kwargs ) : shard_hint = kwargs . pop ( 'shard_hint' , None ) value_from_callable = kwargs . pop ( 'value_from_callable' , False ) watch_delay = kwargs . pop ( 'watch_delay' , None ) async with await self . pipeline ( True , shard_hint ) as pipe : while True : try : i... | Convenience method for executing the callable func as a transaction while watching all keys specified in watches . The func callable should expect a single argument which is a Pipeline object . |
13,860 | async def initialize ( self ) : nodes_cache = { } tmp_slots = { } all_slots_covered = False disagreements = [ ] startup_nodes_reachable = False nodes = self . orig_startup_nodes if self . nodemanager_follow_cluster : nodes = self . startup_nodes for node in nodes : try : r = self . get_redis_link ( host = node [ 'host'... | Init the slots cache by asking all startup nodes what the current cluster configuration is |
13,861 | async def cluster_require_full_coverage ( self , nodes_cache ) : nodes = nodes_cache or self . nodes async def node_require_full_coverage ( node ) : r_node = self . get_redis_link ( host = node [ 'host' ] , port = node [ 'port' ] ) node_config = await r_node . config_get ( 'cluster-require-full-coverage' ) return 'yes'... | if exists cluster - require - full - coverage no config on redis servers then even all slots are not covered cluster still will be able to respond |
13,862 | def set_node ( self , host , port , server_type = None ) : node_name = "{0}:{1}" . format ( host , port ) node = { 'host' : host , 'port' : port , 'name' : node_name , 'server_type' : server_type } self . nodes [ node_name ] = node return node | Update data for a node . |
13,863 | def populate_startup_nodes ( self ) : for item in self . startup_nodes : self . set_node_name ( item ) for n in self . nodes . values ( ) : if n not in self . startup_nodes : self . startup_nodes . append ( n ) uniq = { frozenset ( node . items ( ) ) for node in self . startup_nodes } self . startup_nodes = [ dict ( no... | Do something with all startup nodes and filters out any duplicates |
13,864 | def reset ( self ) : self . pid = os . getpid ( ) self . _created_connections = 0 self . _created_connections_per_node = { } self . _available_connections = { } self . _in_use_connections = { } self . _check_lock = threading . Lock ( ) self . initialized = False | Resets the connection pool back to a clean state . |
13,865 | def disconnect ( self ) : all_conns = chain ( self . _available_connections . values ( ) , self . _in_use_connections . values ( ) , ) for node_connections in all_conns : for connection in node_connections : connection . disconnect ( ) | Nothing that requires any overwrite . |
13,866 | def get_random_connection ( self ) : if self . _available_connections : node_name = random . choice ( list ( self . _available_connections . keys ( ) ) ) conn_list = self . _available_connections [ node_name ] if conn_list : return conn_list . pop ( ) for node in self . nodes . random_startup_node_iter ( ) : connection... | Open new connection to random redis server . |
13,867 | def get_connection_by_slot ( self , slot ) : self . _checkpid ( ) try : return self . get_connection_by_node ( self . get_node_by_slot ( slot ) ) except KeyError : return self . get_random_connection ( ) | Determine what server a specific slot belongs to and return a redis object that is connected |
13,868 | def get_connection_by_node ( self , node ) : self . _checkpid ( ) self . nodes . set_node_name ( node ) try : connection = self . _available_connections . get ( node [ "name" ] , [ ] ) . pop ( ) except IndexError : connection = self . make_connection ( node ) self . _in_use_connections . setdefault ( node [ "name" ] , ... | get a connection by node |
13,869 | def encode ( self , value ) : if self . decode_responses and isinstance ( value , bytes ) : value = value . decode ( self . encoding ) elif not self . decode_responses and isinstance ( value , str ) : value = value . encode ( self . encoding ) return value | Encode the value so that it s identical to what we ll read off the connection |
13,870 | async def punsubscribe ( self , * args ) : if args : args = list_or_args ( args [ 0 ] , args [ 1 : ] ) return await self . execute_command ( 'PUNSUBSCRIBE' , * args ) | Unsubscribe from the supplied patterns . If empty unsubscribe from all patterns . |
13,871 | async def listen ( self ) : "Listen for messages on channels this client has been subscribed to" if self . subscribed : return self . handle_message ( await self . parse_response ( block = True ) ) | Listen for messages on channels this client has been subscribed to |
13,872 | async def get_message ( self , ignore_subscribe_messages = False , timeout = 0 ) : response = await self . parse_response ( block = False , timeout = timeout ) if response : return self . handle_message ( response , ignore_subscribe_messages ) return None | Get the next message if one is available otherwise None . |
13,873 | def _gen_identity ( self , key , param = None ) : if self . identity_generator and param is not None : if self . serializer : param = self . serializer . serialize ( param ) if self . compressor : param = self . compressor . compress ( param ) identity = self . identity_generator . generate ( key , param ) else : ident... | generate identity according to key and param given |
13,874 | def _pack ( self , content ) : if self . serializer : content = self . serializer . serialize ( content ) if self . compressor : content = self . compressor . compress ( content ) return content | pack the content using serializer and compressor |
13,875 | def _unpack ( self , content ) : if self . compressor : try : content = self . compressor . decompress ( content ) except CompressError : pass if self . serializer : content = self . serializer . deserialize ( content ) return content | unpack cache using serializer and compressor |
13,876 | async def delete ( self , key , param = None ) : identity = self . _gen_identity ( key , param ) return await self . client . delete ( identity ) | delete cache corresponding to identity generated from key and param |
13,877 | async def delete_pattern ( self , pattern , count = None ) : cursor = '0' count_deleted = 0 while cursor != 0 : cursor , identities = await self . client . scan ( cursor = cursor , match = pattern , count = count ) count_deleted += await self . client . delete ( * identities ) return count_deleted | delete cache according to pattern in redis delete count keys each time |
13,878 | async def exist ( self , key , param = None ) : identity = self . _gen_identity ( key , param ) return await self . client . exists ( identity ) | see if specific identity exists |
13,879 | async def ttl ( self , key , param = None ) : identity = self . _gen_identity ( key , param ) return await self . client . ttl ( identity ) | get time to live of a specific identity |
13,880 | async def set ( self , key , value , param = None , expire_time = None , herd_timeout = None ) : identity = self . _gen_identity ( key , param ) expected_expired_ts = int ( time . time ( ) ) if expire_time : expected_expired_ts += expire_time expected_expired_ts += herd_timeout or self . default_herd_timeout value = se... | Use key and param to generate identity and pack the content expire the key within real_timeout if expire_time is given . real_timeout is equal to the sum of expire_time and herd_time . The content is cached with expire_time . |
13,881 | async def xrange ( self , name : str , start = '-' , end = '+' , count = None ) -> list : pieces = [ start , end ] if count is not None : if not isinstance ( count , int ) or count < 1 : raise RedisError ( "XRANGE count must be a positive integer" ) pieces . append ( "COUNT" ) pieces . append ( str ( count ) ) return a... | Read stream values within an interval . |
13,882 | async def ltrim ( self , name , start , end ) : return await self . execute_command ( 'LTRIM' , name , start , end ) | Trim the list name removing all values not within the slice between start and end |
13,883 | def block_pipeline_command ( func ) : def inner ( * args , ** kwargs ) : raise RedisClusterException ( "ERROR: Calling pipelined function {0} is blocked when running redis in cluster mode..." . format ( func . __name__ ) ) return inner | Prints error because some pipelined commands should be blocked when running in cluster - mode |
13,884 | async def immediate_execute_command ( self , * args , ** options ) : command_name = args [ 0 ] conn = self . connection if not conn : conn = self . connection_pool . get_connection ( ) self . connection = conn try : await conn . send_command ( * args ) return await self . parse_response ( conn , command_name , ** optio... | Execute a command immediately but don t auto - retry on a ConnectionError if we re already WATCHing a variable . Used when issuing WATCH or subsequent commands retrieving their values but before MULTI is called . |
13,885 | def _determine_slot ( self , * args ) : if len ( args ) <= 1 : raise RedisClusterException ( "No way to dispatch this command to Redis Cluster. Missing key." ) command = args [ 0 ] if command in [ 'EVAL' , 'EVALSHA' ] : numkeys = args [ 2 ] keys = args [ 3 : 3 + numkeys ] slots = { self . connection_pool . nodes . keys... | figure out what slot based on command and args |
13,886 | def reset ( self ) : self . command_stack = [ ] self . scripts = set ( ) self . watches = [ ] self . watching = False self . explicit_transaction = False | Reset back to empty pipeline . |
13,887 | async def send_cluster_commands ( self , stack , raise_on_error = True , allow_redirections = True ) : attempt = sorted ( stack , key = lambda x : x . position ) nodes = { } for c in attempt : slot = self . _determine_slot ( * c . args ) node = self . connection_pool . get_node_by_slot ( slot ) self . connection_pool .... | Send a bunch of cluster commands to the redis cluster . |
13,888 | async def _watch ( self , node , conn , names ) : "Watches the values at keys ``names``" for name in names : slot = self . _determine_slot ( 'WATCH' , name ) dist_node = self . connection_pool . get_node_by_slot ( slot ) if node . get ( 'name' ) != dist_node [ 'name' ] : if len ( node ) > 0 : raise ClusterTransactionEr... | Watches the values at keys names |
13,889 | async def _unwatch ( self , conn ) : "Unwatches all previously specified keys" await conn . send_command ( 'UNWATCH' ) res = await conn . read_response ( ) return self . watching and res or True | Unwatches all previously specified keys |
13,890 | async def write ( self ) : connection = self . connection commands = self . commands for c in commands : c . result = None try : await connection . send_packed_command ( connection . pack_commands ( [ c . args for c in commands ] ) ) except ( ConnectionError , TimeoutError ) as e : for c in commands : c . result = e | Code borrowed from StrictRedis so it can be fixed |
13,891 | def set ( self , type , offset , value ) : self . _command_stack . extend ( [ 'SET' , type , offset , value ] ) return self | Set the specified bit field and returns its old value . |
13,892 | def get ( self , type , offset ) : self . _command_stack . extend ( [ 'GET' , type , offset ] ) return self | Returns the specified bit field . |
13,893 | async def zrange ( self , name , start , end , desc = False , withscores = False , score_cast_func = float ) : if desc : return await self . zrevrange ( name , start , end , withscores , score_cast_func ) pieces = [ 'ZRANGE' , name , start , end ] if withscores : pieces . append ( b ( 'WITHSCORES' ) ) options = { 'with... | Return a range of values from sorted set name between start and end sorted in ascending order . |
13,894 | async def zremrangebyscore ( self , name , min , max ) : return await self . execute_command ( 'ZREMRANGEBYSCORE' , name , min , max ) | Remove all elements in the sorted set name with scores between min and max . Returns the number of elements removed . |
13,895 | async def expire ( self , name , time ) : if isinstance ( time , datetime . timedelta ) : time = time . seconds + time . days * 24 * 3600 return await self . execute_command ( 'EXPIRE' , name , time ) | Set an expire flag on key name for time seconds . time can be represented by an integer or a Python timedelta object . |
13,896 | async def delete ( self , * names ) : count = 0 for arg in names : count += await self . execute_command ( 'DEL' , arg ) return count | Delete one or more keys specified by names |
13,897 | async def geoadd ( self , name , * values ) : if len ( values ) % 3 != 0 : raise RedisError ( "GEOADD requires places with lon, lat and name" " values" ) return await self . execute_command ( 'GEOADD' , name , * values ) | Add the specified geospatial items to the specified key identified by the name argument . The Geospatial items are given as ordered members of the values argument each item or place is formed by the triad latitude longitude and name . |
13,898 | async def georadius ( self , name , longitude , latitude , radius , unit = None , withdist = False , withcoord = False , withhash = False , count = None , sort = None , store = None , store_dist = None ) : return await self . _georadiusgeneric ( 'GEORADIUS' , name , longitude , latitude , radius , unit = unit , withdis... | Return the members of the specified key identified by the name argument which are within the borders of the area specified with the latitude and longitude location and the maximum distance from the center specified by the radius value . |
13,899 | async def georadiusbymember ( self , name , member , radius , unit = None , withdist = False , withcoord = False , withhash = False , count = None , sort = None , store = None , store_dist = None ) : return await self . _georadiusgeneric ( 'GEORADIUSBYMEMBER' , name , member , radius , unit = unit , withdist = withdist... | This command is exactly like georadius with the sole difference that instead of taking as the center of the area to query a longitude and latitude value it takes the name of a member already existing inside the geospatial index represented by the sorted set . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.