desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'C4.5 ID3算法计算的是信息增益C4.5算法计算的是信息增益比对䞊面ID3版本的凜数皍䜜修改即可'
def _chooseBestFeatureToSplit_C45(self, X, y):
numFeatures = X.shape[1] oldEntropy = self._calcEntropy(y) bestGainRatio = 0.0 bestFeatureIndex = (-1) for i in range(numFeatures): featList = X[:, i] uniqueVals = set(featList) newEntropy = 0.0 splitInformation = 0.0 for value in uniqueVals: (sub_...
'凜数功胜返回labelList䞭出现次数最倚的label'
def _majorityCnt(self, labelList):
labelCount = {} for vote in labelList: if (vote not in labelCount.keys()): labelCount[vote] = 0 labelCount[vote] += 1 sortedClassCount = sorted(labelCount.iteritems(), key=(lambda x: x[1]), reverse=True) return sortedClassCount[0][0]
'featureIndex类型是元组它记圕了X䞭的特埁圚原始数据䞭对应的䞋标。'
def _createTree(self, X, y, featureIndex):
labelList = list(y) if (labelList.count(labelList[0]) == len(labelList)): return labelList[0] if (len(featureIndex) == 0): return self._majorityCnt(labelList) if (self._mode == 'C4.5'): bestFeatIndex = self._chooseBestFeatureToSplit_C45(X, y) elif (self._mode == 'ID3'): ...
'an invalid pipeline command at exec time adds the exception instance to the list of returned values'
def test_exec_error_in_response(self, r):
r['c'] = 'a' with r.pipeline() as pipe: pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4) result = pipe.execute(raise_on_error=False) assert result[0] assert (r['a'] == b('1')) assert result[1] assert (r['b'] == b('2')) assert isinstance(result[2], re...
def test_connection_pool_blocks_until_timeout(self):
    """When out of connections, block for ``timeout`` seconds, then raise."""
    blocking_pool = self.get_pool(max_connections=1, timeout=0.1)
    # Exhaust the pool so the next request must wait for a free connection.
    blocking_pool.get_connection('_')
    began = time.time()
    with pytest.raises(redis.ConnectionError):
        blocking_pool.get_connection('_')
    # The failed request must have waited at least the configured timeout.
    assert (time.time() - began) >= 0.1
def connection_pool_blocks_until_another_connection_released(self):
    'When out of connections, block until another connection is released to the pool'
    # NOTE(review): this function is missing the ``test_`` prefix, so pytest
    # will not collect it and it never runs — presumably it should be renamed
    # ``test_connection_pool_blocks_until_another_connection_released``.
    pool = self.get_pool(max_connections=1, timeout=2)
    c1 = pool.get_connection('_')

    def target():
        # Hold the pool's only connection briefly, then release it from a
        # second thread so the blocked caller below can proceed.
        time.sleep(0.1)
        pool.release(c1)
    Thread(target=target).start()
    start = time.time()
    # Blocks until ``target`` releases c1 (~0.1s later).
    pool.get_connection('_')
    assert ((time.time() - start) >= 0.1)
def test_on_connect_error(self):
    """An error in Connection.on_connect should disconnect from the server.

    See https://github.com/andymccurdy/redis-py/issues/368 for details.
    """
    # Selecting a nonexistent db makes on_connect fail during the handshake.
    broken_client = redis.Redis(db=9999)
    with pytest.raises(redis.RedisError):
        broken_client.info()
    conn_pool = broken_client.connection_pool
    available = conn_pool._available_connections
    # The connection is returned to the pool, but its socket was torn down.
    assert (len(available) == 1)
    assert (not available[0]._sock)
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_disconnects_socket(self, r):
    """If Redis raises a LOADING error, the connection should be
    disconnected and a BusyLoadingError raised."""
    with pytest.raises(redis.BusyLoadingError):
        r.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
    conn_pool = r.connection_pool
    available = conn_pool._available_connections
    # The connection went back into the pool with its socket closed.
    assert (len(available) == 1)
    assert (not available[0]._sock)
'BusyLoadingErrors should raise from Pipelines that execute a command immediately, like WATCH does.'
@skip_if_server_version_lt('2.8.8') def test_busy_loading_from_pipeline_immediate_command(self, r):
pipe = r.pipeline() with pytest.raises(redis.BusyLoadingError): pipe.immediate_execute_command('DEBUG', 'ERROR', 'LOADING fake message') pool = r.connection_pool assert (not pipe.connection) assert (len(pool._available_connections) == 1) assert (not pool._available_connections[0]._...
'BusyLoadingErrors should be raised from a pipeline execution regardless of the raise_on_error flag.'
@skip_if_server_version_lt('2.8.8') def test_busy_loading_from_pipeline(self, r):
pipe = r.pipeline() pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message') with pytest.raises(redis.BusyLoadingError): pipe.execute() pool = r.connection_pool assert (not pipe.connection) assert (len(pool._available_connections) == 1) assert (not pool._available_connect...
@skip_if_server_version_lt('2.8.8')
def test_read_only_error(self, r):
    """READONLY errors get turned into ReadOnlyError exceptions."""
    # DEBUG ERROR forces the server to reply with an arbitrary error string.
    with pytest.raises(redis.ReadOnlyError):
        r.execute_command('DEBUG', 'ERROR', 'READONLY blah blah')
'Older Redis versions contained \'allocation_stats\' in INFO that was the cause of a number of bugs when parsing.'
def test_22_info(self, r):
info = 'allocation_stats:6=1,7=1,8=7141,9=180,10=92,11=116,12=5330,13=123,14=3091,15=11048,16=225842,17=1784,18=814,19=12020,20=2530,21=645,22=15113,23=8695,24=142860,25=318,26=3303,27=20561,28=54042,29=37390,30=1884,31=18071,32=31367,33=160,34=169,35=201,36=10155,37=1045,38=15078,39=22985,40=12523,41=15588,42=265,...
def test_large_responses(self, r):
    """The PythonParser has some special cases for return values > 1MB."""
    # Build a multi-megabyte payload out of repeated ascii_letters.
    repeat_count = 5000000 // len(ascii_letters)
    payload = ascii_letters * repeat_count
    r['a'] = payload
    assert (r['a'] == b(payload))
def test_floating_point_encoding(self, r):
    """High precision floating point values sent to the server should keep
    precision."""
    score = 1349673917.939762
    r.zadd('a', 'a1', score)
    # The score must round-trip through the server without losing digits.
    assert (r.zscore('a', 'a1') == score)
def test_high_sleep_raises_error(self, sr):
    """If ``sleep`` is higher than ``timeout``, it should raise an error."""
    # A lock that sleeps longer than its own lifetime can never be retried.
    with pytest.raises(LockError):
        self.get_lock(sr, 'foo', timeout=1, sleep=2)
'Send an already packed command to the Redis server'
def send_packed_command(self, command):
if (not self._sock): self.connect() try: self._sock.sendall(command) except socket.error: e = sys.exc_info()[1] self.disconnect() if (len(e.args) == 1): (_errno, errmsg) = ('UNKNOWN', e.args[0]) else: (_errno, errmsg) = e.args r...
def pack_command(self, *args):
    "Pack a series of arguments into a value Redis command"
    # RESP encoding: every argument becomes  $<len>\r\n<arg>\r\n
    encoded_parts = [
        SYM_EMPTY.join((SYM_DOLLAR, b(str(len(item))), SYM_CRLF, item,
                        SYM_CRLF))
        for item in imap(self.encode, args)
    ]
    # The whole command is prefixed with the element count:  *<n>\r\n
    return SYM_EMPTY.join(
        (SYM_STAR, b(str(len(args))), SYM_CRLF, SYM_EMPTY.join(encoded_parts)))
'Round-robin slave balancer'
def rotate_slaves(self):
slaves = self.sentinel_manager.discover_slaves(self.service_name) if slaves: if (self.slave_rr_counter is None): self.slave_rr_counter = random.randint(0, (len(slaves) - 1)) for _ in xrange(len(slaves)): self.slave_rr_counter = ((self.slave_rr_counter + 1) % len(slaves)) ...
'Asks sentinel servers for the Redis master\'s address corresponding to the service labeled ``service_name``. Returns a pair (address, port) or raises MasterNotFoundError if no master is found.'
def discover_master(self, service_name):
for (sentinel_no, sentinel) in enumerate(self.sentinels): try: masters = sentinel.sentinel_masters() except (ConnectionError, TimeoutError): continue state = masters.get(service_name) if (state and self.check_master_state(state, service_name)): (se...
def filter_slaves(self, slaves):
    "Remove slaves that are in an ODOWN or SDOWN state"
    # Keep only (ip, port) pairs for slaves sentinel reports as healthy.
    return [(slave['ip'], slave['port'])
            for slave in slaves
            if not (slave['is_odown'] or slave['is_sdown'])]
def discover_slaves(self, service_name):
    "Returns a list of alive slaves for service ``service_name``"
    for sentinel in self.sentinels:
        try:
            raw_slaves = sentinel.sentinel_slaves(service_name)
        except (ConnectionError, ResponseError, TimeoutError):
            # This sentinel is unreachable or unhappy; try the next one.
            continue
        alive = self.filter_slaves(raw_slaves)
        if alive:
            return alive
    # No sentinel reported a healthy slave.
    return []
def master_for(self, service_name, redis_class=StrictRedis,
               connection_pool_class=SentinelConnectionPool, **kwargs):
    """Returns a redis client instance for the ``service_name`` master.

    A SentinelConnectionPool class is used to retrieve the master's address
    before establishing a new connection. If the master's address has
    changed, any cached connections to the old master are closed. By
    default clients will be a redis.StrictRedis instance; pass a different
    class via ``redis_class`` if desired.
    """
    merged_kwargs = dict(self.connection_kwargs)
    merged_kwargs.update(kwargs)
    # The pool must track the *master* address for this service.
    merged_kwargs['is_master'] = True
    pool = connection_pool_class(service_name, self, **merged_kwargs)
    return redis_class(connection_pool=pool)
def slave_for(self, service_name, redis_class=StrictRedis,
              connection_pool_class=SentinelConnectionPool, **kwargs):
    """Returns redis client instance for the ``service_name`` slave(s).

    A SentinelConnectionPool class is used to retrieve a slave's address
    before establishing a new connection. By default clients will be a
    redis.StrictRedis instance; pass a different class via ``redis_class``
    if desired.
    """
    merged_kwargs = dict(self.connection_kwargs)
    merged_kwargs.update(kwargs)
    # The pool must rotate among *slave* addresses for this service.
    merged_kwargs['is_master'] = False
    pool = connection_pool_class(service_name, self, **merged_kwargs)
    return redis_class(connection_pool=pool)
@classmethod
def from_url(cls, url, db=None, **kwargs):
    """Return a Redis client object configured from the given URL.

    The URL must use either the ``redis://`` scheme for RESP connections or
    the ``unix://`` scheme for Unix domain sockets, for example::

        redis://[:password]@localhost:6379/0
        unix://[:password]@/path/to/socket.sock?db=0

    Any extra keyword arguments are forwarded to the connection pool.
    """
    pool = ConnectionPool.from_url(url, db=db, **kwargs)
    return cls(connection_pool=pool)
def set_response_callback(self, command, callback):
    "Set a custom Response Callback"
    # Future replies to ``command`` are post-processed by ``callback``.
    self.response_callbacks[command] = callback
def pipeline(self, transaction=True, shard_hint=None):
    """Return a new pipeline object that can queue multiple commands for
    later execution. ``transaction`` indicates whether all commands should
    be executed atomically. Apart from making a group of operations atomic,
    pipelines are useful for reducing the back-and-forth overhead between
    the client and server."""
    return StrictPipeline(
        self.connection_pool,
        self.response_callbacks,
        transaction,
        shard_hint,
    )
'Convenience method for executing the callable `func` as a transaction while watching all keys specified in `watches`. The \'func\' callable should expect a single argument which is a Pipeline object.'
def transaction(self, func, *watches, **kwargs):
shard_hint = kwargs.pop('shard_hint', None) value_from_callable = kwargs.pop('value_from_callable', False) watch_delay = kwargs.pop('watch_delay', None) with self.pipeline(True, shard_hint) as pipe: while 1: try: if watches: pipe.watch(*watches) ...
'Return a new Lock object using key ``name`` that mimics the behavior of threading.Lock. If specified, ``timeout`` indicates a maximum life for the lock. By default, it will remain locked until release() is called. ``sleep`` indicates the amount of time to sleep per loop iteration when the lock is in blocking mode and ...
def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None, lock_class=None, thread_local=True):
if (lock_class is None): if (self._use_lua_lock is None): try: LuaLock.register_scripts(self) self._use_lua_lock = True except ResponseError: self._use_lua_lock = False lock_class = ((self._use_lua_lock and LuaLock) or Lock) ...
def pubsub(self, **kwargs):
    """Return a Publish/Subscribe object.

    With this object, you can subscribe to channels and listen for messages
    that get published to them.
    """
    # All keyword arguments are forwarded to the PubSub constructor.
    return PubSub(self.connection_pool, **kwargs)
'Execute a command and return a parsed response'
def execute_command(self, *args, **options):
pool = self.connection_pool command_name = args[0] connection = pool.get_connection(command_name, **options) try: connection.send_command(*args) return self.parse_response(connection, command_name, **options) except (ConnectionError, TimeoutError) as e: connection.disconnect(...
def parse_response(self, connection, command_name, **options):
    "Parses a response from the Redis server"
    raw = connection.read_response()
    # EAFP: look up the command-specific callback once; fall back to the
    # raw response when no callback is registered for this command.
    try:
        callback = self.response_callbacks[command_name]
    except KeyError:
        return raw
    return callback(raw, **options)
def bgrewriteaof(self):
    """Tell the Redis server to rewrite the AOF file from data in memory."""
    return self.execute_command('BGREWRITEAOF')

def bgsave(self):
    """Tell the Redis server to save its data to disk. Unlike save(), this
    method is asynchronous and returns immediately."""
    return self.execute_command('BGSAVE')

def client_kill(self, address):
    """Disconnects the client at ``address`` (ip:port)."""
    return self.execute_command('CLIENT KILL', address)

def client_list(self):
    """Returns a list of currently connected clients."""
    return self.execute_command('CLIENT LIST')

def client_getname(self):
    """Returns the current connection name."""
    return self.execute_command('CLIENT GETNAME')

def client_setname(self, name):
    """Sets the current connection name."""
    return self.execute_command('CLIENT SETNAME', name)

def config_get(self, pattern='*'):
    """Return a dictionary of configuration based on the ``pattern``."""
    return self.execute_command('CONFIG GET', pattern)

def config_set(self, name, value):
    """Set config item ``name`` with ``value``."""
    return self.execute_command('CONFIG SET', name, value)

def config_resetstat(self):
    """Reset runtime statistics."""
    return self.execute_command('CONFIG RESETSTAT')

def config_rewrite(self):
    """Rewrite the config file with the minimal change to reflect the
    running config."""
    return self.execute_command('CONFIG REWRITE')
def dbsize(self):
    """Returns the number of keys in the current database."""
    return self.execute_command('DBSIZE')

def debug_object(self, key):
    """Returns version specific meta information about a given key."""
    return self.execute_command('DEBUG OBJECT', key)

def echo(self, value):
    """Echo the string back from the server."""
    return self.execute_command('ECHO', value)

def flushall(self):
    """Delete all keys in all databases on the current host."""
    return self.execute_command('FLUSHALL')

def flushdb(self):
    """Delete all keys in the current database."""
    return self.execute_command('FLUSHDB')

def info(self, section=None):
    """Returns a dictionary containing information about the Redis server.

    The ``section`` option can be used to select a specific section of
    information. The section option is not supported by older versions of
    Redis Server, and will generate ResponseError.
    """
    if (section is None):
        return self.execute_command('INFO')
    else:
        return self.execute_command('INFO', section)

def lastsave(self):
    """Return a Python datetime object representing the last time the Redis
    database was saved to disk."""
    return self.execute_command('LASTSAVE')

def object(self, infotype, key):
    """Return the encoding, idletime, or refcount about the key."""
    # NOTE: the method name shadows the ``object`` builtin, but it mirrors
    # the Redis OBJECT command and is part of the public API.
    # ``infotype`` is also passed as an option so the response callback
    # knows which OBJECT subcommand was issued.
    return self.execute_command('OBJECT', infotype, key, infotype=infotype)

def ping(self):
    """Ping the Redis server."""
    return self.execute_command('PING')

def save(self):
    """Tell the Redis server to save its data to disk, blocking until the
    save is complete."""
    return self.execute_command('SAVE')
def sentinel(self, *args):
    """Redis Sentinel's SENTINEL command."""
    # Deprecated entry point: it only emits a warning and returns None.
    warnings.warn(DeprecationWarning('Use the individual sentinel_* methods'))

def sentinel_get_master_addr_by_name(self, service_name):
    """Returns a (host, port) pair for the given ``service_name``."""
    return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME', service_name)

def sentinel_master(self, service_name):
    """Returns a dictionary containing the specified master's state."""
    return self.execute_command('SENTINEL MASTER', service_name)

def sentinel_masters(self):
    """Returns a list of dictionaries containing each master's state."""
    return self.execute_command('SENTINEL MASTERS')

def sentinel_monitor(self, name, ip, port, quorum):
    """Add a new master to Sentinel to be monitored."""
    return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum)

def sentinel_remove(self, name):
    """Remove a master from Sentinel's monitoring."""
    return self.execute_command('SENTINEL REMOVE', name)

def sentinel_sentinels(self, service_name):
    """Returns a list of sentinels for ``service_name``."""
    return self.execute_command('SENTINEL SENTINELS', service_name)

def sentinel_set(self, name, option, value):
    """Set Sentinel monitoring parameters for a given master."""
    return self.execute_command('SENTINEL SET', name, option, value)

def sentinel_slaves(self, service_name):
    """Returns a list of slaves for ``service_name``."""
    return self.execute_command('SENTINEL SLAVES', service_name)
def shutdown(self):
    """Shutdown the server."""
    try:
        self.execute_command('SHUTDOWN')
    except ConnectionError:
        # A successful SHUTDOWN kills the connection, which surfaces here
        # as a ConnectionError — that is the expected outcome.
        return
    # The server replied normally, so it did not actually shut down.
    raise RedisError('SHUTDOWN seems to have failed.')
def slaveof(self, host=None, port=None):
    """Set the server to be a replicated slave of the instance identified
    by the ``host`` and ``port``. If called without arguments, the instance
    is promoted to a master instead."""
    if host is None and port is None:
        # No target given: detach from any master ("SLAVEOF NO ONE").
        return self.execute_command('SLAVEOF', Token.get_token('NO'),
                                    Token.get_token('ONE'))
    return self.execute_command('SLAVEOF', host, port)
def slowlog_get(self, num=None):
    """Get the entries from the slowlog. If ``num`` is specified, get the
    most recent ``num`` items."""
    command = ['SLOWLOG GET']
    if num is not None:
        command.append(num)
    return self.execute_command(*command)
def slowlog_len(self):
    """Get the number of items in the slowlog."""
    return self.execute_command('SLOWLOG LEN')

def slowlog_reset(self):
    """Remove all items in the slowlog."""
    return self.execute_command('SLOWLOG RESET')

def time(self):
    """Returns the server time as a 2-item tuple of ints: (seconds since
    epoch, microseconds into this second)."""
    return self.execute_command('TIME')

def wait(self, num_replicas, timeout):
    """Redis synchronous replication.

    Returns the number of replicas that processed the query when we finally
    have at least ``num_replicas``, or when the ``timeout`` was reached.
    """
    return self.execute_command('WAIT', num_replicas, timeout)

def append(self, key, value):
    """Appends the string ``value`` to the value at ``key``. If ``key``
    doesn't already exist, create it with a value of ``value``. Returns the
    new length of the value at ``key``."""
    return self.execute_command('APPEND', key, value)
'Returns the count of set bits in the value of ``key``. Optional ``start`` and ``end`` paramaters indicate which bytes to consider'
def bitcount(self, key, start=None, end=None):
params = [key] if ((start is not None) and (end is not None)): params.append(start) params.append(end) elif (((start is not None) and (end is None)) or ((end is not None) and (start is None))): raise RedisError('Both start and end must be specified') return self...
def bitop(self, operation, dest, *keys):
    """Perform a bitwise operation using ``operation`` between ``keys`` and
    store the result in ``dest``."""
    return self.execute_command('BITOP', operation, dest, *keys)
'Return the position of the first bit set to 1 or 0 in a string. ``start`` and ``end`` difines search range. The range is interpreted as a range of bytes and not a range of bits, so start=0 and end=2 means to look at the first three bytes.'
def bitpos(self, key, bit, start=None, end=None):
if (bit not in (0, 1)): raise RedisError('bit must be 0 or 1') params = [key, bit] ((start is not None) and params.append(start)) if ((start is not None) and (end is not None)): params.append(end) elif ((start is None) and (end is not None)): raise RedisError('...
def decr(self, name, amount=1):
    """Decrements the value of ``key`` by ``amount``. If no key exists, the
    value will be initialized as 0 - ``amount``."""
    # DECRBY covers both the single-step and multi-step case.
    return self.execute_command('DECRBY', name, amount)

def delete(self, *names):
    """Delete one or more keys specified by ``names``."""
    return self.execute_command('DEL', *names)

def dump(self, name):
    """Return a serialized version of the value stored at the specified
    key. If key does not exist a nil bulk reply is returned."""
    return self.execute_command('DUMP', name)

def exists(self, name):
    """Returns a boolean indicating whether key ``name`` exists."""
    return self.execute_command('EXISTS', name)
def expire(self, name, time):
    """Set an expire flag on key ``name`` for ``time`` seconds. ``time``
    can be represented by an integer or a Python timedelta object."""
    if isinstance(time, datetime.timedelta):
        # Whole seconds only — sub-second precision is dropped; use
        # pexpire() for millisecond resolution.
        time = time.seconds + time.days * 24 * 3600
    return self.execute_command('EXPIRE', name, time)
def expireat(self, name, when):
    """Set an expire flag on key ``name``. ``when`` can be represented as
    an integer indicating unix time or a Python datetime object."""
    if isinstance(when, datetime.datetime):
        # Convert a (local-time) datetime to a whole-second unix timestamp.
        when = int(mod_time.mktime(when.timetuple()))
    return self.execute_command('EXPIREAT', name, when)
def get(self, name):
    """Return the value at key ``name``, or None if the key doesn't
    exist."""
    return self.execute_command('GET', name)

def __getitem__(self, name):
    """Return the value at key ``name``, raises a KeyError if the key
    doesn't exist."""
    value = self.get(name)
    if (value is not None):
        return value
    # Dict-style access promotes a missing key to an exception.
    raise KeyError(name)

def getbit(self, name, offset):
    """Returns a boolean indicating the value of ``offset`` in ``name``."""
    return self.execute_command('GETBIT', name, offset)

def getrange(self, key, start, end):
    """Returns the substring of the string value stored at ``key``,
    determined by the offsets ``start`` and ``end`` (both are inclusive)."""
    return self.execute_command('GETRANGE', key, start, end)

def getset(self, name, value):
    """Sets the value at key ``name`` to ``value`` and returns the old
    value at key ``name`` atomically."""
    return self.execute_command('GETSET', name, value)

def incr(self, name, amount=1):
    """Increments the value of ``key`` by ``amount``. If no key exists, the
    value will be initialized as ``amount``."""
    # INCRBY covers both the single-step and multi-step case.
    return self.execute_command('INCRBY', name, amount)

def incrby(self, name, amount=1):
    """Increments the value of ``key`` by ``amount``. If no key exists, the
    value will be initialized as ``amount``."""
    # Alias kept for API symmetry with the Redis INCRBY command name.
    return self.incr(name, amount)

def incrbyfloat(self, name, amount=1.0):
    """Increments the value at key ``name`` by floating ``amount``. If no
    key exists, the value will be initialized as ``amount``."""
    return self.execute_command('INCRBYFLOAT', name, amount)

def keys(self, pattern='*'):
    """Returns a list of keys matching ``pattern``."""
    return self.execute_command('KEYS', pattern)

def mget(self, keys, *args):
    """Returns a list of values ordered identically to ``keys``."""
    # Accepts either an iterable of keys or keys as positional arguments.
    args = list_or_args(keys, args)
    return self.execute_command('MGET', *args)
'Sets key/values based on a mapping. Mapping can be supplied as a single dictionary argument or as kwargs.'
def mset(self, *args, **kwargs):
if args: if ((len(args) != 1) or (not isinstance(args[0], dict))): raise RedisError('MSET requires **kwargs or a single dict arg') kwargs.update(args[0]) items = [] for pair in iteritems(kwargs): items.extend(pair) return self.execute_command('MSE...
'Sets key/values based on a mapping if none of the keys are already set. Mapping can be supplied as a single dictionary argument or as kwargs. Returns a boolean indicating if the operation was successful.'
def msetnx(self, *args, **kwargs):
if args: if ((len(args) != 1) or (not isinstance(args[0], dict))): raise RedisError('MSETNX requires **kwargs or a single dict arg') kwargs.update(args[0]) items = [] for pair in iteritems(kwargs): items.extend(pair) return self.execute_command('M...
def move(self, name, db):
    """Moves the key ``name`` to a different Redis database ``db``."""
    return self.execute_command('MOVE', name, db)

def persist(self, name):
    """Removes an expiration on ``name``."""
    return self.execute_command('PERSIST', name)

def pexpire(self, name, time):
    """Set an expire flag on key ``name`` for ``time`` milliseconds.
    ``time`` can be represented by an integer or a Python timedelta
    object."""
    if isinstance(time, datetime.timedelta):
        # Convert the timedelta to whole milliseconds.
        ms = int((time.microseconds / 1000))
        time = (((time.seconds + ((time.days * 24) * 3600)) * 1000) + ms)
    return self.execute_command('PEXPIRE', name, time)

def pexpireat(self, name, when):
    """Set an expire flag on key ``name``. ``when`` can be represented as
    an integer representing unix time in milliseconds (unix time * 1000) or
    a Python datetime object."""
    if isinstance(when, datetime.datetime):
        # Convert the datetime to unix time in milliseconds.
        ms = int((when.microsecond / 1000))
        when = ((int(mod_time.mktime(when.timetuple())) * 1000) + ms)
    return self.execute_command('PEXPIREAT', name, when)

def psetex(self, name, time_ms, value):
    """Set the value of key ``name`` to ``value`` that expires in
    ``time_ms`` milliseconds. ``time_ms`` can be represented by an integer
    or a Python timedelta object."""
    if isinstance(time_ms, datetime.timedelta):
        # Convert the timedelta to whole milliseconds.
        ms = int((time_ms.microseconds / 1000))
        time_ms = (((time_ms.seconds + ((time_ms.days * 24) * 3600)) * 1000) + ms)
    return self.execute_command('PSETEX', name, time_ms, value)

def pttl(self, name):
    """Returns the number of milliseconds until the key ``name`` will
    expire."""
    return self.execute_command('PTTL', name)

def randomkey(self):
    """Returns the name of a random key."""
    return self.execute_command('RANDOMKEY')

def rename(self, src, dst):
    """Rename key ``src`` to ``dst``."""
    return self.execute_command('RENAME', src, dst)

def renamenx(self, src, dst):
    """Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist."""
    return self.execute_command('RENAMENX', src, dst)

def restore(self, name, ttl, value, replace=False):
    """Create a key using the provided serialized value, previously
    obtained using DUMP."""
    params = [name, ttl, value]
    if replace:
        params.append('REPLACE')
    return self.execute_command('RESTORE', *params)
'Set the value at key ``name`` to ``value`` ``ex`` sets an expire flag on key ``name`` for ``ex`` seconds. ``px`` sets an expire flag on key ``name`` for ``px`` milliseconds. ``nx`` if set to True, set the value at key ``name`` to ``value`` if it does not already exist. ``xx`` if set to True, set the value at key ``nam...
def set(self, name, value, ex=None, px=None, nx=False, xx=False):
pieces = [name, value] if (ex is not None): pieces.append('EX') if isinstance(ex, datetime.timedelta): ex = (ex.seconds + ((ex.days * 24) * 3600)) pieces.append(ex) if (px is not None): pieces.append('PX') if isinstance(px, datetime.timedelta): ...