sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
async def base_combine(source, switch=False, ordered=False, task_limit=None):
    """Base operator for managing an asynchronous sequence of sequences.

    The sequences are awaited concurrently, although it's possible to limit
    the amount of running sequences using the ``task_limit`` argument.

    The ``switch`` argument enables the switch mechanism, which causes the
    previous subsequence to be discarded when a new one is created.

    The items can either be generated in order or as soon as they're
    received, depending on the ``ordered`` argument.
    """
    # Validate the task limit early
    if task_limit is not None and not task_limit > 0:
        raise ValueError('The task limit must be None or greater than 0')

    # The manager safely enters and cleans up the streamers
    async with StreamerManager() as manager:
        main_streamer = await manager.enter_and_create_task(source)

        # Keep processing events while at least one task is running
        while manager.tasks:

            # Split the streamers into the main streamer and the substreamers
            sub_group = manager.streamers[1:]
            main_group = (
                [main_streamer] if main_streamer in manager.tasks else [])

            if switch:
                # Switch - use the main streamer then the substreamers
                wait_list = main_group + sub_group
            elif ordered:
                # Concat - use the first substreamer then the main streamer
                wait_list = sub_group[:1] + main_group
            else:
                # Flat - use the substreamers then the main streamer
                wait_list = sub_group + main_group

            # Wait for the next completed task
            streamer, task = await manager.wait_single_event(wait_list)

            try:
                result = task.result()
            except StopAsyncIteration:
                # The main streamer is exhausted
                if streamer is main_streamer:
                    main_streamer = None
                # A substreamer is exhausted
                else:
                    await manager.clean_streamer(streamer)
                    # Re-schedule the main streamer if necessary
                    if (main_streamer is not None
                            and main_streamer not in manager.tasks):
                        manager.create_task(main_streamer)
            else:
                # Switch mechanism: discard all the running substreamers
                if switch and streamer is main_streamer:
                    await manager.clean_streamers(sub_group)

                if streamer is main_streamer:
                    # A new source arrived: register it
                    await manager.enter_and_create_task(result)
                    # Re-schedule the main streamer if the task limit allows
                    if task_limit is None or task_limit > len(manager.tasks):
                        manager.create_task(streamer)
                else:
                    # Forward the item, then re-schedule its streamer
                    yield result
                    manager.create_task(streamer)
Base operator for managing an asynchronous sequence of sequences. The sequences are awaited concurrently, although it's possible to limit the amount of running sequences using the `task_limit` argument. The ``switch`` argument enables the switch mechanism, which causes the previous subsequence to be discarded when a new one is created. The items can either be generated in order or as soon as they're received, depending on the ``ordered`` argument.
entailment
def concat(source, task_limit=None): """Given an asynchronous sequence of sequences, generate the elements of the sequences in order. The sequences are awaited concurrently, although it's possible to limit the amount of running sequences using the `task_limit` argument. Errors raised in the source or an element sequence are propagated. """ return base_combine.raw( source, task_limit=task_limit, switch=False, ordered=True)
Given an asynchronous sequence of sequences, generate the elements of the sequences in order. The sequences are awaited concurrently, although it's possible to limit the amount of running sequences using the `task_limit` argument. Errors raised in the source or an element sequence are propagated.
entailment
def flatten(source, task_limit=None): """Given an asynchronous sequence of sequences, generate the elements of the sequences as soon as they're received. The sequences are awaited concurrently, although it's possible to limit the amount of running sequences using the `task_limit` argument. Errors raised in the source or an element sequence are propagated. """ return base_combine.raw( source, task_limit=task_limit, switch=False, ordered=False)
Given an asynchronous sequence of sequences, generate the elements of the sequences as soon as they're received. The sequences are awaited concurrently, although it's possible to limit the amount of running sequences using the `task_limit` argument. Errors raised in the source or an element sequence are propagated.
entailment
def concatmap(source, func, *more_sources, task_limit=None): """Apply a given function that creates a sequence from the elements of one or several asynchronous sequences, and generate the elements of the created sequences in order. The function is applied as described in `map`, and must return an asynchronous sequence. The returned sequences are awaited concurrently, although it's possible to limit the amount of running sequences using the `task_limit` argument. """ return concat.raw( combine.smap.raw(source, func, *more_sources), task_limit=task_limit)
Apply a given function that creates a sequence from the elements of one or several asynchronous sequences, and generate the elements of the created sequences in order. The function is applied as described in `map`, and must return an asynchronous sequence. The returned sequences are awaited concurrently, although it's possible to limit the amount of running sequences using the `task_limit` argument.
entailment
def flatmap(source, func, *more_sources, task_limit=None): """Apply a given function that creates a sequence from the elements of one or several asynchronous sequences, and generate the elements of the created sequences as soon as they arrive. The function is applied as described in `map`, and must return an asynchronous sequence. The returned sequences are awaited concurrently, although it's possible to limit the amount of running sequences using the `task_limit` argument. Errors raised in a source or output sequence are propagated. """ return flatten.raw( combine.smap.raw(source, func, *more_sources), task_limit=task_limit)
Apply a given function that creates a sequence from the elements of one or several asynchronous sequences, and generate the elements of the created sequences as soon as they arrive. The function is applied as described in `map`, and must return an asynchronous sequence. The returned sequences are awaited concurrently, although it's possible to limit the amount of running sequences using the `task_limit` argument. Errors raised in a source or output sequence are propagated.
entailment
def switchmap(source, func, *more_sources): """Apply a given function that creates a sequence from the elements of one or several asynchronous sequences and generate the elements of the most recently created sequence. The function is applied as described in `map`, and must return an asynchronous sequence. Errors raised in a source or output sequence (that was not already closed) are propagated. """ return switch.raw(combine.smap.raw(source, func, *more_sources))
Apply a given function that creates a sequence from the elements of one or several asynchronous sequences and generate the elements of the most recently created sequence. The function is applied as described in `map`, and must return an asynchronous sequence. Errors raised in a source or output sequence (that was not already closed) are propagated.
entailment
async def accumulate(source, func=op.add, initializer=None): """Generate a series of accumulated sums (or other binary function) from an asynchronous sequence. If ``initializer`` is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. """ iscorofunc = asyncio.iscoroutinefunction(func) async with streamcontext(source) as streamer: # Initialize if initializer is None: try: value = await anext(streamer) except StopAsyncIteration: return else: value = initializer # First value yield value # Iterate streamer async for item in streamer: value = func(value, item) if iscorofunc: value = await value yield value
Generate a series of accumulated sums (or other binary function) from an asynchronous sequence. If ``initializer`` is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty.
entailment
def reduce(source, func, initializer=None): """Apply a function of two arguments cumulatively to the items of an asynchronous sequence, reducing the sequence to a single value. If ``initializer`` is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty. """ acc = accumulate.raw(source, func, initializer) return select.item.raw(acc, -1)
Apply a function of two arguments cumulatively to the items of an asynchronous sequence, reducing the sequence to a single value. If ``initializer`` is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty.
entailment
async def list(source): """Generate a single list from an asynchronous sequence.""" result = [] async with streamcontext(source) as streamer: async for item in streamer: result.append(item) yield result
Generate a single list from an asynchronous sequence.
entailment
async def wait_stream(aiterable): """Wait for an asynchronous iterable to finish and return the last item. The iterable is executed within a safe stream context. A StreamEmpty exception is raised if the sequence is empty. """ async with streamcontext(aiterable) as streamer: async for item in streamer: item try: return item except NameError: raise StreamEmpty()
Wait for an asynchronous iterable to finish and return the last item. The iterable is executed within a safe stream context. A StreamEmpty exception is raised if the sequence is empty.
entailment
def operator(func=None, *, pipable=False):
    """Create a stream operator from an asynchronous generator
    (or any function returning an asynchronous iterable).

    Decorator usage::

        @operator
        async def random(offset=0., width=1.):
            while True:
                yield offset + width * random.random()

    Decorator usage for pipable operators::

        @operator(pipable=True)
        async def multiply(source, factor):
            async with streamcontext(source) as streamer:
                async for item in streamer:
                    yield factor * item

    In the case of pipable operators, the first argument is expected
    to be the asynchronous iterable used for piping.

    The return value is a dynamically created class.
    It has the same name, module and doc as the original function.

    A new stream is created by simply instantiating the operator::

        xs = random()
        ys = multiply(xs, 2)

    The original function is called at instantiation to check that
    the signatures match. In the case of pipable operators, the source
    is also checked for asynchronous iteration.

    The operator also has a pipe class method that can be used along
    with the piping syntax::

        xs = random()
        ys = xs | multiply.pipe(2)

    This is strictly equivalent to the previous example.

    Other methods are available:

      - `original`: the original function as a static method
      - `raw`: same as original but adds extra checking

    The raw method is useful to create new operators from existing ones::

        @operator(pipable=True)
        def double(source):
            return multiply.raw(source, 2)
    """
    def decorator(func):
        """Inner decorator for stream operator."""
        # Gather data from the decorated function
        bases = (Stream,)
        name = func.__name__
        module = func.__module__
        extra_doc = func.__doc__
        doc = extra_doc or f'Regular {name} stream operator.'

        # Extract signature
        signature = inspect.signature(func)
        parameters = list(signature.parameters.values())
        if parameters and parameters[0].name in ('self', 'cls'):
            raise ValueError(
                'An operator cannot be created from a method, '
                'since the decorated function becomes an operator class')

        # Parameters injected into the generated methods
        self_parameter = inspect.Parameter(
            'self', inspect.Parameter.POSITIONAL_OR_KEYWORD)
        cls_parameter = inspect.Parameter(
            'cls', inspect.Parameter.POSITIONAL_OR_KEYWORD)

        # Wrapped static method
        original = func
        original.__qualname__ = name + '.original'

        # Raw static method (overridden below for pipable operators)
        raw = func
        raw.__qualname__ = name + '.raw'

        # Init method
        def init(self, *args, **kwargs):
            if pipable and args:
                assert_async_iterable(args[0])
            factory = functools.partial(self.raw, *args, **kwargs)
            return Stream.__init__(self, factory)

        # Customize init signature
        new_parameters = [self_parameter] + parameters
        init.__signature__ = signature.replace(parameters=new_parameters)

        # Customize init method
        init.__qualname__ = name + '.__init__'
        init.__name__ = '__init__'
        init.__module__ = module
        init.__doc__ = f'Initialize the {name} stream.'

        if pipable:

            # Raw static method with extra source checking
            def raw(*args, **kwargs):
                if args:
                    assert_async_iterable(args[0])
                return func(*args, **kwargs)

            # Customize raw method
            raw.__signature__ = signature
            raw.__qualname__ = name + '.raw'
            raw.__module__ = module
            raw.__doc__ = doc

            # Pipe class method
            def pipe(cls, *args, **kwargs):
                return lambda source: cls(source, *args, **kwargs)

            # Customize pipe signature (drop the source parameter
            # when it is positional)
            if parameters and parameters[0].kind in (
                    inspect.Parameter.POSITIONAL_ONLY,
                    inspect.Parameter.POSITIONAL_OR_KEYWORD):
                new_parameters = [cls_parameter] + parameters[1:]
            else:
                new_parameters = [cls_parameter] + parameters
            pipe.__signature__ = signature.replace(
                parameters=new_parameters)

            # Customize pipe method
            pipe.__qualname__ = name + '.pipe'
            pipe.__module__ = module
            pipe.__doc__ = f'Pipable "{name}" stream operator.'
            if extra_doc:
                pipe.__doc__ += "\n\n    " + extra_doc

        # Gather the class attributes
        attrs = {
            '__init__': init,
            '__module__': module,
            '__doc__': doc,
            'raw': staticmethod(raw),
            'original': staticmethod(original),
            'pipe': classmethod(pipe) if pipable else None}

        # Create the operator class
        return type(name, bases, attrs)

    # Support usage both with and without parentheses
    return decorator if func is None else decorator(func)
Create a stream operator from an asynchronous generator (or any function returning an asynchronous iterable). Decorator usage:: @operator async def random(offset=0., width=1.): while True: yield offset + width * random.random() Decorator usage for pipable operators:: @operator(pipable=True) async def multiply(source, factor): async with streamcontext(source) as streamer: async for item in streamer: yield factor * item In the case of pipable operators, the first argument is expected to be the asynchronous iterable used for piping. The return value is a dynamically created class. It has the same name, module and doc as the original function. A new stream is created by simply instantiating the operator:: xs = random() ys = multiply(xs, 2) The original function is called at instantiation to check that the signatures match. In the case of pipable operators, the source is also checked for asynchronous iteration. The operator also has a pipe class method that can be used along with the piping syntax:: xs = random() ys = xs | multiply.pipe(2) This is strictly equivalent to the previous example. Other methods are available: - `original`: the original function as a static method - `raw`: same as original but adds extra checking The raw method is useful to create new operators from existing ones:: @operator(pipable=True) def double(source): return multiply.raw(source, 2)
entailment
def action(source, func): """Perform an action for each element of an asynchronous sequence without modifying it. The given function can be synchronous or asynchronous. """ if asyncio.iscoroutinefunction(func): async def innerfunc(arg): await func(arg) return arg else: def innerfunc(arg): func(arg) return arg return map.raw(source, innerfunc)
Perform an action for each element of an asynchronous sequence without modifying it. The given function can be synchronous or asynchronous.
entailment
def print(source, template=None, **kwargs): """Print each element of an asynchronous sequence without modifying it. An optional template can be provided to be formatted with the elements. All the keyword arguments are forwarded to the builtin function print. """ def func(value): if template: value = template.format(value) builtins.print(value, **kwargs) return action.raw(source, func)
Print each element of an asynchronous sequence without modifying it. An optional template can be provided to be formatted with the elements. All the keyword arguments are forwarded to the builtin function print.
entailment
def async_(fn): """Wrap the given function into a coroutine function.""" @functools.wraps(fn) async def wrapper(*args, **kwargs): return await fn(*args, **kwargs) return wrapper
Wrap the given function into a coroutine function.
entailment
def aitercontext(aiterable, *, cls=AsyncIteratorContext): """Return an asynchronous context manager from an asynchronous iterable. The context management makes sure the aclose asynchronous method has run before it exits. It also issues warnings and RuntimeError if it is used incorrectly. It is safe to use with any asynchronous iterable and prevent asynchronous iterator context to be wrapped twice. Correct usage:: ait = some_asynchronous_iterable() async with aitercontext(ait) as safe_ait: async for item in safe_ait: <block> An optional subclass of AsyncIteratorContext can be provided. This class will be used to wrap the given iterable. """ assert issubclass(cls, AsyncIteratorContext) aiterator = aiter(aiterable) if isinstance(aiterator, cls): return aiterator return cls(aiterator)
Return an asynchronous context manager from an asynchronous iterable. The context management makes sure the aclose asynchronous method has run before it exits. It also issues warnings and RuntimeError if it is used incorrectly. It is safe to use with any asynchronous iterable and prevent asynchronous iterator context to be wrapped twice. Correct usage:: ait = some_asynchronous_iterable() async with aitercontext(ait) as safe_ait: async for item in safe_ait: <block> An optional subclass of AsyncIteratorContext can be provided. This class will be used to wrap the given iterable.
entailment
async def takelast(source, n): """Forward the last ``n`` elements from an asynchronous sequence. If ``n`` is negative, it simply terminates after iterating the source. Note: it is required to reach the end of the source before the first element is generated. """ queue = collections.deque(maxlen=n if n > 0 else 0) async with streamcontext(source) as streamer: async for item in streamer: queue.append(item) for item in queue: yield item
Forward the last ``n`` elements from an asynchronous sequence. If ``n`` is negative, it simply terminates after iterating the source. Note: it is required to reach the end of the source before the first element is generated.
entailment
async def skip(source, n): """Forward an asynchronous sequence, skipping the first ``n`` elements. If ``n`` is negative, no elements are skipped. """ source = transform.enumerate.raw(source) async with streamcontext(source) as streamer: async for i, item in streamer: if i >= n: yield item
Forward an asynchronous sequence, skipping the first ``n`` elements. If ``n`` is negative, no elements are skipped.
entailment
async def skiplast(source, n): """Forward an asynchronous sequence, skipping the last ``n`` elements. If ``n`` is negative, no elements are skipped. Note: it is required to reach the ``n+1`` th element of the source before the first element is generated. """ queue = collections.deque(maxlen=n if n > 0 else 0) async with streamcontext(source) as streamer: async for item in streamer: if n <= 0: yield item continue if len(queue) == n: yield queue[0] queue.append(item)
Forward an asynchronous sequence, skipping the last ``n`` elements. If ``n`` is negative, no elements are skipped. Note: it is required to reach the ``n+1`` th element of the source before the first element is generated.
entailment
async def filterindex(source, func): """Filter an asynchronous sequence using the index of the elements. The given function is synchronous, takes the index as an argument, and returns ``True`` if the corresponding should be forwarded, ``False`` otherwise. """ source = transform.enumerate.raw(source) async with streamcontext(source) as streamer: async for i, item in streamer: if func(i): yield item
Filter an asynchronous sequence using the index of the elements. The given function is synchronous, takes the index as an argument, and returns ``True`` if the corresponding should be forwarded, ``False`` otherwise.
entailment
def slice(source, *args): """Slice an asynchronous sequence. The arguments are the same as the builtin type slice. There are two limitations compare to regular slices: - Positive stop index with negative start index is not supported - Negative step is not supported """ s = builtins.slice(*args) start, stop, step = s.start or 0, s.stop, s.step or 1 # Filter the first items if start < 0: source = takelast.raw(source, abs(start)) elif start > 0: source = skip.raw(source, start) # Filter the last items if stop is not None: if stop >= 0 and start < 0: raise ValueError( "Positive stop with negative start is not supported") elif stop >= 0: source = take.raw(source, stop - start) else: source = skiplast.raw(source, abs(stop)) # Filter step items if step is not None: if step > 1: source = filterindex.raw(source, lambda i: i % step == 0) elif step < 0: raise ValueError("Negative step not supported") # Return return source
Slice an asynchronous sequence. The arguments are the same as the builtin type slice. There are two limitations compared to regular slices: - Positive stop index with negative start index is not supported - Negative step is not supported
entailment
async def item(source, index): """Forward the ``n``th element of an asynchronous sequence. The index can be negative and works like regular indexing. If the index is out of range, and ``IndexError`` is raised. """ # Prepare if index >= 0: source = skip.raw(source, index) else: source = takelast(source, abs(index)) async with streamcontext(source) as streamer: # Get first item try: result = await anext(streamer) except StopAsyncIteration: raise IndexError("Index out of range") # Check length if index < 0: count = 1 async for _ in streamer: count += 1 if count != abs(index): raise IndexError("Index out of range") # Yield result yield result
Forward the ``n``th element of an asynchronous sequence. The index can be negative and works like regular indexing. If the index is out of range, an ``IndexError`` is raised.
entailment
def getitem(source, index): """Forward one or several items from an asynchronous sequence. The argument can either be a slice or an integer. See the slice and item operators for more information. """ if isinstance(index, builtins.slice): return slice.raw(source, index.start, index.stop, index.step) if isinstance(index, int): return item.raw(source, index) raise TypeError("Not a valid index (int or slice)")
Forward one or several items from an asynchronous sequence. The argument can either be a slice or an integer. See the slice and item operators for more information.
entailment
async def takewhile(source, func): """Forward an asynchronous sequence while a condition is met. The given function takes the item as an argument and returns a boolean corresponding to the condition to meet. The function can either be synchronous or asynchronous. """ iscorofunc = asyncio.iscoroutinefunction(func) async with streamcontext(source) as streamer: async for item in streamer: result = func(item) if iscorofunc: result = await result if not result: return yield item
Forward an asynchronous sequence while a condition is met. The given function takes the item as an argument and returns a boolean corresponding to the condition to meet. The function can either be synchronous or asynchronous.
entailment
def update_pipe_module(): """Populate the pipe module dynamically.""" module_dir = __all__ operators = stream.__dict__ for key, value in operators.items(): if getattr(value, 'pipe', None): globals()[key] = value.pipe if key not in module_dir: module_dir.append(key)
Populate the pipe module dynamically.
entailment
async def enumerate(source, start=0, step=1): """Generate ``(index, value)`` tuples from an asynchronous sequence. This index is computed using a starting point and an increment, respectively defaulting to ``0`` and ``1``. """ count = itertools.count(start, step) async with streamcontext(source) as streamer: async for item in streamer: yield next(count), item
Generate ``(index, value)`` tuples from an asynchronous sequence. This index is computed using a starting point and an increment, respectively defaulting to ``0`` and ``1``.
entailment
def starmap(source, func, ordered=True, task_limit=None): """Apply a given function to the unpacked elements of an asynchronous sequence. Each element is unpacked before applying the function. The given function can either be synchronous or asynchronous. The results can either be returned in or out of order, depending on the corresponding ``ordered`` argument. This argument is ignored if the provided function is synchronous. The coroutines run concurrently but their amount can be limited using the ``task_limit`` argument. A value of ``1`` will cause the coroutines to run sequentially. This argument is ignored if the provided function is synchronous. """ if asyncio.iscoroutinefunction(func): async def starfunc(args): return await func(*args) else: def starfunc(args): return func(*args) return map.raw(source, starfunc, ordered=ordered, task_limit=task_limit)
Apply a given function to the unpacked elements of an asynchronous sequence. Each element is unpacked before applying the function. The given function can either be synchronous or asynchronous. The results can either be returned in or out of order, depending on the corresponding ``ordered`` argument. This argument is ignored if the provided function is synchronous. The coroutines run concurrently but their amount can be limited using the ``task_limit`` argument. A value of ``1`` will cause the coroutines to run sequentially. This argument is ignored if the provided function is synchronous.
entailment
async def cycle(source): """Iterate indefinitely over an asynchronous sequence. Note: it does not perform any buffering, but re-iterate over the same given sequence instead. If the sequence is not re-iterable, the generator might end up looping indefinitely without yielding any item. """ while True: async with streamcontext(source) as streamer: async for item in streamer: yield item # Prevent blocking while loop if the stream is empty await asyncio.sleep(0)
Iterate indefinitely over an asynchronous sequence. Note: it does not perform any buffering, but re-iterate over the same given sequence instead. If the sequence is not re-iterable, the generator might end up looping indefinitely without yielding any item.
entailment
async def chunks(source, n): """Generate chunks of size ``n`` from an asynchronous sequence. The chunks are lists, and the last chunk might contain less than ``n`` elements. """ async with streamcontext(source) as streamer: async for first in streamer: xs = select.take(create.preserve(streamer), n-1) yield [first] + await aggregate.list(xs)
Generate chunks of size ``n`` from an asynchronous sequence. The chunks are lists, and the last chunk might contain less than ``n`` elements.
entailment
async def random(offset=0., width=1., interval=0.1): """Generate a stream of random numbers.""" while True: await asyncio.sleep(interval) yield offset + width * random_module.random()
Generate a stream of random numbers.
entailment
async def power(source, exponent): """Raise the elements of an asynchronous sequence to the given power.""" async with streamcontext(source) as streamer: async for item in streamer: yield item ** exponent
Raise the elements of an asynchronous sequence to the given power.
entailment
async def spaceout(source, interval): """Make sure the elements of an asynchronous sequence are separated in time by the given interval. """ timeout = 0 loop = asyncio.get_event_loop() async with streamcontext(source) as streamer: async for item in streamer: delta = timeout - loop.time() delay = delta if delta > 0 else 0 await asyncio.sleep(delay) yield item timeout = loop.time() + interval
Make sure the elements of an asynchronous sequence are separated in time by the given interval.
entailment
async def timeout(source, timeout): """Raise a time-out if an element of the asynchronous sequence takes too long to arrive. Note: the timeout is not global but specific to each step of the iteration. """ async with streamcontext(source) as streamer: while True: try: item = await wait_for(anext(streamer), timeout) except StopAsyncIteration: break else: yield item
Raise a time-out if an element of the asynchronous sequence takes too long to arrive. Note: the timeout is not global but specific to each step of the iteration.
entailment
async def delay(source, delay): """Delay the iteration of an asynchronous sequence.""" await asyncio.sleep(delay) async with streamcontext(source) as streamer: async for item in streamer: yield item
Delay the iteration of an asynchronous sequence.
entailment
async def chain(*sources): """Chain asynchronous sequences together, in the order they are given. Note: the sequences are not iterated until it is required, so if the operation is interrupted, the remaining sequences will be left untouched. """ for source in sources: async with streamcontext(source) as streamer: async for item in streamer: yield item
Chain asynchronous sequences together, in the order they are given. Note: the sequences are not iterated until it is required, so if the operation is interrupted, the remaining sequences will be left untouched.
entailment
async def zip(*sources): """Combine and forward the elements of several asynchronous sequences. Each generated value is a tuple of elements, using the same order as their respective sources. The generation continues until the shortest sequence is exhausted. Note: the different sequences are awaited in parrallel, so that their waiting times don't add up. """ async with AsyncExitStack() as stack: # Handle resources streamers = [await stack.enter_async_context(streamcontext(source)) for source in sources] # Loop over items while True: try: coros = builtins.map(anext, streamers) items = await asyncio.gather(*coros) except StopAsyncIteration: break else: yield tuple(items)
Combine and forward the elements of several asynchronous sequences. Each generated value is a tuple of elements, using the same order as their respective sources. The generation continues until the shortest sequence is exhausted. Note: the different sequences are awaited in parallel, so that their waiting times don't add up.
entailment
async def smap(source, func, *more_sources): """Apply a given function to the elements of one or several asynchronous sequences. Each element is used as a positional argument, using the same order as their respective sources. The generation continues until the shortest sequence is exhausted. The function is treated synchronously. Note: if more than one sequence is provided, they're awaited concurrently so that their waiting times don't add up. """ if more_sources: source = zip(source, *more_sources) async with streamcontext(source) as streamer: async for item in streamer: yield func(*item) if more_sources else func(item)
Apply a given function to the elements of one or several asynchronous sequences. Each element is used as a positional argument, using the same order as their respective sources. The generation continues until the shortest sequence is exhausted. The function is treated synchronously. Note: if more than one sequence is provided, they're awaited concurrently so that their waiting times don't add up.
entailment
def amap(source, corofn, *more_sources, ordered=True, task_limit=None):
    """Apply a given coroutine function to the elements of one or several
    asynchronous sequences.

    Each element is used as a positional argument, using the same order as
    their respective sources. The generation continues until the shortest
    sequence is exhausted. The results can either be returned in or out of
    order, depending on the corresponding ``ordered`` argument.

    The coroutines run concurrently but their amount can be limited using
    the ``task_limit`` argument. A value of ``1`` will cause the coroutines
    to run sequentially.

    If more than one sequence is provided, they're also awaited
    concurrently, so that their waiting times don't add up.
    """
    def func(*args):
        # Wrap each coroutine into a one-item stream so it can be
        # flattened by the advanced operators below.
        return create.just(corofn(*args))

    # ``concatmap`` preserves source order, ``flatmap`` yields as ready.
    operator = advanced.concatmap if ordered else advanced.flatmap
    return operator.raw(source, func, *more_sources, task_limit=task_limit)
Apply a given coroutine function to the elements of one or several asynchronous sequences. Each element is used as a positional argument, using the same order as their respective sources. The generation continues until the shortest sequence is exhausted. The results can either be returned in or out of order, depending on the corresponding ``ordered`` argument. The coroutines run concurrently but their amount can be limited using the ``task_limit`` argument. A value of ``1`` will cause the coroutines to run sequentially. If more than one sequence is provided, they're also awaited concurrently, so that their waiting times don't add up.
entailment
def map(source, func, *more_sources, ordered=True, task_limit=None):
    """Apply a given function to the elements of one or several
    asynchronous sequences.

    Each element is used as a positional argument, using the same order as
    their respective sources. The generation continues until the shortest
    sequence is exhausted. The function can either be synchronous or
    asynchronous (coroutine function).

    The results can either be returned in or out of order, depending on
    the corresponding ``ordered`` argument. This argument is ignored if
    the provided function is synchronous.

    The coroutines run concurrently but their amount can be limited using
    the ``task_limit`` argument. A value of ``1`` will cause the
    coroutines to run sequentially. This argument is ignored if the
    provided function is synchronous.

    If more than one sequence is provided, they're also awaited
    concurrently, so that their waiting times don't add up.

    It might happen that the provided function returns a coroutine but is
    not a coroutine function per se. In this case, one can wrap the
    function with ``aiostream.async_`` in order to force ``map`` to await
    the resulting coroutine. The following example illustrates the use of
    ``async_`` with a lambda function::

        from aiostream import stream, async_
        ...
        ys = stream.map(xs, async_(lambda ms: asyncio.sleep(ms / 1000)))
    """
    # Synchronous callables are dispatched to the cheap, sequential smap.
    if not asyncio.iscoroutinefunction(func):
        return smap.raw(source, func, *more_sources)
    return amap.raw(
        source, func, *more_sources, ordered=ordered, task_limit=task_limit)
Apply a given function to the elements of one or several asynchronous sequences. Each element is used as a positional argument, using the same order as their respective sources. The generation continues until the shortest sequence is exhausted. The function can either be synchronous or asynchronous (coroutine function). The results can either be returned in or out of order, depending on the corresponding ``ordered`` argument. This argument is ignored if the provided function is synchronous. The coroutines run concurrently but their amount can be limited using the ``task_limit`` argument. A value of ``1`` will cause the coroutines to run sequentially. This argument is ignored if the provided function is synchronous. If more than one sequence is provided, they're also awaited concurrently, so that their waiting times don't add up. It might happen that the provided function returns a coroutine but is not a coroutine function per se. In this case, one can wrap the function with ``aiostream.async_`` in order to force ``map`` to await the resulting coroutine. The following example illustrates the use ``async_`` with a lambda function:: from aiostream import stream, async_ ... ys = stream.map(xs, async_(lambda ms: asyncio.sleep(ms / 1000)))
entailment
def iterate(it):
    """Generate values from a synchronous or asynchronous iterable."""
    # Async iterables take precedence over plain iterables.
    if is_async_iterable(it):
        return from_async_iterable.raw(it)
    if not isinstance(it, Iterable):
        raise TypeError(
            f"{type(it).__name__!r} object is not (async) iterable")
    return from_iterable.raw(it)
Generate values from a synchronous or asynchronous iterable.
entailment
def repeat(value, times=None, *, interval=0):
    """Generate the same value a given number of times.

    If ``times`` is ``None``, the value is repeated indefinitely.
    An optional interval can be given to space the values out.
    """
    if times is None:
        it = itertools.repeat(value)
    else:
        it = itertools.repeat(value, times)
    agen = from_iterable.raw(it)
    if not interval:
        return agen
    return time.spaceout.raw(agen, interval)
Generate the same value a given number of times. If ``times`` is ``None``, the value is repeated indefinitely. An optional interval can be given to space the values out.
entailment
def range(*args, interval=0):
    """Generate a given range of numbers.

    It supports the same arguments as the builtin function.
    An optional interval can be given to space the values out.
    """
    agen = from_iterable.raw(builtins.range(*args))
    if interval:
        return time.spaceout.raw(agen, interval)
    return agen
Generate a given range of numbers. It supports the same arguments as the builtin function. An optional interval can be given to space the values out.
entailment
def count(start=0, step=1, *, interval=0):
    """Generate consecutive numbers indefinitely.

    Optional starting point and increment can be defined, respectively
    defaulting to ``0`` and ``1``. An optional interval can be given to
    space the values out.
    """
    agen = from_iterable.raw(itertools.count(start, step))
    if interval:
        return time.spaceout.raw(agen, interval)
    return agen
Generate consecutive numbers indefinitely. Optional starting point and increment can be defined, respectively defaulting to ``0`` and ``1``. An optional interval can be given to space the values out.
entailment
def say(self, text):
    """Say something to www.cleverbot.com

    :type text: string

    Returns: string
    """
    # Build the query including the conversation state kept on self.
    params = {
        "input": text,
        "key": self.key,
        "cs": self.cs,
        "conversation_id": self.convo_id,
        "wrapper": "CleverWrap.py",
    }
    self._process_reply(self._send(params))
    return self.output
Say something to www.cleverbot.com :type text: string Returns: string
entailment
def _send(self, params):
    """Make the request to www.cleverbot.com

    :type params: dict

    Returns: dict
    :raises requests.exceptions.RequestException: if the HTTP request
        fails. The error is printed before being re-raised.
    """
    try:
        r = requests.get(self.url, params=params)
    except requests.exceptions.RequestException as e:
        # Bug fix: the original printed the error and then fell through
        # to ``return r.json(...)`` with ``r`` unbound, which crashed
        # with an UnboundLocalError. Keep the print for backward
        # compatibility, then propagate the real error to the caller.
        print(e)
        raise
    # strict=False tolerates control characters in the API response.
    return r.json(strict=False)
Make the request to www.cleverbot.com :type params: dict Returns: dict
entailment
def _process_reply(self, reply): """ take the cleverbot.com response and populate properties. """ self.cs = reply.get("cs", None) self.count = int(reply.get("interaction_count", None)) self.output = reply.get("output", None) self.convo_id = reply.get("conversation_id", None) self.history = {key:value for key, value in reply.items() if key.startswith("interaction")} self.time_taken = int(reply.get("time_taken", None)) self.time_elapsed = int(reply.get("time_elapsed", None))
take the cleverbot.com response and populate properties.
entailment
def end(self):
    '''Ends the tracer. May be called in any state. Transitions the state
    to ended and releases any SDK resources owned by this tracer (this
    includes only internal resources; things like passed-in
    :class:`oneagent.common.DbInfoHandle` need to be released manually).

    Prefer using the tracer as a context manager (i.e., with a
    :code:`with`-block) instead of manually calling this method.
    '''
    # Already ended (or never started): nothing to release.
    if self.handle is None:
        return
    self.nsdk.tracer_end(self.handle)
    self.handle = None
Ends the tracer. May be called in any state. Transitions the state to ended and releases any SDK resources owned by this tracer (this includes only internal resources, things like passed-in :class:`oneagent.common.DbInfoHandle` need to be released manually). Prefer using the tracer as a context manager (i.e., with a :code:`with`-block) instead of manually calling this method.
entailment
def mark_failed(self, clsname, msg):
    '''Marks the tracer as failed with the given exception class name
    :code:`clsname` and message :code:`msg`. May only be called in the
    started state and only if the tracer is not already marked as failed.
    Note that this does not end the tracer! Once a tracer is marked as
    failed, attempts to do it again are forbidden.

    If possible, using the tracer as a context manager (i.e., with a
    :code:`with`-block) or :meth:`.mark_failed_exc` is more convenient
    than this method.

    :param str clsname: Fully qualified name of the exception type that
        caused the failure.
    :param str msg: Exception message that caused the failure.
    '''
    # Delegates straight to the native SDK; no Python-side state is kept,
    # so the "only once" restriction is enforced by the native layer.
    self.nsdk.tracer_error(self.handle, clsname, msg)
Marks the tracer as failed with the given exception class name :code:`clsname` and message :code:`msg`. May only be called in the started state and only if the tracer is not already marked as failed. Note that this does not end the tracer! Once a tracer is marked as failed, attempts to do it again are forbidden. If possible, using the tracer as a context manager (i.e., with a :code:`with`-block) or :meth:`.mark_failed_exc` is more convenient than this method. :param str clsname: Fully qualified name of the exception type that caused the failure. :param str msg: Exception message that caused the failure.
entailment
def mark_failed_exc(self, e_val=None, e_ty=None):
    '''Marks the tracer as failed with the given exception :code:`e_val`
    of type :code:`e_ty` (defaults to the current exception). May only be
    called in the started state and only if the tracer is not already
    marked as failed. Note that this does not end the tracer! Once a
    tracer is marked as failed, attempts to do it again are forbidden.

    If possible, using the tracer as a context manager (i.e., with a
    :code:`with`-block) is more convenient than this method.

    If :code:`e_val` and :code:`e_ty` are both none, the current
    exception (as returned by :func:`sys.exc_info`) is used.

    :param BaseException e_val: The exception object that caused the
        failure. If :code:`None`, the current exception value
        (:code:`sys.exc_info()[1]`) is used.
    :param type e_ty: The type of the exception that caused the failure.
        If :code:`None` the type of :code:`e_val` is used. If that is
        also :code:`None`, the current exception type
        (:code:`sys.exc_info()[0]`) is used.
    '''
    # All defaulting logic lives in the module-level helper.
    _error_from_exc(self.nsdk, self.handle, e_val, e_ty)
Marks the tracer as failed with the given exception :code:`e_val` of type :code:`e_ty` (defaults to the current exception). May only be called in the started state and only if the tracer is not already marked as failed. Note that this does not end the tracer! Once a tracer is marked as failed, attempts to do it again are forbidden. If possible, using the tracer as a context manager (i.e., with a :code:`with`-block) is more convenient than this method. If :code:`e_val` and :code:`e_ty` are both none, the current exception (as retured by :func:`sys.exc_info`) is used. :param BaseException e_val: The exception object that caused the failure. If :code:`None`, the current exception value (:code:`sys.exc_info()[1]`) is used. :param type e_ty: The type of the exception that caused the failure. If :code:`None` the type of :code:`e_val` is used. If that is also :code:`None`, the current exception type (:code:`sys.exc_info()[0]`) is used.
entailment
def _get_kvc(kv_arg): '''Returns a tuple keys, values, count for kv_arg (which can be a dict or a tuple containing keys, values and optinally count.''' if isinstance(kv_arg, Mapping): return six.iterkeys(kv_arg), six.itervalues(kv_arg), len(kv_arg) assert 2 <= len(kv_arg) <= 3, \ 'Argument must be a mapping or a sequence (keys, values, [len])' return ( kv_arg[0], kv_arg[1], kv_arg[2] if len(kv_arg) == 3 else len(kv_arg[0]))
Returns a tuple keys, values, count for kv_arg (which can be a dict or a tuple containing keys, values and optionally count).
entailment
def create_database_info(
        self, name, vendor, channel):
    '''Creates a database info with the given information for use with
    :meth:`trace_sql_database_request`.

    :param str name: The name (e.g., connection string) of the database.
    :param str vendor: The type of the database (e.g., sqlite,
        PostgreSQL, MySQL).
    :param Channel channel: The channel used to communicate with the
        database.

    :returns: A new handle, holding the given database information.
    :rtype: DbInfoHandle
    '''
    native_handle = self._nsdk.databaseinfo_create(
        name, vendor, channel.type_, channel.endpoint)
    return DbInfoHandle(self._nsdk, native_handle)
Creates a database info with the given information for use with :meth:`trace_sql_database_request`. :param str name: The name (e.g., connection string) of the database. :param str vendor: The type of the database (e.g., sqlite, PostgreSQL, MySQL). :param Channel channel: The channel used to communicate with the database. :returns: A new handle, holding the given database information. :rtype: DbInfoHandle
entailment
def create_web_application_info(
        self, virtual_host, application_id, context_root):
    '''Creates a web application info for use with
    :meth:`trace_incoming_web_request`.

    See
    <https://www.dynatrace.com/support/help/server-side-services/introduction/how-does-dynatrace-detect-and-name-services/#web-request-services>
    for more information about the meaning of the parameters.

    :param str virtual_host: The logical name of the web server that
        hosts the application.
    :param str application_id: A unique ID for the web application. This
        will also be used as the display name.
    :param str context_root: The context root of the web application.
        This is the common path prefix for requests which will be routed
        to the web application. If all requests to this server are routed
        to this application, use a slash :code:`'/'`.

    :rtype: WebapplicationInfoHandle
    '''
    native_handle = self._nsdk.webapplicationinfo_create(
        virtual_host, application_id, context_root)
    return WebapplicationInfoHandle(self._nsdk, native_handle)
Creates a web application info for use with :meth:`trace_incoming_web_request`. See <https://www.dynatrace.com/support/help/server-side-services/introduction/how-does-dynatrace-detect-and-name-services/#web-request-services> for more information about the meaning of the parameters. :param str virtual_host: The logical name of the web server that hosts the application. :param str application_id: A unique ID for the web application. This will also be used as the display name. :param str context_root: The context root of the web application. This is the common path prefix for requests which will be routed to the web application. If all requests to this server are routed to this application, use a slash :code:`'/'`. :rtype: WebapplicationInfoHandle
entailment
def trace_sql_database_request(self, database, sql):
    '''Create a tracer for the given database info and SQL statement.

    :param DbInfoHandle database: Database information (see
        :meth:`create_database_info`).
    :param str sql: The SQL statement to trace.

    :rtype: tracers.DatabaseRequestTracer
    '''
    assert isinstance(database, DbInfoHandle)
    native_handle = self._nsdk.databaserequesttracer_create_sql(
        database.handle, sql)
    return tracers.DatabaseRequestTracer(self._nsdk, native_handle)
Create a tracer for the given database info and SQL statement. :param DbInfoHandle database: Database information (see :meth:`create_database_info`). :param str sql: The SQL statement to trace. :rtype: tracers.DatabaseRequestTracer
entailment
def trace_incoming_web_request(
        self, webapp_info, url, method,
        headers=None, remote_address=None,
        str_tag=None, byte_tag=None):
    '''Create a tracer for an incoming webrequest.

    :param WebapplicationInfoHandle webapp_info: Web application
        information (see :meth:`create_web_application_info`).
    :param str url: The requested URL (including scheme, hostname/port,
        path and query).
    :param str method: The HTTP method of the request (e.g., GET or
        POST).
    :param headers: The HTTP headers of the request. Can be either a
        dictionary mapping header name to value (:class:`str` to
        :class:`str`) or a tuple containing a sequence of string header
        names as first element, an equally long sequence of corresponding
        values as second element and optionally a count as third element
        (this will default to the :func:`len` of the header names).
        Some headers can appear multiple times in an HTTP request. To
        capture all the values, either use the tuple-form and provide the
        name and corresponding values for each, or if possible for that
        particular header, set the value to an appropriately concatenated
        string.

        .. warning:: If you use Python 2, be sure to use the UTF-8
            encoding or the :class:`unicode` type! See :ref:`here
            <http-encoding-warning>` for more information.
    :type headers: \
        dict[str, str] or \
        tuple[~typing.Collection[str], ~typing.Collection[str]] or \
        tuple[~typing.Iterable[str], ~typing.Iterable[str], int]]
    :param str remote_address: The remote (client) IP address (of the
        peer of the socket connection via which the request was
        received).

        The remote address is useful to gain information about load
        balancers, proxies and ultimately the end user that is sending
        the request.

    For the other parameters, see :ref:`tagging`.

    :rtype: tracers.IncomingWebRequestTracer
    '''
    assert isinstance(webapp_info, WebapplicationInfoHandle)
    tracer = tracers.IncomingWebRequestTracer(
        self._nsdk,
        self._nsdk.incomingwebrequesttracer_create(
            webapp_info.handle, url, method))
    if not tracer:
        return tracer
    # From here on the native tracer exists; make sure it is ended if
    # any of the setup calls below fail.
    try:
        if headers:
            self._nsdk.incomingwebrequesttracer_add_request_headers(
                tracer.handle, *_get_kvc(headers))
        if remote_address:
            self._nsdk.incomingwebrequesttracer_set_remote_address(
                tracer.handle, remote_address)
        self._applytag(tracer, str_tag, byte_tag)
    except:  # noqa: E722 - intentionally broad, the exception is re-raised
        tracer.end()
        raise
    return tracer
Create a tracer for an incoming webrequest. :param WebapplicationInfoHandle webapp_info: Web application information (see :meth:`create_web_application_info`). :param str url: The requested URL (including scheme, hostname/port, path and query). :param str method: The HTTP method of the request (e.g., GET or POST). :param headers: The HTTP headers of the request. Can be either a dictionary mapping header name to value (:class:`str` to :class:`str`) or a tuple containing a sequence of string header names as first element, an equally long sequence of corresponding values as second element and optionally a count as third element (this will default to the :func:`len` of the header names). Some headers can appear multiple times in an HTTP request. To capture all the values, either use the tuple-form and provide the name and corresponding values for each, or if possible for that particular header, set the value to an appropriately concatenated string. .. warning:: If you use Python 2, be sure to use the UTF-8 encoding or the :class:`unicode` type! See :ref:`here <http-encoding-warning>` for more information. :type headers: \ dict[str, str] or \ tuple[~typing.Collection[str], ~typing.Collection[str]] or \ tuple[~typing.Iterable[str], ~typing.Iterable[str], int]] :param str remote_address: The remote (client) IP address (of the peer of the socket connection via which the request was received). The remote address is useful to gain information about load balancers, proxies and ultimately the end user that is sending the request. For the other parameters, see :ref:`tagging`. :rtype: tracers.IncomingWebRequestTracer
entailment
def trace_outgoing_web_request(self, url, method, headers=None):
    '''Create a tracer for an outgoing webrequest.

    :param str url: The request URL (including scheme, hostname/port,
        path and query).
    :param str method: The HTTP method of the request (e.g., GET or
        POST).
    :param headers: The HTTP headers of the request. Can be either a
        dictionary mapping header name to value (:class:`str` to
        :class:`str`) or a tuple containing a sequence of string header
        names as first element, an equally long sequence of corresponding
        values as second element and optionally a count as third element
        (this will default to the :func:`len` of the header names).
        Some headers can appear multiple times in an HTTP request. To
        capture all the values, either use the tuple-form and provide the
        name and corresponding values for each, or if possible for that
        particular header, set the value to an appropriately concatenated
        string.

        .. warning:: If you use Python 2, be sure to use the UTF-8
            encoding or the :class:`unicode` type! See :ref:`here
            <http-encoding-warning>` for more information.
    :type headers: \
        dict[str, str] or \
        tuple[~typing.Collection[str], ~typing.Collection[str]] or \
        tuple[~typing.Iterable[str], ~typing.Iterable[str], int]]

    :rtype: tracers.OutgoingWebRequestTracer

    .. versionadded:: 1.1.0
    '''
    tracer = tracers.OutgoingWebRequestTracer(
        self._nsdk, self._nsdk.outgoingwebrequesttracer_create(url, method))
    if not tracer:
        return tracer
    # End the native tracer if attaching the headers fails.
    try:
        if headers:
            self._nsdk.outgoingwebrequesttracer_add_request_headers(
                tracer.handle, *_get_kvc(headers))
    except:  # noqa: E722 - intentionally broad, the exception is re-raised
        tracer.end()
        raise
    return tracer
Create a tracer for an outgoing webrequest. :param str url: The request URL (including scheme, hostname/port, path and query). :param str method: The HTTP method of the request (e.g., GET or POST). :param headers: The HTTP headers of the request. Can be either a dictionary mapping header name to value (:class:`str` to :class:`str`) or a tuple containing a sequence of string header names as first element, an equally long sequence of corresponding values as second element and optionally a count as third element (this will default to the :func:`len` of the header names). Some headers can appear multiple times in an HTTP request. To capture all the values, either use the tuple-form and provide the name and corresponding values for each, or if possible for that particular header, set the value to an appropriately concatenated string. .. warning:: If you use Python 2, be sure to use the UTF-8 encoding or the :class:`unicode` type! See :ref:`here <http-encoding-warning>` for more information. :type headers: \ dict[str, str] or \ tuple[~typing.Collection[str], ~typing.Collection[str]] or \ tuple[~typing.Iterable[str], ~typing.Iterable[str], int]] :rtype: tracers.OutgoingWebRequestTracer .. versionadded:: 1.1.0
entailment
def trace_outgoing_remote_call(
        self, method, service, endpoint, channel, protocol_name=None):
    '''Creates a tracer for outgoing remote calls.

    :param str method: The name of the service method/operation.
    :param str service: The name of the service class/type.
    :param str endpoint: A string identifying the "instance" of the
        the service. See also `the general documentation on service
        endpoints`__.
    :param Channel channel: The channel used to communicate with the
        service.
    :param str protocol_name: The name of the remoting protocol (on top
        of the communication protocol specified in
        :code:`channel.type_`.) that is used to to communicate with the
        service (e.g., RMI, Protobuf, ...).

    __ \
    https://github.com/Dynatrace/OneAgent-SDK#common-concepts-service-endpoints-and-communication-endpoints

    :rtype: tracers.OutgoingRemoteCallTracer
    '''
    tracer = tracers.OutgoingRemoteCallTracer(
        self._nsdk,
        self._nsdk.outgoingremotecalltracer_create(
            method, service, endpoint, channel.type_, channel.endpoint))
    if protocol_name is not None:
        self._nsdk.outgoingremotecalltracer_set_protocol_name(
            tracer.handle, protocol_name)
    return tracer
Creates a tracer for outgoing remote calls. :param str method: The name of the service method/operation. :param str service: The name of the service class/type. :param str endpoint: A string identifying the "instance" of the the service. See also `the general documentation on service endpoints`__. :param Channel channel: The channel used to communicate with the service. :param str protocol_name: The name of the remoting protocol (on top of the communication protocol specified in :code:`channel.type_`.) that is used to to communicate with the service (e.g., RMI, Protobuf, ...). __ \ https://github.com/Dynatrace/OneAgent-SDK#common-concepts-service-endpoints-and-communication-endpoints :rtype: tracers.OutgoingRemoteCallTracer
entailment
def trace_incoming_remote_call(
        self, method, name, endpoint,
        protocol_name=None, str_tag=None, byte_tag=None):
    '''Creates a tracer for incoming remote calls.

    For the parameters, see :ref:`tagging` (:code:`str_tag` and
    :code:`byte_tag`) and :meth:`trace_outgoing_remote_call` (all
    others).

    :rtype: tracers.IncomingRemoteCallTracer
    '''
    tracer = tracers.IncomingRemoteCallTracer(
        self._nsdk,
        self._nsdk.incomingremotecalltracer_create(method, name, endpoint))
    if protocol_name is not None:
        self._nsdk.incomingremotecalltracer_set_protocol_name(
            tracer.handle, protocol_name)
    # Link this tracer to the outgoing side via the provided tag(s).
    self._applytag(tracer, str_tag, byte_tag)
    return tracer
Creates a tracer for incoming remote calls. For the parameters, see :ref:`tagging` (:code:`str_tag` and :code:`byte_tag`) and :meth:`trace_outgoing_remote_call` (all others). :rtype: tracers.IncomingRemoteCallTracer
entailment
def trace_in_process_link(self, link_bytes):
    '''Creates a tracer for tracing asynchronous related processing in
    the same process.

    For more information see :meth:`create_in_process_link`.

    :param bytes link_bytes: An in-process link created using
        :meth:`create_in_process_link`.

    :rtype: tracers.InProcessLinkTracer

    .. versionadded:: 1.1.0
    '''
    native_handle = self._nsdk.trace_in_process_link(link_bytes)
    return tracers.InProcessLinkTracer(self._nsdk, native_handle)
Creates a tracer for tracing asynchronous related processing in the same process. For more information see :meth:`create_in_process_link`. :param bytes link_bytes: An in-process link created using :meth:`create_in_process_link`. :rtype: tracers.InProcessLinkTracer .. versionadded:: 1.1.0
entailment
def add_custom_request_attribute(self, key, value):
    '''Adds a custom request attribute to the current active tracer.

    :param str key: The name of the custom request attribute, the name
        is mandatory and may not be None.
    :param value: The value of the custom request attribute. Currently
        supported types are integer, float and string values. The value
        is mandatory and may not be None.
    :type value: str or int or float

    .. versionadded:: 1.1.0
    '''
    # Dispatch on the value type; check int before float since they are
    # distinct native attribute kinds.
    if isinstance(value, int):
        self._nsdk.customrequestattribute_add_integer(key, value)
        return
    if isinstance(value, float):
        self._nsdk.customrequestattribute_add_float(key, value)
        return
    if isinstance(value, six.string_types):
        self._nsdk.customrequestattribute_add_string(key, value)
        return
    # Unsupported type: emit a warning through the agent's logger, if any.
    warn = self._nsdk.agent_get_logging_callback()
    if warn:
        warn('Can\'t add custom request attribute \'{0}\' '
             'because the value type \'{1}\' is not supported!'.format(
                 key, type(value)))
Adds a custom request attribute to the current active tracer. :param str key: The name of the custom request attribute, the name is mandatory and may not be None. :param value: The value of the custom request attribute. Currently supported types are integer, float and string values. The value is mandatory and may not be None. :type value: str or int or float .. versionadded:: 1.1.0
entailment
def close(self):
    '''Closes the handle, if it is still open.

    Usually, you should prefer using the handle as a context manager to
    calling :meth:`close` manually.'''
    if self.handle is None:
        # Already closed; closing twice is a harmless no-op.
        return
    self.close_handle(self.nsdk, self.handle)
    self.handle = None
Closes the handle, if it is still open. Usually, you should prefer using the handle as a context manager to calling :meth:`close` manually.
entailment
def all_original_children(self):
    '''Yields all (direct and indirect) children with LINK_CHILD.'''
    # Flatten the subtrees of all directly linked children, skipping
    # children attached via other link kinds.
    subtrees = (
        child.all_nodes_in_subtree()
        for link, child in self.children
        if link == self.LINK_CHILD)
    return chain.from_iterable(subtrees)
Yields all (direct and indirect) children with LINK_CHILD.
entailment
def sdkopts_from_commandline(argv=None, remove=False, prefix='--dt_'):
    '''Creates a SDK option list for use with the :code:`sdkopts`
    parameter of :func:`.initialize` from a list :code:`argv` of command
    line parameters.

    An element in :code:`argv` is treated as an SDK option if it starts
    with :code:`prefix`. The return value of this function will then
    contain the remainder of that parameter (without the prefix). If
    :code:`remove` is :data:`True`, these arguments will be removed from
    :code:`argv`.

    :param argv: An iterable of command line parameter strings. Defaults
        to :data:`sys.argv`. Must be a :obj:`~typing.MutableSequence` if
        :code:`remove` is :data:`True`.
    :type argv: ~typing.Iterable[str] or ~typing.MutableSequence[str]
    :param bool remove: Whether to remove a command line parameter that
        was recognized as an SDK option from :code:`argv` (if
        :data:`True`) or leave :code:`argv` unmodified (if
        :data:`False`). If :data:`True`, :code:`argv` must be a
        :obj:`~typing.MutableSequence`.
    :param str prefix: The prefix string by which SDK options are
        recognized and which is removed from the copy of the command line
        parameter that is added to the return value.

    :rtype: list[str]
    '''
    if argv is None:
        argv = sys.argv
    plen = len(prefix)
    if not remove:
        return [arg[plen:] for arg in argv if arg.startswith(prefix)]
    # Partition into SDK options (returned) and everything else (kept),
    # then rewrite argv in place so callers holding a reference see it.
    options = []
    kept = []
    for arg in argv:
        if arg.startswith(prefix):
            options.append(arg[plen:])
        else:
            kept.append(arg)
    argv[:] = kept
    return options
Creates a SDK option list for use with the :code:`sdkopts` parameter of :func:`.initialize` from a list :code:`argv` of command line parameters. An element in :code:`argv` is treated as an SDK option if starts with :code:`prefix`. The return value of this function will then contain the remainder of that parameter (without the prefix). If :code:`remove` is :data:`True`, these arguments will be removed from :code:`argv`. :param argv: An iterable of command line parameter strings. Defaults to :data:`sys.argv`. Must be a :obj:`~typing.MutableSequence` if :code:`remove` is :data:`True`. :type argv: ~typing.Iterable[str] or ~typing.MutableSequence[str] :param bool remove: Whether to remove a command line parameter that was recognized as an SDK option from :code:`argv` (if :data:`True`) or leave :code:`argv` unmodified (if :data:`False`). If :data:`True`, :code:`argv` must be a :obj:`~typing.MutableSequence`. :param str prefix: The prefix string by which SDK options are recognized and which is removed from the copy of the command line parameter that is added to the return value. :rtype: list[str]
entailment
def initialize(sdkopts=(), sdklibname=None):
    '''Attempts to initialize the SDK with the specified options.

    Even if initialization fails, a dummy SDK will be available so that
    SDK functions can be called but will do nothing.

    If you call this function multiple times, you must call
    :func:`shutdown` just as many times. The options from all but the
    first :code:`initialize` call will be ignored (the return value will
    have the :data:`InitResult.STATUS_ALREADY_INITIALIZED` status code in
    that case).

    :param sdkopts: A sequence of strings of the form
        :samp:`{NAME}={VALUE}` that set the given SDK options. Ignored in
        all but the first :code:`initialize` call.
    :type sdkopts: ~typing.Iterable[str]
    :param str sdklibname: The file or directory name of the native C SDK
        DLL. If None, the shared library packaged directly with the agent
        is used. Using a value other than None is only acceptable for
        debugging. You are responsible for providing a native SDK version
        that matches the Python SDK version.

    :rtype: .InitResult
    '''
    global _sdk_ref_count #pylint:disable=global-statement
    global _sdk_instance #pylint:disable=global-statement
    # All reference-count bookkeeping happens under the module lock so
    # concurrent initialize/shutdown calls cannot corrupt the count.
    with _sdk_ref_lk:
        logger.debug("initialize: ref count = %d", _sdk_ref_count)
        # _try_init_noref is idempotent: repeated calls report
        # STATUS_ALREADY_INITIALIZED instead of re-initializing.
        result = _try_init_noref(sdkopts, sdklibname)
        # Lazily create the singleton SDK wrapper on first use.
        if _sdk_instance is None:
            _sdk_instance = SDK(try_get_sdk())
        # Each initialize() must be balanced by one shutdown().
        _sdk_ref_count += 1
        return result
Attempts to initialize the SDK with the specified options. Even if initialization fails, a dummy SDK will be available so that SDK functions can be called but will do nothing. If you call this function multiple times, you must call :func:`shutdown` just as many times. The options from all but the first :code:`initialize` call will be ignored (the return value will have the :data:`InitResult.STATUS_ALREADY_INITIALIZED` status code in that case). :param sdkopts: A sequence of strings of the form :samp:`{NAME}={VALUE}` that set the given SDK options. Igored in all but the first :code:`initialize` call. :type sdkopts: ~typing.Iterable[str] :param str sdklibname: The file or directory name of the native C SDK DLL. If None, the shared library packaged directly with the agent is used. Using a value other than None is only acceptable for debugging. You are responsible for providing a native SDK version that matches the Python SDK version. :rtype: .InitResult
entailment
def shutdown():
    '''Shut down the SDK.

    :returns: An exception object if an error occurred, a falsy value
        otherwise.
    :rtype: Exception
    '''
    global _sdk_ref_count  #pylint:disable=global-statement
    global _sdk_instance  #pylint:disable=global-statement
    global _should_shutdown  #pylint:disable=global-statement
    with _sdk_ref_lk:
        logger.debug("shutdown: ref count = %d, should_shutdown = %s", \
            _sdk_ref_count, _should_shutdown)
        nsdk = nativeagent.try_get_sdk()
        if not nsdk:
            # Nothing to shut down; reset the count so the state stays sane.
            logger.warning('shutdown: SDK not initialized or already shut down')
            _sdk_ref_count = 0
            return None
        if _sdk_ref_count > 1:
            # Other initialize() callers remain: just drop one reference.
            logger.debug('shutdown: reference count is now %d', _sdk_ref_count)
            _sdk_ref_count -= 1
            return None
        logger.info('shutdown: Shutting down SDK.')
        try:
            if _should_shutdown:
                _rc = nsdk.shutdown()
                if _rc == ErrorCode.NOT_INITIALIZED:
                    logger.warning('shutdown: native SDK was not initialized')
                else:
                    nativeagent.checkresult(nsdk, _rc, 'shutdown')
                _should_shutdown = False
        except SDKError as e:
            # Report the failure to the caller instead of raising.
            logger.warning('shutdown failed', exc_info=sys.exc_info())
            return e
        _sdk_ref_count = 0
        _sdk_instance = None
        nativeagent._force_initialize(None)  #pylint:disable=protected-access
        logger.debug('shutdown: completed')
        return None
Shut down the SDK. :returns: An exception object if an error occurred, a falsy value otherwise. :rtype: Exception
entailment
def error_from_exc(nsdk, tracer_h, e_val=None, e_ty=None):
    """Attach appropriate error information to tracer_h.

    If e_val and e_ty are None, the current exception is used."""
    if not tracer_h:
        return
    if e_val is None and e_ty is None:
        # Fall back to the exception currently being handled.
        e_ty, e_val = sys.exc_info()[:2]
    elif e_ty is None:
        # Only a value was supplied; derive its type.
        e_ty = type(e_val)
    nsdk.tracer_error(tracer_h, getfullname(e_ty), str(e_val))
Attach appropriate error information to tracer_h. If e_val and e_ty are None, the current exception is used.
entailment
def waitinglist_entry_form(context):
    """
    Get a (new) form object to post a new comment.

    Syntax::

        {% waitinglist_entry_form as [varname] %}
    """
    initial = {}
    if "request" in context:
        request = context["request"]
        initial["referrer"] = request.META.get("HTTP_REFERER", "")
        initial["campaign"] = request.GET.get("wlc", "")
    return WaitingListEntryForm(initial=initial)
Get a (new) form object to post a new comment. Syntax:: {% waitinglist_entry_form as [varname] %}
entailment
def _host():
    """Get the Host from the most recent HTTP request."""
    netloc = request.urlparts[1]
    try:
        host, _ = netloc.split(':')
    except ValueError:
        # No "host:port" pair. Host defaults to '127.0.0.1' in bottle.request.
        return DEFAULT_BIND
    return host if host else DEFAULT_BIND
Get the Host from the most recent HTTP request.
entailment
def wait_for(port_num, timeout):
    """waits while process starts.
    Args:
        port_num - port number
        timeout  - specify how long, in seconds, a command can take before
                   it times out.
    return True if process started, return False if not
    """
    logger.debug("wait for {port_num}".format(**locals()))
    deadline = time.time() + timeout
    retry_interval = 0.1
    while time.time() < deadline:
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            try:
                probe.connect((_host(), port_num))
                return True
            except (IOError, socket.error):
                # Not accepting connections yet; back off briefly.
                time.sleep(retry_interval)
        finally:
            probe.close()
    return False
waits while process starts. Args: port_num - port number timeout - specify how long, in seconds, a command can take before times out. return True if process started, return False if not
entailment
def repair_mongo(name, dbpath):
    """repair mongodb after unsafe shutdown"""
    log_file = os.path.join(dbpath, 'mongod.log')
    cmd = [name, "--dbpath", dbpath, "--logpath", log_file,
           "--logappend", "--repair"]
    proc = subprocess.Popen(
        cmd,
        universal_newlines=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    timeout = 45
    t_start = time.time()
    # Poll the repair process, echoing its output, until it exits or we
    # give up after `timeout` seconds.
    while time.time() - t_start < timeout:
        line = str(proc.stdout.readline())
        logger.info("repair output: %s" % (line,))
        return_code = proc.poll()
        if return_code is not None:
            if return_code:
                raise Exception("mongod --repair failed with exit code %s, "
                                "check log file: %s" % (return_code, log_file))
            # Success when poll() returns 0
            return
        time.sleep(1)
    # Still running after the timeout: kill it and report failure.
    proc.terminate()
    raise Exception("mongod --repair failed to exit after %s seconds, "
                    "check log file: %s" % (timeout, log_file))
repair mongodb after unsafe shutdown
entailment
def mprocess(name, config_path, port=None, timeout=180, silence_stdout=True):
    """start 'name' process with params from config_path.

    Args:
        name - process name or path
        config_path - path to the file where the configuration is stored
        port - process's port
        timeout - specify how long, in seconds, a command can take before
                  it times out. if timeout <= 0 - doesn't wait for the
                  process start to complete
        silence_stdout - if True (default), redirect stdout to /dev/null

    return tuple (Popen object, host) if process started,
    return (None, None) if not
    """
    logger.debug(
        "mprocess(name={name!r}, config_path={config_path!r}, port={port!r}, "
        "timeout={timeout!r})".format(**locals()))
    if not (config_path and isinstance(config_path, str)
            and os.path.exists(config_path)):
        raise OSError("can't find config file {config_path}".format(**locals()))
    cfg = read_config(config_path)
    cmd = [name, "--config", config_path]
    if cfg.get('port', None) is None or port:
        # Config has no port (or an explicit one was requested): pick a
        # free one and pass it on the command line.
        port = port or PortPool().port(check=True)
        cmd.extend(['--port', str(port)])
    host = "{host}:{port}".format(host=_host(), port=port)
    try:
        logger.debug("execute process: %s", ' '.join(cmd))
        proc = subprocess.Popen(
            cmd,
            stdout=DEVNULL if silence_stdout else None,
            stderr=subprocess.STDOUT)
        if proc.poll() is not None:
            logger.debug("process is not alive")
            raise OSError("Process started, but died immediately.")
    except (OSError, TypeError) as err:
        message = "exception while executing process: {err}".format(err=err)
        logger.debug(message)
        raise OSError(message)
    if timeout > 0 and wait_for(port, timeout):
        logger.debug("process '{name}' has started: pid={proc.pid}, host={host}".format(**locals()))
        return (proc, host)
    elif timeout > 0:
        # Started but never became reachable: kill it and raise.
        logger.debug("hasn't connected to pid={proc.pid} with host={host} during timeout {timeout} ".format(**locals()))
        logger.debug("terminate process with pid={proc.pid}".format(**locals()))
        kill_mprocess(proc)
        proc_alive(proc) and time.sleep(3)  # wait while the process stops
        message = ("Could not connect to process during "
                   "{timeout} seconds".format(timeout=timeout))
        raise TimeoutError(message, errno.ETIMEDOUT)
    # timeout <= 0: return without waiting for the process to come up.
    return (proc, host)
start 'name' process with params from config_path. Args: name - process name or path config_path - path to file where should be stored configuration port - process's port timeout - specify how long, in seconds, a command can take before times out. if timeout <=0 - doesn't wait for complete start process silence_stdout - if True (default), redirect stdout to /dev/null return tuple (Popen object, host) if process started, return (None, None) if not
entailment
def wait_mprocess(process, timeout):
    """Compatibility function for waiting on a process with a timeout.

    Raises TimeoutError when the timeout is reached.
    """
    if PY3:
        try:
            return process.wait(timeout=timeout)
        except subprocess.TimeoutExpired as exc:
            raise TimeoutError(str(exc))
    # Python 2: emulate wait(timeout=...) by polling.
    deadline = time.time() + timeout
    while True:
        status = process.poll()
        if status is not None:
            return status
        if time.time() > deadline:
            raise TimeoutError("Process %s timed out after %s seconds" %
                               (process.pid, timeout))
        time.sleep(0.05)
Compatibility function for waiting on a process with a timeout. Raises TimeoutError when the timeout is reached.
entailment
def kill_mprocess(process):
    """kill process
    Args:
        process - Popen object for process

    Returns True when the process is no longer alive.
    """
    if process and proc_alive(process):
        process.terminate()
        # Reap the child so it does not linger as a zombie.
        process.communicate()
    return not proc_alive(process)
kill process Args: process - Popen object for process
entailment
def cleanup_mprocess(config_path, cfg):
    """remove all process's stuff
    Args:
       config_path - process's options file
       cfg - process's config
    """
    # Remove every filesystem artifact the process may have created.
    for key in ('keyFile', 'logPath', 'dbpath'):
        remove_path(cfg.get(key, None))
    # Finally remove the config file itself, if present.  An explicit `if`
    # replaces the original `a and b and f()` expression abused for control
    # flow.
    if isinstance(config_path, str) and os.path.exists(config_path):
        remove_path(config_path)
remove all process's stuff Args: config_path - process's options file cfg - process's config
entailment
def remove_path(path):
    """remove path from file system
    If path is None - do nothing"""
    if path is None or not os.path.exists(path):
        return
    if platform.system() == 'Windows':
        # Need to have write permission before deleting the file.
        os.chmod(path, stat.S_IWRITE)
    try:
        if os.path.isdir(path):
            shutil.rmtree(path)
        elif os.path.isfile(path):
            # Was `shutil.os.remove`: shutil merely re-exports os, so call
            # os.remove directly.
            os.remove(path)
    except OSError:
        # Best-effort cleanup: log and continue instead of raising.
        logger.exception("Could not remove path: %s" % path)
remove path from file system If path is None - do nothing
entailment
def write_config(params, config_path=None):
    """write mongo*'s config file
    Args:
       params - options which the file contains
       config_path - path to the config file; created if None
    Return config_path
       where config_path - path to mongo*'s options file
    """
    if config_path is None:
        # mkstemp instead of the insecure, deprecated tempfile.mktemp:
        # it creates the file atomically with safe permissions.
        handle, config_path = tempfile.mkstemp(prefix="mongo-")
        os.close(handle)
    cfg = params.copy()
    if 'setParameter' in cfg:
        set_parameters = cfg.pop('setParameter')
        try:
            # Flatten {'name': value} into "setParameter = name=value" lines.
            for key, value in set_parameters.items():
                cfg['setParameter = ' + key] = value
        except AttributeError:
            reraise(RequestError,
                    'Not a valid value for setParameter: %r '
                    'Expected "setParameter": {<param name> : value, ...}'
                    % set_parameters)
    # fix boolean value: the mongod config format wants lowercase true/false
    for key, value in cfg.items():
        if isinstance(value, bool):
            cfg[key] = json.dumps(value)
    with open(config_path, 'w') as config_file:
        data = '\n'.join('%s=%s' % (key, item) for key, item in cfg.items())
        config_file.write(data)
    return config_path
write mongo*'s config file Args: params - options which the file contains config_path - path to the config file, will be created if None Return config_path where config_path - path to mongo*'s options file
entailment
def read_config(config_path):
    """read config_path and return options as dictionary"""
    options = {}
    with open(config_path, 'r') as fd:
        for line in fd:
            if '=' not in line:
                continue
            key, raw_value = line.split('=', 1)
            try:
                options[key] = json.loads(raw_value)
            except ValueError:
                # Not valid JSON: keep the raw string minus the newline.
                options[key] = raw_value.rstrip('\n')
    return options
read config_path and return options as dictionary
entailment
def __check_port(self, port):
    """check port status
    return True if port is free, False else
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # A successful bind means nothing else holds the port.
        probe.bind((_host(), port))
        return True
    except socket.error:
        return False
    finally:
        probe.close()
check port status return True if port is free, False else
entailment
def release_port(self, port):
    """release port"""
    if port in self.__closed:
        self.__closed.remove(port)
    # Make the port available for allocation again.
    self.__ports.add(port)
release port
entailment
def port(self, check=False):
    """return next opened port
    Args:
      check - check whether the port is really free
    """
    if not self.__ports:  # refresh ports if sequence is empty
        self.refresh()
    try:
        port = self.__ports.pop()
        if check:
            # Keep popping until a port that actually binds is found;
            # rejected ports are handed back via release_port().
            while not self.__check_port(port):
                self.release_port(port)
                port = self.__ports.pop()
    except (IndexError, KeyError):
        # pop() on an exhausted pool raises KeyError (set) / IndexError.
        raise IndexError("Could not find a free port,\nclosed ports: {closed}".format(closed=self.__closed))
    self.__closed.add(port)
    return port
return next opened port Args: check - check whether the port is really free
entailment
def refresh(self, only_closed=False):
    """refresh ports status
    Args:
      only_closed - check status only for closed ports
    """
    if only_closed:
        # Materialize the filter result: on Python 3, filter() returns a
        # one-shot iterator, so using it in both set operations below would
        # leave the second one seeing an empty sequence.
        opened = set(filter(self.__check_port, self.__closed))
        self.__closed = self.__closed.difference(opened)
        self.__ports = self.__ports.union(opened)
    else:
        # Re-check every known port and re-partition into free/closed.
        ports = self.__closed.union(self.__ports)
        self.__ports = set(filter(self.__check_port, ports))
        self.__closed = ports.difference(self.__ports)
refresh ports status Args: only_closed - check status only for closed ports
entailment
def change_range(self, min_port=1025, max_port=2000, port_sequence=None):
    """change Pool port range"""
    # Delegates to the range initializer; presumably an explicit
    # port_sequence overrides the min/max range -- TODO confirm in
    # __init_range.
    self.__init_range(min_port, max_port, port_sequence)
change Pool port range
entailment
def setup_versioned_routes(routes, version=None):
    """Set up routes with a version prefix."""
    prefix = ('/' + version) if version else ""
    for path_method, handler in routes.items():
        path, method = path_method
        route(prefix + path, method, handler)
Set up routes with a version prefix.
entailment
def daemonize_posix(self):
    """
    do the UNIX double-fork magic, see Stevens' "Advanced
    Programming in the UNIX Environment" for details (ISBN 0201563177)
    http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
    """
    logger.info('daemonize_posix')
    try:
        pid = os.fork()
        if pid > 0:
            # First parent: return the intermediate child's pid to the caller.
            logger.debug('forked first child, pid = %d' % (pid,))
            return pid
        logger.debug('in child after first fork, pid = %d' % (pid, ))
    except OSError as error:
        logger.exception('fork #1')
        sys.stderr.write("fork #1 failed: %d (%s)\n" % (error.errno, error.strerror))
        sys.exit(1)
    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0)
    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # exit from second parent
            logger.debug('forked second child, pid = %d, exiting' % (pid,))
            sys.exit(0)
    except OSError as error:
        logger.exception('fork #2')
        sys.stderr.write("fork #2 failed: %d (%s)\n" % (error.errno, error.strerror))
        sys.exit(1)
    # redirect standard file descriptors
    logger.info('daemonized, pid = %d' % (pid, ))
    sys.stdin.flush()
    sys.stdout.flush()
    sys.stderr.flush()
    os.dup2(self.stdin.fileno(), sys.stdin.fileno())
    os.dup2(self.stdout.fileno(), sys.stdout.fileno())
    os.dup2(self.stderr.fileno(), sys.stderr.fileno())
    # write pidfile; remove it again at interpreter exit
    atexit.register(self.delpid)
    pid = str(os.getpid())
    with open(self.pidfile, 'w+') as fd:
        fd.write("%s\n" % pid)
do the UNIX double-fork magic, see Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177) http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
entailment
def start(self): """ Start the daemon """ # Check for a pidfile to see if the daemon already runs logger.info('Starting daemon') try: with open(self.pidfile, 'r') as fd: pid = int(fd.read().strip()) except IOError: pid = None if pid: message = "pidfile %s already exist. Daemon already running?\n" sys.stderr.write(message % self.pidfile) sys.exit(1) # Start the daemon pid = self.daemonize() if pid: return pid self.run()
Start the daemon
entailment
def stop(self): """ Stop the daemon """ # Get the pid from the pidfile logger.debug("reading %s" % (self.pidfile,)) try: with open(self.pidfile, 'r') as fd: pid = int(fd.read().strip()) except IOError: logger.exception("reading %s" % (self.pidfile, )) pid = None if not pid: message = "pidfile %s does not exist. Daemon not running?\n" sys.stderr.write(message % self.pidfile) return # not an error in a restart if os.name == "nt": subprocess.call(["taskkill", "/f", "/t", "/pid", str(pid)]) if os.path.exists(self.pidfile): os.remove(self.pidfile) else: # Try killing the daemon process try: os.kill(pid, SIGTERM) except OSError as err: err = str(err) if err.find("No such process") > 0: if os.path.exists(self.pidfile): os.remove(self.pidfile) else: raise
Stop the daemon
entailment
def cleanup(self):
    """remove all members without reconfig"""
    # Tear down each member individually, skipping replica set reconfig,
    # then forget the id->host mapping.
    for member_id in self.server_map:
        self.member_del(member_id, reconfig=False)
    self.server_map.clear()
remove all members without reconfig
entailment
def host2id(self, hostname):
    """return member id by hostname"""
    matches = (member_id for member_id, host in self.server_map.items()
               if host == hostname)
    # First matching id, or None when the hostname is unknown.
    return next(matches, None)
return member id by hostname
entailment
def update_server_map(self, config):
    """update server_map ({member_id:hostname})"""
    self.server_map = {member['_id']: member['host']
                       for member in config['members']}
update server_map ({member_id:hostname})
entailment
def repl_init(self, config):
    """create replica set by config
    return True if replica set created successfully, else False"""
    self.update_server_map(config)
    # init_server - server which can init replica set
    # (must be electable: not an arbiter and not priority 0)
    init_server = [member['host'] for member in config['members']
                   if not (member.get('arbiterOnly', False)
                           or member.get('priority', 1) == 0)][0]
    servers = [member['host'] for member in config['members']]
    if not self.wait_while_reachable(servers):
        logger.error("all servers must be reachable")
        self.cleanup()
        return False
    try:
        result = self.connection(init_server).admin.command("replSetInitiate", config)
        logger.debug("replica init result: {result}".format(**locals()))
    except pymongo.errors.PyMongoError:
        # NOTE(review): no-op handler -- the exception is re-raised
        # unchanged, so this try/except has no effect.
        raise
    if int(result.get('ok', 0)) == 1:
        # Wait while members come up
        return self.waiting_member_state()
    else:
        self.cleanup()
        return False
create replica set by config return True if replica set created successfully, else False
entailment
def reset(self): """Ensure all members are running and available.""" # Need to use self.server_map, in case no Servers are left running. for member_id in self.server_map: host = self.member_id_to_host(member_id) server_id = self._servers.host_to_server_id(host) # Reset each member. self._servers.command(server_id, 'reset') # Wait for all members to have a state of 1, 2, or 7. # Note that this also waits for a primary to become available. self.waiting_member_state() # Wait for Server states to match the config from the primary. self.waiting_config_state() return self.info()
Ensure all members are running and available.
entailment
def repl_update(self, config):
    """Reconfig Replicaset with new config

    Returns a truthy value when the new config is active and a connection
    can be re-established, False when the reconfig command is rejected.
    """
    cfg = config.copy()
    # replSetReconfig requires a strictly increasing config version.
    cfg['version'] += 1
    try:
        result = self.run_command("replSetReconfig", cfg)
        if int(result.get('ok', 0)) != 1:
            return False
    except pymongo.errors.AutoReconnect:
        # Reconfig commonly drops connections; adopt the new member map
        # and fall through to the wait loops below.
        self.update_server_map(cfg)  # use new server_map
    self.waiting_member_state()
    self.waiting_config_state()
    return self.connection() and True
Reconfig Replicaset with new config
entailment
def info(self):
    """return information about replica set"""
    hosts = ','.join(member['host'] for member in self.members())
    mongodb_uri = 'mongodb://' + hosts + '/?replicaSet=' + self.repl_id
    result = {
        "id": self.repl_id,
        "auth_key": self.auth_key,
        "members": self.members(),
        "mongodb_uri": mongodb_uri,
        "orchestration": 'replica_sets',
    }
    if self.login:
        # Add replicaSet URI parameter.
        uri = '%s&replicaSet=%s' % (self.mongodb_auth_uri(hosts), self.repl_id)
        result['mongodb_auth_uri'] = uri
    return result
return information about replica set
entailment
def repl_member_add(self, params):
    """create new mongod instances and add it to the replica set.
    Args:
        params - mongod params

    return the member_id of the newly added member
    raise ReplicaSetError if the reconfig with the new member fails
    """
    repl_config = self.config
    # Next free id: one past the current maximum member id.
    member_id = max([member['_id'] for member in repl_config['members']]) + 1
    member_config = self.member_create(params, member_id)
    repl_config['members'].append(member_config)
    if not self.repl_update(repl_config):
        # Roll back the half-added member before failing.
        self.member_del(member_id, reconfig=True)
        raise ReplicaSetError("Could not add member to ReplicaSet.")
    return member_id
create new mongod instances and add it to the replica set. Args: params - mongod params return True if operation success otherwise False
entailment
def run_command(self, command, arg=None, is_eval=False, member_id=None):
    """run command on replica set

    if member_id is specified the command will be executed on that server,
    otherwise it will be executed on the primary

    Args:
        command - command string
        arg - command argument
        is_eval - if True execute command as eval
        member_id - member id

    return command's result
    """
    logger.debug("run_command({command}, {arg}, {is_eval}, {member_id})".format(**locals()))
    # A real conditional expression replaces the fragile pre-ternary
    # `x and a or b` idiom.
    mode = 'eval' if is_eval else 'command'
    hostname = None
    if isinstance(member_id, int):
        hostname = self.member_id_to_host(member_id)
    result = getattr(self.connection(hostname=hostname).admin, mode)(command, arg)
    logger.debug("command result: {result}".format(result=result))
    return result
run command on replica set if member_id is specified command will be execute on this server if member_id is not specified command will be execute on the primary Args: command - command string arg - command argument is_eval - if True execute command as eval member_id - member id return command's result
entailment
def config(self):
    """return replica set config, use rs.conf() command"""
    try:
        admin = self.connection().admin
        config = admin.command('replSetGetConfig')['config']
    except pymongo.errors.OperationFailure:
        # replSetGetConfig was introduced in 2.7.5.
        # Older servers: read the config document from the local database.
        config = self.connection().local.system.replset.find_one()
    return config
return replica set config, use rs.conf() command
entailment
def member_create(self, params, member_id):
    """start new mongod instances as part of replica set
    Args:
        params - member params
        member_id - member index

    return member config

    NOTE: mutates the caller's `params` dict (pops 'server_id'/'version').
    """
    member_config = params.get('rsParams', {})
    server_id = params.pop('server_id', None)
    version = params.pop('version', self._version)
    proc_params = {'replSet': self.repl_id}
    proc_params.update(params.get('procParams', {}))
    if self.enable_ipv6:
        enable_ipv6_single(proc_params)
    # Make sure that auth isn't set the first time we start the servers.
    proc_params = self._strip_auth(proc_params)
    # Don't pass in auth_key the first time we start the servers.
    server_id = self._servers.create(
        name='mongod',
        procParams=proc_params,
        sslParams=self.sslParams,
        version=version,
        server_id=server_id
    )
    member_config.update({"_id": member_id,
                          "host": self._servers.hostname(server_id)})
    return member_config
start new mongod instances as part of replica set Args: params - member params member_id - member index return member config
entailment
def member_del(self, member_id, reconfig=True):
    """remove member from replica set
    Args:
        member_id - member index
        reconfig - is need reconfig replica

    return True if operation success otherwise False
    """
    server_id = self._servers.host_to_server_id(
        self.member_id_to_host(member_id))
    if reconfig and member_id in [member['_id'] for member in self.members()]:
        config = self.config
        # NOTE(review): pop() uses member_id as a *list index*, not the
        # member's '_id' value -- these only coincide while ids are
        # sequential from 0. TODO confirm against callers.
        config['members'].pop(member_id)
        self.repl_update(config)
    self._servers.remove(server_id)
    return True
remove member from replica set Args: member_id - member index reconfig - is need reconfig replica return True if operation success otherwise False
entailment
def member_update(self, member_id, params):
    """update member's values with reconfig replica
    Args:
        member_id - member index
        params - updated member params

    return True if operation success otherwise False
    """
    config = self.config
    # NOTE(review): indexes 'members' by list position, assuming member_id
    # matches the array index -- TODO confirm.
    config['members'][member_id].update(params.get("rsParams", {}))
    return self.repl_update(config)
update member's values with reconfig replica Args: member_id - member index params - updates member params return True if operation success otherwise False
entailment
def member_info(self, member_id):
    """return information about member"""
    server_id = self._servers.host_to_server_id(
        self.member_id_to_host(member_id))
    server_info = self._servers.info(server_id)
    result = {'_id': member_id,
              'server_id': server_id,
              'mongodb_uri': server_info['mongodb_uri'],
              'procInfo': server_info['procInfo'],
              'statuses': server_info['statuses']}
    if self.login:
        result['mongodb_auth_uri'] = self.mongodb_auth_uri(
            self._servers.hostname(server_id))
    result['rsInfo'] = {}
    if server_info['procInfo']['alive']:
        # Can't call serverStatus on arbiter when running with auth enabled.
        # (SERVER-5479)
        if self.login or self.auth_key:
            arbiter_ids = [member['_id'] for member in self.arbiters()]
            if member_id in arbiter_ids:
                result['rsInfo'] = {
                    'arbiterOnly': True,
                    'secondary': False,
                    'primary': False}
                return result
        repl = self.run_command('serverStatus', arg=None, is_eval=False,
                                member_id=member_id)['repl']
        logger.debug("member {member_id} repl info: {repl}".format(**locals()))
        # Copy whichever replication flags are present for this member.
        # ('votes' appears twice in the tuple; the duplicate is harmless.)
        for key in ('votes', 'tags', 'arbiterOnly', 'buildIndexes',
                    'hidden', 'priority', 'slaveDelay', 'votes', 'secondary'):
            if key in repl:
                result['rsInfo'][key] = repl[key]
        result['rsInfo']['primary'] = repl.get('ismaster', False)
    return result
return information about member
entailment
def member_command(self, member_id, command):
    """apply command (start/stop/restart) to member instance of replica set
    Args:
        member_id - member index
        command - string command (start/stop/restart)

    return True if operation success otherwise False
    """
    host = self.member_id_to_host(member_id)
    return self._servers.command(self._servers.host_to_server_id(host),
                                 command)
apply command (start/stop/restart) to member instance of replica set Args: member_id - member index command - string command (start/stop/restart) return True if operation success otherwise False
entailment
def members(self):
    """return list of members information"""
    status = self.run_command(command="replSetGetStatus", is_eval=False)
    return [{
        "_id": member['_id'],
        "host": member["name"],
        "server_id": self._servers.host_to_server_id(member["name"]),
        "state": member['state'],
    } for member in status['members']]
return list of members information
entailment