Columns: signature · body · docstring · id
def worker_teardown(self, worker_ctx):
Called after a service worker has executed a task. Dependencies should do any post-processing here, raising exceptions in the event of failure.

Example: a database session dependency may commit the session.

:Parameters:
    worker_ctx : WorkerContext
        See ``nameko.containers.ServiceContainer.spawn_worker``
f7192:c2:m4
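A minimal sketch of a dependency that commits in its ``worker_teardown`` hook. The ``Database`` provider, its ``sessions`` registry and ``create_session`` factory are hypothetical, not part of nameko::

    from nameko.extensions import DependencyProvider

    class Database(DependencyProvider):
        """Hypothetical provider giving each worker its own DB session."""

        def setup(self):
            self.sessions = {}  # worker_ctx -> session

        def get_dependency(self, worker_ctx):
            session = create_session()  # hypothetical session factory
            self.sessions[worker_ctx] = session
            return session

        def worker_teardown(self, worker_ctx):
            # post-process after the worker has run; raise on failure
            session = self.sessions.pop(worker_ctx)
            session.commit()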
def wait_for_providers(self):
if self._providers_registered:<EOL><INDENT>_log.debug('<STR_LIT>', self)<EOL>self._last_provider_unregistered.wait()<EOL>_log.debug('<STR_LIT>', self)<EOL><DEDENT>
Wait for any providers registered with the collector to have unregistered. Returns immediately if no providers were ever registered.
f7192:c3:m3
def stop(self):
self.wait_for_providers()<EOL>
Default :meth:`Extension.stop` implementation for subclasses using `ProviderCollector` as a mixin.
f7192:c3:m4
def __init__(<EOL>self, expected_exceptions=(), sensitive_arguments=(), **kwargs<EOL>):
<EOL>sensitive_variables = kwargs.pop('<STR_LIT>', ())<EOL>if sensitive_variables:<EOL><INDENT>sensitive_arguments = sensitive_variables<EOL>warnings.warn(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>", DeprecationWarning)<EOL><DEDENT>self.expected_exceptions = expected_exceptions<EOL>self.sensitive_arguments = sensitive_arguments<EOL>super(Entrypoint, self).__init__(**kwargs)<EOL>
:Parameters:
    expected_exceptions : exception class or tuple of exception classes
        Specify exceptions that may be caused by the caller (e.g. by providing bad arguments). Saved on the entrypoint instance as ``entrypoint.expected_exceptions`` for later inspection by other extensions, for example a monitoring system.
    sensitive_arguments : string or tuple of strings
        Mark an argument or part of an argument as sensitive. Saved on the entrypoint instance as ``entrypoint.sensitive_arguments`` for later inspection by other extensions, for example a logging system.

:seealso: :func:`nameko.utils.get_redacted_args`
f7192:c4:m0
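For illustration, configuring both options on nameko's ``@rpc`` entrypoint; the service and method names are invented::

    from nameko.rpc import rpc

    class AuthService(object):
        name = "auth"

        # ValueError raised for bad caller input is "expected";
        # the password argument is redacted by logging extensions
        @rpc(expected_exceptions=ValueError,
             sensitive_arguments="password")
        def login(self, username, password):
            if not username:
                raise ValueError("username required")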
def bind(self, container, method_name):
instance = super(Entrypoint, self).bind(container)<EOL>instance.method_name = method_name<EOL>return instance<EOL>
Get an instance of this Entrypoint to bind to `container` with `method_name`.
f7192:c4:m1
def wait(self):
<EOL>if self.exception:<EOL><INDENT>raise self.exception<EOL><DEDENT>if self.queue_consumer.stopped:<EOL><INDENT>raise RuntimeError(<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT>if self.queue_consumer.connection.connected is False:<EOL><INDENT>raise RuntimeError(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT>try:<EOL><INDENT>self.queue_consumer.get_message(self.correlation_id)<EOL><DEDENT>except socket.error as exc:<EOL><INDENT>self.exception = exc<EOL><DEDENT>if self.exception:<EOL><INDENT>raise self.exception<EOL><DEDENT>return self.body<EOL>
Makes a blocking call to its queue_consumer until the message with the given correlation_id has been processed. By the time the blocking call exits, self.send() will have been called with the body of the received message (see :meth:`~nameko.rpc.ReplyListener.handle_message`). Exceptions are raised directly.
f7193:c0:m3
def __getitem__(self, name):
return getattr(self, name)<EOL>
Enable dict-like access on the proxy.
f7193:c5:m2
def get_event_exchange(service_name):
exchange_name = "<STR_LIT>".format(service_name)<EOL>exchange = Exchange(<EOL>exchange_name, type='<STR_LIT>', durable=True, delivery_mode=PERSISTENT<EOL>)<EOL>return exchange<EOL>
Get an exchange for ``service_name`` events.
f7195:m0
def event_dispatcher(nameko_config, **kwargs):
amqp_uri = nameko_config[AMQP_URI_CONFIG_KEY]<EOL>serializer, _ = serialization.setup(nameko_config)<EOL>serializer = kwargs.pop('<STR_LIT>', serializer)<EOL>ssl = nameko_config.get(AMQP_SSL_CONFIG_KEY)<EOL>publisher = Publisher(amqp_uri, serializer=serializer, ssl=ssl, **kwargs)<EOL>def dispatch(service_name, event_type, event_data):<EOL><INDENT>"""<STR_LIT>"""<EOL>exchange = get_event_exchange(service_name)<EOL>publisher.publish(<EOL>event_data,<EOL>exchange=exchange,<EOL>routing_key=event_type<EOL>)<EOL><DEDENT>return dispatch<EOL>
Return a function that dispatches nameko events.
f7195:m1
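A usage sketch, assuming this is the standalone dispatcher from ``nameko.standalone.events`` and a broker is reachable at the given URI::

    from nameko.standalone.events import event_dispatcher

    config = {'AMQP_URI': 'amqp://guest:guest@localhost:5672/'}

    dispatch = event_dispatcher(config)
    # publishes to the "payments" service's event exchange
    dispatch("payments", "payment_received", {"amount": 100})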
def __init__(self, interval, eager=False, **kwargs):
self.interval = interval<EOL>self.eager = eager<EOL>self.should_stop = Event()<EOL>self.worker_complete = Event()<EOL>self.gt = None<EOL>super(Timer, self).__init__(**kwargs)<EOL>
Timer entrypoint. Fires every `interval` seconds or as soon as the previous worker completes if that took longer.

The default behaviour is to wait `interval` seconds before firing for the first time. If you want the entrypoint to fire as soon as the service starts, pass `eager=True`.

Example::

    timer = Timer.decorator

    class Service(object):
        name = "service"

        @timer(interval=5)
        def tick(self):
            pass
f7196:c0:m0
def _run(self):
def get_next_interval():<EOL><INDENT>start_time = time.time()<EOL>start = <NUM_LIT:0> if self.eager else <NUM_LIT:1><EOL>for count in itertools.count(start=start):<EOL><INDENT>yield max(start_time + count * self.interval - time.time(), <NUM_LIT:0>)<EOL><DEDENT><DEDENT>interval = get_next_interval()<EOL>sleep_time = next(interval)<EOL>while True:<EOL><INDENT>with Timeout(sleep_time, exception=False):<EOL><INDENT>self.should_stop.wait()<EOL>break<EOL><DEDENT>self.handle_timer_tick()<EOL>self.worker_complete.wait()<EOL>self.worker_complete.reset()<EOL>sleep_time = next(interval)<EOL><DEDENT>
Runs the interval loop.
f7196:c0:m4
def __init__(self, exchange=None, queue=None, declare=None, **options):
self.exchange = exchange<EOL>self.options = options<EOL>self.declare = declare[:] if declare is not None else []<EOL>if self.exchange:<EOL><INDENT>self.declare.append(self.exchange)<EOL><DEDENT>if queue is not None:<EOL><INDENT>warnings.warn(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>",<EOL>DeprecationWarning<EOL>)<EOL>if exchange is None:<EOL><INDENT>self.exchange = queue.exchange<EOL><DEDENT>self.declare.append(queue)<EOL><DEDENT>compat_attrs = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>for compat_attr in compat_attrs:<EOL><INDENT>if hasattr(self, compat_attr):<EOL><INDENT>warnings.warn(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>".format(compat_attr), DeprecationWarning<EOL>)<EOL>self.options[compat_attr] = getattr(self, compat_attr)<EOL><DEDENT><DEDENT>
Provides an AMQP message publisher method via dependency injection.

In AMQP, messages are published to *exchanges* and routed to bound *queues*. This dependency accepts the `exchange` to publish to and will ensure that it is declared before publishing.

Optionally, you may use the `declare` keyword argument to pass a list of other :class:`kombu.Exchange` or :class:`kombu.Queue` objects to declare before publishing.

:Parameters:
    exchange : :class:`kombu.Exchange`
        Destination exchange
    queue : :class:`kombu.Queue`
        **Deprecated**: Bound queue. The event will be published to this queue's exchange.
    declare : list
        List of :class:`kombu.Exchange` or :class:`kombu.Queue` objects to declare before publishing.

If `exchange` is not provided, the message will be published to the default exchange.

Example::

    class Foobar(object):

        publish = Publisher(exchange=...)

        def spam(self, data):
            self.publish('spam:' + data)
f7197:c2:m0
@property<EOL><INDENT>def serializer(self):<DEDENT>
return self.container.serializer<EOL>
Default serializer to use when publishing messages. Must be registered as a `kombu serializer <http://bit.do/kombu_serialization>`_.
f7197:c2:m2
def stop(self):
if not self._consumers_ready.ready():<EOL><INDENT>_log.debug('<STR_LIT>', self)<EOL>stop_exc = QueueConsumerStopped()<EOL>self._gt.kill(stop_exc)<EOL><DEDENT>self.wait_for_providers()<EOL>try:<EOL><INDENT>_log.debug('<STR_LIT>', self)<EOL>self._gt.wait()<EOL><DEDENT>except QueueConsumerStopped:<EOL><INDENT>pass<EOL><DEDENT>super(QueueConsumer, self).stop()<EOL>_log.debug('<STR_LIT>', self)<EOL>
Stop the queue-consumer gracefully. Wait until the last provider has been unregistered and for the ConsumerMixin's greenthread to exit (i.e. until all pending messages have been acked or requeued and all consumers stopped).
f7197:c3:m6
def kill(self):
<EOL>if self._gt is not None and not self._gt.dead:<EOL><INDENT>self._providers = set()<EOL>self._pending_remove_providers = {}<EOL>self.should_stop = True<EOL>try:<EOL><INDENT>self._gt.wait()<EOL><DEDENT>except Exception as exc:<EOL><INDENT>_log.warn(<EOL>'<STR_LIT>', self, exc)<EOL><DEDENT>super(QueueConsumer, self).kill()<EOL>_log.debug('<STR_LIT>', self)<EOL><DEDENT>
Kill the queue-consumer. Unlike `stop()`, any pending message ack or requeue requests, requests to remove providers, etc. are lost, and the consume thread is asked to terminate as soon as possible.
f7197:c3:m7
@property<EOL><INDENT>def connection(self):<DEDENT>
heartbeat = self.container.config.get(<EOL>HEARTBEAT_CONFIG_KEY, DEFAULT_HEARTBEAT<EOL>)<EOL>transport_options = self.container.config.get(<EOL>TRANSPORT_OPTIONS_CONFIG_KEY, DEFAULT_TRANSPORT_OPTIONS<EOL>)<EOL>ssl = self.container.config.get(AMQP_SSL_CONFIG_KEY)<EOL>conn = Connection(self.amqp_uri,<EOL>transport_options=transport_options,<EOL>heartbeat=heartbeat,<EOL>ssl=ssl<EOL>)<EOL>return conn<EOL>
Provide the connection parameters for kombu's ConsumerMixin. The `Connection` object is a declaration of connection parameters that is lazily evaluated. It doesn't represent an established connection to the broker at this point.
f7197:c3:m12
def get_consumers(self, consumer_cls, channel):
_log.debug('<STR_LIT>', self)<EOL>for provider in self._providers:<EOL><INDENT>callbacks = [partial(self.handle_message, provider)]<EOL>consumer = consumer_cls(<EOL>queues=[provider.queue],<EOL>callbacks=callbacks,<EOL>accept=self.accept<EOL>)<EOL>consumer.qos(prefetch_count=self.prefetch_count)<EOL>self._consumers[provider] = consumer<EOL><DEDENT>return self._consumers.values()<EOL>
Kombu callback to set up consumers. Called after any (re)connection to the broker.
f7197:c3:m14
def on_iteration(self):
self._cancel_consumers_if_requested()<EOL>if len(self._consumers) == <NUM_LIT:0>:<EOL><INDENT>_log.debug('<STR_LIT>')<EOL>self.should_stop = True<EOL><DEDENT>
Kombu callback for each `drain_events` loop iteration.
f7197:c3:m15
def on_consume_ready(self, connection, channel, consumers, **kwargs):
if not self._consumers_ready.ready():<EOL><INDENT>_log.debug('<STR_LIT>', self)<EOL>self._consumers_ready.send(None)<EOL><DEDENT>
Kombu callback when consumers are ready to accept messages. Called after any (re)connection to the broker.
f7197:c3:m17
def __init__(self, queue, requeue_on_error=False, **kwargs):
self.queue = queue<EOL>self.requeue_on_error = requeue_on_error<EOL>super(Consumer, self).__init__(**kwargs)<EOL>
Decorates a method as a message consumer.

Messages from the queue will be deserialized depending on their content type and passed to the decorated method. When the consumer method returns without raising any exceptions, the message will automatically be acknowledged. If any exceptions are raised during consumption and `requeue_on_error` is True, the message will be requeued. Defaults to False.

Example::

    @consume(...)
    def handle_message(self, body):

        if not self.spam(body):
            raise Exception('message will be requeued')

        self.shrub(body)

Args:
    queue: The queue to consume from.
f7197:c4:m0
def stop(self):
if not self._providers_registered:<EOL><INDENT>self.queue_consumer.unregister_provider(self)<EOL>self._unregistered_from_queue_consumer.send(True)<EOL><DEDENT>
Stop the RpcConsumer. The RpcConsumer normally unregisters from the QueueConsumer when the last Rpc subclass unregisters from it. If no providers were registered, we should unregister from the QueueConsumer as soon as we're asked to stop.
f7198:c0:m2
def unregister_provider(self, provider):
self._unregistering_providers.add(provider)<EOL>remaining_providers = self._providers - self._unregistering_providers<EOL>if not remaining_providers:<EOL><INDENT>_log.debug('<STR_LIT>', self)<EOL>self.queue_consumer.unregister_provider(self)<EOL>_log.debug('<STR_LIT>', self)<EOL>self._unregistered_from_queue_consumer.send(True)<EOL><DEDENT>_log.debug('<STR_LIT>', self)<EOL>self._unregistered_from_queue_consumer.wait()<EOL>super(RpcConsumer, self).unregister_provider(provider)<EOL>
Unregister a provider. Blocks until this RpcConsumer is unregistered from its QueueConsumer, which only happens when all providers have asked to unregister.
f7198:c0:m3
def __init__(<EOL>self, worker_ctx, service_name, method_name, reply_listener, **options<EOL>):
self.worker_ctx = worker_ctx<EOL>self.service_name = service_name<EOL>self.method_name = method_name<EOL>self.reply_listener = reply_listener<EOL>compat_attrs = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>for compat_attr in compat_attrs:<EOL><INDENT>if hasattr(self, compat_attr):<EOL><INDENT>warnings.warn(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>".format(compat_attr), DeprecationWarning<EOL>)<EOL>options[compat_attr] = getattr(self, compat_attr)<EOL><DEDENT><DEDENT>serializer = options.pop('<STR_LIT>', self.serializer)<EOL>self.publisher = self.publisher_cls(<EOL>self.amqp_uri, serializer=serializer, ssl=self.ssl, **options<EOL>)<EOL>
Note that the mechanism which raises :class:`UnknownService` exceptions relies on publish confirms being enabled in the proxy.
f7198:c7:m0
@property<EOL><INDENT>def serializer(self):<DEDENT>
return self.container.config.get(<EOL>SERIALIZER_CONFIG_KEY, DEFAULT_SERIALIZER<EOL>)<EOL>
Default serializer to use when publishing message payloads. Must be registered as a `kombu serializer <http://bit.do/kombu_serialization>`_.
f7198:c7:m5
def publish(self, payload, **kwargs):
publish_kwargs = self.publish_kwargs.copy()<EOL>headers = publish_kwargs.pop('<STR_LIT>', {}).copy()<EOL>headers.update(kwargs.pop('<STR_LIT>', {}))<EOL>headers.update(kwargs.pop('<STR_LIT>', {}))<EOL>use_confirms = kwargs.pop('<STR_LIT>', self.use_confirms)<EOL>transport_options = kwargs.pop('<STR_LIT>',<EOL>self.transport_options<EOL>)<EOL>transport_options['<STR_LIT>'] = use_confirms<EOL>delivery_mode = kwargs.pop('<STR_LIT>', self.delivery_mode)<EOL>mandatory = kwargs.pop('<STR_LIT>', self.mandatory)<EOL>priority = kwargs.pop('<STR_LIT>', self.priority)<EOL>expiration = kwargs.pop('<STR_LIT>', self.expiration)<EOL>serializer = kwargs.pop('<STR_LIT>', self.serializer)<EOL>compression = kwargs.pop('<STR_LIT>', self.compression)<EOL>retry = kwargs.pop('<STR_LIT>', self.retry)<EOL>retry_policy = kwargs.pop('<STR_LIT>', self.retry_policy)<EOL>declare = self.declare[:]<EOL>declare.extend(kwargs.pop('<STR_LIT>', ()))<EOL>publish_kwargs.update(kwargs) <EOL>with get_producer(self.amqp_uri,<EOL>use_confirms,<EOL>self.ssl,<EOL>transport_options,<EOL>) as producer:<EOL><INDENT>try:<EOL><INDENT>producer.publish(<EOL>payload,<EOL>headers=headers,<EOL>delivery_mode=delivery_mode,<EOL>mandatory=mandatory,<EOL>priority=priority,<EOL>expiration=expiration,<EOL>compression=compression,<EOL>declare=declare,<EOL>retry=retry,<EOL>retry_policy=retry_policy,<EOL>serializer=serializer,<EOL>**publish_kwargs<EOL>)<EOL><DEDENT>except ChannelError as exc:<EOL><INDENT>if "<STR_LIT>" in str(exc):<EOL><INDENT>raise UndeliverableMessage()<EOL><DEDENT>raise<EOL><DEDENT>if mandatory:<EOL><INDENT>if not use_confirms:<EOL><INDENT>warnings.warn(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT><DEDENT><DEDENT>
Publish a message.
f7202:c1:m1
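Most of the keyword arguments popped above override publisher-level defaults on a per-call basis. A hedged sketch, assuming this is the ``Publisher`` from ``nameko.amqp.publish`` and a local broker::

    from kombu import Exchange
    from nameko.amqp.publish import Publisher

    exchange = Exchange("orders", type="topic", durable=True)
    publisher = Publisher("amqp://guest:guest@localhost:5672/")

    publisher.publish(
        {"order_id": 1},
        exchange=exchange,
        routing_key="order.created",
        declare=[exchange],   # declared before publishing
        mandatory=True,       # raises UndeliverableMessage if unroutable
        retry=True,           # per-call override of the default
    )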
@contextmanager<EOL>def entrypoint_hook(container, method_name, context_data=None, timeout=<NUM_LIT:30>):
entrypoint = get_extension(container, Entrypoint, method_name=method_name)<EOL>if entrypoint is None:<EOL><INDENT>raise ExtensionNotFound(<EOL>"<STR_LIT>".format(<EOL>method_name, container))<EOL><DEDENT>def hook(*args, **kwargs):<EOL><INDENT>hook_result = event.Event()<EOL>def wait_for_entrypoint():<EOL><INDENT>try:<EOL><INDENT>with entrypoint_waiter(<EOL>container, method_name,<EOL>timeout=timeout<EOL>) as waiter_result:<EOL><INDENT>container.spawn_worker(<EOL>entrypoint, args, kwargs,<EOL>context_data=context_data<EOL>)<EOL><DEDENT>hook_result.send(waiter_result.get())<EOL><DEDENT>except Exception as exc:<EOL><INDENT>hook_result.send_exception(exc)<EOL><DEDENT><DEDENT>def wait_for_container():<EOL><INDENT>try:<EOL><INDENT>container.wait()<EOL><DEDENT>except Exception as exc:<EOL><INDENT>if not hook_result.ready():<EOL><INDENT>hook_result.send_exception(exc)<EOL><DEDENT><DEDENT><DEDENT>eventlet.spawn_n(wait_for_entrypoint)<EOL>eventlet.spawn_n(wait_for_container)<EOL>return hook_result.wait()<EOL><DEDENT>yield hook<EOL>
Yield a function providing an entrypoint into a hosted service.

The yielded function may be called as if it were the bare method defined in the service class. Intended to be used as an integration testing utility.

:Parameters:
    container : ServiceContainer
        The container hosting the service owning the entrypoint
    method_name : str
        The name of the entrypoint decorated method on the service class
    context_data : dict
        Context data to provide for the call, e.g. a language, auth token or session.
    timeout : int
        Maximum seconds to wait

**Usage**

To verify that `ServiceX` and `ServiceY` are compatible, make an integration test that checks their interaction:

.. literalinclude:: ../examples/testing/integration_x_y_test.py
f7206:m0
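A hedged usage sketch; the service class, method name and context data are invented::

    from nameko.containers import ServiceContainer
    from nameko.testing.services import entrypoint_hook

    container = ServiceContainer(ServiceX, config)  # hypothetical service
    container.start()

    with entrypoint_hook(
        container, "method", context_data={"auth": "token"}
    ) as hook:
        # call the entrypoint as if it were the bare method
        assert hook("arg") == "expected"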
@contextmanager<EOL>def entrypoint_waiter(container, method_name, timeout=<NUM_LIT:30>, callback=None):
if not get_extension(container, Entrypoint, method_name=method_name):<EOL><INDENT>raise RuntimeError("<STR_LIT>".format(<EOL>container.service_name, method_name))<EOL><DEDENT>class Result(WaitResult):<EOL><INDENT>worker_ctx = None<EOL>def send(self, worker_ctx, result, exc_info):<EOL><INDENT>self.worker_ctx = worker_ctx<EOL>super(Result, self).send(result, exc_info)<EOL><DEDENT><DEDENT>waiter_callback = callback<EOL>waiter_result = Result()<EOL>def on_worker_result(worker_ctx, result, exc_info):<EOL><INDENT>complete = False<EOL>if worker_ctx.entrypoint.method_name == method_name:<EOL><INDENT>if not callable(waiter_callback):<EOL><INDENT>complete = True<EOL><DEDENT>else:<EOL><INDENT>complete = waiter_callback(worker_ctx, result, exc_info)<EOL><DEDENT><DEDENT>if complete:<EOL><INDENT>waiter_result.send(worker_ctx, result, exc_info)<EOL><DEDENT>return complete<EOL><DEDENT>def on_worker_teardown(worker_ctx):<EOL><INDENT>if waiter_result.worker_ctx is worker_ctx:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL><DEDENT>exc = entrypoint_waiter.Timeout(<EOL>"<STR_LIT>".format(<EOL>container.service_name, method_name, timeout)<EOL>)<EOL>with eventlet.Timeout(timeout, exception=exc):<EOL><INDENT>with wait_for_call(<EOL>container, '<STR_LIT>',<EOL>lambda args, kwargs, res, exc: on_worker_teardown(*args)<EOL>):<EOL><INDENT>with wait_for_call(<EOL>container, '<STR_LIT>',<EOL>lambda args, kwargs, res, exc: on_worker_result(*args)<EOL>):<EOL><INDENT>yield waiter_result<EOL><DEDENT><DEDENT><DEDENT>
Context manager that waits until an entrypoint has fired, and the generated worker has exited and been torn down.

It yields a :class:`nameko.testing.waiting.WaitResult` object that can be used to get the result returned (exception raised) by the entrypoint after the waiter has exited.

:Parameters:
    container : ServiceContainer
        The container hosting the service owning the entrypoint
    method_name : str
        The name of the entrypoint decorated method on the service class
    timeout : int
        Maximum seconds to wait
    callback : callable
        Function to conditionally control whether the entrypoint_waiter should exit for a particular invocation

The `timeout` argument specifies the maximum number of seconds the `entrypoint_waiter` should wait before exiting. It can be disabled by passing `None`. The default is 30 seconds.

Optionally allows a `callback` to be provided which is invoked whenever the entrypoint fires. If provided, the callback must return `True` for the `entrypoint_waiter` to exit. The signature for the callback function is::

    def callback(worker_ctx, result, exc_info):
        pass

Where the parameters are as follows:

    worker_ctx (WorkerContext): WorkerContext of the entrypoint call.
    result (object): The return value of the entrypoint.
    exc_info (tuple): Tuple as returned by `sys.exc_info` if the entrypoint raised an exception, otherwise `None`.

**Usage**

::

    class Service(object):
        name = "service"

        @event_handler('srcservice', 'eventtype')
        def handle_event(self, msg):
            return msg

    container = ServiceContainer(Service, config)
    container.start()

    # basic
    with entrypoint_waiter(container, 'handle_event'):
        ...  # action that dispatches event

    # giving access to the result
    with entrypoint_waiter(container, 'handle_event') as result:
        ...  # action that dispatches event
    res = result.get()

    # with custom timeout
    with entrypoint_waiter(container, 'handle_event', timeout=5):
        ...  # action that dispatches event

    # with callback that waits until entrypoint stops raising
    def callback(worker_ctx, result, exc_info):
        if exc_info is None:
            return True

    with entrypoint_waiter(container, 'handle_event', callback=callback):
        ...  # action that dispatches event
f7206:m1
def worker_factory(service_cls, **dependencies):
service = service_cls()<EOL>for name, attr in inspect.getmembers(service_cls):<EOL><INDENT>if isinstance(attr, DependencyProvider):<EOL><INDENT>try:<EOL><INDENT>dependency = dependencies.pop(name)<EOL><DEDENT>except KeyError:<EOL><INDENT>dependency = MagicMock()<EOL><DEDENT>setattr(service, name, dependency)<EOL><DEDENT><DEDENT>if dependencies:<EOL><INDENT>raise ExtensionNotFound(<EOL>"<STR_LIT>".format(<EOL>dependencies.keys(), service_cls))<EOL><DEDENT>return service<EOL>
Return an instance of ``service_cls`` with its injected dependencies replaced with :class:`~mock.MagicMock` objects, or as given in ``dependencies``.

**Usage**

The following example service proxies calls to a "maths" service via an ``RpcProxy`` dependency::

    from nameko.rpc import RpcProxy, rpc

    class ConversionService(object):
        name = "conversions"

        maths_rpc = RpcProxy("maths")

        @rpc
        def inches_to_cm(self, inches):
            return self.maths_rpc.multiply(inches, 2.54)

        @rpc
        def cm_to_inches(self, cms):
            return self.maths_rpc.divide(cms, 2.54)

Use the ``worker_factory`` to create an instance of ``ConversionService`` with its dependencies replaced by MagicMock objects::

    service = worker_factory(ConversionService)

Nameko's entrypoints do not modify the service methods, so instance methods can be called directly with the same signature. The replaced dependencies can be used as any other MagicMock object, so a complete unit test for the conversion service may look like this::

    # create worker instance
    service = worker_factory(ConversionService)

    # replace "maths" service
    service.maths_rpc.multiply.side_effect = lambda x, y: x * y
    service.maths_rpc.divide.side_effect = lambda x, y: x / y

    # test inches_to_cm business logic
    assert service.inches_to_cm(300) == 762
    service.maths_rpc.multiply.assert_called_once_with(300, 2.54)

    # test cm_to_inches business logic
    assert service.cm_to_inches(762) == 300
    service.maths_rpc.divide.assert_called_once_with(762, 2.54)

*Providing Dependencies*

The ``**dependencies`` kwargs to ``worker_factory`` can be used to provide a replacement dependency instead of a mock. For example, to unit test a service against a real database:

.. literalinclude:: ../examples/testing/alternative_dependency_unit_test.py

If a named dependency provider does not exist on ``service_cls``, an ``ExtensionNotFound`` exception is raised.
f7206:m2
def replace_dependencies(container, *dependencies, **dependency_map):
if set(dependencies).intersection(dependency_map):<EOL><INDENT>raise RuntimeError(<EOL>"<STR_LIT>")<EOL><DEDENT>arg_replacements = OrderedDict((dep, MagicMock()) for dep in dependencies)<EOL>dependency_map.update(arg_replacements)<EOL>_replace_dependencies(container, **dependency_map)<EOL>res = (replacement for replacement in arg_replacements.values())<EOL>if len(arg_replacements) == <NUM_LIT:1>:<EOL><INDENT>return next(res)<EOL><DEDENT>return res<EOL>
Replace the dependency providers on ``container`` with instances of :class:`MockDependencyProvider`.

Dependencies named in ``*dependencies`` will be replaced with a :class:`MockDependencyProvider`, which injects a MagicMock instead of the dependency. Alternatively, you may use keyword arguments to name a dependency and provide the replacement value that the `MockDependencyProvider` should inject.

Return the :attr:`MockDependencyProvider.dependency` for every dependency specified in the ``*dependencies`` args so that calls to the replaced dependencies can be inspected. Return a single object if only one dependency was replaced, and a generator yielding the replacements in the same order as ``dependencies`` otherwise. Note that any replaced dependencies specified via kwargs ``**dependency_map`` will not be returned.

Replacements are made on the container instance and have no effect on the service class. New container instances are therefore unaffected by replacements on previous instances.

**Usage**

::

    from nameko.rpc import RpcProxy, rpc
    from nameko.standalone.rpc import ServiceRpcProxy

    class ConversionService(object):
        name = "conversions"

        maths_rpc = RpcProxy("maths")

        @rpc
        def inches_to_cm(self, inches):
            return self.maths_rpc.multiply(inches, 2.54)

        @rpc
        def cm_to_inches(self, cms):
            return self.maths_rpc.divide(cms, 2.54)

    container = ServiceContainer(ConversionService, config)
    mock_maths_rpc = replace_dependencies(container, "maths_rpc")
    mock_maths_rpc.divide.return_value = 39.37

    container.start()

    with ServiceRpcProxy('conversions', config) as proxy:
        proxy.cm_to_inches(100)

    # assert that the dependency was called as expected
    mock_maths_rpc.divide.assert_called_once_with(100, 2.54)

Providing a specific replacement by keyword::

    class StubMaths(object):

        def divide(self, val1, val2):
            return val1 / val2

    replace_dependencies(container, maths_rpc=StubMaths())

    container.start()

    with ServiceRpcProxy('conversions', config) as proxy:
        assert proxy.cm_to_inches(127) == 50.0
f7206:m4
def restrict_entrypoints(container, *entrypoints):
if container.started:<EOL><INDENT>raise RuntimeError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>entrypoint_deps = list(container.entrypoints)<EOL>entrypoint_names = {ext.method_name for ext in entrypoint_deps}<EOL>missing = set(entrypoints) - entrypoint_names<EOL>if missing:<EOL><INDENT>raise ExtensionNotFound("<STR_LIT>".format(<EOL>missing, container))<EOL><DEDENT>for entrypoint in entrypoint_deps:<EOL><INDENT>if entrypoint.method_name not in entrypoints:<EOL><INDENT>container.entrypoints.remove(entrypoint)<EOL><DEDENT><DEDENT>
Restrict the entrypoints on ``container`` to those named in ``entrypoints``.

This method must be called before the container is started.

**Usage**

The following service definition has three entrypoints:

.. code-block:: python

    class Service(object):
        name = "service"

        @timer(interval=1)
        def foo(self, arg):
            pass

        @rpc
        def bar(self, arg):
            pass

        @rpc
        def baz(self, arg):
            pass

    container = ServiceContainer(Service, config)

To disable the timer entrypoint on ``foo``, leaving just the RPC entrypoints:

.. code-block:: python

    restrict_entrypoints(container, "bar", "baz")

Note that it is not possible to identify multiple entrypoints on the same method individually.
f7206:m5
def get_extension(container, extension_cls, **match_attrs):
for ext in container.extensions:<EOL><INDENT>if isinstance(ext, extension_cls):<EOL><INDENT>if not match_attrs:<EOL><INDENT>return ext<EOL><DEDENT>def has_attribute(name, value):<EOL><INDENT>return getattr(ext, name) == value<EOL><DEDENT>if all([has_attribute(name, value)<EOL>for name, value in match_attrs.items()]):<EOL><INDENT>return ext<EOL><DEDENT><DEDENT><DEDENT>
Inspect ``container.extensions`` and return the first item that is an instance of ``extension_cls``. Optionally also require that the instance has an attribute with a particular value as given in the ``match_attrs`` kwargs.
f7207:m0
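For example, fetching a specific entrypoint by its method name, assuming a ``container`` hosting a service with an ``@rpc``-decorated ``method``::

    from nameko.rpc import Rpc
    from nameko.testing.utils import get_extension

    rpc_entrypoint = get_extension(container, Rpc, method_name="method")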
def get_container(runner, service_cls):
for container in runner.containers:<EOL><INDENT>if container.service_cls == service_cls:<EOL><INDENT>return container<EOL><DEDENT><DEDENT>
Inspect ``runner.containers`` and return the first item that is hosting an instance of ``service_cls``.
f7207:m1
@contextmanager<EOL>def wait_for_call(timeout, mock_method):
with eventlet.Timeout(timeout):<EOL><INDENT>while not mock_method.called:<EOL><INDENT>eventlet.sleep()<EOL><DEDENT><DEDENT>yield mock_method<EOL>
Return a context manager that waits ``timeout`` seconds for ``mock_method`` to be called, yielding the mock if so. Raises an :class:`eventlet.Timeout` if the method was not called within ``timeout`` seconds.
f7207:m2
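A self-contained sketch using a plain ``MagicMock`` and a deferred green thread; the timings are arbitrary::

    import eventlet
    from mock import MagicMock

    from nameko.testing.utils import wait_for_call

    mock_method = MagicMock()
    eventlet.spawn_after(0.1, mock_method, "payload")  # call arrives later

    with wait_for_call(5, mock_method) as mocked:
        mocked.assert_called_once_with("payload")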
def wait_for_worker_idle(container, timeout=<NUM_LIT:10>):
warnings.warn(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>", DeprecationWarning<EOL>)<EOL>with eventlet.Timeout(timeout):<EOL><INDENT>container._worker_pool.waitall()<EOL><DEDENT>
Blocks until ``container`` has no running workers. Raises an :class:`eventlet.Timeout` if the workers have not completed within ``timeout`` seconds.
f7207:m3
def assert_stops_raising(fn, exception_type=Exception, timeout=<NUM_LIT:10>,<EOL>interval=<NUM_LIT:0.1>):
with eventlet.Timeout(timeout):<EOL><INDENT>while True:<EOL><INDENT>try:<EOL><INDENT>fn()<EOL><DEDENT>except exception_type:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>return<EOL><DEDENT>eventlet.sleep(interval)<EOL><DEDENT><DEDENT>
Assert that ``fn`` returns successfully within ``timeout`` seconds, trying every ``interval`` seconds. If ``exception_type`` is provided, fail unless the exception thrown is an instance of ``exception_type``. If not specified, any :class:`Exception` instance is allowed.
f7207:m4
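A sketch with a function that stops raising after a few attempts; the names are invented::

    from nameko.testing.utils import assert_stops_raising

    attempts = {"count": 0}

    def flaky():
        attempts["count"] += 1
        if attempts["count"] < 3:
            raise ValueError("not ready yet")

    # succeeds once ``flaky`` returns without raising ValueError
    assert_stops_raising(flaky, exception_type=ValueError, timeout=5)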
def get_redacted_args(entrypoint, *args, **kwargs):
sensitive_arguments = entrypoint.sensitive_arguments<EOL>if isinstance(sensitive_arguments, six.string_types):<EOL><INDENT>sensitive_arguments = (sensitive_arguments,)<EOL><DEDENT>method = getattr(entrypoint.container.service_cls, entrypoint.method_name)<EOL>callargs = inspect.getcallargs(method, None, *args, **kwargs)<EOL>del callargs['<STR_LIT>']<EOL>callargs = deepcopy(callargs)<EOL>def redact(data, keys):<EOL><INDENT>key = keys[<NUM_LIT:0>]<EOL>if len(keys) == <NUM_LIT:1>:<EOL><INDENT>try:<EOL><INDENT>data[key] = REDACTED<EOL><DEDENT>except (KeyError, IndexError, TypeError):<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if key in data:<EOL><INDENT>redact(data[key], keys[<NUM_LIT:1>:])<EOL><DEDENT><DEDENT><DEDENT>for variable in sensitive_arguments:<EOL><INDENT>keys = []<EOL>for dict_key, list_index in re.findall(r"<STR_LIT>", variable):<EOL><INDENT>if dict_key:<EOL><INDENT>keys.append(dict_key)<EOL><DEDENT>elif list_index:<EOL><INDENT>keys.append(int(list_index))<EOL><DEDENT><DEDENT>if keys[<NUM_LIT:0>] in callargs:<EOL><INDENT>redact(callargs, keys)<EOL><DEDENT><DEDENT>return callargs<EOL>
Utility function for use with entrypoints that are marked with ``sensitive_arguments`` -- e.g. :class:`nameko.rpc.Rpc` and :class:`nameko.events.EventHandler`.

:Parameters:
    entrypoint : :class:`~nameko.extensions.Entrypoint`
        The entrypoint that fired.
    args : tuple
        Positional arguments for the method call.
    kwargs : dict
        Keyword arguments for the method call.

The entrypoint should have a ``sensitive_arguments`` attribute, the value of which is a string or tuple of strings specifying the arguments or partial arguments that should be redacted. To partially redact an argument, the following syntax is used::

    <argument-name>.<dict-key>[<list-index>]

:Returns:
    A dictionary as returned by :func:`inspect.getcallargs`, but with sensitive arguments or partial arguments redacted.

.. note::

    This function does not raise if one of the ``sensitive_arguments`` doesn't match or partially match the calling ``args`` and ``kwargs``. This allows "fuzzier" pattern matching (e.g. redact a field if it is present, and otherwise do nothing).

    To avoid exposing sensitive arguments through a typo, it is recommended to test the configuration of each entrypoint with ``sensitive_arguments`` individually. For example:

    .. code-block:: python

        class Service(object):
            @rpc(sensitive_arguments="foo.bar")
            def method(self, foo):
                pass

        container = ServiceContainer(Service, {})
        entrypoint = get_extension(container, Rpc, method_name="method")

        # no redaction
        foo = "arg"
        expected = {'foo': "arg"}
        assert get_redacted_args(entrypoint, foo) == expected

        # 'bar' key redacted
        foo = {'bar': "secret value", 'baz': "normal value"}
        expected = {'foo': {'bar': "********", 'baz': "normal value"}}
        assert get_redacted_args(entrypoint, foo) == expected

.. seealso::

    The tests for this utility demonstrate its full usage: :class:`test.test_utils.TestGetRedactedArgs`
f7211:m0
def import_from_path(path):
if path is None:<EOL><INDENT>return<EOL><DEDENT>obj = locate(path)<EOL>if obj is None:<EOL><INDENT>raise ImportError(<EOL>"<STR_LIT>".format(path)<EOL>)<EOL><DEDENT>return obj<EOL>
Import and return the object at `path` if it exists. Raises an :exc:`ImportError` if the object is not found.
f7211:m1
def sanitize_url(url):
parts = urlparse(url)<EOL>if parts.password is None:<EOL><INDENT>return url<EOL><DEDENT>host_info = parts.netloc.rsplit('<STR_LIT:@>', <NUM_LIT:1>)[-<NUM_LIT:1>]<EOL>parts = parts._replace(netloc='<STR_LIT>'.format(<EOL>parts.username, REDACTED, host_info))<EOL>return parts.geturl()<EOL>
Redact the password in ``url``, if present.
f7211:m2
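Assuming ``REDACTED`` is the usual eight-asterisk placeholder (as in the ``get_redacted_args`` example above), the behaviour is roughly::

    >>> sanitize_url('amqp://user:secret@localhost:5672/')
    'amqp://user:********@localhost:5672/'
    >>> sanitize_url('amqp://localhost:5672/')  # no password: unchanged
    'amqp://localhost:5672/'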
def fail_fast_imap(pool, call, items):
result_queue = LightQueue(maxsize=len(items))<EOL>spawned_threads = set()<EOL>def handle_result(finished_thread):<EOL><INDENT>try:<EOL><INDENT>thread_result = finished_thread.wait()<EOL>spawned_threads.remove(finished_thread)<EOL>result_queue.put((thread_result, None))<EOL><DEDENT>except Exception:<EOL><INDENT>spawned_threads.remove(finished_thread)<EOL>result_queue.put((None, sys.exc_info()))<EOL><DEDENT><DEDENT>for item in items:<EOL><INDENT>gt = pool.spawn(call, item)<EOL>spawned_threads.add(gt)<EOL>gt.link(handle_result)<EOL><DEDENT>while spawned_threads:<EOL><INDENT>result, exc_info = result_queue.get()<EOL>if exc_info is not None:<EOL><INDENT>for ongoing_thread in spawned_threads:<EOL><INDENT>ongoing_thread.kill()<EOL><DEDENT>eventlet.getcurrent().throw(*exc_info)<EOL><DEDENT>yield result<EOL><DEDENT>
Run a function against each item in a given list, yielding each function result in turn, where the function call is handled in a :class:`~eventlet.greenthread.GreenThread` spawned by the provided pool.

If any function raises an exception, all other ongoing threads are killed, and the exception is raised to the caller.

This function is similar to :meth:`~eventlet.greenpool.GreenPool.imap`.

:param pool: Pool to spawn function threads from
:type pool: eventlet.greenpool.GreenPool
:param call: Function call to make, expecting to receive an item from the given list
f7212:m0
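A usage sketch with a trivial worker function, assuming eventlet is installed::

    from eventlet.greenpool import GreenPool

    pool = GreenPool()

    def double(item):
        return item * 2

    # results are yielded in completion order, not input order
    results = sorted(fail_fast_imap(pool, double, [1, 2, 3]))
    assert results == [2, 4, 6]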
def __init__(self, items, abort_on_error=False):
self._items = items<EOL>self.abort_on_error = abort_on_error<EOL>
Wraps an iterable set of items such that a call on the returned SpawningProxy instance will spawn a call in a :class:`~eventlet.greenthread.GreenThread` for each item. Returns when every spawned thread has completed. :param items: Iterable item set to process :param abort_on_error: If True, any exceptions raised on an individual item call will cause all peer item call threads to be killed, and for the exception to be propagated to the caller immediately.
f7212:c0:m0
def make_nameko_helper(config):
module = ModuleType('<STR_LIT>')<EOL>module.__doc__ =
Create a fake module that provides some convenient access to nameko standalone functionality for interactive shell usage.
f7216:m0
def make_timing_logger(logger, precision=<NUM_LIT:3>, level=logging.DEBUG):
@contextmanager<EOL>def log_time(msg, *args):<EOL><INDENT>"""<STR_LIT>"""<EOL>start_time = time.time()<EOL>try:<EOL><INDENT>yield<EOL><DEDENT>finally:<EOL><INDENT>message = "<STR_LIT>".format(msg, precision)<EOL>duration = time.time() - start_time<EOL>args = args + (duration,)<EOL>logger.log(level, message, *args)<EOL><DEDENT><DEDENT>return log_time<EOL>
Return a timing logger.

Usage::

    >>> logger = logging.getLogger('foobar')
    >>> log_time = make_timing_logger(
    ...     logger, level=logging.INFO, precision=2)
    >>>
    >>> with log_time("hello %s", "world"):
    ...     time.sleep(1)
    INFO:foobar:hello world in 1.00s
f7222:m0
def get_module_path(exc_type):
module = inspect.getmodule(exc_type)<EOL>return "<STR_LIT>".format(module.__name__, exc_type.__name__)<EOL>
Return the dotted module path of `exc_type`, including the class name, e.g.::

    >>> get_module_path(MethodNotFound)
    "nameko.exceptions.MethodNotFound"
f7223:m0
def safe_for_serialization(value):
if isinstance(value, six.string_types):<EOL><INDENT>return value<EOL><DEDENT>if isinstance(value, dict):<EOL><INDENT>return {<EOL>safe_for_serialization(key): safe_for_serialization(val)<EOL>for key, val in six.iteritems(value)<EOL>}<EOL><DEDENT>if isinstance(value, collections.Iterable):<EOL><INDENT>return list(map(safe_for_serialization, value))<EOL><DEDENT>try:<EOL><INDENT>return six.text_type(value)<EOL><DEDENT>except Exception:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>
Transform a value in preparation for serializing as JSON. Strings are returned unchanged; mappings and iterables have their entries made safe; all other values are stringified, with a fallback value if that fails.
f7223:m1
def serialize(exc):
return {<EOL>'<STR_LIT>': type(exc).__name__,<EOL>'<STR_LIT>': get_module_path(type(exc)),<EOL>'<STR_LIT>': list(map(safe_for_serialization, exc.args)),<EOL>'<STR_LIT:value>': safe_for_serialization(exc),<EOL>}<EOL>
Serialize ``exc`` into a data dictionary representing it.
f7223:m2
def deserialize(data):
key = data.get('<STR_LIT>')<EOL>if key in registry:<EOL><INDENT>exc_args = data.get('<STR_LIT>', ())<EOL>return registry[key](*exc_args)<EOL><DEDENT>exc_type = data.get('<STR_LIT>')<EOL>value = data.get('<STR_LIT:value>')<EOL>return RemoteError(exc_type=exc_type, value=value)<EOL>
Deserialize `data` to an exception instance. If the `exc_path` value matches an exception registered as ``deserializable``, return an instance of that exception type. Otherwise, return a `RemoteError` instance describing the exception that occurred.
f7223:m3
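A round-trip sketch; the exact key names in the serialized dict are elided in the body above, so this only exercises the types. An exception type that is not registered comes back as ``RemoteError``::

    from nameko.exceptions import RemoteError, deserialize, serialize

    try:
        raise ValueError("bad input")
    except ValueError as exc:
        data = serialize(exc)

    remote = deserialize(data)
    assert isinstance(remote, RemoteError)  # ValueError is not registered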
def deserialize_to_instance(exc_type):
key = get_module_path(exc_type)<EOL>registry[key] = exc_type<EOL>return exc_type<EOL>
Decorator that registers `exc_type` as deserializable back into an instance, rather than a :class:`RemoteError`. See :func:`deserialize`.
f7223:m4
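For example, marking a custom exception so that RPC callers get the real type back rather than a ``RemoteError``::

    from nameko.exceptions import deserialize_to_instance

    @deserialize_to_instance
    class ItemNotFound(Exception):
        pass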
def get_dependency(self, worker_ctx):
extra_headers = self.get_message_headers(worker_ctx)<EOL>def dispatch(event_type, event_data):<EOL><INDENT>self.publisher.publish(<EOL>event_data,<EOL>exchange=self.exchange,<EOL>routing_key=event_type,<EOL>extra_headers=extra_headers<EOL>)<EOL><DEDENT>return dispatch<EOL>
Inject a dispatch method onto the service instance.
f7224:c1:m1
def __init__(self, source_service, event_type, handler_type=SERVICE_POOL,<EOL>reliable_delivery=True, requeue_on_error=False, **kwargs):
self.source_service = source_service<EOL>self.event_type = event_type<EOL>self.handler_type = handler_type<EOL>self.reliable_delivery = reliable_delivery<EOL>super(EventHandler, self).__init__(<EOL>queue=None, requeue_on_error=requeue_on_error, **kwargs<EOL>)<EOL>
r""" Decorate a method as a handler of ``event_type`` events on the service called ``source_service``. :Parameters: source_service : str Name of the service that dispatches the event event_type : str Type of the event to handle handler_type : str Determines the behaviour of the handler in a cluster: - ``events.SERVICE_POOL``: Event handlers are pooled by service type and method, and one service instance from each pool receives the event. :: .-[queue]- (service X handler-meth-1) / exchange o --[queue]- (service X handler-meth-2) \ \ (service Y(inst. 1) handler-meth) \ / [queue] \ (service Y(inst. 2) handler-meth) - ``events.SINGLETON``: Events are received by only one registered handler, regardless of service type. If requeued on error, they may be handled by a different service instance. :: (service X handler-meth) / exchange o -- [queue] \ (service Y handler-meth) - ``events.BROADCAST``: Events will be received by every handler. Events are broadcast to every service instance, not just every service type. Instances are differentiated using :attr:`EventHandler.broadcast_identifier`. :: [queue]- (service X(inst. 1) handler-meth) / exchange o - [queue]- (service X(inst. 2) handler-meth) \ [queue]- (service Y handler-meth) requeue_on_error : bool # TODO: defined by Consumer actually.. If true, handlers will return the event to the queue if an error occurs while handling it. Defaults to False. reliable_delivery : bool If true, events will be held in the queue until there is a handler to consume them. Defaults to True.
f7224:c2:m0
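A hedged sketch of the decorator form for two of the handler types; the service and event names are invented::

    from nameko.events import BROADCAST, event_handler

    class Listener(object):
        name = "listener"

        # default SERVICE_POOL behaviour: one instance per pool handles it
        @event_handler("payments", "payment_received")
        def audit(self, payload):
            ...

        # every instance receives broadcasts; reliable_delivery must be
        # disabled with the default broadcast_identifier (see below)
        @event_handler("payments", "payment_received",
                       handler_type=BROADCAST, reliable_delivery=False)
        def refresh_cache(self, payload):
            ...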
@property<EOL><INDENT>def broadcast_identifier(self):<DEDENT>
if self.handler_type is not BROADCAST:<EOL><INDENT>return None<EOL><DEDENT>if self.reliable_delivery:<EOL><INDENT>raise EventHandlerConfigurationError(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>return uuid.uuid4().hex<EOL>
A unique string to identify a service instance for `BROADCAST` type handlers.

The `broadcast_identifier` is appended to the queue name when the `BROADCAST` handler type is used. It must uniquely identify service instances that receive broadcasts.

The default `broadcast_identifier` is a uuid that is set when the service starts. It will change when the service restarts, meaning that any unconsumed messages that were broadcast to the 'old' service instance will not be received by the 'new' one. ::

    @property
    def broadcast_identifier(self):
        # use a uuid as the identifier.
        # the identifier will change when the service restarts and
        # any unconsumed messages will be lost
        return uuid.uuid4().hex

The default behaviour is therefore incompatible with reliable delivery.

An alternative `broadcast_identifier` that would survive service restarts is ::

    @property
    def broadcast_identifier(self):
        # use the machine hostname as the identifier.
        # this assumes that only one instance of a service runs on
        # any given machine
        return socket.gethostname()

If neither of these approaches is appropriate, you could read the value out of a configuration file ::

    @property
    def broadcast_identifier(self):
        return self.config['SERVICE_IDENTIFIER']  # or similar

Broadcast queues are exclusive to ensure that `broadcast_identifier` values are unique.

Because this method is a descriptor, it will be called during container creation, regardless of the configured `handler_type`. See :class:`nameko.extensions.Extension` for more details.
f7224:c2:m1
@contextmanager<EOL>def run_services(config, *services, **kwargs):
kill_on_exit = kwargs.pop('<STR_LIT>', False)<EOL>runner = ServiceRunner(config)<EOL>for service in services:<EOL><INDENT>runner.add_service(service)<EOL><DEDENT>runner.start()<EOL>yield runner<EOL>if kill_on_exit:<EOL><INDENT>runner.kill()<EOL><DEDENT>else:<EOL><INDENT>runner.stop()<EOL><DEDENT>
Serves a number of services for a contextual block. The caller can specify a number of service classes, then serve them either stopping (default) or killing them on exiting the contextual block.

Example::

    with run_services(config, Foobar, Spam) as runner:
        # interact with services and stop them on exiting the block
    # services stopped

Additional configuration available to :class:`ServiceRunner` instances can be specified through keyword arguments::

    with run_services(config, Foobar, Spam, kill_on_exit=True):
        # interact with services
    # services killed

:Parameters:
    config : dict
        Configuration to instantiate the service containers with
    services : service definitions
        Services to be served for the contextual block
    kill_on_exit : bool (default=False)
        If ``True``, run ``kill()`` on the service containers when exiting the contextual block. Otherwise ``stop()`` will be called on the service containers on exiting the block.

:Returns: The configured :class:`ServiceRunner` instance
f7225:m0
def add_service(self, cls):
service_name = get_service_name(cls)<EOL>container = self.container_cls(cls, self.config)<EOL>self.service_map[service_name] = container<EOL>
Add a service class to the runner. There can only be one service class for a given service name. Service classes must be registered before calling start().
f7225:c0:m3
def start(self):
service_names = '<STR_LIT:U+002CU+0020>'.join(self.service_names)<EOL>_log.info('<STR_LIT>', service_names)<EOL>SpawningProxy(self.containers).start()<EOL>_log.debug('<STR_LIT>', service_names)<EOL>
Start all the registered services. A new container is created for each service using the container class provided in the __init__ method. All containers are started concurrently and the method will block until all have completed their startup routine.
f7225:c0:m4
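A typical lifecycle sketch around these methods; the service classes are hypothetical::

    from nameko.runners import ServiceRunner

    runner = ServiceRunner(config)
    runner.add_service(ServiceA)
    runner.add_service(ServiceB)

    runner.start()  # returns once all containers have started
    try:
        runner.wait()  # block until the containers stop
    except KeyboardInterrupt:
        runner.stop()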
def stop(self):
service_names = '<STR_LIT:U+002CU+0020>'.join(self.service_names)<EOL>_log.info('<STR_LIT>', service_names)<EOL>SpawningProxy(self.containers).stop()<EOL>_log.debug('<STR_LIT>', service_names)<EOL>
Stop all running containers concurrently. The method blocks until all containers have stopped.
f7225:c0:m5
def kill(self):
service_names = '<STR_LIT:U+002CU+0020>'.join(self.service_names)<EOL>_log.info('<STR_LIT>', service_names)<EOL>SpawningProxy(self.containers).kill()<EOL>_log.debug('<STR_LIT>', service_names)<EOL>
Kill all running containers concurrently. The method will block until all containers have stopped.
f7225:c0:m6
def wait(self):
try:<EOL><INDENT>SpawningProxy(self.containers, abort_on_error=True).wait()<EOL><DEDENT>except Exception:<EOL><INDENT>self.stop()<EOL>raise<EOL><DEDENT>
Wait for all running containers to stop.
f7225:c0:m7
@property<EOL><INDENT>def interface(self):<DEDENT>
return self<EOL>
An interface to this container for use by extensions.
f7226:c1:m2
def start(self):
_log.debug('<STR_LIT>', self)<EOL>self.started = True<EOL>with _log_time('<STR_LIT>', self):<EOL><INDENT>self.extensions.all.setup()<EOL>self.extensions.all.start()<EOL><DEDENT>
Start a container by starting all of its extensions.
f7226:c1:m3
def stop(self):
if self._died.ready():<EOL><INDENT>_log.debug('<STR_LIT>', self)<EOL>return<EOL><DEDENT>if self._being_killed:<EOL><INDENT>_log.debug('<STR_LIT>', self)<EOL>try:<EOL><INDENT>self._died.wait()<EOL><DEDENT>except:<EOL><INDENT>pass <EOL><DEDENT>return<EOL><DEDENT>_log.debug('<STR_LIT>', self)<EOL>with _log_time('<STR_LIT>', self):<EOL><INDENT>self.entrypoints.all.stop()<EOL>self._worker_pool.waitall()<EOL>self.dependencies.all.stop()<EOL>self.subextensions.all.stop()<EOL>self._kill_managed_threads()<EOL>self.started = False<EOL>if not self._died.ready():<EOL><INDENT>self._died.send(None)<EOL><DEDENT><DEDENT>
Stop the container gracefully. First all entrypoints are asked to ``stop()``. This ensures that no new worker threads are started. It is the extensions' responsibility to gracefully shut down when ``stop()`` is called on them and only return when they have stopped. After all entrypoints have stopped the container waits for any active workers to complete. After all active workers have stopped the container stops all dependency providers. At this point there should be no more managed threads. In case there are any managed threads, they are killed by the container.
f7226:c1:m4
def kill(self, exc_info=None):
if self._being_killed:<EOL><INDENT>_log.debug('<STR_LIT>', self)<EOL>try:<EOL><INDENT>self._died.wait()<EOL><DEDENT>except:<EOL><INDENT>pass <EOL><DEDENT>return<EOL><DEDENT>self._being_killed = True<EOL>if self._died.ready():<EOL><INDENT>_log.debug('<STR_LIT>', self)<EOL>return<EOL><DEDENT>if exc_info is not None:<EOL><INDENT>_log.info('<STR_LIT>', self, exc_info[<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>_log.info('<STR_LIT>', self)<EOL><DEDENT>def safely_kill_extensions(ext_set):<EOL><INDENT>try:<EOL><INDENT>ext_set.kill()<EOL><DEDENT>except Exception as exc:<EOL><INDENT>_log.warning('<STR_LIT>', exc)<EOL><DEDENT><DEDENT>safely_kill_extensions(self.entrypoints.all)<EOL>self._kill_worker_threads()<EOL>safely_kill_extensions(self.extensions.all)<EOL>self._kill_managed_threads()<EOL>self.started = False<EOL>if not self._died.ready():<EOL><INDENT>self._died.send(None, exc_info)<EOL><DEDENT>
Kill the container in a semi-graceful way. Entrypoints are killed, followed by any active worker threads. Next, dependencies are killed. Finally, any remaining managed threads are killed. If ``exc_info`` is provided, the exception will be raised by :meth:`wait`.
f7226:c1:m5
def wait(self):
return self._died.wait()<EOL>
Block until the container has been stopped. If the container was stopped due to an exception, ``wait()`` will raise it. Any unhandled exception raised in a managed thread or in the worker lifecycle (e.g. inside :meth:`DependencyProvider.worker_setup`) results in the container being ``kill()``ed, and the exception raised from ``wait()``.
f7226:c1:m6
def spawn_worker(self, entrypoint, args, kwargs,<EOL>context_data=None, handle_result=None):
if self._being_killed:<EOL><INDENT>_log.info("<STR_LIT>")<EOL>raise ContainerBeingKilled()<EOL><DEDENT>service = self.service_cls()<EOL>worker_ctx = WorkerContext(<EOL>self, service, entrypoint, args, kwargs, data=context_data<EOL>)<EOL>_log.debug('<STR_LIT>', worker_ctx)<EOL>gt = self._worker_pool.spawn(<EOL>self._run_worker, worker_ctx, handle_result<EOL>)<EOL>gt.link(self._handle_worker_thread_exited, worker_ctx)<EOL>self._worker_threads[worker_ctx] = gt<EOL>return worker_ctx<EOL>
Spawn a worker thread for running the service method decorated by `entrypoint`. ``args`` and ``kwargs`` are used as parameters for the service method. ``context_data`` is used to initialize a ``WorkerContext``. ``handle_result`` is an optional function which may be passed in by the entrypoint. It is called with the result returned or error raised by the service method. If provided it must return a value for ``result`` and ``exc_info`` to propagate to dependencies; these may be different to those returned by the service method.
f7226:c1:m7
def spawn_managed_thread(self, fn, identifier=None):
if identifier is None:<EOL><INDENT>identifier = getattr(fn, '<STR_LIT>', "<STR_LIT>")<EOL><DEDENT>gt = eventlet.spawn(fn)<EOL>self._managed_threads[gt] = identifier<EOL>gt.link(self._handle_managed_thread_exited, identifier)<EOL>return gt<EOL>
Spawn a managed thread to run ``fn`` on behalf of an extension. The passed `identifier` will be included in logs related to this thread, and otherwise defaults to `fn.__name__`, if it is set. Any uncaught errors inside ``fn`` cause the container to be killed. It is the caller's responsibility to terminate their spawned threads. Threads are killed automatically if they are still running after all extensions are stopped during :meth:`ServiceContainer.stop`. Extensions should delegate all thread spawning to the container.
f7226:c1:m8
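A sketch of an extension delegating its thread to the container; the ``Heartbeat`` extension is invented::

    from nameko.extensions import Extension

    class Heartbeat(Extension):

        def start(self):
            # uncaught errors in _run kill the container; threads still
            # running after all extensions stop are killed automatically
            self.container.spawn_managed_thread(
                self._run, identifier="heartbeat.run"
            )

        def _run(self):
            while True:
                ...  # periodic work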
def _kill_worker_threads(self):
num_workers = len(self._worker_threads)<EOL>if num_workers:<EOL><INDENT>_log.warning('<STR_LIT>', num_workers)<EOL>for worker_ctx, gt in list(self._worker_threads.items()):<EOL><INDENT>_log.warning('<STR_LIT>', worker_ctx)<EOL>gt.kill()<EOL><DEDENT><DEDENT>
Kill any currently executing worker threads. See :meth:`ServiceContainer.spawn_worker`
f7226:c1:m14
def _kill_managed_threads(self):
num_threads = len(self._managed_threads)<EOL>if num_threads:<EOL><INDENT>_log.warning('<STR_LIT>', num_threads)<EOL>for gt, identifier in list(self._managed_threads.items()):<EOL><INDENT>_log.warning('<STR_LIT>', identifier)<EOL>gt.kill()<EOL><DEDENT><DEDENT>
Kill any currently executing managed threads. See :meth:`ServiceContainer.spawn_managed_thread`
f7226:c1:m15
def get_wsgi_app(self):
return WsgiApp(self)<EOL>
Get the WSGI application used to process requests. This method can be overridden to apply WSGI middleware or to replace the WSGI application altogether.
f7227:c1:m5
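A hedged override sketch wrapping the app in middleware; the subclass and ``SomeMiddleware`` are invented, and ``WebServer`` is assumed to be nameko's web server extension::

    from nameko.web.server import WebServer

    class MiddlewareWebServer(WebServer):

        def get_wsgi_app(self):
            app = super(MiddlewareWebServer, self).get_wsgi_app()
            return SomeMiddleware(app)  # hypothetical WSGI middleware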
def get_wsgi_server(<EOL>self, sock, wsgi_app, protocol=HttpOnlyProtocol, debug=False<EOL>):
return wsgi.Server(<EOL>sock,<EOL>sock.getsockname(),<EOL>wsgi_app,<EOL>protocol=protocol,<EOL>debug=debug,<EOL>log=getLogger(__name__)<EOL>)<EOL>
Get the WSGI server used to process requests.
f7227:c1:m6
def get_subscriptions(self, socket_id):
con = self._get_connection(socket_id, create=False)<EOL>if con is None:<EOL><INDENT>return []<EOL><DEDENT>return sorted(con.subscriptions)<EOL>
Returns a list of all the subscriptions of a socket.
f7229:c3:m2
def subscribe(self, socket_id, channel):
con = self._get_connection(socket_id)<EOL>self.subscriptions.setdefault(channel, set()).add(socket_id)<EOL>con.subscriptions.add(channel)<EOL>
Subscribes a socket to a channel.
f7229:c3:m3
def unsubscribe(self, socket_id, channel):
con = self._get_connection(socket_id, create=False)<EOL>if con is not None:<EOL><INDENT>con.subscriptions.discard(channel)<EOL><DEDENT>try:<EOL><INDENT>self.subscriptions[channel].discard(socket_id)<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>
Unsubscribes a socket from a channel.
f7229:c3:m4
def broadcast(self, channel, event, data):
payload = self._server.serialize_event(event, data)<EOL>for socket_id in self.subscriptions.get(channel, ()):<EOL><INDENT>rv = self._server.sockets.get(socket_id)<EOL>if rv is not None:<EOL><INDENT>rv.socket.send(payload)<EOL><DEDENT><DEDENT>
Broadcasts an event to all sockets listening on a channel.
f7229:c3:m5
def unicast(self, socket_id, event, data):
payload = self._server.serialize_event(event, data)<EOL>rv = self._server.sockets.get(socket_id)<EOL>if rv is not None:<EOL><INDENT>rv.socket.send(payload)<EOL>return True<EOL><DEDENT>return False<EOL>
Sends an event to a single socket. Returns `True` if that worked or `False` if not.
f7229:c3:m6
@receive('<STR_LIT>')<EOL><INDENT>def handle_sqs_message(self, body):<DEDENT>
print(body)<EOL>return body<EOL>
This method is called by the `receive` entrypoint whenever a message is sent to the given SQS queue.
f7231:c0:m0
@rpc<EOL><INDENT>def add_to_basket(self, item_code):<DEDENT>
stock_level = self.stock_rpc.check_stock(item_code)<EOL>if stock_level > <NUM_LIT:0>:<EOL><INDENT>self.user_basket.add(item_code)<EOL>self.fire_event("<STR_LIT>", item_code)<EOL>return item_code<EOL><DEDENT>raise ItemOutOfStockError(item_code)<EOL>
Add item identified by ``item_code`` to the shopping basket. This is a toy example! Ignore the obvious race condition.
f7247:c4:m0
@rpc<EOL><INDENT>def checkout(self):<DEDENT>
total_price = sum(self.stock_rpc.check_price(item)<EOL>for item in self.user_basket)<EOL>invoice = self.invoice_rpc.prepare_invoice(total_price)<EOL>self.payment_rpc.take_payment(invoice)<EOL>checkout_event_data = {<EOL>'<STR_LIT>': invoice,<EOL>'<STR_LIT>': list(self.user_basket)<EOL>}<EOL>self.fire_event("<STR_LIT>", checkout_event_data)<EOL>return total_price<EOL>
Take payment for all items in the shopping basket.
f7247:c4:m1
@rpc<EOL><INDENT>def check_price(self, item_code):<DEDENT>
try:<EOL><INDENT>return self.warehouse[item_code]['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>raise ItemDoesNotExistError(item_code)<EOL><DEDENT>
Check the price of an item.
f7247:c6:m0
@rpc<EOL><INDENT>def check_stock(self, item_code):<DEDENT>
try:<EOL><INDENT>return self.warehouse[item_code]['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>raise ItemDoesNotExistError(item_code)<EOL><DEDENT>
Check the stock level of an item.
f7247:c6:m1
@rpc<EOL><INDENT>@timer(<NUM_LIT:100>)<EOL>def monitor_stock(self):<DEDENT>
raise NotImplementedError()<EOL>
Periodic stock monitoring method. Can also be triggered manually over RPC. This is an expensive process that we don't want to exercise during integration testing...
f7247:c6:m2
@event_handler('<STR_LIT>', "<STR_LIT>")<EOL><INDENT>def dispatch_items(self, event_data):<DEDENT>
raise NotImplementedError()<EOL>
Dispatch items from stock on successful checkouts. This is an expensive process that we don't want to exercise during integration testing...
f7247:c6:m3
@rpc<EOL><INDENT>def prepare_invoice(self, amount):<DEDENT>
address = self.get_user_details().get('<STR_LIT:address>')<EOL>fullname = self.get_user_details().get('<STR_LIT>')<EOL>username = self.get_user_details().get('<STR_LIT:username>')<EOL>msg = "<STR_LIT>".format(fullname, amount)<EOL>invoice = {<EOL>'<STR_LIT:message>': msg,<EOL>'<STR_LIT>': amount,<EOL>'<STR_LIT>': username,<EOL>'<STR_LIT:address>': address<EOL>}<EOL>return invoice<EOL>
Prepare an invoice for ``amount`` for the current user.
f7247:c8:m0
@rpc<EOL><INDENT>def take_payment(self, invoice):<DEDENT>
raise NotImplementedError()<EOL>
Take payment from a customer according to ``invoice``. This is an expensive process that we don't want to exercise during integration testing...
f7247:c9:m0
@task<EOL>def develop_link(options, info):
project_dir = ph.path(__file__).realpath().parent<EOL>info('<STR_LIT>')<EOL>version_info = ch.conda_version_info('<STR_LIT>')<EOL>if version_info.get('<STR_LIT>') is not None:<EOL><INDENT>info('<STR_LIT>')<EOL>ch.conda_exec('<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>verbose=True)<EOL><DEDENT>else:<EOL><INDENT>info('<STR_LIT>')<EOL><DEDENT>info('<STR_LIT>')<EOL>recipe_dir = project_dir.joinpath('<STR_LIT>').realpath()<EOL>ch.conda_exec('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:root>', '<STR_LIT>', verbose=True)<EOL>ch.development_setup(recipe_dir, verbose=True)<EOL>info('<STR_LIT>')<EOL>pio_bin_dir = pioh.conda_bin_path()<EOL>fw_bin_dir = pio_bin_dir.joinpath('<STR_LIT>')<EOL>if not fw_bin_dir.exists():<EOL><INDENT>project_dir.joinpath('<STR_LIT>').junction(fw_bin_dir)<EOL><DEDENT>fw_config_ini = fw_bin_dir.joinpath('<STR_LIT>')<EOL>if not fw_config_ini.exists():<EOL><INDENT>project_dir.joinpath('<STR_LIT>').link(fw_config_ini)<EOL><DEDENT>info('<STR_LIT>')<EOL>ch.conda_exec('<STR_LIT>', project_dir, verbose=True)<EOL>info(<NUM_LIT> * '<STR_LIT:->' + '<STR_LIT>')<EOL>
Prepare development environment. Perform the following steps: - Uninstall ``dmf_control_board_firmware`` if installed as Conda package. - Install build and run-time Conda dependencies. - Link working ``.pioenvs`` directory into Conda ``Library`` directory to make development versions of compiled firmware binaries available to Python API. - Link ``dmf_control_board_firmware`` Python package into site packages directory. See Also -------- :func:`develop_unlink`
f7262:m5
@task<EOL>def develop_unlink(options, info):
project_dir = ph.path(__file__).realpath().parent<EOL>info('<STR_LIT>')<EOL>pio_bin_dir = pioh.conda_bin_path()<EOL>fw_bin_dir = pio_bin_dir.joinpath('<STR_LIT>')<EOL>if fw_bin_dir.exists():<EOL><INDENT>fw_config_ini = fw_bin_dir.joinpath('<STR_LIT>')<EOL>if fw_config_ini.exists():<EOL><INDENT>fw_config_ini.unlink()<EOL><DEDENT>fw_bin_dir.unlink()<EOL><DEDENT>info('<STR_LIT>')<EOL>ch.conda_exec('<STR_LIT>', '<STR_LIT>', project_dir, verbose=True)<EOL>info(<NUM_LIT> * '<STR_LIT:->' + '<STR_LIT>')<EOL>
Tear down development environment. Perform the following steps: - Unlink working ``.pioenvs`` directory from Conda ``Library`` directory. - Unlink ``dmf_control_board_firmware`` Python package from site packages directory. See Also -------- :func:`develop_link`
f7262:m6
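Both functions are Paver tasks, so in a project checkout they would normally be invoked by task name. A sketch of the round-trip, assuming ``paver`` is on the ``PATH`` and the working directory contains the ``pavement.py`` defining these tasks:

    import subprocess

    subprocess.check_call(['paver', 'develop_link'])    # set up dev environment
    # ... develop / test ...
    subprocess.check_call(['paver', 'develop_unlink'])  # restore environment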
def disttar_string(target, source, env):
return '<STR_LIT>' % target[<NUM_LIT:0>]<EOL>
This is what gets printed on the console. We'll strip out the list of source files, since it tends to get very long. If you want to see the contents, the easiest way is to uncomment the line 'Adding to TAR file' below.
f7265:m2
def disttar(target, source, env):
import tarfile<EOL>env_dict = env.Dictionary()<EOL>if env_dict.get("<STR_LIT>") in ["<STR_LIT>", "<STR_LIT>"]:<EOL><INDENT>tar_format = env_dict["<STR_LIT>"]<EOL><DEDENT>else:<EOL><INDENT>tar_format = "<STR_LIT>"<EOL><DEDENT>base_name = str(target[<NUM_LIT:0>]).split('<STR_LIT>')[<NUM_LIT:0>]<EOL>(target_dir, dir_name) = os.path.split(base_name)<EOL>if target_dir and not os.path.exists(target_dir):<EOL><INDENT>os.makedirs(target_dir)<EOL><DEDENT>print >> sys.stderr, '<STR_LIT>' % str(target[<NUM_LIT:0>])<EOL>print >> sys.stderr, '<STR_LIT>' % [str(s) for s in source]<EOL>tar = tarfile.open(str(target[<NUM_LIT:0>]), "<STR_LIT>" % tar_format)<EOL>for item in source:<EOL><INDENT>item = str(item)<EOL>sys.stderr.write("<STR_LIT:.>")<EOL>tar.add(item,'<STR_LIT>' % (dir_name,item))<EOL><DEDENT>sys.stderr.write("<STR_LIT:\n>") <EOL>tar.close()<EOL>
tar archive builder
f7265:m3
def disttar_suffix(env, sources):
env_dict = env.Dictionary()<EOL>if env_dict.get("<STR_LIT>") in ["<STR_LIT>", "<STR_LIT>"]:<EOL><INDENT>return "<STR_LIT>" + env_dict["<STR_LIT>"]<EOL><DEDENT>else:<EOL><INDENT>return "<STR_LIT>"<EOL><DEDENT>
tar archive suffix generator
f7265:m4
def generate(env):
disttar_action=SCons.Action.Action(disttar, disttar_string)<EOL>env['<STR_LIT>']['<STR_LIT>'] = Builder(<EOL>action=disttar_action<EOL>, emitter=disttar_emitter<EOL>, suffix = disttar_suffix<EOL>, target_factory = env.fs.Entry<EOL>)<EOL>env.AppendUnique(<EOL>DISTTAR_FORMAT = '<STR_LIT>'<EOL>)<EOL>
Add builders and construction variables for the DistTar builder.
f7265:m5
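Once ``generate`` has registered the builder, it can be used from an ``SConstruct`` (itself Python). A sketch; the builder name ``DistTar``, the tool name ``disttar``, and the format value ``'gz'`` are assumptions, since the corresponding string literals are elided above:

    # SConstruct
    env = Environment(tools=['default', 'disttar'], toolpath=['scons_tools'])
    env['DISTTAR_FORMAT'] = 'gz'  # assumed gzip; suffix becomes .tar.gz
    env.DistTar('dist/myproject', [env.Dir('#')])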
def exists(env):
try:<EOL><INDENT>import os<EOL>import tarfile<EOL><DEDENT>except ImportError:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>
Make sure this tool exists.
f7265:m6
def fit_fb_calibration(df, calibration):
<EOL>R_fb = pd.Series([<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>])<EOL>C_fb = pd.Series(len(calibration.C_fb) * [<NUM_LIT>])<EOL>def error(p0, df, calibration):<EOL><INDENT>Z = <NUM_LIT><EOL>R_fb = p0[<NUM_LIT:0>]<EOL>if len(p0) == <NUM_LIT:2>:<EOL><INDENT>C_fb = p0[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>C_fb = <NUM_LIT:0><EOL><DEDENT>R_hv = calibration.R_hv[df.hv_resistor.values]<EOL>C_hv = calibration.C_hv[df.hv_resistor.values]<EOL>V_actuation = compute_from_transfer_function(calibration.hw_version<EOL>.major, '<STR_LIT>', V2=df.V_hv,<EOL>R1=Z, R2=R_hv, C2=C_hv,<EOL>f=df.frequency)<EOL>V_impedance = compute_from_transfer_function(calibration.hw_version<EOL>.major, '<STR_LIT>',<EOL>V1=V_actuation,<EOL>C1=df.test_capacitor,<EOL>R2=R_fb, C2=C_fb,<EOL>f=df.frequency)<EOL>return df.V_fb - V_impedance<EOL><DEDENT>def fit_model(p0, df, calibration):<EOL><INDENT>p1, cov_x, infodict, mesg, ier = scipy.optimize.leastsq(<EOL>error, p0, args=(df, calibration), full_output=True)<EOL>p1 = np.abs(p1)<EOL>E = error(p1, df, calibration)<EOL>return p1, E, cov_x<EOL><DEDENT>CI = []<EOL>feedback_records = []<EOL>for i in range(len(calibration.R_fb)):<EOL><INDENT>df_i = df.loc[(df.fb_resistor == i)].dropna()<EOL>if df_i.shape[<NUM_LIT:0>] < <NUM_LIT:2>:<EOL><INDENT>CI.append([<NUM_LIT:0>, <NUM_LIT:0>])<EOL>continue<EOL><DEDENT>p0_1 = [R_fb[i]]<EOL>p1_1, E_1, cov_x_1 = fit_model(p0_1, df_i, calibration)<EOL>df_1 = (len(E_1) - len(p0_1))<EOL>chi2_1 = np.sum(E_1 ** <NUM_LIT:2>)<EOL>chi2r_1 = chi2_1 / (df_1 - <NUM_LIT:1>)<EOL>p0_2 = [R_fb[i], C_fb[i]]<EOL>p1_2, E_2, cov_x_2 = fit_model(p0_2, df_i, calibration)<EOL>df_2 = (len(E_2) - len(p0_2))<EOL>chi2_2 = np.sum(E_2 ** <NUM_LIT:2>)<EOL>chi2r_2 = chi2_2 / (df_2 - <NUM_LIT:1>)<EOL>F = (chi2_1 - chi2_2) / chi2r_2<EOL>p_value = scipy.stats.f.cdf(F, <NUM_LIT:1>, df_2-<NUM_LIT:1>)<EOL>if p_value > <NUM_LIT> and cov_x_2 is not None:<EOL><INDENT>model = '<STR_LIT>'<EOL>chi2r = chi2r_2<EOL>R_fb_i = p1_2[<NUM_LIT:0>]<EOL>C_fb_i = p1_2[<NUM_LIT:1>]<EOL>CI.append((<NUM_LIT:100> * np.sqrt(chi2r_2 * np.diag(cov_x_2)) / p1_2))<EOL><DEDENT>else: <EOL><INDENT>model = '<STR_LIT>'<EOL>chi2r = chi2r_2<EOL>R_fb_i = p1_1[<NUM_LIT:0>]<EOL>C_fb_i = <NUM_LIT:0><EOL>if cov_x_1 is None:<EOL><INDENT>cov_x_1 = [<NUM_LIT:0>]<EOL><DEDENT>CI.append((<NUM_LIT:100> * np.sqrt(chi2r_1 * np.diag(cov_x_1)) /<EOL>p1_1).tolist() + [<NUM_LIT:0>])<EOL><DEDENT>feedback_records.append([int(i), model, df_i.shape[<NUM_LIT:0>], R_fb_i, CI[i][<NUM_LIT:0>],<EOL>C_fb_i, CI[i][<NUM_LIT:1>], F, (<NUM_LIT> * np.sqrt(chi2r)),<EOL>p_value])<EOL><DEDENT>calibration_df = pd.DataFrame(feedback_records,<EOL>columns=['<STR_LIT>', '<STR_LIT>', '<STR_LIT:N>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT:F>',<EOL>'<STR_LIT>', '<STR_LIT>'])<EOL>return calibration_df<EOL>
Fit feedback calibration data to solve for values of `C_fb[:]` and `R_fb[:]`. Returns a `pandas.DataFrame` indexed by the feedback resistor/capacitance index, and with the following columns: - Model: Either with parasitic capacitance term or not. - N: Number of samples used for fit. - F: F-value from the F-test comparing the two models. - p-value: p-value from the F-test. - R_fb: Feedback resistor value based on fit. - R-CI %: Confidence interval for feedback resistor value. - C_fb: Feedback capacitor value based on fit (0 if the no-capacitance model is used). - C-CI %: Confidence interval for feedback capacitance value. __N.B.__ This function does not actually _update_ the calibration, it only performs the fit. See `apply_calibration`.
f7270:m2
def apply_calibration(df, calibration_df, calibration):
from dmf_control_board_firmware import FeedbackResults<EOL>for i, (fb_resistor, R_fb, C_fb) in calibration_df[['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']].iterrows():<EOL><INDENT>calibration.R_fb[int(fb_resistor)] = R_fb<EOL>calibration.C_fb[int(fb_resistor)] = C_fb<EOL><DEDENT>cleaned_df = df.dropna()<EOL>grouped = cleaned_df.groupby(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>for (f, channel, repeat_index), group in grouped:<EOL><INDENT>r = FeedbackResults(group.V_actuation.iloc[<NUM_LIT:0>], f, <NUM_LIT>,<EOL>group.V_hv.values, group.hv_resistor.values,<EOL>group.V_fb.values, group.fb_resistor.values,<EOL>calibration)<EOL>df.loc[group.index, '<STR_LIT:C>'] = r.capacitance()<EOL><DEDENT>
Apply calibration values from `fit_fb_calibration` result to `calibration` object.
f7270:m3
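The two functions above are meant to be used together: the fit result feeds directly into the calibration update. A sketch of the round-trip, assuming ``df`` holds the measured calibration sweep and ``calibration`` is the board's calibration object:

    # Fit R_fb/C_fb per feedback resistor, write them back into the
    # calibration, then recompute capacitance for every measurement group.
    calibration_df = fit_fb_calibration(df, calibration)
    apply_calibration(df, calibration_df, calibration)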
def swap_default(mode, equation, symbol_names, default, **kwargs):
if mode == '<STR_LIT>':<EOL><INDENT>swap_f = _subs<EOL>default_swap_f = _subs<EOL><DEDENT>elif mode == '<STR_LIT>':<EOL><INDENT>swap_f = _limit<EOL>default_swap_f = _subs<EOL><DEDENT>elif mode == '<STR_LIT>':<EOL><INDENT>swap_f = _subs<EOL>default_swap_f = _limit<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('''<STR_LIT>'''<EOL>'''<STR_LIT>''')<EOL><DEDENT>result = equation<EOL>for s in symbol_names:<EOL><INDENT>if s in kwargs:<EOL><INDENT>if isinstance(kwargs[s], Iterable):<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>result = swap_f(result, s, kwargs[s])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>result = default_swap_f(result, s, default)<EOL><DEDENT><DEDENT>return result<EOL>
Given a `sympy` equation or equality, along with a list of symbol names, swap in the specified default value for each symbol for which a value is not provided through a keyword argument. Depending on ``mode``, each swap is performed either by direct substitution or by taking the limit as the symbol approaches the value; the examples below use ``subs_default``, the substitution-based wrapper. For example, consider the following equality: >>> sp.pprint(H) V₂ Z₂ ── = ── V₁ Z₁ Let us substitute a default value of 1 for terms Z1 and Z2: >>> sp.pprint(subs_default(H, ['Z1', 'Z2'], 1)) V₂ ── = 1 V₁ Now, let us specify a default value of 1 for terms Z1 and Z2, but provide an overriding value for Z1: >>> sp.pprint(subs_default(H, ['Z1', 'Z2'], 1, Z1=4)) V₂ ── = 1/4 V₁ Note that keyword arguments for terms not specified in the list of symbol names are ignored: >>> sp.pprint(subs_default(H, ['Z1', 'Z2'], 1, Z1=4, Q=7)) V₂ ── = 1/4 V₁
f7271:m4
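The reason a limit-based mode exists alongside plain substitution is that infinite defaults do not substitute cleanly. A standalone sympy illustration of the difference (independent of this helper):

    import sympy as sp

    Z1, Z2 = sp.symbols('Z1 Z2')
    expr = Z1 / (Z1 + Z2)

    expr.subs(Z1, sp.oo)       # stays as the indeterminate oo/(Z2 + oo)
    sp.limit(expr, Z1, sp.oo)  # evaluates to 1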
def z_transfer_functions():
<EOL>V1, V2, Z1, Z2 = sp.symbols('<STR_LIT>')<EOL>xfer_funcs = pd.Series([sp.Eq(V2 / Z2, V1 / (Z1 + Z2)),<EOL>sp.Eq(V2 / V1, Z2 / Z1)],<EOL>index=[<NUM_LIT:1>, <NUM_LIT:2>])<EOL>xfer_funcs.index.name = '<STR_LIT>'<EOL>return xfer_funcs<EOL>
r''' Return a symbolic equality representation of the transfer function of RMS voltage measured by either control board analog feedback circuit. The transfer functions describe the following relationships:: # Hardware V1 # V₂/Z₂ = V₁/(Z₁ + Z₂) # Hardware V2 # V₂/V₁ = Z₂/Z₁ where $V_{1}$ denotes the high-voltage actuation signal from the amplifier output and $V_{2}$ denotes the signal sufficiently attenuated to fall within the measurable input range of the analog-to-digital converter *(approx. 5V)*. In control board **hardware version 1** the feedback circuit is a passive voltage divider ($V_1$ drives $Z_1$ in series with $Z_2$, and $V_2$ is measured across $Z_2$); in **hardware version 2** it is an inverting op-amp stage with input impedance $Z_1$ and feedback impedance $Z_2$. Notes ----- - The symbolic equality can be solved for any symbol, _e.g.,_ $V_{1}$ or $V_{2}$. - A symbolically solved representation can be converted to a Python function using `sympy.utilities.lambdify.lambdify`_, to compute results for specific values of the remaining parameters. .. _`sympy.utilities.lambdify.lambdify`: http://docs.sympy.org/dev/modules/utilities/lambdify.html
f7271:m5
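Following the Notes above, a sketch of solving the hardware-version-1 equality for ``V1`` and turning it into a numerical function (symbol names as given in the docstring):

    import sympy as sp

    H = z_transfer_functions()[1]          # V2/Z2 == V1/(Z1 + Z2)
    V1 = sp.solve(H, sp.Symbol('V1'))[0]   # -> V2*(Z1 + Z2)/Z2
    compute_V1 = sp.lambdify(sp.symbols('V2 Z1 Z2'), V1, 'numpy')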
def rc_transfer_function(eq, Zs=None):
if Zs is None:<EOL><INDENT>Zs = [s.name for s in eq.atoms(sp.Symbol) if s.name.startswith('<STR_LIT>')]<EOL><DEDENT>result = eq<EOL>cre_Z = re.compile(r'<STR_LIT>')<EOL>sub_params = [(z, sp.symbols('<STR_LIT>' % (s, s)))<EOL>for z, s in [(Z, cre_Z.match(Z).group('<STR_LIT>'))<EOL>for Z in Zs]]<EOL>for Z, (R, C, omega) in sub_params:<EOL><INDENT>result = result.subs(Z, <NUM_LIT:1> / (<NUM_LIT:1> / R + sp.I * omega * C))<EOL><DEDENT>return result<EOL>
Substitute resistive and capacitive components for all ``Zs`` terms _(all terms starting with ``Z`` by default)_ in the provided equation, where each $Z$ is modeled as a resistive impedance $Z_R$ in parallel with a capacitive impedance $Z_C$. See the definitions of $Z_R$ *(resistive impedance)* and $Z_C$ *(capacitive impedance)* `here`_, where $\omega$ is the `angular frequency`_. Specifically:: Z = 1 / (1/R + ⅈ⋅C⋅ω) .. _`here`: http://en.wikipedia.org/wiki/Electrical_impedance#Device_examples .. _`angular frequency`: http://en.wikipedia.org/wiki/Angular_frequency
f7271:m6
@lru_cache(maxsize=<NUM_LIT>)<EOL>def get_transfer_function(hardware_major_version, solve_for=None, Zs=None):
xfer_func = z_transfer_functions()[hardware_major_version]<EOL>symbols = OrderedDict([(s.name, s)<EOL>for s in xfer_func.atoms(sp.Symbol)])<EOL>if Zs is None:<EOL><INDENT>Zs = [s for s in symbols if s != solve_for and s.startswith('<STR_LIT>')]<EOL><DEDENT>H = rc_transfer_function(xfer_func, Zs)<EOL>if solve_for is None:<EOL><INDENT>return H<EOL><DEDENT>symbols = OrderedDict([(s.name, s)<EOL>for s in H.atoms(sp.Symbol)])<EOL>solve_for_symbol = symbols[solve_for]<EOL>solved = sp.Eq(solve_for_symbol, sp.solve(H, solve_for_symbol)[<NUM_LIT:0>])<EOL>return solved<EOL>
Parameters ---------- hardware_major_version : int Major version of control board hardware *(1 or 2)*. solve_for : str, optional Rearrange equality with ``solve_for`` symbol as LHS. Zs : list, optional List of impedance symbols (i.e., ``Z`` terms) to substitute with resistive and capacitive terms. By default, all ``Z`` terms are substituted with corresponding ``R`` and ``C`` values. Returns ------- sympy.Equality Feedback measurement circuit symbolic transfer function for the specified control board hardware version. Note ---- This function is memoized, to improve performance for repeated calls with the same arguments.
f7271:m7
def compute_from_transfer_function(hardware_major_version, solve_for,<EOL>**kwargs):
symbolic = kwargs.pop('<STR_LIT>', False)<EOL>Zs = tuple([s for s in kwargs if s != solve_for and s.startswith('<STR_LIT>')])<EOL>if not Zs:<EOL><INDENT>Zs = None<EOL><DEDENT>result = get_transfer_function(hardware_major_version, solve_for=solve_for,<EOL>Zs=Zs)<EOL>Rs = set([s.name for s in result.atoms(sp.Symbol)<EOL>if s.name.startswith('<STR_LIT:R>')])<EOL>Cs = set([s.name for s in result.atoms(sp.Symbol)<EOL>if s.name.startswith('<STR_LIT:C>')])<EOL>result = limit_default(result, Rs, sp.oo, **kwargs)<EOL>result = limit_default(result, Cs, <NUM_LIT:0>, **kwargs)<EOL>if '<STR_LIT:f>' in kwargs:<EOL><INDENT>result = result.subs('<STR_LIT>', sp.sympify('<STR_LIT>'))<EOL>if not isinstance(kwargs['<STR_LIT:f>'], Iterable) and kwargs['<STR_LIT:f>'] == True:<EOL><INDENT>del kwargs['<STR_LIT:f>']<EOL><DEDENT><DEDENT>for k, v in kwargs.iteritems():<EOL><INDENT>if isinstance(v, Iterable):<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>result = result.subs(k, v)<EOL><DEDENT><DEDENT>if symbolic:<EOL><INDENT>return result<EOL><DEDENT>else:<EOL><INDENT>H = result.rhs<EOL>symbols = [s.name for s in H.atoms(sp.Symbol)]<EOL>func = sp.lambdify('<STR_LIT:U+002CU+0020>'.join(symbols), sp.Abs(H), '<STR_LIT>')<EOL>return func(*[kwargs[s] for s in symbols])<EOL><DEDENT>
Conventions: - Symbols starting with ``Z`` are assumed to be impedance terms. - Symbols starting with ``R`` are assumed to be resistive impedance terms. - Symbols starting with ``C`` are assumed to be capacitive impedance terms. - ``omega`` is assumed to be an angular frequency term. - ``f`` is assumed to be a frequency term (i.e., ``omega = 2 * pi * f``). Parameters ---------- hardware_major_version : int Major version of control board hardware *(1 or 2)*. solve_for : str Variable in feedback transfer function to solve for *(e.g., ``Z1``, ``V1``, ``V2``, etc.)*. symbolic : bool, optional If ``True``, return :mod:`sympy` symbolic equality. **kwargs Scalar or array-like value to substitute for the term with the corresponding name in the transfer function. If the frequency term ``f`` is set to ``True``, the angular frequency ``omega`` is rewritten as ``2 * pi * f`` but ``f`` itself is left as a free symbol (useful together with ``symbolic=True``). Note ---- Either ``f`` or ``omega`` may be specified, *not* both.
f7271:m8
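A sketch of a typical call, mirroring the usage in the fitting code later in this section; all component values here are hypothetical:

    import numpy as np

    # Solve the hardware-version-2 transfer function for V1, vectorized
    # over measured V2 samples and their frequencies.
    V1 = compute_from_transfer_function(2, 'V1',
                                        V2=np.array([0.5, 1.0]),
                                        R1=10e6, R2=1e5, C2=3e-12,
                                        f=np.array([1e3, 10e3]))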
def plot_stat_summary(df, fig=None):
if fig is None:<EOL><INDENT>fig = plt.figure(figsize=(<NUM_LIT:8>, <NUM_LIT:8>))<EOL><DEDENT>grid = GridSpec(<NUM_LIT:3>, <NUM_LIT:2>)<EOL>stats = calculate_stats(df, groupby=['<STR_LIT>',<EOL>'<STR_LIT>']).dropna()<EOL>for i, stat in enumerate(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>axis = fig.add_subplot(grid[i, <NUM_LIT:0>])<EOL>axis.set_title(stat)<EOL>plot_colormap(stats, stat, axis=axis, fig=fig)<EOL>axis = fig.add_subplot(grid[i, <NUM_LIT:1>])<EOL>axis.set_title(stat)<EOL>try:<EOL><INDENT>axis.hist(stats[stat].values, bins=<NUM_LIT:50>)<EOL><DEDENT>except AttributeError:<EOL><INDENT>print(stats[stat].describe())<EOL><DEDENT><DEDENT>fig.tight_layout()<EOL>
Plot stats grouped by test capacitor load _and_ frequency. In other words, we calculate the mean of all samples in the data frame for each test capacitance and frequency pairing, plotting the following stats: - Root mean squared error - Coefficient of variation - Bias ## [Coefficient of variation][1] ## > In probability theory and statistics, the coefficient of > variation (CV) is a normalized measure of dispersion of a > probability distribution or frequency distribution. It is defined > as the ratio of the standard deviation to the mean. [1]: http://en.wikipedia.org/wiki/Coefficient_of_variation
f7272:m7
def measure_board_rms(control_board, n_samples=<NUM_LIT:10>, sampling_ms=<NUM_LIT:10>,<EOL>delay_between_samples_ms=<NUM_LIT:0>):
try:<EOL><INDENT>results = control_board.measure_impedance(n_samples, sampling_ms,<EOL>delay_between_samples_ms,<EOL>True, True, [])<EOL><DEDENT>except RuntimeError:<EOL><INDENT>logger.warning('<STR_LIT>'<EOL>'<STR_LIT>', exc_info=True)<EOL>data = pd.DataFrame(None, columns=['<STR_LIT>',<EOL>'<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>data = pd.DataFrame({'<STR_LIT>': results.V_hv})<EOL>data['<STR_LIT>'] = results.hv_resistor<EOL><DEDENT>return data<EOL>
Read RMS voltage samples from the control board's high-voltage feedback circuit.
f7275:m0
def find_good(control_board, actuation_steps, resistor_index, start_index,<EOL>end_index):
lower = start_index<EOL>upper = end_index<EOL>while lower < upper - <NUM_LIT:1>:<EOL><INDENT>index = lower + (upper - lower) / <NUM_LIT:2><EOL>v = actuation_steps[index]<EOL>control_board.set_waveform_voltage(v)<EOL>data = measure_board_rms(control_board)<EOL>valid_data = data[data['<STR_LIT>'] >= <NUM_LIT:0>]<EOL>if (valid_data['<STR_LIT>'] < resistor_index).sum():<EOL><INDENT>upper = index<EOL><DEDENT>else:<EOL><INDENT>lower = index<EOL><DEDENT><DEDENT>control_board.set_waveform_voltage(actuation_steps[lower])<EOL>data = measure_board_rms(control_board)<EOL>return lower, data<EOL>
Use a binary search over the range of provided actuation_steps to find the maximum actuation voltage that is still measured by the board feedback circuit using the specified feedback resistor.
f7275:m1
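The loop above is a standard bisection over a monotonic predicate: the lower bound always indexes a voltage the requested resistor can measure, the upper bound one it cannot. A self-contained sketch of the same pattern, with a hypothetical ``saturates`` predicate standing in for the hardware measurement:

    def max_safe_index(steps, saturates):
        # Largest index whose step does not saturate, assuming saturation
        # is monotonic in the step value.
        lower, upper = 0, len(steps) - 1
        while lower < upper - 1:
            mid = lower + (upper - lower) // 2
            if saturates(steps[mid]):
                upper = mid
            else:
                lower = mid
        return lower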
def resistor_max_actuation_readings(control_board, frequencies,<EOL>oscope_reading_func):
<EOL>control_board.set_waveform_voltage(<NUM_LIT:0>)<EOL>control_board.auto_adjust_amplifier_gain = False<EOL>control_board.amplifier_gain = <NUM_LIT:1.><EOL>target_voltage = <NUM_LIT:0.1><EOL>control_board.set_waveform_voltage(target_voltage)<EOL>oscope_rms = oscope_reading_func()<EOL>estimated_amplifier_gain = oscope_rms / target_voltage<EOL>max_post_gain_V = <NUM_LIT> * control_board.max_waveform_voltage<EOL>max_actuation_V = max_post_gain_V / estimated_amplifier_gain<EOL>actuation_steps = np.linspace(<NUM_LIT>, max_actuation_V, num=<NUM_LIT:50>)<EOL>resistor_count = len(control_board.a0_series_resistance)<EOL>conditions = pd.DataFrame([[r, f] for r in range(resistor_count - <NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>)<EOL>for f in frequencies],<EOL>columns=['<STR_LIT>', '<STR_LIT>'])<EOL>def max_actuation_reading(x):<EOL><INDENT>'''<STR_LIT>'''<EOL>r = x['<STR_LIT>'].values[<NUM_LIT:0>]<EOL>f = x['<STR_LIT>'].values[<NUM_LIT:0>]<EOL>control_board.set_waveform_frequency(f)<EOL>actuation_index, data = find_good(control_board, actuation_steps, r, <NUM_LIT:0>,<EOL>len(actuation_steps) - <NUM_LIT:1>)<EOL>board_measured_rms = data.loc[data['<STR_LIT>'] >= <NUM_LIT:0>,<EOL>'<STR_LIT>'].mean()<EOL>oscope_rms = oscope_reading_func()<EOL>print('<STR_LIT>' % (r, f))<EOL>return pd.DataFrame([[r, f, actuation_index, board_measured_rms,<EOL>oscope_rms]],<EOL>columns=['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>'])<EOL><DEDENT>return (conditions.groupby(['<STR_LIT>', '<STR_LIT>'])<EOL>.apply(max_actuation_reading).reset_index(drop=True))<EOL>
For each resistor in the high-voltage feedback resistor bank, read the board-measured voltage and the oscilloscope-measured voltage for an actuation voltage that nearly saturates the feedback resistor. By searching for an actuation voltage near saturation, the signal-to-noise ratio is maximized.
f7275:m2
def fit_feedback_params(calibration, max_resistor_readings):
R1 = <NUM_LIT><EOL>def fit_resistor_params(x):<EOL><INDENT>resistor_index = x['<STR_LIT>'].values[<NUM_LIT:0>]<EOL>p0 = [calibration.R_hv[resistor_index],<EOL>calibration.C_hv[resistor_index]]<EOL>def error(p, df, R1):<EOL><INDENT>v1 = compute_from_transfer_function(calibration.hw_version.major,<EOL>'<STR_LIT>',<EOL>V2=df['<STR_LIT>'],<EOL>R1=R1, R2=p[<NUM_LIT:0>], C2=p[<NUM_LIT:1>],<EOL>f=df['<STR_LIT>'].values)<EOL>e = df['<STR_LIT>'] - v1<EOL>return e<EOL><DEDENT>p1, success = optimize.leastsq(error, p0, args=(x, R1))<EOL>p1 = np.abs(p1)<EOL>return pd.DataFrame([p0 + p1.tolist()],<EOL>columns=['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>']).T<EOL><DEDENT>results = (max_resistor_readings<EOL>[max_resistor_readings['<STR_LIT>'] >= <NUM_LIT:0>]<EOL>.groupby(['<STR_LIT>']).apply(fit_resistor_params))<EOL>data = results.unstack()<EOL>data.columns = data.columns.droplevel()<EOL>return data<EOL>
Fit model of control board high-voltage feedback resistor and parasitic capacitance values based on measured voltage readings.
f7275:m3
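A sketch of the overall high-voltage calibration sequence these two functions support, assuming ``control_board`` and ``calibration`` objects and a user-supplied ``read_oscilloscope`` callable (all hypothetical here):

    import numpy as np

    frequencies = np.logspace(2, 4, 10)  # e.g. a 100 Hz to 10 kHz sweep
    readings = resistor_max_actuation_readings(control_board, frequencies,
                                               read_oscilloscope)
    fitted = fit_feedback_params(calibration, readings)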