sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def local_ip():
"""Get the local network IP of this machine"""
ip = socket.gethostbyname(socket.gethostname())
if ip.startswith('127.'):
# Check eth0, eth1, eth2, en0, ...
interfaces = [
i + str(n) for i in ("eth", "en", "wlan") for n in xrange(3)
] # :(
for interface in interfaces:
try:
ip = interface_ip(interface)
break
except IOError:
pass
return ip | Get the local network IP of this machine | entailment |
def client_for(service, service_module, thrift_service_name=None):
"""Build a client class for the given Thrift service.
The generated class accepts a TChannel and an optional hostport as
initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered with
Hyperbahn under the name "comment", here's how this may be used:
.. code-block:: python
from comment import CommentService
CommentServiceClient = client_for("comment", CommentService)
@gen.coroutine
def post_comment(articleId, msg, hostport=None):
client = CommentServiceClient(tchannel, hostport)
yield client.postComment(articleId, CommentService.Comment(msg))
:param service:
Name of the Hyperbahn service being called. This is the name with
which the service registered with Hyperbahn.
:param service_module:
The Thrift-generated module for that service. This usually has the
same name as defined for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use this
parameter to specify it.
:returns:
An object with the same interface as the service that uses the given
TChannel to call the service.
"""
assert service_module, 'service_module is required'
service = service or '' # may be blank for non-hyperbahn use cases
if not thrift_service_name:
thrift_service_name = service_module.__name__.rsplit('.', 1)[-1]
method_names = get_service_methods(service_module.Iface)
def new(cls, tchannel, hostport=None, trace=False, protocol_headers=None):
"""
:param tchannel:
TChannel through which the requests will be sent.
:param hostport:
Address of the machine to which the requests will be sent, or None
if the TChannel will do peer selection on a per-request basis.
:param trace:
Whether tracing is enabled.
:param protocol_headers:
Protocol-level headers to send with the request.
"""
protocol_headers = protocol_headers or {}
protocol_headers['as'] = 'thrift'
return _ClientBase.__new__(
cls, tchannel, hostport, service, trace, protocol_headers
)
new.__name__ = '__new__'
methods = {'__new__': new}
for method_name in method_names:
methods[method_name] = generate_method(
service_module, thrift_service_name, method_name
)
return type(thrift_service_name + 'Client', (_ClientBase,), methods) | Build a client class for the given Thrift service.
The generated class accepts a TChannel and an optional hostport as
initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered with
Hyperbahn under the name "comment", here's how this may be used:
.. code-block:: python
from comment import CommentService
CommentServiceClient = client_for("comment", CommentService)
@gen.coroutine
def post_comment(articleId, msg, hostport=None):
client = CommentServiceClient(tchannel, hostport)
yield client.postComment(articleId, CommentService.Comment(msg))
:param service:
Name of the Hyperbahn service being called. This is the name with
which the service registered with Hyperbahn.
:param service_module:
The Thrift-generated module for that service. This usually has the
same name as defined for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use this
parameter to specify it.
:returns:
An object with the same interface as the service that uses the given
TChannel to call the service. | entailment |
def generate_method(service_module, service_name, method_name):
"""Generate a method for the given Thrift service.
:param service_module:
Thrift-generated service module
:param service_name:
Name of the Thrift service
:param method_name:
Method being called
"""
assert service_module
assert service_name
assert method_name
args_type = getattr(service_module, method_name + '_args')
result_type = getattr(service_module, method_name + '_result', None)
serializer = ThriftSerializer(result_type)
# oneway not currently supported
# TODO - write test for this
if result_type is None:
def not_supported(self, *args, **kwags):
raise OneWayNotSupportedError(
'TChannel+Thrift does not currently support oneway procedues'
)
return not_supported
result_spec = result_type.thrift_spec
# result_spec is a tuple of tuples in the form:
#
# (fieldId, fieldType, fieldName, ...)
#
# Where "..." is other information we don't care about right now.
#
# result_spec will be empty if there is no return value or exception for
# the method.
#
# Its first element, with field ID 0, contains the spec for the return
# value. It is None if the result type is void but the method may still
# throw exceptions.
#
# Elements after the first one are specs for the exceptions.
endpoint = '%s::%s' % (service_name, method_name)
@gen.coroutine
def send(self, *args, **kwargs):
params = inspect.getcallargs(
getattr(service_module.Iface, method_name), self, *args, **kwargs
)
params.pop('self') # self is already known
# $methodName_args is the implicit struct containing the various
# method parameters.
call_args = args_type()
for name, value in params.items():
setattr(call_args, name, value)
tracer = tracing.ClientTracer(channel=self.tchannel)
span, headers = tracer.start_span(
service=service_name, endpoint=method_name, headers={}
)
body = serializer.serialize_body(call_args)
header = serializer.serialize_header(headers)
# Glue for old API.
if hasattr(self.tchannel, 'request'):
tracing.apply_trace_flag(span, self.trace, True)
with span:
response = yield self.tchannel.request(
hostport=self.hostport, service=self.service
).send(
arg1=endpoint,
arg2=header,
arg3=body, # body
headers=self.protocol_headers,
)
body = yield response.get_body()
else:
with span:
response = yield self.tchannel.call(
scheme=schemes.THRIFT,
service=self.service,
arg1=endpoint,
arg2=header,
arg3=body,
hostport=self.hostport,
trace=self.trace,
tracing_span=span
# TODO: Need to handle these!
# headers=self.protocol_headers,
)
body = response.body
call_result = serializer.deserialize_body(body)
if not result_spec:
# void return type and no exceptions allowed
raise gen.Return(None)
for exc_spec in result_spec[1:]:
# May have failed with an exception
exc = getattr(call_result, exc_spec[2])
if exc is not None:
raise exc
if result_spec[0]:
# Non-void return type. Return the result.
success = getattr(call_result, result_spec[0][2])
if success is not None:
raise gen.Return(success)
else:
# No return type specified and no exceptions raised.
raise gen.Return(None)
# Expected a result but nothing was present in the object. Something
# went wrong.
from thrift import Thrift
raise Thrift.TApplicationException(
Thrift.TApplicationException.MISSING_RESULT,
'%s failed: did not receive a result as expected' % method_name
)
# TODO: We should probably throw a custom exception instead.
send.__name__ = method_name
return send | Generate a method for the given Thrift service.
:param service_module:
Thrift-generated service module
:param service_name:
Name of the Thrift service
:param method_name:
Method being called | entailment |
def connect(self):
"""Get a connection to this peer.
If an connection to the peer already exists (either incoming or
outgoing), that's returned. Otherwise, a new outgoing connection to
this peer is created.
:return:
A future containing a connection to this host.
"""
# Prefer incoming connections over outgoing connections.
if self.connections:
# First value is an incoming connection
future = gen.Future()
future.set_result(self.connections[0])
return future
if self._connecting:
# If we're in the process of connecting to the peer, just wait
# and re-use that connection.
return self._connecting
conn_future = self._connecting = self.connection_class.outgoing(
hostport=self.hostport,
process_name=self.tchannel.process_name,
serve_hostport=self.tchannel.hostport,
handler=self.tchannel.receive_call,
tchannel=self.tchannel,
)
def on_connect(_):
if not conn_future.exception():
# We don't actually need to handle the exception. That's on
# the caller.
connection = conn_future.result()
self.register_outgoing_conn(connection)
self._connecting = None
conn_future.add_done_callback(on_connect)
return conn_future | Get a connection to this peer.
If an connection to the peer already exists (either incoming or
outgoing), that's returned. Otherwise, a new outgoing connection to
this peer is created.
:return:
A future containing a connection to this host. | entailment |
def register_outgoing_conn(self, conn):
"""Add outgoing connection into the heap."""
assert conn, "conn is required"
conn.set_outbound_pending_change_callback(self._on_conn_change)
self.connections.append(conn)
self._set_on_close_cb(conn)
self._on_conn_change() | Add outgoing connection into the heap. | entailment |
def register_incoming_conn(self, conn):
"""Add incoming connection into the heap."""
assert conn, "conn is required"
conn.set_outbound_pending_change_callback(self._on_conn_change)
self.connections.appendleft(conn)
self._set_on_close_cb(conn)
self._on_conn_change() | Add incoming connection into the heap. | entailment |
def outgoing_connections(self):
"""Returns a list of all outgoing connections for this peer."""
# Outgoing connections are on the right
return list(
dropwhile(lambda c: c.direction != OUTGOING, self.connections)
) | Returns a list of all outgoing connections for this peer. | entailment |
def incoming_connections(self):
"""Returns a list of all incoming connections for this peer."""
# Incoming connections are on the left.
return list(
takewhile(lambda c: c.direction == INCOMING, self.connections)
) | Returns a list of all incoming connections for this peer. | entailment |
def _get_peer_connection(self, blacklist=None):
"""Find a peer and connect to it.
Returns a ``(peer, connection)`` tuple.
Raises ``NoAvailablePeerError`` if no healthy peers are found.
:param blacklist:
If given, a set of hostports for peers that we must not try.
"""
blacklist = blacklist or set()
peer = None
connection = None
while connection is None:
peer = self._choose(blacklist)
if not peer:
raise NoAvailablePeerError(
"Can't find an available peer for '%s'" % self.service
)
try:
connection = yield peer.connect()
except NetworkError as e:
log.info(
'Failed to connect to %s. Trying a different host.',
peer.hostport,
exc_info=e,
)
connection = None
blacklist.add(peer.hostport)
raise gen.Return((peer, connection)) | Find a peer and connect to it.
Returns a ``(peer, connection)`` tuple.
Raises ``NoAvailablePeerError`` if no healthy peers are found.
:param blacklist:
If given, a set of hostports for peers that we must not try. | entailment |
def send(
self, arg1, arg2, arg3,
headers=None,
retry_limit=None,
ttl=None,
):
"""Make a request to the Peer.
:param arg1:
String or Stream containing the contents of arg1. If None, an empty
stream is used.
:param arg2:
String or Stream containing the contents of arg2. If None, an empty
stream is used.
:param arg3:
String or Stream containing the contents of arg3. If None, an empty
stream is used.
:param headers:
Headers will be put in the message as protocol header.
:param retry_limit:
Maximum number of retries will perform on the message. If the number
is 0, it means no retry.
:param ttl:
Timeout for each request (second).
:return:
Future that contains the response from the peer.
"""
# find a peer connection
# If we can't find available peer at the first time, we throw
# NoAvailablePeerError. Later during retry, if we can't find available
# peer, we throw exceptions from retry not NoAvailablePeerError.
peer, connection = yield self._get_peer_connection()
arg1, arg2, arg3 = (
maybe_stream(arg1), maybe_stream(arg2), maybe_stream(arg3)
)
if retry_limit is None:
retry_limit = DEFAULT_RETRY_LIMIT
ttl = ttl or DEFAULT_TIMEOUT
# hack to get endpoint from arg_1 for trace name
arg1.close()
endpoint = yield read_full(arg1)
# set default transport headers
headers = headers or {}
for k, v in self.headers.iteritems():
headers.setdefault(k, v)
if self.tracing_span is None:
tracer = ClientTracer(channel=self.tchannel)
self.tracing_span, _ = tracer.start_span(
service=self.service, endpoint=endpoint,
hostport=self._hostport, encoding=self.headers.get('as')
)
request = Request(
service=self.service,
argstreams=[InMemStream(endpoint), arg2, arg3],
id=connection.writer.next_message_id(),
headers=headers,
endpoint=endpoint,
ttl=ttl,
tracing=tracing.span_to_tracing_field(self.tracing_span)
)
# only retry on non-stream request
if request.is_streaming_request or self._hostport:
retry_limit = 0
if request.is_streaming_request:
request.ttl = 0
try:
with self.tracing_span: # to ensure span is finished
response = yield self.send_with_retry(
request, peer, retry_limit, connection
)
except Exception as e:
# event: on_exception
exc_info = sys.exc_info()
yield self.tchannel.event_emitter.fire(
EventType.on_exception, request, e,
)
six.reraise(*exc_info)
log.debug("Got response %s", response)
raise gen.Return(response) | Make a request to the Peer.
:param arg1:
String or Stream containing the contents of arg1. If None, an empty
stream is used.
:param arg2:
String or Stream containing the contents of arg2. If None, an empty
stream is used.
:param arg3:
String or Stream containing the contents of arg3. If None, an empty
stream is used.
:param headers:
Headers will be put in the message as protocol header.
:param retry_limit:
Maximum number of retries will perform on the message. If the number
is 0, it means no retry.
:param ttl:
Timeout for each request (second).
:return:
Future that contains the response from the peer. | entailment |
def clear(self):
"""Reset this PeerGroup.
This closes all connections to all known peers and forgets about
these peers.
:returns:
A Future that resolves with a value of None when the operation
has finished
"""
try:
for peer in self._peers.values():
peer.close()
finally:
self._peers = {}
self._resetting = False | Reset this PeerGroup.
This closes all connections to all known peers and forgets about
these peers.
:returns:
A Future that resolves with a value of None when the operation
has finished | entailment |
def remove(self, hostport):
"""Delete the Peer for the given host port.
Does nothing if a matching Peer does not exist.
:returns: The removed Peer
"""
assert hostport, "hostport is required"
peer = self._peers.pop(hostport, None)
peer_in_heap = peer and peer.index != -1
if peer_in_heap:
self.peer_heap.remove_peer(peer)
return peer | Delete the Peer for the given host port.
Does nothing if a matching Peer does not exist.
:returns: The removed Peer | entailment |
def get(self, hostport):
"""Get a Peer for the given destination.
A new Peer is added to the peer heap and returned if one does
not already exist for the given host-port. Otherwise, the
existing Peer is returned.
"""
assert hostport, "hostport is required"
assert isinstance(hostport, basestring), "hostport must be a string"
if hostport not in self._peers:
self._add(hostport)
return self._peers[hostport] | Get a Peer for the given destination.
A new Peer is added to the peer heap and returned if one does
not already exist for the given host-port. Otherwise, the
existing Peer is returned. | entailment |
def _add(self, hostport):
"""Creates a peer from the hostport and adds it to the peer heap"""
peer = self.peer_class(
tchannel=self.tchannel,
hostport=hostport,
on_conn_change=self._update_heap,
)
peer.rank = self.rank_calculator.get_rank(peer)
self._peers[peer.hostport] = peer
self.peer_heap.add_and_shuffle(peer) | Creates a peer from the hostport and adds it to the peer heap | entailment |
def _update_heap(self, peer):
"""Recalculate the peer's rank and update itself in the peer heap."""
rank = self.rank_calculator.get_rank(peer)
if rank == peer.rank:
return
peer.rank = rank
self.peer_heap.update_peer(peer) | Recalculate the peer's rank and update itself in the peer heap. | entailment |
def _get_isolated(self, hostport):
"""Get a Peer for the given destination for a request.
A new Peer is added and returned if one does not already exist for the
given host-port. Otherwise, the existing Peer is returned.
**NOTE** new peers will not be added to the peer heap.
"""
assert hostport, "hostport is required"
if hostport not in self._peers:
# Add a peer directly from a hostport, do NOT add it to the peer
# heap
peer = self.peer_class(
tchannel=self.tchannel,
hostport=hostport,
)
self._peers[peer.hostport] = peer
return self._peers[hostport] | Get a Peer for the given destination for a request.
A new Peer is added and returned if one does not already exist for the
given host-port. Otherwise, the existing Peer is returned.
**NOTE** new peers will not be added to the peer heap. | entailment |
def request(self, service, hostport=None, **kwargs):
"""Initiate a new request through this PeerGroup.
:param hostport:
If specified, requests will be sent to the specific host.
Otherwise, a known peer will be picked at random.
:param service:
Name of the service being called. Defaults to an empty string.
"""
return PeerClientOperation(
peer_group=self,
service=service,
hostport=hostport,
**kwargs) | Initiate a new request through this PeerGroup.
:param hostport:
If specified, requests will be sent to the specific host.
Otherwise, a known peer will be picked at random.
:param service:
Name of the service being called. Defaults to an empty string. | entailment |
def choose(self, hostport=None, blacklist=None):
"""Choose a Peer that matches the given criteria.
:param hostport:
Specifies that the returned Peer must be for the given host-port.
Without this, all peers managed by this PeerGroup are
candidates.
:param blacklist:
Peers on the blacklist won't be chosen.
:returns:
A Peer that matches all the requested criteria or None if no such
Peer was found.
"""
blacklist = blacklist or set()
if hostport:
return self._get_isolated(hostport)
return self.peer_heap.smallest_peer(
(lambda p: p.hostport not in blacklist and not p.is_ephemeral),
) | Choose a Peer that matches the given criteria.
:param hostport:
Specifies that the returned Peer must be for the given host-port.
Without this, all peers managed by this PeerGroup are
candidates.
:param blacklist:
Peers on the blacklist won't be chosen.
:returns:
A Peer that matches all the requested criteria or None if no such
Peer was found. | entailment |
def fail_to(future):
"""A decorator for function callbacks to catch uncaught non-async
exceptions and forward them to the given future.
The primary use for this is to catch exceptions in async callbacks and
propagate them to futures. For example, consider,
.. code-block:: python
answer = Future()
def on_done(future):
foo = bar()
answer.set_result(foo)
some_async_operation().add_done_callback(on_done)
If ``bar()`` fails, ``answer`` will never get filled with an exception or
a result. Now if we change ``on_done`` to,
.. code-block:: python
@fail_to(answer)
def on_done(future):
foo = bar()
answer.set_result(foo)
Uncaught exceptions in ``on_done`` will be caught and propagated to
``answer``. Note that ``on_done`` will return None if an exception was
caught.
:param answer:
Future to which the result will be written.
"""
assert is_future(future), 'you forgot to pass a future'
def decorator(f):
@wraps(f)
def new_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception:
future.set_exc_info(sys.exc_info())
return new_f
return decorator | A decorator for function callbacks to catch uncaught non-async
exceptions and forward them to the given future.
The primary use for this is to catch exceptions in async callbacks and
propagate them to futures. For example, consider,
.. code-block:: python
answer = Future()
def on_done(future):
foo = bar()
answer.set_result(foo)
some_async_operation().add_done_callback(on_done)
If ``bar()`` fails, ``answer`` will never get filled with an exception or
a result. Now if we change ``on_done`` to,
.. code-block:: python
@fail_to(answer)
def on_done(future):
foo = bar()
answer.set_result(foo)
Uncaught exceptions in ``on_done`` will be caught and propagated to
``answer``. Note that ``on_done`` will return None if an exception was
caught.
:param answer:
Future to which the result will be written. | entailment |
def get_arg(context, index):
"""get value from arg stream in async way"""
if index < len(context.argstreams):
arg = ""
chunk = yield context.argstreams[index].read()
while chunk:
arg += chunk
chunk = yield context.argstreams[index].read()
raise tornado.gen.Return(arg)
else:
raise TChannelError() | get value from arg stream in async way | entailment |
def put(self, value):
"""Puts an item into the queue.
Returns a Future that resolves to None once the value has been
accepted by the queue.
"""
io_loop = IOLoop.current()
new_hole = Future()
new_put = Future()
new_put.set_result(new_hole)
with self._lock:
self._put, put = new_put, self._put
answer = Future()
def _on_put(future):
if future.exception(): # pragma: no cover (never happens)
return answer.set_exc_info(future.exc_info())
old_hole = put.result()
old_hole.set_result(Node(value, new_hole))
answer.set_result(None)
io_loop.add_future(put, _on_put)
return answer | Puts an item into the queue.
Returns a Future that resolves to None once the value has been
accepted by the queue. | entailment |
def get_nowait(self):
"""Returns a value from the queue without waiting.
Raises ``QueueEmpty`` if no values are available right now.
"""
new_get = Future()
with self._lock:
if not self._get.done():
raise QueueEmpty
get, self._get = self._get, new_get
hole = get.result()
if not hole.done():
# Restore the unfinished hole.
new_get.set_result(hole)
raise QueueEmpty
node = hole.result()
value = node.value
new_hole, node.next = node.next, None
new_get.set_result(new_hole)
return value | Returns a value from the queue without waiting.
Raises ``QueueEmpty`` if no values are available right now. | entailment |
def get(self):
"""Gets the next item from the queue.
Returns a Future that resolves to the next item once it is available.
"""
io_loop = IOLoop.current()
new_get = Future()
with self._lock:
get, self._get = self._get, new_get
answer = Future()
def _on_node(future):
if future.exception(): # pragma: no cover (never happens)
return answer.set_exc_info(future.exc_info())
node = future.result()
value = node.value
new_hole, node.next = node.next, None
new_get.set_result(new_hole)
answer.set_result(value)
def _on_get(future):
if future.exception(): # pragma: no cover (never happens)
return answer.set_exc_info(future.exc_info())
hole = future.result()
io_loop.add_future(hole, _on_node)
io_loop.add_future(get, _on_get)
return answer | Gets the next item from the queue.
Returns a Future that resolves to the next item once it is available. | entailment |
def fragment(self, space_left, fragment_msg):
"""Streaming Message got fragmented based on
payload size. All the data within space_left
will be kept. All the rest will be shifted to
next fragment message.
:param space_left:
space left for current frame
:param fragment_msg:
the type is either CallRequestMessage or
CallResponseMessage
:return: None if there is space left
or next fragment message
"""
new_args = []
key_length = 2 # 2bytes for size
for i, arg in enumerate(self.args):
if space_left >= key_length:
space_left -= key_length
if arg is not None:
arg_length = len(arg)
if space_left < arg_length:
fragment_msg.args.append(arg[space_left:])
new_args.append(arg[:space_left])
space_left = 0
else:
new_args.append(arg)
space_left -= arg_length
if space_left <= key_length:
# boundary for arg
fragment_msg.args.append("")
else:
new_args.append("")
else:
for l in range(i, len(self.args)):
fragment_msg.args.append(self.args[l])
break
self.args = new_args
if space_left >= 0 and len(fragment_msg.args) == 0:
# don't need to fragment any more
return None
else:
self.flags = FlagsType.fragment
fragment_msg.id = self.id
return fragment_msg | Streaming Message got fragmented based on
payload size. All the data within space_left
will be kept. All the rest will be shifted to
next fragment message.
:param space_left:
space left for current frame
:param fragment_msg:
the type is either CallRequestMessage or
CallResponseMessage
:return: None if there is space left
or next fragment message | entailment |
def response_from_mixed(mixed):
"""Create Response from mixed input."""
# if none then give empty Response
if mixed is None:
return Response()
# if not Response, then treat like body
if not isinstance(mixed, Response):
return Response(mixed)
# it's already a Response
return mixed | Create Response from mixed input. | entailment |
def register_hook(self, hook, event_type=None):
"""
If ``event_type`` is provided, then ``hook`` will be called whenever
that event is fired.
If no ``event_type`` is specifid, but ``hook`` implements any methods
with names matching an event hook, then those will be registered with
their corresponding events. This allows for more stateful, class-based
event handlers.
"""
if event_type is not None:
assert type(event_type) is int, "register hooks with int values"
return self.hooks[event_type].append(hook)
for event_type in EventType._fields:
func = getattr(hook, event_type, None)
if callable(func):
event_value = getattr(EventType, event_type)
self.register_hook(func, event_value) | If ``event_type`` is provided, then ``hook`` will be called whenever
that event is fired.
If no ``event_type`` is specifid, but ``hook`` implements any methods
with names matching an event hook, then those will be registered with
their corresponding events. This allows for more stateful, class-based
event handlers. | entailment |
def init(h):
"""Initialize existing object into the heap."""
# heapify
n = h.size()
for i in six.moves.range(int(math.floor(n/2)) - 1, -1, -1):
down(h, i, n) | Initialize existing object into the heap. | entailment |
def push(h, x):
"""Push a new value into heap."""
h.push(x)
up(h, h.size()-1) | Push a new value into heap. | entailment |
def pop(h):
"""Pop the heap value from the heap."""
n = h.size() - 1
h.swap(0, n)
down(h, 0, n)
return h.pop() | Pop the heap value from the heap. | entailment |
def remove(h, i):
"""Remove the item at position i of the heap."""
n = h.size() - 1
if n != i:
h.swap(i, n)
down(h, i, n)
up(h, i)
return h.pop() | Remove the item at position i of the heap. | entailment |
def fix(h, i):
"""Rearrange the heap after the item at position i got updated."""
down(h, i, h.size())
up(h, i) | Rearrange the heap after the item at position i got updated. | entailment |
def smallest(heap, predicate):
"""Finds the index of the smallest item in the heap that matches the given
predicate.
:param heap:
Heap on which this search is being performed.
:param predicate:
Function that accepts an item from the heap and returns true or false.
:returns:
Index of the first item for which ``predicate`` returned true.
:raises NoMatchError:
If no matching items were found.
"""
n = heap.size()
# items contains indexes of items yet to be checked.
items = deque([0])
while items:
current = items.popleft()
if current >= n:
continue
if predicate(heap.peek(current)):
return current
child1 = 2 * current + 1
child2 = child1 + 1
if child1 < n and child2 < n and heap.lt(child2, child1):
# make sure we check the smaller child first.
child1, child2 = child2, child1
if child1 < n:
items.append(child1)
if child2 < n:
items.append(child2)
raise NoMatchError() | Finds the index of the smallest item in the heap that matches the given
predicate.
:param heap:
Heap on which this search is being performed.
:param predicate:
Function that accepts an item from the heap and returns true or false.
:returns:
Index of the first item for which ``predicate`` returned true.
:raises NoMatchError:
If no matching items were found. | entailment |
def span_to_tracing_field(span):
"""
Inject the span into Trace field, if Zipkin format is supported
:param span: OpenTracing Span
"""
if span is None:
return common.random_tracing()
# noinspection PyBroadException
try:
carrier = {}
span.tracer.inject(span, ZIPKIN_SPAN_FORMAT, carrier)
tracing = Tracing(span_id=carrier['span_id'],
trace_id=carrier['trace_id'],
parent_id=carrier['parent_id'] or 0L,
traceflags=carrier['traceflags'])
return tracing
except opentracing.UnsupportedFormatException:
pass # tracer might not support Zipkin format
except:
log.exception('Failed to inject tracing span into headers')
return common.random_tracing() | Inject the span into Trace field, if Zipkin format is supported
:param span: OpenTracing Span | entailment |
def apply_trace_flag(span, trace, default_trace):
"""
If ``trace`` (or ``default_trace``) is False, disables tracing on ``span``.
:param span:
:param trace:
:param default_trace:
:return:
"""
if trace is None:
trace = default_trace
trace = trace() if callable(trace) else trace
if trace is False and span:
span.set_tag(tags.SAMPLING_PRIORITY, 0) | If ``trace`` (or ``default_trace``) is False, disables tracing on ``span``.
:param span:
:param trace:
:param default_trace:
:return: | entailment |
def start_basic_span(self, request):
"""
Start tracing span from the protocol's `tracing` fields.
This will only work if the `tracer` supports Zipkin-style span context.
:param request: inbound request
:type request: tchannel.tornado.request.Request
"""
# noinspection PyBroadException
try:
# Currently Java does not populate Tracing field, so do not
# mistaken it for a real trace ID.
if request.tracing.trace_id:
context = self.tracer.extract(
format=ZIPKIN_SPAN_FORMAT,
carrier=request.tracing)
self.span = self.tracer.start_span(
operation_name=request.endpoint,
child_of=context,
tags={tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER},
)
except opentracing.UnsupportedFormatException:
pass # tracer might not support Zipkin format
except:
log.exception('Cannot extract tracing span from Trace field') | Start tracing span from the protocol's `tracing` fields.
This will only work if the `tracer` supports Zipkin-style span context.
:param request: inbound request
:type request: tchannel.tornado.request.Request | entailment |
def start_span(self, request, headers, peer_host, peer_port):
"""
Start a new server-side span. If the span has already been started
by `start_basic_span`, this method only adds baggage from the headers.
:param request: inbound tchannel.tornado.request.Request
:param headers: dictionary containing parsed application headers
:return:
"""
parent_context = None
# noinspection PyBroadException
try:
if headers and hasattr(headers, 'iteritems'):
tracing_headers = {
k[len(TRACING_KEY_PREFIX):]: v
for k, v in headers.iteritems()
if k.startswith(TRACING_KEY_PREFIX)
}
parent_context = self.tracer.extract(
format=opentracing.Format.TEXT_MAP,
carrier=tracing_headers
)
if self.span and parent_context:
# we already started a span from Tracing fields,
# so only copy baggage from the headers.
for k, v in parent_context.baggage.iteritems():
self.span.set_baggage_item(k, v)
except:
log.exception('Cannot extract tracing span from headers')
if self.span is None:
self.span = self.tracer.start_span(
operation_name=request.endpoint,
child_of=parent_context,
tags={tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER},
)
if 'cn' in request.headers:
self.span.set_tag(tags.PEER_SERVICE, request.headers['cn'])
if peer_host:
self.span.set_tag(tags.PEER_HOST_IPV4, peer_host)
if peer_port:
self.span.set_tag(tags.PEER_PORT, peer_port)
if 'as' in request.headers:
self.span.set_tag('as', request.headers['as'])
return self.span | Start a new server-side span. If the span has already been started
by `start_basic_span`, this method only adds baggage from the headers.
:param request: inbound tchannel.tornado.request.Request
:param headers: dictionary containing parsed application headers
:return: | entailment |
def from_code(cls, code, **kw):
"""Construct a ``TChannelError`` instance from an error code.
This will return the appropriate class type for the given code.
"""
return {
TIMEOUT: TimeoutError,
CANCELED: CanceledError,
BUSY: BusyError,
DECLINED: DeclinedError,
UNEXPECTED_ERROR: UnexpectedError,
BAD_REQUEST: BadRequestError,
NETWORK_ERROR: NetworkError,
UNHEALTHY: UnhealthyError,
FATAL: FatalProtocolError,
}[code](**kw) | Construct a ``TChannelError`` instance from an error code.
This will return the appropriate class type for the given code. | entailment |
def serve():
"""main entry point"""
logging.getLogger().setLevel(logging.DEBUG)
logging.info('Python Tornado Crossdock Server Starting ...')
tracer = Tracer(
service_name='python',
reporter=NullReporter(),
sampler=ConstSampler(decision=True))
opentracing.tracer = tracer
tchannel = TChannel(name='python', hostport=':%d' % DEFAULT_SERVER_PORT,
trace=True)
register_tchannel_handlers(tchannel=tchannel)
tchannel.listen()
app = tornado.web.Application(debug=True)
register_http_handlers(app)
app.listen(DEFAULT_CLIENT_PORT)
tornado.ioloop.IOLoop.current().start() | main entry point | entailment |
def register(dispatcher, service_module, handler, method=None, service=None):
"""Registers a Thrift service method with the given RequestDispatcher.
.. code-block:: python
# For,
#
# service HelloWorld { string hello(1: string name); }
import tchannel.thrift
import HelloWorld
def hello(request, response):
name = request.args.name
response.write_result("Hello, %s" % name)
dispatcher = RequestDispatcher()
tchannel.thrift.register(dispatcher, HelloWorld, hello)
:param dispatcher:
TChannel dispatcher with which the Thrift service will be registered.
:param service_module:
The service module generated by Thrift. This module contains the
service ``Iface``, ``Client``, ``Processor``, etc. classes.
:param handler:
A function implementing the request handler. The function must accept
a ``request``, a ``response``, and a ``tchannel``.
:param service:
Thrift service name. This is the `service` name specified in the
Thrift IDL. If omitted, it is automatically determined based on the
name of ``service_module``.
:param method:
Name of the method. Defaults to the name of the ``handler`` function.
"""
if not service:
service = service_module.__name__.rsplit('.', 1)[-1]
if not method:
method = handler.__name__
assert service, 'A service name could not be determined'
assert method, 'A method name could not be determined'
assert hasattr(service_module.Iface, method), (
"Service %s doesn't define method %s" % (service, method)
)
assert hasattr(service_module, method + '_result'), (
"oneway methods are not yet supported"
)
endpoint = '%s::%s' % (service, method)
args_type = getattr(service_module, method + '_args')
result_type = getattr(service_module, method + '_result')
# if the dispatcher is set to deal with handlers that
# return responses, then use new api, else use deprecated
if dispatcher._handler_returns_response:
new_handler = build_handler(result_type, handler)
else:
new_handler = deprecated_build_handler(result_type, handler)
dispatcher.register(
endpoint,
new_handler,
ThriftSerializer(args_type),
ThriftSerializer(result_type)
)
return handler | Registers a Thrift service method with the given RequestDispatcher.
.. code-block:: python
# For,
#
# service HelloWorld { string hello(1: string name); }
import tchannel.thrift
import HelloWorld
def hello(request, response):
name = request.args.name
response.write_result("Hello, %s" % name)
dispatcher = RequestDispatcher()
tchannel.thrift.register(dispatcher, HelloWorld, hello)
:param dispatcher:
TChannel dispatcher with which the Thrift service will be registered.
:param service_module:
The service module generated by Thrift. This module contains the
service ``Iface``, ``Client``, ``Processor``, etc. classes.
:param handler:
A function implementing the request handler. The function must accept
a ``request``, a ``response``, and a ``tchannel``.
:param service:
Thrift service name. This is the `service` name specified in the
Thrift IDL. If omitted, it is automatically determined based on the
name of ``service_module``.
:param method:
Name of the method. Defaults to the name of the ``handler`` function. | entailment |
def write_result(self, result):
"""Send back the result of this call.
Only one of this and `write_exc_info` may be called.
:param result:
Return value of the call
"""
assert not self.finished, "Already sent a response"
if not self.result.thrift_spec:
self.finished = True
return
spec = self.result.thrift_spec[0]
if result is not None:
assert spec, "Tried to return a result for a void method."
setattr(self.result, spec[2], result)
self.finished = True | Send back the result of this call.
Only one of this and `write_exc_info` may be called.
:param result:
Return value of the call | entailment |
def write_exc_info(self, exc_info=None):
"""Write exception information to the response.
Only one of this and ``write_result`` may be called.
:param exc_info:
3-tuple of exception information. If omitted, the last exception
will be retrieved using ``sys.exc_info()``.
"""
exc_info = exc_info or sys.exc_info()
exc = exc_info[1]
self.code = StatusCode.error
for spec in self.result.thrift_spec[1:]:
if spec and isinstance(exc, spec[3][0]):
assert not self.finished, "Already sent a response"
setattr(self.result, spec[2], exc)
self.finished = True
return
# Re-raise the exception (with the same traceback) if it didn't match.
raise exc_info[0], exc_info[1], exc_info[2] | Write exception information to the response.
Only one of this and ``write_result`` may be called.
:param exc_info:
3-tuple of exception information. If omitted, the last exception
will be retrieved using ``sys.exc_info()``. | entailment |
def call(
self,
scheme,
service,
arg1,
arg2=None,
arg3=None,
timeout=None,
retry_on=None,
retry_limit=None,
routing_delegate=None,
hostport=None,
shard_key=None,
tracing_span=None,
trace=None, # to trace or not, defaults to self._dep_tchannel.trace
caller_name=None,
):
"""Make low-level requests to TChannel services.
**Note:** Usually you would interact with a higher-level arg scheme
like :py:class:`tchannel.schemes.JsonArgScheme` or
:py:class:`tchannel.schemes.ThriftArgScheme`.
"""
# TODO - don't use asserts for public API
assert format, "format is required"
assert service, "service is required"
assert arg1, "arg1 is required"
# default args
if arg2 is None:
arg2 = ""
if arg3 is None:
arg3 = ""
if timeout is None:
timeout = DEFAULT_TIMEOUT
if retry_on is None:
retry_on = retry.DEFAULT
if retry_limit is None:
retry_limit = retry.DEFAULT_RETRY_LIMIT
# TODO - allow filters/steps for serialization, tracing, etc...
tracing.apply_trace_flag(tracing_span, trace, self._dep_tchannel.trace)
# calls tchannel.tornado.peer.PeerClientOperation.__init__
operation = self._dep_tchannel.request(
service=service,
hostport=hostport,
arg_scheme=scheme,
retry=retry_on,
tracing_span=tracing_span
)
# fire operation
transport_headers = {
transport.SCHEME: scheme,
transport.CALLER_NAME: caller_name or self.name,
}
if shard_key:
transport_headers[transport.SHARD_KEY] = shard_key
if routing_delegate:
transport_headers[transport.ROUTING_DELEGATE] = routing_delegate
response = yield operation.send(
arg1=arg1,
arg2=arg2,
arg3=arg3,
headers=transport_headers,
retry_limit=retry_limit,
ttl=timeout,
)
# unwrap response
body = yield response.get_body()
headers = yield response.get_header()
t = TransportHeaders.from_dict(response.headers)
result = Response(
body=body,
headers=headers,
transport=t,
status=response.code,
)
raise gen.Return(result) | Make low-level requests to TChannel services.
**Note:** Usually you would interact with a higher-level arg scheme
like :py:class:`tchannel.schemes.JsonArgScheme` or
:py:class:`tchannel.schemes.ThriftArgScheme`. | entailment |
def advertise(self, routers=None, name=None, timeout=None,
router_file=None, jitter=None):
"""Advertise with Hyperbahn.
After a successful advertisement, Hyperbahn will establish long-lived
connections with your application. These connections are used to load
balance inbound and outbound requests to other applications on the
Hyperbahn network.
Re-advertisement happens periodically after calling this method (every
minute). Hyperbahn will eject us from the network if it doesn't get a
re-advertise from us after 5 minutes.
This function may be called multiple times if it fails. If it
succeeds, all consecutive calls are ignored.
:param list routers:
A seed list of known Hyperbahn addresses to attempt contact with.
Entries should be of the form ``"host:port"``.
:param string name:
The name your application identifies itself as. This is usually
unneeded because in the common case it will match the ``name`` you
initialized the ``TChannel`` instance with. This is the identifier
other services will use to make contact with you.
:param timeout:
The timeout (in sec) for the initial advertise attempt.
Defaults to 30 seconds.
:param jitter:
Variance allowed in the interval per request. Defaults to 5
seconds. The jitter applies to the initial advertise request as
well.
:param router_file:
The host file that contains the routers information. The file
should contain a JSON stringified format of the routers parameter.
Either routers or router_file should be provided. If both provided,
a ValueError will be raised.
:returns:
A future that resolves to the remote server's response after the
first advertise finishes.
:raises TimeoutError:
When unable to make our first advertise request to Hyperbahn.
Subsequent requests may fail but will be ignored.
"""
if routers is not None and router_file is not None:
raise ValueError(
'Only one of routers and router_file can be provided.')
if routers is None and router_file is not None:
# should just let the exceptions fly
try:
with open(router_file, 'r') as json_data:
routers = json.load(json_data)
except (IOError, OSError, ValueError):
log.exception('Failed to read seed routers list.')
raise
@gen.coroutine
def _advertise():
result = yield self._dep_tchannel.advertise(
routers=routers,
name=name,
timeout=timeout,
)
body = yield result.get_body()
headers = yield result.get_header()
response = Response(json.loads(body), headers or {})
raise gen.Return(response)
def _on_advertise(future):
if not future.exception():
return
# If the request failed, clear the response so that we can try
# again.
with self._advertise_lock:
# `is` comparison to ensure we're not deleting another Future.
if self._advertise_response is future:
self._advertise_response = None
with self._advertise_lock:
if self._advertise_response is not None:
return self._advertise_response
future = self._advertise_response = _advertise()
# We call add_done_callback here rather than when we call _advertise()
# because if the future has already resolved by the time we call
# add_done_callback, the callback will immediately be executed. The
# callback will try to acquire the advertise_lock which we already
# hold and end up in a deadlock.
future.add_done_callback(_on_advertise)
return future | Advertise with Hyperbahn.
After a successful advertisement, Hyperbahn will establish long-lived
connections with your application. These connections are used to load
balance inbound and outbound requests to other applications on the
Hyperbahn network.
Re-advertisement happens periodically after calling this method (every
minute). Hyperbahn will eject us from the network if it doesn't get a
re-advertise from us after 5 minutes.
This function may be called multiple times if it fails. If it
succeeds, all consecutive calls are ignored.
:param list routers:
A seed list of known Hyperbahn addresses to attempt contact with.
Entries should be of the form ``"host:port"``.
:param string name:
The name your application identifies itself as. This is usually
unneeded because in the common case it will match the ``name`` you
initialized the ``TChannel`` instance with. This is the identifier
other services will use to make contact with you.
:param timeout:
The timeout (in sec) for the initial advertise attempt.
Defaults to 30 seconds.
:param jitter:
Variance allowed in the interval per request. Defaults to 5
seconds. The jitter applies to the initial advertise request as
well.
:param router_file:
The host file that contains the routers information. The file
should contain a JSON stringified format of the routers parameter.
Either routers or router_file should be provided. If both provided,
a ValueError will be raised.
:returns:
A future that resolves to the remote server's response after the
first advertise finishes.
:raises TimeoutError:
When unable to make our first advertise request to Hyperbahn.
Subsequent requests may fail but will be ignored. | entailment |
def thrift_request_builder(service, thrift_module, hostport=None,
thrift_class_name=None):
"""Provide TChannel compatibility with Thrift-generated modules.
The service this creates is meant to be used with TChannel like so:
.. code-block:: python
from tchannel import TChannel, thrift_request_builder
from some_other_service_thrift import some_other_service
tchannel = TChannel('my-service')
some_service = thrift_request_builder(
service='some-other-service',
thrift_module=some_other_service
)
resp = tchannel.thrift(
some_service.fetchPotatoes()
)
.. deprecated:: 0.18.0
Please switch to :py:func:`tchannel.thrift.load`.
.. warning::
This API is deprecated and will be removed in a future version.
:param string service:
Name of Thrift service to call. This is used internally for
grouping and stats, but also to route requests over Hyperbahn.
:param thrift_module:
The top-level module of the Apache Thrift generated code for
the service that will be called.
:param string hostport:
When calling the Thrift service directly, and not over Hyperbahn,
this 'host:port' value should be provided.
:param string thrift_class_name:
When the Apache Thrift generated Iface class name does not match
thrift_module, then this should be provided.
"""
# start with a request maker instance
maker = ThriftRequestMaker(
service=service,
thrift_module=thrift_module,
hostport=hostport,
thrift_class_name=thrift_class_name
)
# create methods that mirror thrift client
# and each return ThriftRequest
methods = _create_methods(thrift_module)
# then attach to instane
for name, method in methods.iteritems():
method = types.MethodType(method, maker, ThriftRequestMaker)
setattr(maker, name, method)
return maker | Provide TChannel compatibility with Thrift-generated modules.
The service this creates is meant to be used with TChannel like so:
.. code-block:: python
from tchannel import TChannel, thrift_request_builder
from some_other_service_thrift import some_other_service
tchannel = TChannel('my-service')
some_service = thrift_request_builder(
service='some-other-service',
thrift_module=some_other_service
)
resp = tchannel.thrift(
some_service.fetchPotatoes()
)
.. deprecated:: 0.18.0
Please switch to :py:func:`tchannel.thrift.load`.
.. warning::
This API is deprecated and will be removed in a future version.
:param string service:
Name of Thrift service to call. This is used internally for
grouping and stats, but also to route requests over Hyperbahn.
:param thrift_module:
The top-level module of the Apache Thrift generated code for
the service that will be called.
:param string hostport:
When calling the Thrift service directly, and not over Hyperbahn,
this 'host:port' value should be provided.
:param string thrift_class_name:
When the Apache Thrift generated Iface class name does not match
thrift_module, then this should be provided. | entailment |
def read_body(self, body):
"""Handles the response body for this request.
If the response body includes a result, returns the result unwrapped
from the response union. If the response contains an exception, raises
that exception.
"""
result_spec = self.result_type.thrift_spec
# raise application exception, if present
for exc_spec in result_spec[1:]:
exc = getattr(body, exc_spec[2])
if exc is not None:
raise exc
# success - non-void
if len(result_spec) >= 1 and result_spec[0] is not None:
# value expected, but got none
# TODO - server side should use this same logic
if body.success is None:
raise ValueExpectedError(
'Expected a value to be returned for %s, '
'but recieved None - only void procedures can '
'return None.' % self.endpoint
)
return body.success
# success - void
else:
return None | Handles the response body for this request.
If the response body includes a result, returns the result unwrapped
from the response union. If the response contains an exception, raises
that exception. | entailment |
def get_rank(self, peer):
"""Calculate the peer rank based on connections.
If the peer has no incoming connections, it will have largest rank.
In our peer selection strategy, the largest number has least priority
in the heap.
If the peer has incoming connections, we will return number of outbound
pending requests and responses.
:param peer: instance of `tchannel.tornado.peer.Peer`
:return: rank of the peer
"""
if not peer.connections:
return self.TIERS[0]
if not peer.has_incoming_connections:
return self.TIERS[1] + peer.total_outbound_pendings
return self.TIERS[2] + peer.total_outbound_pendings | Calculate the peer rank based on connections.
If the peer has no incoming connections, it will have largest rank.
In our peer selection strategy, the largest number has least priority
in the heap.
If the peer has incoming connections, we will return number of outbound
pending requests and responses.
:param peer: instance of `tchannel.tornado.peer.Peer`
:return: rank of the peer | entailment |
def prepare(cls, *args, **kwargs):
"""Set arguments to be used when instantiating a TChannel instance.
Arguments are the same as :py:meth:`tchannel.TChannel.__init__`.
"""
cls.args = args
cls.kwargs = kwargs
cls.prepared = True | Set arguments to be used when instantiating a TChannel instance.
Arguments are the same as :py:meth:`tchannel.TChannel.__init__`. | entailment |
def reset(cls, *args, **kwargs):
"""Undo call to prepare, useful for testing."""
cls.local.tchannel = None
cls.args = None
cls.kwargs = None
cls.prepared = False | Undo call to prepare, useful for testing. | entailment |
def get_instance(cls):
"""Get a configured, thread-safe, singleton TChannel instance.
:returns tchannel.TChannel:
"""
if not cls.prepared:
raise SingletonNotPreparedError(
"prepare must be called before get_instance"
)
if hasattr(cls.local, 'tchannel') and cls.local.tchannel is not None:
return cls.local.tchannel
cls.local.tchannel = cls.tchannel_cls(*cls.args, **cls.kwargs)
return cls.local.tchannel | Get a configured, thread-safe, singleton TChannel instance.
:returns tchannel.TChannel: | entailment |
def handle_pre_call(self, message, connection):
"""Handle incoming request message including CallRequestMessage and
CallRequestContinueMessage
This method will build the User friendly request object based on the
incoming messages.
It passes all the messages into the message_factory to build the init
request object. Only when it get a CallRequestMessage and a completed
arg_1=argstream[0], the message_factory will return a request object.
Then it will trigger the async handle_call method.
:param message: CallRequestMessage or CallRequestContinueMessage
:param connection: tornado connection
"""
req = None
try:
req = connection.request_message_factory.build(message)
# message_factory will create Request only when it receives
# CallRequestMessage. It will return None, if it receives
# CallRequestContinueMessage.
if req:
self.handle_call(req, connection)
except TChannelError as e:
log.warn('Received a bad request.', exc_info=True)
if req:
e.tracing = req.tracing
connection.send_error(e) | Handle incoming request message including CallRequestMessage and
CallRequestContinueMessage
This method will build the User friendly request object based on the
incoming messages.
It passes all the messages into the message_factory to build the init
request object. Only when it get a CallRequestMessage and a completed
arg_1=argstream[0], the message_factory will return a request object.
Then it will trigger the async handle_call method.
:param message: CallRequestMessage or CallRequestContinueMessage
:param connection: tornado connection | entailment |
def register(
self,
rule,
handler,
req_serializer=None,
resp_serializer=None
):
"""Register a new endpoint with the given name.
.. code-block:: python
@dispatcher.register('is_healthy')
def check_health(request, response):
# ...
:param rule:
Name of the endpoint. Incoming Call Requests must have this as
``arg1`` to dispatch to this handler.
If ``RequestHandler.FALLBACK`` is specified as a rule, the given
handler will be used as the 'fallback' handler when requests don't
match any registered rules.
:param handler:
A function that gets called with ``Request`` and ``Response``.
:param req_serializer:
Arg scheme serializer of this endpoint. It should be
``RawSerializer``, ``JsonSerializer``, and ``ThriftSerializer``.
:param resp_serializer:
Arg scheme serializer of this endpoint. It should be
``RawSerializer``, ``JsonSerializer``, and ``ThriftSerializer``.
"""
assert handler, "handler must not be None"
req_serializer = req_serializer or RawSerializer()
resp_serializer = resp_serializer or RawSerializer()
self.handlers[rule] = Handler(handler, req_serializer, resp_serializer) | Register a new endpoint with the given name.
.. code-block:: python
@dispatcher.register('is_healthy')
def check_health(request, response):
# ...
:param rule:
Name of the endpoint. Incoming Call Requests must have this as
``arg1`` to dispatch to this handler.
If ``RequestHandler.FALLBACK`` is specified as a rule, the given
handler will be used as the 'fallback' handler when requests don't
match any registered rules.
:param handler:
A function that gets called with ``Request`` and ``Response``.
:param req_serializer:
Arg scheme serializer of this endpoint. It should be
``RawSerializer``, ``JsonSerializer``, and ``ThriftSerializer``.
:param resp_serializer:
Arg scheme serializer of this endpoint. It should be
``RawSerializer``, ``JsonSerializer``, and ``ThriftSerializer``. | entailment |
def random_tracing():
"""
Create new Tracing() tuple with random IDs.
"""
new_id = _uniq_id()
return Tracing(
span_id=new_id,
parent_id=0,
trace_id=new_id,
traceflags=0) | Create new Tracing() tuple with random IDs. | entailment |
def generate_checksum(message, previous_csum=0):
"""Generate checksum for messages with
CALL_REQ, CALL_REQ_CONTINUE,
CALL_RES,CALL_RES_CONTINUE types.
:param message: outgoing message
:param previous_csum: accumulated checksum value
"""
if message.message_type in CHECKSUM_MSG_TYPES:
csum = compute_checksum(
message.checksum[0],
message.args,
previous_csum,
)
message.checksum = (message.checksum[0], csum) | Generate checksum for messages with
CALL_REQ, CALL_REQ_CONTINUE,
CALL_RES,CALL_RES_CONTINUE types.
:param message: outgoing message
:param previous_csum: accumulated checksum value | entailment |
def verify_checksum(message, previous_csum=0):
"""Verify checksum for incoming message.
:param message: incoming message
:param previous_csum: accumulated checksum value
:return return True if message checksum type is None
or checksum is correct
"""
if message.message_type in CHECKSUM_MSG_TYPES:
csum = compute_checksum(
message.checksum[0],
message.args,
previous_csum,
)
if csum == message.checksum[1]:
return True
else:
return False
else:
return True | Verify checksum for incoming message.
:param message: incoming message
:param previous_csum: accumulated checksum value
:return return True if message checksum type is None
or checksum is correct | entailment |
def advertise(tchannel, service, routers=None, timeout=None, router_file=None,
jitter=None):
"""Advertise with Hyperbahn.
See :py:class:`tchannel.TChannel.advertise`.
"""
timeout = timeout or FIRST_ADVERTISE_TIME
if routers is not None and router_file is not None:
raise ValueError(
'Only one of routers and router_file can be provided.')
if routers is None and router_file is not None:
# should just let the exceptions fly
try:
with open(router_file, 'r') as json_data:
routers = json.load(json_data)
except (IOError, OSError, ValueError):
log.exception('Failed to read seed routers list.')
raise
for router in routers:
# We use .get here instead of .add because we don't want to fail if a
# TChannel already knows about some of the routers.
tchannel.peers.get(router)
adv = Advertiser(service, tchannel, ttl_secs=timeout,
interval_max_jitter_secs=jitter)
return adv.start() | Advertise with Hyperbahn.
See :py:class:`tchannel.TChannel.advertise`. | entailment |
def start(self):
"""Starts the advertise loop.
Returns the result of the first ad request.
"""
if self.running:
raise Exception('Advertiser is already running')
if self.io_loop is None:
self.io_loop = tornado.ioloop.IOLoop.current()
self.running = True
answer = tornado.gen.Future()
self._schedule_ad(0, answer)
return answer | Starts the advertise loop.
Returns the result of the first ad request. | entailment |
def _schedule_ad(self, delay=None, response_future=None):
"""Schedules an ``ad`` request.
:param delay:
Time in seconds to wait before making the ``ad`` request. Defaults
to self.interval_secs. Regardless of value, a jitter of
self.interval_max_jitter_secs is applied to this.
:param response_future:
If non-None, the result of the advertise request is filled into
this future.
"""
if not self.running:
return
if delay is None:
delay = self.interval_secs
delay += random.uniform(0, self.interval_max_jitter_secs)
self._next_ad = self.io_loop.call_later(delay, self._ad,
response_future) | Schedules an ``ad`` request.
:param delay:
Time in seconds to wait before making the ``ad`` request. Defaults
to self.interval_secs. Regardless of value, a jitter of
self.interval_max_jitter_secs is applied to this.
:param response_future:
If non-None, the result of the advertise request is filled into
this future. | entailment |
def add(self, id, ttl_secs):
"""Adds a new request to the Cemetery that is known to have timed out.
The request will be forgotten after ``ttl_secs + ttl_offset_secs``
seconds.
:param id:
ID of the request
:param ttl_secs:
TTL of the request (in seconds)
"""
ttl_secs = min(ttl_secs + self.ttl_offset_secs, self.max_ttl_secs)
self._tombstones[id] = IOLoop.current().call_later(
ttl_secs, self.forget, id,
) | Adds a new request to the Cemetery that is known to have timed out.
The request will be forgotten after ``ttl_secs + ttl_offset_secs``
seconds.
:param id:
ID of the request
:param ttl_secs:
TTL of the request (in seconds) | entailment |
def clear(self):
"""Forget about all requests."""
io_loop = IOLoop.current()
while self._tombstones:
_, req_timeout = self._tombstones.popitem()
io_loop.remove_timeout(req_timeout) | Forget about all requests. | entailment |
def get_header(self):
"""Get the header value from the response.
:return: a future contains the deserialized value of header
"""
raw_header = yield get_arg(self, 1)
if not self.serializer:
raise tornado.gen.Return(raw_header)
else:
header = self.serializer.deserialize_header(raw_header)
raise tornado.gen.Return(header) | Get the header value from the response.
:return: a future contains the deserialized value of header | entailment |
def get_body(self):
"""Get the body value from the response.
:return: a future contains the deserialized value of body
"""
raw_body = yield get_arg(self, 2)
if not self.serializer:
raise tornado.gen.Return(raw_body)
else:
body = self.serializer.deserialize_body(raw_body)
raise tornado.gen.Return(body) | Get the body value from the response.
:return: a future contains the deserialized value of body | entailment |
def set_body_s(self, stream):
"""Set customized body stream.
Note: the body stream can only be changed before the stream
is consumed.
:param stream: InMemStream/PipeStream for body
:except TChannelError:
Raise TChannelError if the stream is being sent when you try
to change the stream.
"""
if self.argstreams[2].state == StreamState.init:
self.argstreams[2] = stream
else:
raise TChannelError(
"Unable to change the body since the streaming has started") | Set customized body stream.
Note: the body stream can only be changed before the stream
is consumed.
:param stream: InMemStream/PipeStream for body
:except TChannelError:
Raise TChannelError if the stream is being sent when you try
to change the stream. | entailment |
def set_header_s(self, stream):
"""Set customized header stream.
Note: the header stream can only be changed before the stream
is consumed.
:param stream: InMemStream/PipeStream for header
:except TChannelError:
Raise TChannelError if the stream is being sent when you try
to change the stream.
"""
if self.argstreams[1].state == StreamState.init:
self.argstreams[1] = stream
else:
raise TChannelError(
"Unable to change the header since the streaming has started") | Set customized header stream.
Note: the header stream can only be changed before the stream
is consumed.
:param stream: InMemStream/PipeStream for header
:except TChannelError:
Raise TChannelError if the stream is being sent when you try
to change the stream. | entailment |
def write_header(self, chunk):
"""Write to header.
Note: the header stream is only available to write before write body.
:param chunk: content to write to header
:except TChannelError:
Raise TChannelError if the response's flush() has been called
"""
if self.serializer:
header = self.serializer.serialize_header(chunk)
else:
header = chunk
if self.flushed:
raise TChannelError("write operation invalid after flush call")
if (self.argstreams[0].state != StreamState.completed and
self.argstreams[0].auto_close):
self.argstreams[0].close()
return self.argstreams[1].write(header) | Write to header.
Note: the header stream is only available to write before write body.
:param chunk: content to write to header
:except TChannelError:
Raise TChannelError if the response's flush() has been called | entailment |
def write_body(self, chunk):
"""Write to header.
Note: whenever write_body is called, the header stream will be closed.
write_header method is unavailable.
:param chunk: content to write to body
:except TChannelError:
Raise TChannelError if the response's flush() has been called
"""
if self.serializer:
body = self.serializer.serialize_body(chunk)
else:
body = chunk
if self.flushed:
raise TChannelError("write operation invalid after flush call")
if (self.argstreams[0].state != StreamState.completed and
self.argstreams[0].auto_close):
self.argstreams[0].close()
if (self.argstreams[1].state != StreamState.completed and
self.argstreams[1].auto_close):
self.argstreams[1].close()
return self.argstreams[2].write(body) | Write to header.
Note: whenever write_body is called, the header stream will be closed.
write_header method is unavailable.
:param chunk: content to write to body
:except TChannelError:
Raise TChannelError if the response's flush() has been called | entailment |
def read_message(stream):
"""Reads a message from the given IOStream.
:param IOStream stream:
IOStream to read from.
"""
answer = tornado.gen.Future()
io_loop = IOLoop.current()
def on_error(future):
log.info('Failed to read data: %s', future.exception())
return answer.set_exc_info(future.exc_info())
@fail_to(answer)
def on_body(size, future):
if future.exception():
return on_error(future)
body = future.result()
f = frame.frame_rw.read(BytesIO(body), size=size)
message_type = f.header.message_type
message_rw = messages.RW.get(message_type)
if not message_rw:
exc = errors.FatalProtocolError(
'Unknown message type %s', str(message_type)
)
return answer.set_exception(exc)
message = message_rw.read(BytesIO(f.payload))
message.id = f.header.message_id
answer.set_result(message)
@fail_to(answer)
def on_read_size(future):
if future.exception():
return answer.set_exc_info(future.exc_info())
size_bytes = future.result()
size = frame.frame_rw.size_rw.read(BytesIO(size_bytes))
io_loop.add_future(
stream.read_bytes(size - FRAME_SIZE_WIDTH),
lambda f: on_body(size, f)
)
try:
# read_bytes may fail if the stream has already been closed
read_size_future = stream.read_bytes(FRAME_SIZE_WIDTH)
except Exception:
answer.set_exc_info(sys.exc_info())
else:
read_size_future.add_done_callback(on_read_size)
return answer | Reads a message from the given IOStream.
:param IOStream stream:
IOStream to read from. | entailment |
def set_close_callback(self, cb):
"""Specify a function to be called when this connection is closed.
:param cb:
A callable that takes no arguments. This callable will be called
when this connection is closed.
"""
assert self._close_cb is None, (
'A close_callback has already been set for this connection.'
)
self._close_cb = stack_context.wrap(cb)
if self.closed:
self._close_cb() | Specify a function to be called when this connection is closed.
:param cb:
A callable that takes no arguments. This callable will be called
when this connection is closed. | entailment |
def send(self, message):
"""Send the given message up the wire.
Use this for messages which have a response message.
:param message:
Message to send
:returns:
A Future containing the response for the message
"""
assert self._handshake_performed, "Perform a handshake first."
assert message.message_type in self.CALL_REQ_TYPES, (
"Message '%s' can't use send" % repr(message)
)
message.id = message.id or self.writer.next_message_id()
assert message.id not in self._outbound_pending_call, (
"Message ID '%d' already being used" % message.id
)
future = tornado.gen.Future()
self._outbound_pending_call[message.id] = future
self.write(message)
return future | Send the given message up the wire.
Use this for messages which have a response message.
:param message:
Message to send
:returns:
A Future containing the response for the message | entailment |
def write(self, message):
"""Writes the given message up the wire.
Does not expect a response back for the message.
:param message:
Message to write.
"""
message.id = message.id or self.writer.next_message_id()
if message.message_type in self.CALL_REQ_TYPES:
message_factory = self.request_message_factory
else:
message_factory = self.response_message_factory
fragments = message_factory.fragment(message)
return self._write_fragments(fragments) | Writes the given message up the wire.
Does not expect a response back for the message.
:param message:
Message to write. | entailment |
def _write_fragments(self, fragments):
"""
:param fragments:
A generator of messages
"""
answer = tornado.gen.Future()
if not fragments:
answer.set_result(None)
return answer
io_loop = IOLoop.current()
def _write_fragment(future):
if future and future.exception():
return answer.set_exc_info(future.exc_info())
try:
fragment = fragments.next()
except StopIteration:
return answer.set_result(None)
io_loop.add_future(self.writer.put(fragment), _write_fragment)
_write_fragment(None)
return answer | :param fragments:
A generator of messages | entailment |
def initiate_handshake(self, headers, timeout=None):
"""Initiate a handshake with the remote host.
:param headers:
A dictionary of headers to send.
:returns:
A future that resolves (with a value of None) when the handshake
is complete.
"""
io_loop = IOLoop.current()
timeout = timeout or DEFAULT_INIT_TIMEOUT_SECS
self.writer.put(messages.InitRequestMessage(
version=PROTOCOL_VERSION,
headers=headers
))
init_res_future = self.reader.get()
timeout_handle = io_loop.call_later(timeout, (
lambda: init_res_future.set_exception(errors.TimeoutError(
'Handshake with %s:%d timed out. Did not receive an INIT_RES '
'after %s seconds' % (
self.remote_host, self.remote_host_port, str(timeout)
)
))
))
io_loop.add_future(
init_res_future,
(lambda _: io_loop.remove_timeout(timeout_handle)),
)
init_res = yield init_res_future
if init_res.message_type != Types.INIT_RES:
raise errors.UnexpectedError(
"Expected handshake response, got %s" % repr(init_res)
)
self._extract_handshake_headers(init_res)
self._handshake_performed = True
# The receive loop is started only after the handshake has been
# completed.
self._loop()
raise tornado.gen.Return(init_res) | Initiate a handshake with the remote host.
:param headers:
A dictionary of headers to send.
:returns:
A future that resolves (with a value of None) when the handshake
is complete. | entailment |
def expect_handshake(self, headers):
"""Expect a handshake from the remote host.
:param headers:
Headers to respond with
:returns:
A future that resolves (with a value of None) when the handshake
is complete.
"""
init_req = yield self.reader.get()
if init_req.message_type != Types.INIT_REQ:
raise errors.UnexpectedError(
"You need to shake my hand first. Got %s" % repr(init_req)
)
self._extract_handshake_headers(init_req)
self._handshake_performed = True
self.writer.put(
messages.InitResponseMessage(
PROTOCOL_VERSION, headers, init_req.id),
)
# The receive loop is started only after the handshake has been
# completed.
self._loop()
raise tornado.gen.Return(init_req) | Expect a handshake from the remote host.
:param headers:
Headers to respond with
:returns:
A future that resolves (with a value of None) when the handshake
is complete. | entailment |
def outgoing(cls, hostport, process_name=None, serve_hostport=None,
handler=None, tchannel=None):
"""Initiate a new connection to the given host.
:param hostport:
String in the form ``$host:$port`` specifying the target host
:param process_name:
Process name of the entity making the connection.
:param serve_hostport:
String in the form ``$host:$port`` specifying an address at which
the caller can be reached. If omitted, ``0.0.0.0:0`` is used.
:param handler:
If given, any calls received from this connection will be sent to
this RequestHandler.
"""
host, port = hostport.rsplit(":", 1)
process_name = process_name or "%s[%s]" % (sys.argv[0], os.getpid())
serve_hostport = serve_hostport or "0.0.0.0:0"
# TODO: change this to tornado.tcpclient.TCPClient to do async DNS
# lookups.
stream = tornado.iostream.IOStream(
socket.socket(socket.AF_INET, socket.SOCK_STREAM)
)
log.debug("Connecting to %s", hostport)
try:
yield stream.connect((host, int(port)))
connection = cls(stream, tchannel, direction=OUTGOING)
log.debug("Performing handshake with %s", hostport)
yield connection.initiate_handshake(headers={
'host_port': serve_hostport,
'process_name': process_name,
'tchannel_language': TCHANNEL_LANGUAGE,
'tchannel_language_version': TCHANNEL_LANGUAGE_VERSION,
'tchannel_version': TCHANNEL_VERSION,
})
except (StreamClosedError, socket.error, errors.TimeoutError) as e:
log.warn("Couldn't connect to %s", hostport)
raise NetworkError(
"Couldn't connect to %s" % hostport, e
)
if handler:
connection.serve(handler)
raise tornado.gen.Return(connection) | Initiate a new connection to the given host.
:param hostport:
String in the form ``$host:$port`` specifying the target host
:param process_name:
Process name of the entity making the connection.
:param serve_hostport:
String in the form ``$host:$port`` specifying an address at which
the caller can be reached. If omitted, ``0.0.0.0:0`` is used.
:param handler:
If given, any calls received from this connection will be sent to
this RequestHandler. | entailment |
def serve(self, handler):
"""Serve calls over this connection using the given RequestHandler.
:param handler:
RequestHandler to process the requests through
:return:
A Future that resolves (to None) once the loop is done running --
which happens once this connection is closed.
"""
assert handler, "handler is required"
while not self.closed:
message = yield self.await()
try:
handler(message, self)
except Exception:
# TODO Send error frame back
log.exception("Failed to process %s", repr(message)) | Serve calls over this connection using the given RequestHandler.
:param handler:
RequestHandler to process the requests through
:return:
A Future that resolves (to None) once the loop is done running --
which happens once this connection is closed. | entailment |
def send_error(self, error):
"""Convenience method for writing Error frames up the wire.
:param error:
TChannel Error. :py:class`tchannel.errors.TChannelError`.
:returns:
A future that resolves when the write finishes.
"""
error_message = build_raw_error_message(error)
write_future = self.writer.put(error_message)
write_future.add_done_callback(
lambda f: IOLoop.current().add_callback(
self.tchannel.event_emitter.fire(
EventType.after_send_error,
error,
)
)
)
return write_future | Convenience method for writing Error frames up the wire.
:param error:
TChannel Error. :py:class`tchannel.errors.TChannelError`.
:returns:
A future that resolves when the write finishes. | entailment |
def _stream(self, context, message_factory):
"""write request/response into frames
Transform request/response into protocol level message objects based on
types and argstreams.
Assumption: the chunk data read from stream can fit into memory.
If arg stream is at init or streaming state, build the message based on
current chunk. If arg stream is at completed state, put current chunk
into args[] array, and continue to read next arg stream in order to
compose a larger message instead of sending multi small messages.
Note: the message built at this stage is not guaranteed the size is
less then 64KB.
Possible messages created sequence:
Take request as an example::
CallRequestMessage(flags=fragment)
--> CallRequestContinueMessage(flags=fragment)
....
--> CallRequestContinueMessage(flags=fragment)
--> CallRequestMessage(flags=none)
:param context: Request or Response object
"""
args = []
try:
for argstream in context.argstreams:
chunk = yield argstream.read()
args.append(chunk)
chunk = yield argstream.read()
while chunk:
message = (message_factory.
build_raw_message(context, args))
yield self.write(message)
args = [chunk]
chunk = yield argstream.read()
# last piece of request/response.
message = (message_factory.
build_raw_message(context, args, is_completed=True))
yield self.write(message)
context.state = StreamState.completed
# Stop streamming immediately if exception occurs on the handler side
except TChannelError:
# raise by tchannel intentionally
log.info("Stopped outgoing streams because of an error",
exc_info=sys.exc_info()) | write request/response into frames
Transform request/response into protocol level message objects based on
types and argstreams.
Assumption: the chunk data read from stream can fit into memory.
If arg stream is at init or streaming state, build the message based on
current chunk. If arg stream is at completed state, put current chunk
into args[] array, and continue to read next arg stream in order to
compose a larger message instead of sending multi small messages.
Note: the message built at this stage is not guaranteed the size is
less then 64KB.
Possible messages created sequence:
Take request as an example::
CallRequestMessage(flags=fragment)
--> CallRequestContinueMessage(flags=fragment)
....
--> CallRequestContinueMessage(flags=fragment)
--> CallRequestMessage(flags=none)
:param context: Request or Response object | entailment |
def stream_request(self, request, out_future):
"""send the given request and response is not required"""
request.close_argstreams()
def on_done(future):
if future.exception() and out_future.running():
out_future.set_exc_info(future.exc_info())
request.close_argstreams(force=True)
stream_future = self._stream(request, self.request_message_factory)
stream_future.add_done_callback(on_done)
return stream_future | send the given request and response is not required | entailment |
def send_request(self, request):
"""Send the given request and response is required.
Use this for messages which have a response message.
:param request:
request to send
:returns:
A Future containing the response for the request
"""
assert self._handshake_performed, "Perform a handshake first."
assert request.id not in self._outbound_pending_call, (
"Message ID '%d' already being used" % request.id
)
future = tornado.gen.Future()
self._outbound_pending_call[request.id] = future
self.add_pending_outbound()
self.stream_request(request, future).add_done_callback(
lambda f: self.remove_pending_outbound()
)
if request.ttl:
self._add_timeout(request, future)
# the actual future that caller will yield
response_future = tornado.gen.Future()
# TODO: fire before_receive_response
IOLoop.current().add_future(
future,
lambda f: self.adapt_result(f, request, response_future),
)
return response_future | Send the given request and response is required.
Use this for messages which have a response message.
:param request:
request to send
:returns:
A Future containing the response for the request | entailment |
def _add_timeout(self, request, future):
"""Adds a timeout for the given request to the given future."""
io_loop = IOLoop.current()
t = io_loop.call_later(
request.ttl,
self._request_timed_out,
request.id,
request.service,
request.ttl,
future,
)
io_loop.add_future(future, lambda f: io_loop.remove_timeout(t)) | Adds a timeout for the given request to the given future. | entailment |
def get(self):
"""Receive the next message off the wire.
:returns:
A Future that resolves to the next message off the wire.
"""
if not self.filling:
self.fill()
answer = tornado.gen.Future()
def _on_result(future):
if future.exception():
return answer.set_exc_info(future.exc_info())
answer.set_result(future.result())
def _on_item(future):
if future.exception():
return answer.set_exc_info(future.exc_info())
future.result().add_done_callback(_on_result)
self.queue.get().add_done_callback(_on_item)
return answer | Receive the next message off the wire.
:returns:
A Future that resolves to the next message off the wire. | entailment |
def put(self, message):
"""Enqueues the given message for writing to the wire.
The message must be small enough to fit in a single frame.
"""
if self.draining is False:
self.drain()
return self._enqueue(message) | Enqueues the given message for writing to the wire.
The message must be small enough to fit in a single frame. | entailment |
def advertise(
self,
routers=None,
name=None,
timeout=None,
router_file=None,
jitter=None,
):
"""Make a service available on the Hyperbahn routing mesh.
This will make contact with a Hyperbahn host from a list of known
Hyperbahn routers. Additional Hyperbahn connections will be established
once contact has been made with the network.
:param router:
A seed list of addresses of Hyperbahn routers, e.g.,
``["127.0.0.1:23000"]``.
:param name:
The identity of this service on the Hyperbahn.
This is usually unnecessary, as it defaults to the name given when
initializing the :py:class:`TChannel` (which is used as your
identity as a caller).
:returns:
A future that resolves to the remote server's response after
the first advertise finishes.
Advertisement will continue to happen periodically.
"""
name = name or self.name
if not self.is_listening():
self.listen()
return hyperbahn.advertise(
self,
name,
routers,
timeout,
router_file,
jitter,
) | Make a service available on the Hyperbahn routing mesh.
This will make contact with a Hyperbahn host from a list of known
Hyperbahn routers. Additional Hyperbahn connections will be established
once contact has been made with the network.
:param router:
A seed list of addresses of Hyperbahn routers, e.g.,
``["127.0.0.1:23000"]``.
:param name:
The identity of this service on the Hyperbahn.
This is usually unnecessary, as it defaults to the name given when
initializing the :py:class:`TChannel` (which is used as your
identity as a caller).
:returns:
A future that resolves to the remote server's response after
the first advertise finishes.
Advertisement will continue to happen periodically. | entailment |
def request(self,
hostport=None,
service=None,
arg_scheme=None,
retry=None,
**kwargs):
"""Initiate a new request through this TChannel.
:param hostport:
Host to which the request will be made. If unspecified, a random
known peer will be picked. This is not necessary if using
Hyperbahn.
:param service:
The name of a service available on Hyperbahn. Defaults to an empty
string.
:param arg_scheme:
Determines the serialization scheme for the request. One of 'raw',
'json', or 'thrift'. Defaults to 'raw'.
:param rety:
One of 'n' (never retry), 'c' (retry on connection errors), 't'
(retry on timeout), 'ct' (retry on connection errors and timeouts).
Defaults to 'c'.
"""
# TODO disallow certain parameters or don't propagate them backwards.
# For example, blacklist and rank threshold aren't really
# user-configurable right now.
return self.peers.request(hostport=hostport,
service=service,
arg_scheme=arg_scheme,
retry=retry,
**kwargs) | Initiate a new request through this TChannel.
:param hostport:
Host to which the request will be made. If unspecified, a random
known peer will be picked. This is not necessary if using
Hyperbahn.
:param service:
The name of a service available on Hyperbahn. Defaults to an empty
string.
:param arg_scheme:
Determines the serialization scheme for the request. One of 'raw',
'json', or 'thrift'. Defaults to 'raw'.
:param rety:
One of 'n' (never retry), 'c' (retry on connection errors), 't'
(retry on timeout), 'ct' (retry on connection errors and timeouts).
Defaults to 'c'. | entailment |
def listen(self, port=None):
"""Start listening for incoming connections.
A request handler must have already been specified with
``TChannel.host``.
:param port:
An explicit port to listen on. This is unnecessary when advertising
on Hyperbahn.
:returns:
Returns immediately.
:raises AlreadyListeningError:
If listen was already called.
"""
if self.is_listening():
raise AlreadyListeningError(
"listen has already been called"
)
if port:
assert not self._port, "Port has already been set."
self._port = int(port)
assert self._handler, "Call .host with a RequestHandler first"
server = TChannelServer(self)
bind_sockets_kwargs = {
'port': self._port,
# ipv6 causes random address already in use (socket.error w errno
# == 98) when getaddrinfo() returns multiple values
# @see https://github.com/uber/tchannel-python/issues/256
'family': socket.AF_INET,
}
if self._reuse_port is True:
# allow multiple processes to share the same port,
# this is really useful in a world where services launch N
# processes per container/os-space, where N is
# the amount of cpus for example
bind_sockets_kwargs['reuse_port'] = True
sockets = bind_sockets(**bind_sockets_kwargs)
assert sockets, "No sockets bound for port %d" % self._port
# If port was 0, the OS probably assigned something better.
self._port = sockets[0].getsockname()[1]
server.add_sockets(sockets)
# assign server so we don't listen twice
self._server = server | Start listening for incoming connections.
A request handler must have already been specified with
``TChannel.host``.
:param port:
An explicit port to listen on. This is unnecessary when advertising
on Hyperbahn.
:returns:
Returns immediately.
:raises AlreadyListeningError:
If listen was already called. | entailment |
def _register_simple(self, endpoint, scheme, f):
"""Register a simple endpoint with this TChannel.
:param endpoint:
Name of the endpoint being registered.
:param scheme:
Name of the arg scheme under which the endpoint will be
registered.
:param f:
Callable handler for the endpoint.
"""
assert scheme in DEFAULT_NAMES, ("Unsupported arg scheme %s" % scheme)
if scheme == JSON:
req_serializer = JsonSerializer()
resp_serializer = JsonSerializer()
else:
req_serializer = RawSerializer()
resp_serializer = RawSerializer()
self._handler.register(endpoint, f, req_serializer, resp_serializer)
return f | Register a simple endpoint with this TChannel.
:param endpoint:
Name of the endpoint being registered.
:param scheme:
Name of the arg scheme under which the endpoint will be
registered.
:param f:
Callable handler for the endpoint. | entailment |
def _register_thrift(self, service_module, handler, **kwargs):
"""Register a Thrift endpoint on this TChannel.
:param service_module:
Reference to the Thrift-generated module for the service being
registered.
:param handler:
Handler for the endpoint
:param method:
Name of the Thrift method being registered. If omitted, ``f``'s
name is assumed to be the method name.
:param service:
Name of the Thrift service. By default this is determined
automatically from the module name.
"""
import tchannel.thrift as thrift
# Imported inside the function so that we don't have a hard dependency
# on the Thrift library. This function is usable only if the Thrift
# library is installed.
thrift.register(self._handler, service_module, handler, **kwargs)
return handler | Register a Thrift endpoint on this TChannel.
:param service_module:
Reference to the Thrift-generated module for the service being
registered.
:param handler:
Handler for the endpoint
:param method:
Name of the Thrift method being registered. If omitted, ``f``'s
name is assumed to be the method name.
:param service:
Name of the Thrift service. By default this is determined
automatically from the module name. | entailment |
def register(self, endpoint, scheme=None, handler=None, **kwargs):
"""Register a handler with this TChannel.
This may be used as a decorator:
.. code-block:: python
app = TChannel(name='bar')
@app.register("hello", "json")
def hello_handler(request, response):
params = yield request.get_body()
Or as a function:
.. code-block:: python
# Here we have a Thrift handler for `Foo::hello`
app.register(Foo, "hello", hello_thrift_handler)
:param endpoint:
Name of the endpoint being registered. This should be a reference
to the Thrift-generated module if this is a Thrift endpoint. It
may also be ``TChannel.FALLBACK`` if it's intended to be a
catch-all endpoint.
:param scheme:
Name of the scheme under which the endpoint is being registered.
One of "raw", "json", and "thrift". Defaults to "raw", except if
"endpoint" was a module, in which case this defaults to "thrift".
:param handler:
If specified, this is the handler function. If ignored, this
function returns a decorator that can be used to register the
handler function.
:returns:
If ``handler`` was specified, this returns ``handler``. Otherwise,
it returns a decorator that can be applied to a function to
register it as the handler.
"""
assert endpoint is not None, "endpoint is required"
if endpoint is TChannel.FALLBACK:
decorator = partial(self._handler.register, TChannel.FALLBACK)
if handler is not None:
return decorator(handler)
else:
return decorator
if not scheme:
# scheme defaults to raw, unless the endpoint is a service module.
if inspect.ismodule(endpoint):
scheme = "thrift"
else:
scheme = "raw"
scheme = scheme.lower()
if scheme == 'thrift':
decorator = partial(self._register_thrift, endpoint, **kwargs)
else:
decorator = partial(
self._register_simple, endpoint, scheme, **kwargs
)
if handler is not None:
return decorator(handler)
else:
return decorator | Register a handler with this TChannel.
This may be used as a decorator:
.. code-block:: python
app = TChannel(name='bar')
@app.register("hello", "json")
def hello_handler(request, response):
params = yield request.get_body()
Or as a function:
.. code-block:: python
# Here we have a Thrift handler for `Foo::hello`
app.register(Foo, "hello", hello_thrift_handler)
:param endpoint:
Name of the endpoint being registered. This should be a reference
to the Thrift-generated module if this is a Thrift endpoint. It
may also be ``TChannel.FALLBACK`` if it's intended to be a
catch-all endpoint.
:param scheme:
Name of the scheme under which the endpoint is being registered.
One of "raw", "json", and "thrift". Defaults to "raw", except if
"endpoint" was a module, in which case this defaults to "thrift".
:param handler:
If specified, this is the handler function. If ignored, this
function returns a decorator that can be used to register the
handler function.
:returns:
If ``handler`` was specified, this returns ``handler``. Otherwise,
it returns a decorator that can be applied to a function to
register it as the handler. | entailment |
def lt(self, i, j):
"""Compare the priority of two peers.
Primary comparator will be the rank of each peer. If the ``rank`` is
same then compare the ``order``. The ``order`` attribute of the peer
tracks the heap push order of the peer. This help solve the imbalance
problem caused by randomization when deal with same rank situation.
:param i: ith peer
:param j: jth peer
:return: True or False
"""
if self.peers[i].rank == self.peers[j].rank:
return self.peers[i].order < self.peers[j].order
return self.peers[i].rank < self.peers[j].rank | Compare the priority of two peers.
Primary comparator will be the rank of each peer. If the ``rank`` is
same then compare the ``order``. The ``order`` attribute of the peer
tracks the heap push order of the peer. This help solve the imbalance
problem caused by randomization when deal with same rank situation.
:param i: ith peer
:param j: jth peer
:return: True or False | entailment |
def push_peer(self, peer):
"""Push a new peer into the heap"""
self.order += 1
peer.order = self.order + random.randint(0, self.size())
heap.push(self, peer) | Push a new peer into the heap | entailment |
def add_and_shuffle(self, peer):
"""Push a new peer into the heap and shuffle the heap"""
self.push_peer(peer)
r = random.randint(0, self.size() - 1)
self.swap_order(peer.index, r) | Push a new peer into the heap and shuffle the heap | entailment |
def remove_peer(self, peer):
"""Remove the peer from the heap.
Return: removed peer if peer exists. If peer's index is out of range,
raise IndexError.
"""
if peer.index < 0 or peer.index >= self.size():
raise IndexError('Peer index is out of range')
assert peer is self.peers[peer.index], "peer is not in the heap"
return heap.remove(self, peer.index) | Remove the peer from the heap.
Return: removed peer if peer exists. If peer's index is out of range,
raise IndexError. | entailment |
def token_request(self, authorization_code):
"""
Makes a token request. If the 'token_endpoint' is not configured in the provider metadata, no request will
be made.
Args:
authorization_code (str): authorization code issued to client after user authorization
Returns:
Union[AccessTokenResponse, TokenErrorResponse, None]: The parsed token response, or None if no token
request was performed.
"""
if not self._client.token_endpoint:
return None
request = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': self._redirect_uri
}
logger.debug('making token request: %s', request)
client_auth_method = self._client.registration_response.get('token_endpoint_auth_method', 'client_secret_basic')
auth_header = _ClientAuthentication(self._client.client_id, self._client.client_secret)(client_auth_method,
request)
resp = self._provider_configuration.requests_session \
.post(self._client.token_endpoint,
data=request,
headers=auth_header) \
.json()
logger.debug('received token response: %s', json.dumps(resp))
if 'error' in resp:
token_resp = TokenErrorResponse(**resp)
else:
token_resp = AccessTokenResponse(**resp)
token_resp.verify(keyjar=self._client.keyjar)
if 'id_token' in resp:
token_resp['id_token_jwt'] = resp['id_token']
return token_resp | Makes a token request. If the 'token_endpoint' is not configured in the provider metadata, no request will
be made.
Args:
authorization_code (str): authorization code issued to client after user authorization
Returns:
Union[AccessTokenResponse, TokenErrorResponse, None]: The parsed token response, or None if no token
request was performed. | entailment |
def userinfo_request(self, access_token):
"""
Args:
access_token (str): Bearer access token to use when fetching userinfo
Returns:
oic.oic.message.OpenIDSchema: UserInfo Response
"""
http_method = self._provider_configuration.userinfo_endpoint_method
if http_method is None or not self._client.userinfo_endpoint:
return None
logger.debug('making userinfo request')
userinfo_response = self._client.do_user_info_request(method=http_method, token=access_token)
logger.debug('received userinfo response: %s', userinfo_response.to_json())
return userinfo_response | Args:
access_token (str): Bearer access token to use when fetching userinfo
Returns:
oic.oic.message.OpenIDSchema: UserInfo Response | entailment |
def update(self, access_token=None, id_token=None, id_token_jwt=None, userinfo=None):
"""
Args:
access_token (str)
id_token (Mapping[str, str])
id_token_jwt (str)
userinfo (Mapping[str, str])
"""
def set_if_defined(session_key, value):
if value:
self._session_storage[session_key] = value
auth_time = int(time.time())
if id_token:
auth_time = id_token.get('auth_time', auth_time)
self._session_storage['last_authenticated'] = auth_time
set_if_defined('access_token', access_token)
set_if_defined('id_token', id_token)
set_if_defined('id_token_jwt', id_token_jwt)
set_if_defined('userinfo', userinfo) | Args:
access_token (str)
id_token (Mapping[str, str])
id_token_jwt (str)
userinfo (Mapping[str, str]) | entailment |
def log_background_messages(self, name=None):
"""Forwards messages logged on background to Robot Framework log.
By default forwards all messages logged by all threads, but can be
limited to a certain thread by passing thread's name as an argument.
Logged messages are removed from the message storage.
"""
with self.lock:
if name:
self._log_messages_by_thread(name)
else:
self._log_all_messages() | Forwards messages logged on background to Robot Framework log.
By default forwards all messages logged by all threads, but can be
limited to a certain thread by passing thread's name as an argument.
Logged messages are removed from the message storage. | entailment |
def decorate(func, caller):
"""
decorate(func, caller) decorates a function using a caller.
"""
evaldict = func.__globals__.copy()
evaldict['_call_'] = caller
evaldict['_func_'] = func
fun = FunctionMaker.create(
func, "return _call_(_func_, %(shortsignature)s)",
evaldict, __wrapped__=func)
if hasattr(func, '__qualname__'):
fun.__qualname__ = func.__qualname__
return fun | decorate(func, caller) decorates a function using a caller. | entailment |
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
if inspect.isclass(caller):
name = caller.__name__.lower()
callerfunc = get_init(caller)
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
fun = getfullargspec(callerfunc).args[1] # second arg
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
callerfunc = caller
doc = caller.__doc__
fun = getfullargspec(callerfunc).args[0] # first arg
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
callerfunc = caller.__call__.__func__
doc = caller.__call__.__doc__
fun = getfullargspec(callerfunc).args[1] # second arg
evaldict = callerfunc.__globals__.copy()
evaldict['_call_'] = caller
evaldict['_decorate_'] = decorate
return FunctionMaker.create(
'%s(%s)' % (name, fun),
'return _decorate_(%s, _call_)' % fun,
evaldict, call=caller, doc=doc, module=caller.__module__,
__wrapped__=caller) | decorator(caller) converts a caller function into a decorator | entailment |
def set_client_handler(self, handler_func, name=None, header_filter=None, interval=0.5):
"""Sets an automatic handler for the type of message template currently loaded.
This feature allows users to set a python handler function which is called
automatically by the Rammbock message queue when message matches the expected
template. The optional name argument defines the client node to which the
handler will be bound. Otherwise the default client will be used.
The header_filter defines which header field will be used to identify the
message defined in template. (Otherwise all incoming messages will match!)
The interval defines the interval in seconds on which the handler will
be called on background. By default the incoming messages are checked
every 0.5 seconds.
The handler function will be called with two arguments: the rammbock library
instance and the received message.
Example:
| Load template | SomeMessage |
| Set client handler | my_module.respond_to_sample |
my_module.py:
| def respond_to_sample(rammbock, msg):
| rammbock.save_template("__backup_template", unlocked=True)
| try:
| rammbock.load_template("sample response")
| rammbock.client_sends_message()
| finally:
| rammbock.load_template("__backup_template")
"""
msg_template = self._get_message_template()
client, client_name = self._clients.get_with_name(name)
client.set_handler(msg_template, handler_func, header_filter=header_filter, interval=interval) | Sets an automatic handler for the type of message template currently loaded.
This feature allows users to set a python handler function which is called
automatically by the Rammbock message queue when message matches the expected
template. The optional name argument defines the client node to which the
handler will be bound. Otherwise the default client will be used.
The header_filter defines which header field will be used to identify the
message defined in template. (Otherwise all incoming messages will match!)
The interval defines the interval in seconds on which the handler will
be called on background. By default the incoming messages are checked
every 0.5 seconds.
The handler function will be called with two arguments: the rammbock library
instance and the received message.
Example:
| Load template | SomeMessage |
| Set client handler | my_module.respond_to_sample |
my_module.py:
| def respond_to_sample(rammbock, msg):
| rammbock.save_template("__backup_template", unlocked=True)
| try:
| rammbock.load_template("sample response")
| rammbock.client_sends_message()
| finally:
| rammbock.load_template("__backup_template") | entailment |
def set_server_handler(self, handler_func, name=None, header_filter=None, alias=None, interval=0.5):
"""Sets an automatic handler for the type of message template currently loaded.
This feature allows users to set a python handler function which is called
automatically by the Rammbock message queue when message matches the expected
template. The optional name argument defines the server node to which the
handler will be bound. Otherwise the default server will be used.
The header_filter defines which header field will be used to identify the
message defined in template. (Otherwise all incoming messages will match!)
The interval defines the interval in seconds on which the handler will
be called on background. By default the incoming messages are checked
every 0.5 seconds.
The alias is the alias for the connection. By default the current active
connection will be used.
The handler function will be called with two arguments: the rammbock library
instance and the received message.
Example:
| Load template | SomeMessage |
| Set server handler | my_module.respond_to_sample | messageType |
my_module.py:
| def respond_to_sample(rammbock, msg):
| rammbock.save_template("__backup_template", unlocked=True)
| try:
| rammbock.load_template("sample response")
| rammbock.server_sends_message()
| finally:
| rammbock.load_template("__backup_template")
"""
msg_template = self._get_message_template()
server, server_name = self._servers.get_with_name(name)
server.set_handler(msg_template, handler_func, header_filter=header_filter, alias=alias, interval=interval) | Sets an automatic handler for the type of message template currently loaded.
This feature allows users to set a python handler function which is called
automatically by the Rammbock message queue when message matches the expected
template. The optional name argument defines the server node to which the
handler will be bound. Otherwise the default server will be used.
The header_filter defines which header field will be used to identify the
message defined in template. (Otherwise all incoming messages will match!)
The interval defines the interval in seconds on which the handler will
be called on background. By default the incoming messages are checked
every 0.5 seconds.
The alias is the alias for the connection. By default the current active
connection will be used.
The handler function will be called with two arguments: the rammbock library
instance and the received message.
Example:
| Load template | SomeMessage |
| Set server handler | my_module.respond_to_sample | messageType |
my_module.py:
| def respond_to_sample(rammbock, msg):
| rammbock.save_template("__backup_template", unlocked=True)
| try:
| rammbock.load_template("sample response")
| rammbock.server_sends_message()
| finally:
| rammbock.load_template("__backup_template") | entailment |
def reset_rammbock(self):
"""Closes all connections, deletes all servers, clients, and protocols.
You should call this method before exiting your test run. This will
close all the connections and the ports will therefore be available for
reuse faster.
"""
for client in self._clients:
client.close()
for server in self._servers:
server.close()
self._init_caches() | Closes all connections, deletes all servers, clients, and protocols.
You should call this method before exiting your test run. This will
close all the connections and the ports will therefore be available for
reuse faster. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.