code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def write_result(self, result):
    """Send back the result of this call.

    Only one of this and `write_exc_info` may be called.

    :param result: Return value of the call
    """
    assert not self.finished, "Already sent a response"

    thrift_spec = self.result.thrift_spec
    # A void method with no declared exceptions has an empty spec; there
    # is nothing to attach to the result struct in that case.
    if not thrift_spec:
        self.finished = True
        return

    success_spec = thrift_spec[0]
    if result is not None:
        assert success_spec, "Tried to return a result for a void method."
        setattr(self.result, success_spec[2], result)

    self.finished = True
5.088506
5.282973
0.96319
exc_info = exc_info or sys.exc_info() exc = exc_info[1] self.code = StatusCode.error for spec in self.result.thrift_spec[1:]: if spec and isinstance(exc, spec[3][0]): assert not self.finished, "Already sent a response" setattr(self.result, spec[2], exc) self.finished = True return # Re-raise the exception (with the same traceback) if it didn't match. raise exc_info[0], exc_info[1], exc_info[2]
def write_exc_info(self, exc_info=None)
Write exception information to the response. Only one of this and ``write_result`` may be called. :param exc_info: 3-tuple of exception information. If omitted, the last exception will be retrieved using ``sys.exc_info()``.
4.99677
4.815445
1.037655
def call(
    self,
    scheme,
    service,
    arg1,
    arg2=None,
    arg3=None,
    timeout=None,
    retry_on=None,
    retry_limit=None,
    routing_delegate=None,
    hostport=None,
    shard_key=None,
    tracing_span=None,
    trace=None,  # to trace or not, defaults to self._dep_tchannel.trace
    caller_name=None,
):
    """Make low-level requests to TChannel services.

    **Note:** Usually you would interact with a higher-level arg scheme
    like :py:class:`tchannel.schemes.JsonArgScheme` or
    :py:class:`tchannel.schemes.ThriftArgScheme`.
    """
    # TODO - don't use asserts for public API
    # BUG FIX: the original asserted the *builtin* ``format`` (always
    # truthy), so the scheme was never actually validated. The parameter
    # is named ``scheme``; assert on it instead.
    assert scheme, "format is required"
    assert service, "service is required"
    assert arg1, "arg1 is required"

    # default args
    if arg2 is None:
        arg2 = ""
    if arg3 is None:
        arg3 = ""
    if timeout is None:
        timeout = DEFAULT_TIMEOUT
    if retry_on is None:
        retry_on = retry.DEFAULT
    if retry_limit is None:
        retry_limit = retry.DEFAULT_RETRY_LIMIT

    # TODO - allow filters/steps for serialization, tracing, etc...
    tracing.apply_trace_flag(tracing_span, trace, self._dep_tchannel.trace)

    # calls tchannel.tornado.peer.PeerClientOperation.__init__
    operation = self._dep_tchannel.request(
        service=service,
        hostport=hostport,
        arg_scheme=scheme,
        retry=retry_on,
        tracing_span=tracing_span,
    )

    # fire operation
    transport_headers = {
        transport.SCHEME: scheme,
        transport.CALLER_NAME: caller_name or self.name,
    }
    if shard_key:
        transport_headers[transport.SHARD_KEY] = shard_key
    if routing_delegate:
        transport_headers[transport.ROUTING_DELEGATE] = routing_delegate

    response = yield operation.send(
        arg1=arg1,
        arg2=arg2,
        arg3=arg3,
        headers=transport_headers,
        retry_limit=retry_limit,
        ttl=timeout,
    )

    # unwrap response
    body = yield response.get_body()
    headers = yield response.get_header()
    t = TransportHeaders.from_dict(response.headers)
    result = Response(
        body=body,
        headers=headers,
        transport=t,
        status=response.code,
    )

    raise gen.Return(result)
3.249347
3.204561
1.013976
def advertise(self, routers=None, name=None, timeout=None,
              router_file=None, jitter=None):
    """Advertise with Hyperbahn.

    After a successful advertisement, Hyperbahn establishes long-lived
    connections with your application and re-advertisement happens
    periodically. This function may be called multiple times if it fails;
    once it succeeds, all consecutive calls are ignored.

    :param list routers:
        A seed list of known Hyperbahn addresses ("host:port" entries).
    :param string name:
        The name your application identifies itself as. Usually unneeded;
        defaults to the ``name`` the ``TChannel`` was initialized with.
    :param timeout:
        Timeout (in sec) for the initial advertise attempt.
    :param jitter:
        Variance allowed in the interval per request.
    :param router_file:
        Host file containing a JSON list equivalent to ``routers``. Only
        one of ``routers``/``router_file`` may be given.
    :returns:
        A future resolving to the remote server's response after the
        first advertise finishes.
    :raises TimeoutError:
        When the first advertise request cannot be made.
    """
    if routers is not None and router_file is not None:
        raise ValueError(
            'Only one of routers and router_file can be provided.')

    if routers is None and router_file is not None:
        # should just let the exceptions fly
        try:
            with open(router_file, 'r') as json_data:
                routers = json.load(json_data)
        except (IOError, OSError, ValueError):
            log.exception('Failed to read seed routers list.')
            raise

    @gen.coroutine
    def _advertise():
        result = yield self._dep_tchannel.advertise(
            routers=routers,
            name=name,
            timeout=timeout,
        )
        body = yield result.get_body()
        headers = yield result.get_header()
        response = Response(json.loads(body), headers or {})
        raise gen.Return(response)

    def _on_advertise(done_future):
        if not done_future.exception():
            return
        # If the request failed, clear the response so that we can try
        # again.
        with self._advertise_lock:
            # `is` comparison to ensure we're not deleting another Future.
            if self._advertise_response is done_future:
                self._advertise_response = None

    with self._advertise_lock:
        if self._advertise_response is not None:
            return self._advertise_response
        future = self._advertise_response = _advertise()

    # We call add_done_callback here rather than when we call _advertise()
    # because if the future has already resolved by the time we call
    # add_done_callback, the callback will immediately be executed. The
    # callback will try to acquire the advertise_lock which we already
    # hold and end up in a deadlock.
    future.add_done_callback(_on_advertise)
    return future
3.686967
3.564136
1.034463
def thrift_request_builder(service, thrift_module, hostport=None,
                           thrift_class_name=None):
    """Provide TChannel compatibility with Thrift-generated modules.

    .. deprecated:: 0.18.0
        Please switch to :py:func:`tchannel.thrift.load`.

    :param string service:
        Name of Thrift service to call. Used for grouping/stats and for
        routing over Hyperbahn.
    :param thrift_module:
        Top-level module of the Apache Thrift generated code for the
        service being called.
    :param string hostport:
        'host:port' to use when calling the service directly rather than
        over Hyperbahn.
    :param string thrift_class_name:
        Needed when the generated Iface class name does not match
        ``thrift_module``.
    """
    # start with a request maker instance
    maker = ThriftRequestMaker(
        service=service,
        thrift_module=thrift_module,
        hostport=hostport,
        thrift_class_name=thrift_class_name,
    )

    # create methods that mirror thrift client
    # and each return ThriftRequest
    generated = _create_methods(thrift_module)

    # then attach to instane
    for method_name, fn in generated.iteritems():
        bound = types.MethodType(fn, maker, ThriftRequestMaker)
        setattr(maker, method_name, bound)

    return maker
4.602243
5.652408
0.814209
def read_body(self, body):
    """Handle the response body for this request.

    If the response body includes a result, returns the result unwrapped
    from the response union. If the response contains an exception,
    raises that exception.

    :param body: Deserialized Thrift result struct.
    :returns: The unwrapped return value, or None for void procedures.
    :raises ValueExpectedError:
        If a value was expected but the response carried None.
    """
    result_spec = self.result_type.thrift_spec

    # raise application exception, if present
    for exc_spec in result_spec[1:]:
        exc = getattr(body, exc_spec[2])
        if exc is not None:
            raise exc

    # success - non-void
    if len(result_spec) >= 1 and result_spec[0] is not None:
        # value expected, but got none
        # TODO - server side should use this same logic
        if body.success is None:
            # Fixed typo in user-facing message: "recieved" -> "received".
            raise ValueExpectedError(
                'Expected a value to be returned for %s, '
                'but received None - only void procedures can '
                'return None.' % self.endpoint
            )
        return body.success
    # success - void
    else:
        return None
6.817275
6.464682
1.054541
def get_rank(self, peer):
    """Calculate the peer rank based on connections.

    Peers with no connections get the top tier (largest rank -- least
    priority in the selection heap). Otherwise rank is the tier base plus
    the number of outbound pending requests/responses, with incoming
    connections ranking better than none.

    :param peer: instance of `tchannel.tornado.peer.Peer`
    :return: rank of the peer
    """
    if not peer.connections:
        return self.TIERS[0]

    pending = peer.total_outbound_pendings
    if peer.has_incoming_connections:
        return self.TIERS[2] + pending
    return self.TIERS[1] + pending
5.221223
4.169409
1.252269
def prepare(cls, *args, **kwargs):
    """Record arguments for later TChannel instantiation.

    Arguments are the same as :py:meth:`tchannel.TChannel.__init__`.
    """
    cls.args, cls.kwargs = args, kwargs
    cls.prepared = True
5.483458
6.872408
0.797895
def reset(cls, *args, **kwargs):
    """Undo call to prepare, useful for testing."""
    cls.local.tchannel = None
    cls.args = cls.kwargs = None
    cls.prepared = False
12.963972
10.212646
1.269404
def get_instance(cls):
    """Get a configured, thread-safe, singleton TChannel instance.

    :returns tchannel.TChannel:
    """
    if not cls.prepared:
        raise SingletonNotPreparedError(
            "prepare must be called before get_instance"
        )

    # One instance per thread: ``local`` is thread-local storage.
    existing = getattr(cls.local, 'tchannel', None)
    if existing is not None:
        return existing

    cls.local.tchannel = cls.tchannel_cls(*cls.args, **cls.kwargs)
    return cls.local.tchannel
4.049977
3.53142
1.146841
def handle_pre_call(self, message, connection):
    """Handle an incoming CallRequestMessage / CallRequestContinueMessage.

    All messages are fed to the connection's message factory; it returns
    a request object only once a CallRequestMessage with a complete arg1
    has been seen (None for continuation fragments). A completed request
    is dispatched to the async ``handle_call``.

    :param message: CallRequestMessage or CallRequestContinueMessage
    :param connection: tornado connection
    """
    req = None
    try:
        req = connection.request_message_factory.build(message)
        # message_factory will create Request only when it receives
        # CallRequestMessage. It will return None, if it receives
        # CallRequestContinueMessage.
        if req:
            self.handle_call(req, connection)
    except TChannelError as e:
        log.warn('Received a bad request.', exc_info=True)
        # Attach tracing info when the request got far enough to have it.
        if req:
            e.tracing = req.tracing
        connection.send_error(e)
6.815592
5.161594
1.320443
def register(self, rule, handler, req_serializer=None,
             resp_serializer=None):
    """Register a new endpoint with the given name.

    :param rule:
        Name of the endpoint; incoming Call Requests with this ``arg1``
        dispatch to the handler. ``RequestHandler.FALLBACK`` registers
        the fallback handler.
    :param handler:
        Callable invoked with ``Request`` and ``Response``.
    :param req_serializer:
        Arg scheme serializer for requests (Raw/Json/ThriftSerializer).
    :param resp_serializer:
        Arg scheme serializer for responses (Raw/Json/ThriftSerializer).
    """
    assert handler, "handler must not be None"
    self.handlers[rule] = Handler(
        handler,
        req_serializer or RawSerializer(),
        resp_serializer or RawSerializer(),
    )
2.950988
2.924799
1.008954
def random_tracing():
    """Create new Tracing() tuple with random IDs."""
    fresh_id = _uniq_id()
    # A root span: trace_id == span_id, no parent, no flags.
    return Tracing(
        span_id=fresh_id,
        parent_id=0,
        trace_id=fresh_id,
        traceflags=0,
    )
6.113476
4.315726
1.416558
def generate_checksum(message, previous_csum=0):
    """Generate checksum for CALL_REQ/CALL_REQ_CONTINUE/CALL_RES/
    CALL_RES_CONTINUE messages; other message types are left untouched.

    :param message: outgoing message
    :param previous_csum: accumulated checksum value
    """
    if message.message_type not in CHECKSUM_MSG_TYPES:
        return
    algorithm = message.checksum[0]
    message.checksum = (
        algorithm,
        compute_checksum(algorithm, message.args, previous_csum),
    )
5.029251
5.162436
0.974201
def verify_checksum(message, previous_csum=0):
    """Verify checksum for an incoming message.

    :param message: incoming message
    :param previous_csum: accumulated checksum value
    :return:
        True if the message type carries no checksum or the checksum is
        correct; False otherwise.
    """
    # Messages without a checksum field always verify.
    if message.message_type not in CHECKSUM_MSG_TYPES:
        return True

    csum = compute_checksum(
        message.checksum[0],
        message.args,
        previous_csum,
    )
    # Simplified from an if/else that returned True/False explicitly.
    return csum == message.checksum[1]
3.741932
3.875784
0.965465
def advertise(tchannel, service, routers=None, timeout=None,
              router_file=None, jitter=None):
    """Advertise with Hyperbahn.

    See :py:class:`tchannel.TChannel.advertise`.
    """
    timeout = timeout or FIRST_ADVERTISE_TIME

    if routers is not None and router_file is not None:
        raise ValueError(
            'Only one of routers and router_file can be provided.')

    if routers is None and router_file is not None:
        # should just let the exceptions fly
        try:
            with open(router_file, 'r') as json_data:
                routers = json.load(json_data)
        except (IOError, OSError, ValueError):
            log.exception('Failed to read seed routers list.')
            raise

    for router in routers:
        # We use .get here instead of .add because we don't want to fail
        # if a TChannel already knows about some of the routers.
        tchannel.peers.get(router)

    advertiser = Advertiser(
        service,
        tchannel,
        ttl_secs=timeout,
        interval_max_jitter_secs=jitter,
    )
    return advertiser.start()
4.50452
4.88019
0.923021
def start(self):
    """Start the advertise loop.

    Returns the result of the first ad request.
    """
    if self.running:
        raise Exception('Advertiser is already running')
    if self.io_loop is None:
        self.io_loop = tornado.ioloop.IOLoop.current()
    self.running = True

    first_response = tornado.gen.Future()
    # Kick off the first ad immediately (zero delay).
    self._schedule_ad(0, first_response)
    return first_response
4.05342
3.309902
1.224634
def _schedule_ad(self, delay=None, response_future=None):
    """Schedule an ``ad`` request.

    :param delay:
        Seconds to wait before making the ``ad`` request. Defaults to
        ``self.interval_secs``; a jitter of up to
        ``self.interval_max_jitter_secs`` is always added.
    :param response_future:
        If non-None, the result of the advertise request is filled into
        this future.
    """
    if not self.running:
        return

    wait = self.interval_secs if delay is None else delay
    wait += random.uniform(0, self.interval_max_jitter_secs)

    self._next_ad = self.io_loop.call_later(wait, self._ad, response_future)
4.023077
3.258512
1.234636
def add(self, id, ttl_secs):
    """Add a request known to have timed out to the Cemetery.

    The request is forgotten after ``ttl_secs + ttl_offset_secs``
    seconds (capped at ``max_ttl_secs``).

    :param id: ID of the request
    :param ttl_secs: TTL of the request (in seconds)
    """
    expiry = min(ttl_secs + self.ttl_offset_secs, self.max_ttl_secs)
    self._tombstones[id] = IOLoop.current().call_later(
        expiry,
        self.forget,
        id,
    )
4.421324
4.178036
1.05823
def clear(self):
    """Forget about all requests."""
    loop = IOLoop.current()
    # Drain the tombstone map, cancelling each pending forget-timeout.
    while self._tombstones:
        _, timeout_handle = self._tombstones.popitem()
        loop.remove_timeout(timeout_handle)
5.472598
4.480107
1.221533
def get_header(self):
    """Get the header value from the response.

    :return: a future contains the deserialized value of header
    """
    raw_header = yield get_arg(self, 1)
    if self.serializer:
        raise tornado.gen.Return(
            self.serializer.deserialize_header(raw_header))
    raise tornado.gen.Return(raw_header)
3.880868
4.123584
0.94114
def get_body(self):
    """Get the body value from the response.

    :return: a future contains the deserialized value of body
    """
    raw_body = yield get_arg(self, 2)
    if self.serializer:
        raise tornado.gen.Return(
            self.serializer.deserialize_body(raw_body))
    raise tornado.gen.Return(raw_body)
4.040161
4.421676
0.913717
def set_body_s(self, stream):
    """Set customized body stream.

    Note: the body stream can only be changed before the stream is
    consumed.

    :param stream: InMemStream/PipeStream for body
    :except TChannelError:
        Raised if the stream is already being sent when you try to
        change it.
    """
    if self.argstreams[2].state != StreamState.init:
        raise TChannelError(
            "Unable to change the body since the streaming has started")
    self.argstreams[2] = stream
12.116536
9.171527
1.321103
def set_header_s(self, stream):
    """Set customized header stream.

    Note: the header stream can only be changed before the stream is
    consumed.

    :param stream: InMemStream/PipeStream for header
    :except TChannelError:
        Raised if the stream is already being sent when you try to
        change it.
    """
    if self.argstreams[1].state != StreamState.init:
        raise TChannelError(
            "Unable to change the header since the streaming has started")
    self.argstreams[1] = stream
13.692251
8.385957
1.63276
def write_header(self, chunk):
    """Write to header.

    Note: the header stream is only available to write before write
    body.

    :param chunk: content to write to header
    :except TChannelError:
        Raised if the response's flush() has been called
    """
    header = (
        self.serializer.serialize_header(chunk)
        if self.serializer else chunk
    )

    if self.flushed:
        raise TChannelError("write operation invalid after flush call")

    # Writing a header implicitly finishes arg1 (the endpoint stream).
    arg1 = self.argstreams[0]
    if arg1.state != StreamState.completed and arg1.auto_close:
        arg1.close()

    return self.argstreams[1].write(header)
5.488922
4.74159
1.157612
def write_body(self, chunk):
    """Write to body.

    Note: whenever write_body is called, the header stream will be
    closed and write_header becomes unavailable.

    (Docstring fixed: it previously said "Write to header".)

    :param chunk: content to write to body
    :except TChannelError:
        Raised if the response's flush() has been called
    """
    if self.serializer:
        body = self.serializer.serialize_body(chunk)
    else:
        body = chunk

    if self.flushed:
        raise TChannelError("write operation invalid after flush call")

    # Writing the body implicitly finishes both arg1 and the header.
    if (self.argstreams[0].state != StreamState.completed and
            self.argstreams[0].auto_close):
        self.argstreams[0].close()
    if (self.argstreams[1].state != StreamState.completed and
            self.argstreams[1].auto_close):
        self.argstreams[1].close()

    return self.argstreams[2].write(body)
3.304162
3.116827
1.060104
def read_message(stream):
    """Read a single framed message from the given IOStream.

    :param IOStream stream: IOStream to read from.
    :returns: A future resolving to the parsed message.
    """
    answer = tornado.gen.Future()
    io_loop = IOLoop.current()

    def on_error(future):
        log.info('Failed to read data: %s', future.exception())
        return answer.set_exc_info(future.exc_info())

    @fail_to(answer)
    def on_body(size, future):
        if future.exception():
            return on_error(future)

        body = future.result()
        parsed_frame = frame.frame_rw.read(BytesIO(body), size=size)

        message_type = parsed_frame.header.message_type
        message_rw = messages.RW.get(message_type)
        if not message_rw:
            exc = errors.FatalProtocolError(
                'Unknown message type %s', str(message_type)
            )
            return answer.set_exception(exc)

        message = message_rw.read(BytesIO(parsed_frame.payload))
        message.id = parsed_frame.header.message_id
        answer.set_result(message)

    @fail_to(answer)
    def on_read_size(future):
        if future.exception():
            return answer.set_exc_info(future.exc_info())
        size_bytes = future.result()
        size = frame.frame_rw.size_rw.read(BytesIO(size_bytes))
        # The size prefix counts itself; read the remainder of the frame.
        io_loop.add_future(
            stream.read_bytes(size - FRAME_SIZE_WIDTH),
            lambda f: on_body(size, f)
        )

    try:
        # read_bytes may fail if the stream has already been closed
        read_size_future = stream.read_bytes(FRAME_SIZE_WIDTH)
    except Exception:
        answer.set_exc_info(sys.exc_info())
    else:
        read_size_future.add_done_callback(on_read_size)

    return answer
2.933449
2.953198
0.993313
def set_close_callback(self, cb):
    """Specify a function to be called when this connection is closed.

    :param cb:
        A callable that takes no arguments. It will be invoked when this
        connection is closed.
    """
    assert self._close_cb is None, (
        'A close_callback has already been set for this connection.'
    )
    self._close_cb = stack_context.wrap(cb)
    # Fire immediately if the connection already closed before the
    # callback was registered.
    if self.closed:
        self._close_cb()
4.284104
4.254507
1.006957
def send(self, message):
    """Send the given message up the wire.

    Use this for messages which have a response message.

    :param message: Message to send
    :returns: A Future containing the response for the message
    """
    assert self._handshake_performed, "Perform a handshake first."
    assert message.message_type in self.CALL_REQ_TYPES, (
        "Message '%s' can't use send" % repr(message)
    )

    message.id = message.id or self.writer.next_message_id()
    assert message.id not in self._outbound_pending_call, (
        "Message ID '%d' already being used" % message.id
    )

    response_future = tornado.gen.Future()
    # The reader loop resolves this future when the matching response
    # frame arrives.
    self._outbound_pending_call[message.id] = response_future
    self.write(message)
    return response_future
4.953477
4.845301
1.022326
def write(self, message):
    """Write the given message up the wire.

    Does not expect a response back for the message.

    :param message: Message to write.
    """
    message.id = message.id or self.writer.next_message_id()

    # Requests and responses fragment through different factories.
    if message.message_type in self.CALL_REQ_TYPES:
        factory = self.request_message_factory
    else:
        factory = self.response_message_factory

    return self._write_fragments(factory.fragment(message))
4.619542
5.26239
0.877841
def _write_fragments(self, fragments):
    """Write a stream of fragments to the wire, one at a time.

    :param fragments: A generator of messages
    :returns: A future that resolves (to None) once all fragments are
        written, or fails with the first write error.
    """
    answer = tornado.gen.Future()
    if not fragments:
        answer.set_result(None)
        return answer

    io_loop = IOLoop.current()

    def _write_fragment(future):
        # ``future`` is None on the first (synchronous) invocation.
        if future and future.exception():
            return answer.set_exc_info(future.exc_info())
        try:
            fragment = fragments.next()
        except StopIteration:
            return answer.set_result(None)
        io_loop.add_future(self.writer.put(fragment), _write_fragment)

    _write_fragment(None)
    return answer
2.822477
2.687369
1.050275
def initiate_handshake(self, headers, timeout=None):
    """Initiate a handshake with the remote host.

    :param headers: A dictionary of headers to send.
    :returns:
        A future that resolves (with a value of None) when the handshake
        is complete.
    """
    io_loop = IOLoop.current()
    timeout = timeout or DEFAULT_INIT_TIMEOUT_SECS

    self.writer.put(messages.InitRequestMessage(
        version=PROTOCOL_VERSION,
        headers=headers
    ))
    init_res_future = self.reader.get()

    # Fail the handshake if no INIT_RES shows up within the timeout...
    timeout_handle = io_loop.call_later(timeout, (
        lambda: init_res_future.set_exception(errors.TimeoutError(
            'Handshake with %s:%d timed out. Did not receive an INIT_RES '
            'after %s seconds' % (
                self.remote_host, self.remote_host_port, str(timeout)
            )
        ))
    ))
    # ...and cancel the timer as soon as any response arrives.
    io_loop.add_future(
        init_res_future,
        (lambda _: io_loop.remove_timeout(timeout_handle)),
    )

    init_res = yield init_res_future
    if init_res.message_type != Types.INIT_RES:
        raise errors.UnexpectedError(
            "Expected handshake response, got %s" % repr(init_res)
        )
    self._extract_handshake_headers(init_res)
    self._handshake_performed = True

    # The receive loop is started only after the handshake has been
    # completed.
    self._loop()

    raise tornado.gen.Return(init_res)
3.745702
3.889699
0.96298
def expect_handshake(self, headers):
    """Expect a handshake from the remote host.

    :param headers: Headers to respond with
    :returns:
        A future that resolves (with a value of None) when the handshake
        is complete.
    """
    init_req = yield self.reader.get()
    if init_req.message_type != Types.INIT_REQ:
        raise errors.UnexpectedError(
            "You need to shake my hand first. Got %s" % repr(init_req)
        )

    self._extract_handshake_headers(init_req)
    self._handshake_performed = True

    self.writer.put(
        messages.InitResponseMessage(
            PROTOCOL_VERSION, headers, init_req.id),
    )

    # The receive loop is started only after the handshake has been
    # completed.
    self._loop()

    raise tornado.gen.Return(init_req)
7.35553
7.463826
0.98549
def outgoing(cls, hostport, process_name=None, serve_hostport=None,
             handler=None, tchannel=None):
    """Initiate a new connection to the given host.

    :param hostport:
        String in the form ``$host:$port`` specifying the target host
    :param process_name:
        Process name of the entity making the connection.
    :param serve_hostport:
        String in the form ``$host:$port`` at which the caller can be
        reached. Defaults to ``0.0.0.0:0``.
    :param handler:
        If given, calls received from this connection are sent to this
        RequestHandler.
    """
    host, port = hostport.rsplit(":", 1)
    process_name = process_name or "%s[%s]" % (sys.argv[0], os.getpid())
    serve_hostport = serve_hostport or "0.0.0.0:0"

    # TODO: change this to tornado.tcpclient.TCPClient to do async DNS
    # lookups.
    stream = tornado.iostream.IOStream(
        socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    )

    log.debug("Connecting to %s", hostport)
    try:
        yield stream.connect((host, int(port)))

        connection = cls(stream, tchannel, direction=OUTGOING)

        log.debug("Performing handshake with %s", hostport)
        yield connection.initiate_handshake(headers={
            'host_port': serve_hostport,
            'process_name': process_name,
            'tchannel_language': TCHANNEL_LANGUAGE,
            'tchannel_language_version': TCHANNEL_LANGUAGE_VERSION,
            'tchannel_version': TCHANNEL_VERSION,
        })
    except (StreamClosedError, socket.error, errors.TimeoutError) as e:
        log.warn("Couldn't connect to %s", hostport)
        raise NetworkError(
            "Couldn't connect to %s" % hostport, e
        )

    if handler:
        connection.serve(handler)

    raise tornado.gen.Return(connection)
2.968951
3.00217
0.988935
assert handler, "handler is required" while not self.closed: message = yield self.await() try: handler(message, self) except Exception: # TODO Send error frame back log.exception("Failed to process %s", repr(message))
def serve(self, handler)
Serve calls over this connection using the given RequestHandler. :param handler: RequestHandler to process the requests through :return: A Future that resolves (to None) once the loop is done running -- which happens once this connection is closed.
8.062232
8.161839
0.987796
def send_error(self, error):
    """Write an Error frame up the wire.

    :param error:
        TChannel Error. :py:class`tchannel.errors.TChannelError`.
    :returns:
        A future that resolves when the write finishes.
    """
    write_future = self.writer.put(build_raw_error_message(error))
    # Emit the after_send_error event once the frame is on the wire.
    write_future.add_done_callback(
        lambda f: IOLoop.current().add_callback(
            self.tchannel.event_emitter.fire(
                EventType.after_send_error,
                error,
            )
        )
    )
    return write_future
5.407206
4.116731
1.313471
def _stream(self, context, message_factory):
    """Write a request/response out as protocol-level frames.

    Chunks read from each arg stream are accumulated into ``args``; a
    message is emitted whenever a stream yields a further chunk, so
    completed args coalesce into fewer, larger messages. The last message
    is flagged completed. (Messages built here are not guaranteed to be
    under 64KB; fragmentation happens downstream.)

    Possible sequence, taking a request as an example::

        CallRequestMessage(flags=fragment)
        --> CallRequestContinueMessage(flags=fragment) ....
        --> CallRequestMessage(flags=none)

    :param context: Request or Response object
    """
    args = []
    try:
        for argstream in context.argstreams:
            chunk = yield argstream.read()
            args.append(chunk)
            chunk = yield argstream.read()
            while chunk:
                message = (message_factory.
                           build_raw_message(context, args))
                yield self.write(message)
                args = [chunk]
                chunk = yield argstream.read()

        # last piece of request/response.
        message = (message_factory.
                   build_raw_message(context, args, is_completed=True))
        yield self.write(message)
        context.state = StreamState.completed
    # Stop streaming immediately if an exception occurs on the handler
    # side.
    except TChannelError:
        # raise by tchannel intentionally
        log.info("Stopped outgoing streams because of an error",
                 exc_info=sys.exc_info())
6.110307
5.279671
1.157327
def stream_request(self, request, out_future):
    """Send the given request; a response is not required.

    :param request: request to stream out
    :param out_future: future to fail if streaming fails
    """
    request.close_argstreams()

    def on_done(future):
        # Propagate stream failures to the caller's future, then force
        # the arg streams shut.
        if future.exception() and out_future.running():
            out_future.set_exc_info(future.exc_info())
        request.close_argstreams(force=True)

    stream_future = self._stream(request, self.request_message_factory)
    stream_future.add_done_callback(on_done)
    return stream_future
4.064369
4.025374
1.009687
def send_request(self, request):
    """Send the given request; a response is required.

    Use this for messages which have a response message.

    :param request: request to send
    :returns: A Future containing the response for the request
    """
    assert self._handshake_performed, "Perform a handshake first."
    assert request.id not in self._outbound_pending_call, (
        "Message ID '%d' already being used" % request.id
    )

    future = tornado.gen.Future()
    self._outbound_pending_call[request.id] = future

    self.add_pending_outbound()
    self.stream_request(request, future).add_done_callback(
        lambda f: self.remove_pending_outbound()
    )

    if request.ttl:
        self._add_timeout(request, future)

    # the actual future that caller will yield
    response_future = tornado.gen.Future()
    # TODO: fire before_receive_response
    IOLoop.current().add_future(
        future,
        lambda f: self.adapt_result(f, request, response_future),
    )
    return response_future
5.588853
5.662563
0.986983
def _add_timeout(self, request, future):
    """Add a timeout for the given request to the given future."""
    io_loop = IOLoop.current()
    timeout_handle = io_loop.call_later(
        request.ttl,
        self._request_timed_out,
        request.id,
        request.service,
        request.ttl,
        future,
    )
    # Cancel the timer as soon as the request resolves either way.
    io_loop.add_future(
        future, lambda f: io_loop.remove_timeout(timeout_handle))
3.05568
3.036397
1.006351
def get(self):
    """Receive the next message off the wire.

    :returns: A Future that resolves to the next message off the wire.
    """
    if not self.filling:
        self.fill()

    answer = tornado.gen.Future()

    def _on_result(future):
        if future.exception():
            return answer.set_exc_info(future.exc_info())
        answer.set_result(future.result())

    def _on_item(future):
        if future.exception():
            return answer.set_exc_info(future.exc_info())
        # The queue holds futures of messages; unwrap one more level.
        future.result().add_done_callback(_on_result)

    self.queue.get().add_done_callback(_on_item)
    return answer
2.582868
2.541084
1.016443
def put(self, message):
    """Enqueue the given message for writing to the wire.

    The message must be small enough to fit in a single frame.
    """
    # ``is False`` deliberately: a non-bool sentinel must not re-drain.
    if self.draining is False:
        self.drain()
    return self._enqueue(message)
11.39971
12.112451
0.941156
def advertise(
    self,
    routers=None,
    name=None,
    timeout=None,
    router_file=None,
    jitter=None,
):
    """Make a service available on the Hyperbahn routing mesh.

    Contacts a Hyperbahn host from a list of known routers; further
    Hyperbahn connections are established once contact is made.

    :param routers:
        A seed list of addresses of Hyperbahn routers, e.g.
        ``["127.0.0.1:23000"]``.
    :param name:
        The identity of this service on the Hyperbahn. Usually
        unnecessary; defaults to the name given when initializing the
        :py:class:`TChannel`.
    :returns:
        A future that resolves to the remote server's response after
        the first advertise finishes. Advertisement then repeats
        periodically.
    """
    name = name or self.name

    # Advertising requires an inbound address to publish.
    if not self.is_listening():
        self.listen()

    return hyperbahn.advertise(
        self, name, routers, timeout, router_file, jitter,
    )
3.946902
4.244056
0.929983
def request(self, hostport=None, service=None, arg_scheme=None,
            retry=None, **kwargs):
    """Initiate a new request through this TChannel.

    :param hostport:
        Host to which the request will be made. If unspecified, a random
        known peer will be picked. Not necessary when using Hyperbahn.
    :param service:
        The name of a service available on Hyperbahn. Defaults to an
        empty string.
    :param arg_scheme:
        Serialization scheme for the request: one of 'raw', 'json', or
        'thrift'. Defaults to 'raw'.
    :param retry:
        One of 'n' (never retry), 'c' (retry on connection errors),
        't' (retry on timeout), 'ct' (both). Defaults to 'c'.
    """
    # TODO disallow certain parameters or don't propagate them backwards.
    # For example, blacklist and rank threshold aren't really
    # user-configurable right now.
    return self.peers.request(
        hostport=hostport,
        service=service,
        arg_scheme=arg_scheme,
        retry=retry,
        **kwargs
    )
9.151646
10.759858
0.850536
if self.is_listening(): raise AlreadyListeningError( "listen has already been called" ) if port: assert not self._port, "Port has already been set." self._port = int(port) assert self._handler, "Call .host with a RequestHandler first" server = TChannelServer(self) bind_sockets_kwargs = { 'port': self._port, # ipv6 causes random address already in use (socket.error w errno # == 98) when getaddrinfo() returns multiple values # @see https://github.com/uber/tchannel-python/issues/256 'family': socket.AF_INET, } if self._reuse_port is True: # allow multiple processes to share the same port, # this is really useful in a world where services launch N # processes per container/os-space, where N is # the amount of cpus for example bind_sockets_kwargs['reuse_port'] = True sockets = bind_sockets(**bind_sockets_kwargs) assert sockets, "No sockets bound for port %d" % self._port # If port was 0, the OS probably assigned something better. self._port = sockets[0].getsockname()[1] server.add_sockets(sockets) # assign server so we don't listen twice self._server = server
def listen(self, port=None)
Start listening for incoming connections. A request handler must have already been specified with ``TChannel.host``. :param port: An explicit port to listen on. This is unnecessary when advertising on Hyperbahn. :returns: Returns immediately. :raises AlreadyListeningError: If listen was already called.
6.613805
5.718504
1.156562
assert scheme in DEFAULT_NAMES, ("Unsupported arg scheme %s" % scheme) if scheme == JSON: req_serializer = JsonSerializer() resp_serializer = JsonSerializer() else: req_serializer = RawSerializer() resp_serializer = RawSerializer() self._handler.register(endpoint, f, req_serializer, resp_serializer) return f
def _register_simple(self, endpoint, scheme, f)
Register a simple endpoint with this TChannel. :param endpoint: Name of the endpoint being registered. :param scheme: Name of the arg scheme under which the endpoint will be registered. :param f: Callable handler for the endpoint.
4.732293
4.451308
1.063124
import tchannel.thrift as thrift # Imported inside the function so that we don't have a hard dependency # on the Thrift library. This function is usable only if the Thrift # library is installed. thrift.register(self._handler, service_module, handler, **kwargs) return handler
def _register_thrift(self, service_module, handler, **kwargs)
Register a Thrift endpoint on this TChannel. :param service_module: Reference to the Thrift-generated module for the service being registered. :param handler: Handler for the endpoint :param method: Name of the Thrift method being registered. If omitted, ``f``'s name is assumed to be the method name. :param service: Name of the Thrift service. By default this is determined automatically from the module name.
7.891277
8.751709
0.901684
assert endpoint is not None, "endpoint is required" if endpoint is TChannel.FALLBACK: decorator = partial(self._handler.register, TChannel.FALLBACK) if handler is not None: return decorator(handler) else: return decorator if not scheme: # scheme defaults to raw, unless the endpoint is a service module. if inspect.ismodule(endpoint): scheme = "thrift" else: scheme = "raw" scheme = scheme.lower() if scheme == 'thrift': decorator = partial(self._register_thrift, endpoint, **kwargs) else: decorator = partial( self._register_simple, endpoint, scheme, **kwargs ) if handler is not None: return decorator(handler) else: return decorator
def register(self, endpoint, scheme=None, handler=None, **kwargs)
Register a handler with this TChannel. This may be used as a decorator: .. code-block:: python app = TChannel(name='bar') @app.register("hello", "json") def hello_handler(request, response): params = yield request.get_body() Or as a function: .. code-block:: python # Here we have a Thrift handler for `Foo::hello` app.register(Foo, "hello", hello_thrift_handler) :param endpoint: Name of the endpoint being registered. This should be a reference to the Thrift-generated module if this is a Thrift endpoint. It may also be ``TChannel.FALLBACK`` if it's intended to be a catch-all endpoint. :param scheme: Name of the scheme under which the endpoint is being registered. One of "raw", "json", and "thrift". Defaults to "raw", except if "endpoint" was a module, in which case this defaults to "thrift". :param handler: If specified, this is the handler function. If ignored, this function returns a decorator that can be used to register the handler function. :returns: If ``handler`` was specified, this returns ``handler``. Otherwise, it returns a decorator that can be applied to a function to register it as the handler.
3.592973
2.970205
1.209672
if self.peers[i].rank == self.peers[j].rank: return self.peers[i].order < self.peers[j].order return self.peers[i].rank < self.peers[j].rank
def lt(self, i, j)
Compare the priority of two peers. Primary comparator will be the rank of each peer. If the ``rank`` is same then compare the ``order``. The ``order`` attribute of the peer tracks the heap push order of the peer. This help solve the imbalance problem caused by randomization when deal with same rank situation. :param i: ith peer :param j: jth peer :return: True or False
2.633682
2.003216
1.314727
self.order += 1 peer.order = self.order + random.randint(0, self.size()) heap.push(self, peer)
def push_peer(self, peer)
Push a new peer into the heap
9.057784
6.190287
1.463225
self.push_peer(peer) r = random.randint(0, self.size() - 1) self.swap_order(peer.index, r)
def add_and_shuffle(self, peer)
Push a new peer into the heap and shuffle the heap
5.580418
4.433446
1.258709
if peer.index < 0 or peer.index >= self.size(): raise IndexError('Peer index is out of range') assert peer is self.peers[peer.index], "peer is not in the heap" return heap.remove(self, peer.index)
def remove_peer(self, peer)
Remove the peer from the heap. Return: removed peer if peer exists. If peer's index is out of range, raise IndexError.
5.266049
3.782092
1.392364
if not self._client.token_endpoint: return None request = { 'grant_type': 'authorization_code', 'code': authorization_code, 'redirect_uri': self._redirect_uri } logger.debug('making token request: %s', request) client_auth_method = self._client.registration_response.get('token_endpoint_auth_method', 'client_secret_basic') auth_header = _ClientAuthentication(self._client.client_id, self._client.client_secret)(client_auth_method, request) resp = self._provider_configuration.requests_session \ .post(self._client.token_endpoint, data=request, headers=auth_header) \ .json() logger.debug('received token response: %s', json.dumps(resp)) if 'error' in resp: token_resp = TokenErrorResponse(**resp) else: token_resp = AccessTokenResponse(**resp) token_resp.verify(keyjar=self._client.keyjar) if 'id_token' in resp: token_resp['id_token_jwt'] = resp['id_token'] return token_resp
def token_request(self, authorization_code)
Makes a token request. If the 'token_endpoint' is not configured in the provider metadata, no request will be made. Args: authorization_code (str): authorization code issued to client after user authorization Returns: Union[AccessTokenResponse, TokenErrorResponse, None]: The parsed token response, or None if no token request was performed.
2.651662
2.497988
1.061519
http_method = self._provider_configuration.userinfo_endpoint_method if http_method is None or not self._client.userinfo_endpoint: return None logger.debug('making userinfo request') userinfo_response = self._client.do_user_info_request(method=http_method, token=access_token) logger.debug('received userinfo response: %s', userinfo_response.to_json()) return userinfo_response
def userinfo_request(self, access_token)
Args: access_token (str): Bearer access token to use when fetching userinfo Returns: oic.oic.message.OpenIDSchema: UserInfo Response
3.640045
3.945925
0.922482
def set_if_defined(session_key, value): if value: self._session_storage[session_key] = value auth_time = int(time.time()) if id_token: auth_time = id_token.get('auth_time', auth_time) self._session_storage['last_authenticated'] = auth_time set_if_defined('access_token', access_token) set_if_defined('id_token', id_token) set_if_defined('id_token_jwt', id_token_jwt) set_if_defined('userinfo', userinfo)
def update(self, access_token=None, id_token=None, id_token_jwt=None, userinfo=None)
Args: access_token (str) id_token (Mapping[str, str]) id_token_jwt (str) userinfo (Mapping[str, str])
2.278924
2.209166
1.031576
with self.lock: if name: self._log_messages_by_thread(name) else: self._log_all_messages()
def log_background_messages(self, name=None)
Forwards messages logged on background to Robot Framework log. By default forwards all messages logged by all threads, but can be limited to a certain thread by passing thread's name as an argument. Logged messages are removed from the message storage.
4.41729
3.799257
1.162672
evaldict = func.__globals__.copy() evaldict['_call_'] = caller evaldict['_func_'] = func fun = FunctionMaker.create( func, "return _call_(_func_, %(shortsignature)s)", evaldict, __wrapped__=func) if hasattr(func, '__qualname__'): fun.__qualname__ = func.__qualname__ return fun
def decorate(func, caller)
decorate(func, caller) decorates a function using a caller.
5.70798
5.711304
0.999418
if _func is not None: # return a decorated function # this is obsolete behavior; you should use decorate instead return decorate(_func, caller) # else return a decorator function if inspect.isclass(caller): name = caller.__name__.lower() callerfunc = get_init(caller) doc = 'decorator(%s) converts functions/generators into ' \ 'factories of %s objects' % (caller.__name__, caller.__name__) fun = getfullargspec(callerfunc).args[1] # second arg elif inspect.isfunction(caller): if caller.__name__ == '<lambda>': name = '_lambda_' else: name = caller.__name__ callerfunc = caller doc = caller.__doc__ fun = getfullargspec(callerfunc).args[0] # first arg else: # assume caller is an object with a __call__ method name = caller.__class__.__name__.lower() callerfunc = caller.__call__.__func__ doc = caller.__call__.__doc__ fun = getfullargspec(callerfunc).args[1] # second arg evaldict = callerfunc.__globals__.copy() evaldict['_call_'] = caller evaldict['_decorate_'] = decorate return FunctionMaker.create( '%s(%s)' % (name, fun), 'return _decorate_(%s, _call_)' % fun, evaldict, call=caller, doc=doc, module=caller.__module__, __wrapped__=caller)
def decorator(caller, _func=None)
decorator(caller) converts a caller function into a decorator
3.673752
3.638825
1.009598
msg_template = self._get_message_template() client, client_name = self._clients.get_with_name(name) client.set_handler(msg_template, handler_func, header_filter=header_filter, interval=interval)
def set_client_handler(self, handler_func, name=None, header_filter=None, interval=0.5)
Sets an automatic handler for the type of message template currently loaded. This feature allows users to set a python handler function which is called automatically by the Rammbock message queue when message matches the expected template. The optional name argument defines the client node to which the handler will be bound. Otherwise the default client will be used. The header_filter defines which header field will be used to identify the message defined in template. (Otherwise all incoming messages will match!) The interval defines the interval in seconds on which the handler will be called on background. By default the incoming messages are checked every 0.5 seconds. The handler function will be called with two arguments: the rammbock library instance and the received message. Example: | Load template | SomeMessage | | Set client handler | my_module.respond_to_sample | my_module.py: | def respond_to_sample(rammbock, msg): | rammbock.save_template("__backup_template", unlocked=True) | try: | rammbock.load_template("sample response") | rammbock.client_sends_message() | finally: | rammbock.load_template("__backup_template")
3.731347
5.308584
0.702889
msg_template = self._get_message_template() server, server_name = self._servers.get_with_name(name) server.set_handler(msg_template, handler_func, header_filter=header_filter, alias=alias, interval=interval)
def set_server_handler(self, handler_func, name=None, header_filter=None, alias=None, interval=0.5)
Sets an automatic handler for the type of message template currently loaded. This feature allows users to set a python handler function which is called automatically by the Rammbock message queue when message matches the expected template. The optional name argument defines the server node to which the handler will be bound. Otherwise the default server will be used. The header_filter defines which header field will be used to identify the message defined in template. (Otherwise all incoming messages will match!) The interval defines the interval in seconds on which the handler will be called on background. By default the incoming messages are checked every 0.5 seconds. The alias is the alias for the connection. By default the current active connection will be used. The handler function will be called with two arguments: the rammbock library instance and the received message. Example: | Load template | SomeMessage | | Set server handler | my_module.respond_to_sample | messageType | my_module.py: | def respond_to_sample(rammbock, msg): | rammbock.save_template("__backup_template", unlocked=True) | try: | rammbock.load_template("sample response") | rammbock.server_sends_message() | finally: | rammbock.load_template("__backup_template")
3.527119
5.064552
0.696433
for client in self._clients: client.close() for server in self._servers: server.close() self._init_caches()
def reset_rammbock(self)
Closes all connections, deletes all servers, clients, and protocols. You should call this method before exiting your test run. This will close all the connections and the ports will therefore be available for reuse faster.
5.41037
4.041295
1.338771
for client in self._clients: client.empty() for server in self._servers: server.empty()
def clear_message_streams(self)
Resets streams and sockets of incoming messages. You can use this method to reuse the same connections for several consecutive test cases.
5.564054
5.356185
1.038809
if self._protocol_in_progress: raise Exception('Can not start a new protocol definition in middle of old.') if protocol_name in self._protocols: raise Exception('Protocol %s already defined' % protocol_name) self._init_new_message_stack(Protocol(protocol_name, library=self)) self._protocol_in_progress = True
def new_protocol(self, protocol_name)
Start defining a new protocol template. All messages sent and received from a connection that uses a protocol have to conform to this protocol template.
5.066045
4.996376
1.013944
protocol = self._get_message_template() self._protocols[protocol.name] = protocol self._protocol_in_progress = False
def end_protocol(self)
End protocol definition.
9.300893
7.790621
1.193858
self._start_server(UDPServer, ip, port, name, timeout, protocol, family)
def start_udp_server(self, ip, port, name=None, timeout=None, protocol=None, family='ipv4')
Starts a new UDP server to given `ip` and `port`. Server can be given a `name`, default `timeout` and a `protocol`. `family` can be either ipv4 (default) or ipv6. Examples: | Start UDP server | 10.10.10.2 | 53 | | Start UDP server | 10.10.10.2 | 53 | Server1 | | Start UDP server | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 | | Start UDP server | 10.10.10.2 | 53 | timeout=5 | | Start UDP server | 0:0:0:0:0:0:0:1 | 53 | family=ipv6 |
3.618571
7.042077
0.51385
self._start_server(TCPServer, ip, port, name, timeout, protocol, family)
def start_tcp_server(self, ip, port, name=None, timeout=None, protocol=None, family='ipv4')
Starts a new TCP server to given `ip` and `port`. Server can be given a `name`, default `timeout` and a `protocol`. `family` can be either ipv4 (default) or ipv6. Notice that you have to use `Accept Connection` keyword for server to receive connections. Examples: | Start TCP server | 10.10.10.2 | 53 | | Start TCP server | 10.10.10.2 | 53 | Server1 | | Start TCP server | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 | | Start TCP server | 10.10.10.2 | 53 | timeout=5 | | Start TCP server | 0:0:0:0:0:0:0:1 | 53 | family=ipv6 |
3.714906
7.088389
0.524083
self._start_server(SCTPServer, ip, port, name, timeout, protocol, family)
def start_sctp_server(self, ip, port, name=None, timeout=None, protocol=None, family='ipv4')
Starts a new STCP server to given `ip` and `port`. `family` can be either ipv4 (default) or ipv6. pysctp (https://github.com/philpraxis/pysctp) need to be installed your system. Server can be given a `name`, default `timeout` and a `protocol`. Notice that you have to use `Accept Connection` keyword for server to receive connections. Examples: | Start STCP server | 10.10.10.2 | 53 | | Start STCP server | 10.10.10.2 | 53 | Server1 | | Start STCP server | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 | | Start STCP server | 10.10.10.2 | 53 | timeout=5 |
3.094425
6.963481
0.444379
self._start_client(UDPClient, ip, port, name, timeout, protocol, family)
def start_udp_client(self, ip=None, port=None, name=None, timeout=None, protocol=None, family='ipv4')
Starts a new UDP client. Client can be optionally given `ip` and `port` to bind to, as well as `name`, default `timeout` and a `protocol`. `family` can be either ipv4 (default) or ipv6. You should use `Connect` keyword to connect client to a host. Examples: | Start UDP client | | Start UDP client | name=Client1 | protocol=GTPV2 | | Start UDP client | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 | | Start UDP client | timeout=5 | | Start UDP client | 0:0:0:0:0:0:0:1 | 53 | family=ipv6 |
3.685821
8.39101
0.439258
self._start_client(TCPClient, ip, port, name, timeout, protocol, family)
def start_tcp_client(self, ip=None, port=None, name=None, timeout=None, protocol=None, family='ipv4')
Starts a new TCP client. Client can be optionally given `ip` and `port` to bind to, as well as `name`, default `timeout` and a `protocol`. `family` can be either ipv4 (default) or ipv6. You should use `Connect` keyword to connect client to a host. Examples: | Start TCP client | | Start TCP client | name=Client1 | protocol=GTPV2 | | Start TCP client | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 | | Start TCP client | timeout=5 | | Start TCP client | 0:0:0:0:0:0:0:1 | 53 | family=ipv6 |
3.80562
8.52181
0.446574
self._start_client(SCTPClient, ip, port, name, timeout, protocol, family)
def start_sctp_client(self, ip=None, port=None, name=None, timeout=None, protocol=None, family='ipv4')
Starts a new SCTP client. Client can be optionally given `ip` and `port` to bind to, as well as `name`, default `timeout` and a `protocol`. `family` can be either ipv4 (default) or ipv6. You should use `Connect` keyword to connect client to a host. Examples: | Start TCP client | | Start TCP client | name=Client1 | protocol=GTPV2 | | Start TCP client | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 | | Start TCP client | timeout=5 |
3.287835
7.668109
0.428767
server = self._servers.get(name) server.accept_connection(alias, timeout)
def accept_connection(self, name=None, alias=None, timeout=0)
Accepts a connection to server identified by `name` or the latest server if `name` is empty. If given an `alias`, the connection is named and can be later referenced with that name. If `timeout` is > 0, the connection times out after the time specified. `timeout` defaults to 0 which will wait indefinitely. Empty value or None will use socket default timeout. Examples: | Accept connection | | Accept connection | Server1 | my_connection | | Accept connection | Server1 | my_connection | timeout=5 |
6.57569
10.482025
0.62733
client = self._clients.get(name) client.connect_to(host, port)
def connect(self, host, port, name=None)
Connects a client to given `host` and `port`. If client `name` is not given then connects the latest client. Examples: | Connect | 127.0.0.1 | 8080 | | Connect | 127.0.0.1 | 8080 | Client1 |
6.527193
10.726867
0.60849
client, name = self._clients.get_with_name(name) client.send(message) self._register_send(client, label, name)
def client_sends_binary(self, message, name=None, label=None)
Send raw binary `message`. If client `name` is not given, uses the latest client. Optional message `label` is shown on logs. Examples: | Client sends binary | Hello! | | Client sends binary | ${some binary} | Client1 | label=DebugMessage |
6.771822
11.977646
0.565372
server, name = self._servers.get_with_name(name) server.send(message, alias=connection) self._register_send(server, label, name, connection=connection)
def server_sends_binary(self, message, name=None, connection=None, label=None)
Send raw binary `message`. If server `name` is not given, uses the latest server. Optional message `label` is shown on logs. Examples: | Server sends binary | Hello! | | Server sends binary | ${some binary} | Server1 | label=DebugMessage | | Server sends binary | ${some binary} | connection=my_connection |
7.858949
13.265631
0.592429
client, name = self._clients.get_with_name(name) msg = client.receive(timeout=timeout) self._register_receive(client, label, name) return msg
def client_receives_binary(self, name=None, timeout=None, label=None)
Receive raw binary message. If client `name` is not given, uses the latest client. Optional message `label` is shown on logs. Examples: | ${binary} = | Client receives binary | | ${binary} = | Client receives binary | Client1 | timeout=5 |
5.841832
12.044212
0.485032
return self.server_receives_binary_from(name, timeout, connection=connection, label=label)[0]
def server_receives_binary(self, name=None, timeout=None, connection=None, label=None)
Receive raw binary message. If server `name` is not given, uses the latest server. Optional message `label` is shown on logs. Examples: | ${binary} = | Server receives binary | | ${binary} = | Server receives binary | Server1 | connection=my_connection | timeout=5 |
4.348476
7.623243
0.570423
server, name = self._servers.get_with_name(name) msg, ip, port = server.receive_from(timeout=timeout, alias=connection) self._register_receive(server, label, name, connection=connection) return msg, ip, port
def server_receives_binary_from(self, name=None, timeout=None, connection=None, label=None)
Receive raw binary message. Returns message, ip, and port. If server `name` is not given, uses the latest server. Optional message `label` is shown on logs. Examples: | ${binary} | ${ip} | ${port} = | Server receives binary from | | ${binary} | ${ip} | ${port} = | Server receives binary from | Server1 | connection=my_connection | timeout=5 |
6.524151
9.436106
0.691403
proto = self._get_protocol(protocol) if not proto: raise Exception("Protocol not defined! Please define a protocol before creating a message!") if self._protocol_in_progress: raise Exception("Protocol definition in progress. Please finish it before starting to define a message.") configs, fields, header_fields = self._parse_parameters(parameters) self._raise_error_if_configs_or_fields(configs, fields, 'New message') self._init_new_message_stack(MessageTemplate(message_name, proto, header_fields))
def new_message(self, message_name, protocol=None, *parameters)
Define a new message template with `message_name`. `protocol` has to be defined earlier with `Start Protocol Description`. Optional parameters are default values for message header separated with colon. Examples: | New message | MyMessage | MyProtocol | header_field:value |
6.239716
6.189053
1.008186
if isinstance(unlocked, basestring): unlocked = unlocked.lower() != 'false' template = self._get_message_template() if not unlocked: template.set_as_saved() self._message_templates[name] = (template, self._field_values)
def save_template(self, name, unlocked=False)
Save a message template for later use with `Load template`. If saved template is marked as unlocked, then changes can be made to it afterwards. By default tempaltes are locked. Examples: | Save Template | MyMessage | | Save Template | MyOtherMessage | unlocked=True |
5.396916
5.5474
0.972873
template, fields, header_fields = self._set_templates_fields_and_header_fields(name, parameters) self._init_new_message_stack(template, fields, header_fields)
def load_template(self, name, *parameters)
Load a message template saved with `Save template`. Optional parameters are default values for message header separated with colon. Examples: | Load Template | MyMessage | header_field:value |
7.81892
9.382373
0.833363
template, fields, header_fields = self._set_templates_fields_and_header_fields(name, parameters) copy_of_template = copy.deepcopy(template) copy_of_fields = copy.deepcopy(fields) self._init_new_message_stack(copy_of_template, copy_of_fields, header_fields)
def load_copy_of_template(self, name, *parameters)
Load a copy of message template saved with `Save template` when originally saved values need to be preserved from test to test. Optional parameters are default values for message header separated with colon. Examples: | Load Copy Of Template | MyMessage | header_field:value |
3.87027
4.588201
0.843527
_, message_fields, header_fields = self._get_parameters_with_defaults(parameters) return self._encode_message(message_fields, header_fields)
def get_message(self, *parameters)
Get encoded message. * Send Message -keywords are convenience methods, that will call this to get the message object and then send it. Optional parameters are message field values separated with colon. Examples: | ${msg} = | Get message | | ${msg} = | Get message | field_name:value |
6.644455
7.359551
0.902834
with self._receive(self._clients, *parameters) as (msg, message_fields, header_fields): self._validate_message(msg, message_fields, header_fields) return msg
def client_receives_message(self, *parameters)
Receive a message with template defined using `New Message` and validate field values. Message template has to be defined with `New Message` before calling this. Optional parameters: - `name` the client name (default is the latest used) example: `name=Client 1` - `timeout` for receiving message. example: `timeout=0.1` - `latest` if set to True, get latest message from buffer instead first. Default is False. Example: `latest=True` - message field values for validation separated with colon. example: `some_field:0xaf05` Examples: | ${msg} = | Client receives message | | ${msg} = | Client receives message | name=Client1 | timeout=5 | | ${msg} = | Client receives message | message_field:(0|1) |
6.480008
8.760937
0.739648
with self._receive(self._clients, *parameters) as (msg, _, _): return msg
def client_receives_without_validation(self, *parameters)
Receive a message with template defined using `New Message`. Message template has to be defined with `New Message` before calling this. Optional parameters: - `name` the client name (default is the latest used) example: `name=Client 1` - `timeout` for receiving message. example: `timeout=0.1` - `latest` if set to True, get latest message from buffer instead first. Default is False. Example: `latest=True` Examples: | ${msg} = | Client receives without validation | | ${msg} = | Client receives without validation | name=Client1 | timeout=5 |
11.024602
26.766109
0.411887
with self._receive(self._servers, *parameters) as (msg, message_fields, header_fields): self._validate_message(msg, message_fields, header_fields) return msg
def server_receives_message(self, *parameters)
Receive a message with template defined using `New Message` and validate field values. Message template has to be defined with `New Message` before calling this. Optional parameters: - `name` the client name (default is the latest used) example: `name=Client 1` - `connection` alias. example: `connection=connection 1` - `timeout` for receiving message. example: `timeout=0.1` - `latest` if set to True, get latest message from buffer instead first. Default is False. Example: `latest=True` - message field values for validation separated with colon. example: `some_field:0xaf05` Optional parameters are server `name`, `connection` alias and possible `timeout` separated with equals and message field values for validation separated with colon. Examples: | ${msg} = | Server receives message | | ${msg} = | Server receives message | name=Server1 | alias=my_connection | timeout=5 | | ${msg} = | Server receives message | message_field:(0|1) |
7.066804
8.999406
0.785252
with self._receive(self._servers, *parameters) as (msg, _, _): return msg
def server_receives_without_validation(self, *parameters)
Receive a message with template defined using `New Message`. Message template has to be defined with `New Message` before calling this. Optional parameters: - `name` the client name (default is the latest used) example: `name=Client 1` - `connection` alias. example: `connection=connection 1` - `timeout` for receiving message. example: `timeout=0.1` - `latest` if set to True, get latest message from buffer instead first. Default is False. Example: `latest=True` Examples: | ${msg} = | Server receives without validation | | ${msg} = | Server receives without validation | name=Server1 | alias=my_connection | timeout=5 |
12.173165
28.728767
0.423727
_, message_fields, header_fields = self._get_parameters_with_defaults(parameters) self._validate_message(msg, message_fields, header_fields)
def validate_message(self, msg, *parameters)
Validates given message using template defined with `New Message` and field values given as optional arguments. Examples: | Validate message | ${msg} | | Validate message | ${msg} | status:0 |
6.375202
6.498536
0.981021
self._add_field(UInt(length, name, value, align=align))
def uint(self, length, name, value=None, align=None)
Add an unsigned integer to template. `length` is given in bytes and `value` is optional. `align` can be used to align the field to longer byte length. Examples: | uint | 2 | foo | | uint | 2 | foo | 42 | | uint | 2 | fourByteFoo | 42 | align=4 |
5.517631
8.791585
0.627604
self._add_field(Int(length, name, value, align=align))
def int(self, length, name, value=None, align=None)
Add an signed integer to template. `length` is given in bytes and `value` is optional. `align` can be used to align the field to longer byte length. Signed integer uses twos-complement with bits numbered in big-endian. Examples: | int | 2 | foo | | int | 2 | foo | 42 | | int | 2 | fourByteFoo | 42 | align=4 |
5.562984
9.992979
0.556689
self._add_field(Char(length, name, value, terminator))
def chars(self, length, name, value=None, terminator=None)
Add a char array to template. `length` is given in bytes and can refer to earlier numeric fields in template. Special value '*' in length means that length is encoded to length of value and decoded as all available bytes. `value` is optional. `value` could be either a "String" or a "Regular Expression" and if it is a Regular Expression it must be prefixed by 'REGEXP:'. Examples: | chars | 16 | field | Hello World! | | u8 | charLength | | chars | charLength | field | | chars | * | field | Hello World! | | chars | * | field | REGEXP:^{[a-zA-Z ]+}$ |
7.289282
9.952795
0.732385
def new_struct(self, type, name, *parameters):
    """Define a new struct in the template; close it with `End Struct`.

    `type` is the name of the generic type and `name` is the field name in
    the containing structure. `parameters` are colon-separated values for
    struct fields, plus optional `length=` and `align=` configs. A given
    length is validated against received data; when sending, it may refer
    to another message field which is then set dynamically.

    Examples:
    | New struct | Pair | myPair |
    | u8 | first |
    | u8 | second |
    | End Struct |
    """
    configs, params, _ = self._get_parameters_with_defaults(parameters)
    self._add_struct_name_to_params(name, params)
    template = StructTemplate(type, name, self._current_container, params,
                              length=configs.get('length'),
                              align=configs.get('align'))
    self._message_stack.append(template)
6.995584
8.22913
0.8501
def _new_list(self, size, name):
    """Define a new list of `size` with `name`; close it with `End List`.

    The list element type is given by defining exactly one field after this
    keyword. The special size '*' decodes values as long as data is
    available; free length is not supported when encoding.

    Examples:
    | New list | 5 | myIntList |
    | u16 |
    | End List |

    | u8 | listLength |
    | New list | listLength | myIntList |
    | u16 |
    | End List |

    | New list | * | myIntList |
    | u16 |
    | End List |
    """
    template = ListTemplate(size, name, self._current_container)
    self._message_stack.append(template)
13.179429
15.220887
0.865878
def new_binary_container(self, name):
    """Define a new binary container in the template.

    A binary container may only hold binary fields created with the `Bin`
    keyword; close it with `End Binary Container`.

    Examples:
    | New binary container | flags |
    | bin | 2 | foo |
    | bin | 6 | bar |
    | End binary container |
    """
    template = BinaryContainerTemplate(name, self._current_container)
    self._message_stack.append(template)
11.827603
15.725463
0.752131
def end_binary_container(self):
    """End binary container. See `New Binary Container`."""
    container = self._message_stack.pop()
    # Containers validate their contents (e.g. total bit length) on close.
    container.verify()
    self._add_field(container)
8.405698
6.913803
1.215785
def bin(self, size, name, value=None):
    """Add a new binary field to the template.

    Must be called inside a binary container. See `New Binary Container`.
    """
    field = Binary(size, name, value)
    self._add_field(field)
8.642646
8.272209
1.044781
def new_union(self, type, name):
    """Define a new union of `type` and `name` in the template.

    Fields inside the union are alternatives; the union's length equals
    that of its longest field.

    Example:
    | New union | IntOrAddress | foo |
    | Chars | 16 | ipAddress |
    | u32 | int |
    | End union |
    """
    template = UnionTemplate(type, name, self._current_container)
    self._message_stack.append(template)
10.251286
13.221705
0.775338
def start_bag(self, name):
    """Start a bag: a set of optional elements with an optional count.

    Each optional element is given as a `Case` with its accepted count as
    first argument. Received elements are matched against the cases in the
    order the cases are given; if an element does not validate against a
    case, the next case is tried until one matches. The elements themselves
    may arrive in any order.

    This example matches int value 42 0-1 times and value 1 0-2 times, so
    e.g. 1, 42, 1 matches, as does 1, 1:
    | Start bag | intBag |
    | case | 0-1 | u8 | foo | 42 |
    | case | 0-2 | u8 | bar | 1 |
    | End bag |

    A more real-world example, where each AVP entry has a type field whose
    value is used for matching:
    | Start bag | avps |
    | Case | 1 | AVP | result | Result-Code |
    | Case | 1 | AVP | originHost | Origin-Host |
    | Case | 1 | AVP | originRealm | Origin-Realm |
    | Case | 1 | AVP | hostIP | Host-IP-Address |
    | Case | * | AVP | appId | Vendor-Specific-Application-Id |
    | Case | 0-1 | AVP | originState | Origin-State |
    | End bag |

    For a more complete example on bags, see the
    [https://github.com/robotframework/Rammbock/blob/master/atest/diameter.robot|diameter.robot]
    file from Rammbock's acceptance tests.
    """
    template = BagTemplate(name, self._current_container)
    self._message_stack.append(template)
13.662303
21.169073
0.64539
def value(self, name, value):
    """Define a default `value` for a template field identified by `name`.

    Default values for header fields can be set with the header:field
    syntax.

    Examples:
    | Value | foo | 42 |
    | Value | struct.sub_field | 0xcafe |
    | Value | header:version | 0x02 |
    """
    # Whole structures are expanded into their individual field values.
    if isinstance(value, _StructuredElement):
        self._struct_fields_as_values(name, value)
        return
    if name.startswith('header:'):
        header_field = name.partition(':')[-1]
        self._header_values[header_field] = value
    else:
        self._field_values[name] = value
5.18715
4.831607
1.073587
def conditional(self, condition, name):
    """Define a conditional element `name` that exists when `condition` is true.

    `condition` may combine several conditions with the logical operators
    && and ||.

    Example:
    | Conditional | mycondition == 1 | foo |
    | u8 | myelement | 42 |
    | End conditional |

    | Conditional | condition1 == 1 && condition2 != 2 | bar |
    | u8 | myelement | 8 |
    | End conditional |
    """
    template = ConditionalTemplate(condition, name, self._current_container)
    self._message_stack.append(template)
12.425714
20.169073
0.616078
def get_client_unread_messages_count(self, client_name=None):
    """Get the count of unread messages from the named client."""
    # get_with_name returns a tuple; the client object is the first item.
    matches = self._clients.get_with_name(client_name)
    return matches[0].get_messages_count_in_buffer()
6.523592
6.020486
1.083566