code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if 0 <= n <= 255: self._output_buffer.append(pack(n)) else: raise ValueError('Octet %d out of range 0..255' % n) return self
def write_octet(self, n, pack=Struct('B').pack)
Write an integer as an unsigned 8-bit value.
3.027606
2.949489
1.026485
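The `pack=Struct('B').pack` default argument above pre-compiles the format once at import time so each call skips the module-level lookup. A minimal standalone sketch of that pattern (the buffer and names are illustrative, not from the library; `extend` is used here because the sketch buffer is a bytearray):

from struct import Struct

_pack_octet = Struct('B').pack  # compiled once, reused on every call

buf = bytearray()
n = 200
if 0 <= n <= 255:
    buf.extend(_pack_octet(n))  # buf == bytearray(b'\xc8')
else:
    raise ValueError('Octet %d out of range 0..255' % n)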
if 0 <= n <= 0xFFFF: self._output_buffer.extend(pack(n)) else: raise ValueError('Short %d out of range 0..0xFFFF' % n) return self
def write_short(self, n, pack=Struct('>H').pack)
Write an integer as an unsigned 16-bit value.
3.96679
3.728308
1.063965
''' Write an unsigned 16-bit value at a specific position in the buffer. Used for writing tables and frames. ''' if 0 <= n <= 0xFFFF: pack_into(self._output_buffer, pos, n) else: raise ValueError('Short %d out of range 0..0xFFFF' % n) return self
def write_short_at(self, n, pos, pack_into=Struct('>H').pack_into)
Write an unsigned 16-bit value at a specific position in the buffer. Used for writing tables and frames.
5.348223
2.946947
1.814835
if 0 <= n <= 0xFFFFFFFF: self._output_buffer.extend(pack(n)) else: raise ValueError('Long %d out of range 0..0xFFFFFFFF' % n) return self
def write_long(self, n, pack=Struct('>I').pack)
Write an integer as an unsigned 32-bit value.
4.109202
3.917146
1.04903
''' Write an unsigned 32-bit value at a specific position in the buffer. Used for writing tables and frames. ''' if 0 <= n <= 0xFFFFFFFF: pack_into(self._output_buffer, pos, n) else: raise ValueError('Long %d out of range 0..0xFFFFFFFF' % n) return self
def write_long_at(self, n, pos, pack_into=Struct('>I').pack_into)
Write an unsigned 32-bit value at a specific position in the buffer. Used for writing tables and frames.
5.78852
3.099266
1.867707
if 0 <= n <= 0xFFFFFFFFFFFFFFFF: self._output_buffer.extend(pack(n)) else: raise ValueError( 'Longlong %d out of range 0..0xFFFFFFFFFFFFFFFF' % n) return self
def write_longlong(self, n, pack=Struct('>Q').pack)
Write an integer as an unsigned 64-bit value.
4.274258
4.141564
1.03204
if isinstance(s, unicode): s = s.encode('utf-8') self.write_octet(len(s)) self.write(s) return self
def write_shortstr(self, s)
Write a string up to 255 bytes long after encoding. If passed a unicode string, encode as UTF-8.
2.697298
2.403753
1.122119
# Double check timestamp, can't imagine why it would be signed self._output_buffer.extend(pack(long(timegm(t.timetuple())))) return self
def write_timestamp(self, t, pack=Struct('>Q').pack)
Write out a Python datetime.datetime object as a 64-bit integer representing seconds since the Unix UTC epoch.
16.663513
15.330724
1.086936
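For context, `calendar.timegm` is the UTC counterpart of `time.mktime`: it turns a UTC time tuple into seconds since the epoch, which is what the record above packs as a big-endian unsigned 64-bit value. A quick Python 3 check:

from calendar import timegm
from datetime import datetime
from struct import Struct

dt = datetime(2020, 1, 1)                # naive datetime, treated as UTC
seconds = timegm(dt.timetuple())         # 1577836800
print(Struct('>Q').pack(seconds).hex())  # '000000005e0be100'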
# HACK: encoding of AMQP tables is broken because it requires the # length of the /encoded/ data instead of the number of items. To # support streaming, fiddle with cursor position, rewinding to write # the real length of the data. Generally speaking, I'm not a fan of # the AMQP encoding scheme, it could be much faster. table_len_pos = len(self._output_buffer) self.write_long(0) table_data_pos = len(self._output_buffer) for key, value in d.iteritems(): self._write_item(key, value) table_end_pos = len(self._output_buffer) table_len = table_end_pos - table_data_pos self.write_long_at(table_len, table_len_pos) return self
def write_table(self, d)
Write out a Python dictionary made up of string keys and values that are strings, signed integers, Decimal, datetime.datetime, or sub-dictionaries following the same constraints.
6.735496
6.892915
0.977162
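The backpatching trick described in the `write_table` comment (reserve four bytes, write the items, then overwrite the placeholder with the real encoded length) can be shown standalone; the payload below is a stand-in, not real AMQP field encoding:

from struct import Struct

pack_long = Struct('>I').pack
pack_long_into = Struct('>I').pack_into

buf = bytearray()
len_pos = len(buf)
buf.extend(pack_long(0))                     # placeholder table length
data_pos = len(buf)
buf.extend(b'\x03fooS\x00\x00\x00\x03bar')   # hypothetical encoded items
pack_long_into(buf, len_pos, len(buf) - data_pos)  # rewind and backpatch
print(bytes(buf[:4]))                        # b'\x00\x00\x00\x0c' (12 bytes)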
view = ffi.buffer(self.packet.m_body, self.packet.m_nBodySize) return view[:]
def body(self)
The body of the packet.
14.267596
9.148659
1.559529
global _log_callbacks if not callable(callback): raise ValueError("Callback must be callable") _log_callbacks.add(callback) return callback
def add_log_callback(callback)
Adds a log callback.
3.433009
3.544942
0.968425
# If enabled, tell the server that our buffer can fit the whole # stream; this often increases throughput a lot. if self._update_buffer and not self._updated_buffer and self.duration: self.update_buffer((self.duration * 1000) + 5000) self._updated_buffer = True if not self._buf or len(self._buf) != size: self._buf = ffi.new("char[]", size) self._view = ffi.buffer(self._buf, size) res = librtmp.RTMP_Read(self.client.rtmp, self._buf, size) if res < 0: raise IOError("Failed to read data") return self._view[:res]
def read(self, size)
Attempts to read data from the stream. :param size: int, The maximum number of bytes to read. Raises :exc:`IOError` on error.
5.163198
5.355893
0.964022
if isinstance(data, bytearray): data = bytes(data) if not isinstance(data, byte_types): raise ValueError("A bytes argument is required") res = librtmp.RTMP_Write(self.client.rtmp, data, len(data)) if res < 0: raise IOError("Failed to write data") return res
def write(self, data)
Writes data to the stream. :param data: bytes, FLV data to write to the stream The data passed can contain multiple FLV tags, but it MUST always contain complete tags or undefined behaviour might occur. Raises :exc:`IOError` on error.
4.480677
4.80583
0.932342
res = librtmp.RTMP_Pause(self.client.rtmp, 1) if res < 1: raise RTMPError("Failed to pause")
def pause(self)
Pauses the stream.
11.141439
8.273381
1.346661
res = librtmp.RTMP_Pause(self.client.rtmp, 0) if res < 1: raise RTMPError("Failed to unpause")
def unpause(self)
Unpauses the stream.
10.541034
8.127713
1.296925
res = librtmp.RTMP_SendSeek(self.client.rtmp, time) if res < 1: raise RTMPError("Failed to seek")
def seek(self, time)
Attempts to seek in the stream. :param time: int, Time to seek to in seconds
9.202672
10.851433
0.848061
if not self._closed: self._closed = True self.client.close()
def close(self)
Closes the connection.
5.138503
4.188681
1.226759
librtmp.RTMP_SetBufferMS(self.client.rtmp, int(ms)) librtmp.RTMP_UpdateBufferMS(self.client.rtmp)
def update_buffer(self, ms)
Tells the server how big our buffer is (in milliseconds).
7.926487
5.795146
1.367781
akey = AVal(key) aval = AVal(value) res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval) if res < 1: raise ValueError("Unable to set option {0}".format(key)) self._options[akey] = aval
def set_option(self, key, value)
Sets an option for this session. For a detailed list of available options see the librtmp(3) man page. :param key: str, A valid option key. :param value: A value, anything that can be converted to str is valid. Raises :exc:`ValueError` if an invalid option is specified.
5.678583
5.529363
1.026987
self.url = bytes(url, "utf8") res = librtmp.RTMP_SetupURL(self.rtmp, self.url) if res < 1: raise RTMPError("Unable to parse URL")
def setup_url(self, url)
r"""Attempt to parse a RTMP URL. Additional options may be specified by appending space-separated key=value pairs to the URL. Special characters in values may need to be escaped to prevent misinterpretation by the option parser. The escape encoding uses a backslash followed by two hexadecimal digits representing the ASCII value of the character. E.g., spaces must be escaped as `\\20` and backslashes must be escaped as `\\5c`. :param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]` Raises :exc:`RTMPError` if URL parsing fails.
7.745299
7.001463
1.10624
if isinstance(packet, RTMPPacket): packet = packet.packet else: packet = ffi.NULL res = librtmp.RTMP_Connect(self.rtmp, packet) if res < 1: raise RTMPError("Failed to connect") return RTMPCall(self, 1.0)
def connect(self, packet=None)
Connect to the server. :param packet: RTMPPacket, this packet will be sent instead of the regular "connect" packet. Raises :exc:`RTMPError` if the connect attempt fails.
5.384265
4.574569
1.176999
if writeable: librtmp.RTMP_EnableWrite(self.rtmp) # Calling handle_packet() on a connect result causes # librtmp to send a CreateStream call. This is not always # desired when using process_packets(), therefore we do it # here instead. if self._connect_result: self.handle_packet(self._connect_result) if not seek: seek = 0 res = librtmp.RTMP_ConnectStream(self.rtmp, seek) if res < 1: raise RTMPError("Failed to start RTMP playback") return RTMPStream(self, update_buffer=update_buffer)
def create_stream(self, seek=None, writeable=False, update_buffer=True)
Prepares the session for streaming of audio/video and returns a :class:`RTMPStream` object. :param seek: int, Attempt to seek to this position. :param writeable: bool, Make the stream writeable instead of readable. :param update_buffer: bool, When enabled, attempts to speed up download by telling the server our buffer can fit the whole stream. Raises :exc:`RTMPError` if a stream could not be created. Usage:: >>> stream = conn.create_stream() >>> data = stream.read(1024)
5.706621
6.468904
0.882162
packet = ffi.new("RTMPPacket*") packet_complete = False while not packet_complete: res = librtmp.RTMP_ReadPacket(self.rtmp, packet) if res < 1: if librtmp.RTMP_IsTimedout(self.rtmp): raise RTMPTimeoutError("Timed out while reading packet") else: raise RTMPError("Failed to read packet") packet_complete = packet.m_nBytesRead == packet.m_nBodySize return RTMPPacket._from_pointer(packet)
def read_packet(self)
Reads an RTMP packet from the server. Returns a :class:`RTMPPacket`. Raises :exc:`RTMPError` on error. Raises :exc:`RTMPTimeoutError` on timeout. Usage:: >>> packet = conn.read_packet() >>> packet.body b'packet body ...'
4.22363
4.056744
1.041138
if not isinstance(packet, RTMPPacket): raise ValueError("A RTMPPacket argument is required") return librtmp.RTMP_SendPacket(self.rtmp, packet.packet, int(queue))
def send_packet(self, packet, queue=True)
Sends an RTMP packet to the server. :param packet: RTMPPacket, the packet to send to the server. :param queue: bool, If True, queue up the packet in an internal queue rather than sending it right away.
6.736919
5.53114
1.217998
if not isinstance(packet, RTMPPacket): raise ValueError("A RTMPPacket argument is required") return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)
def handle_packet(self, packet)
Lets librtmp look at a packet and send a response if needed.
9.792464
7.192632
1.361458
start = time() while self.connected and transaction_id not in self._invoke_results: if timeout and (time() - start) >= timeout: raise RTMPTimeoutError("Timeout") packet = self.read_packet() if packet.type == PACKET_TYPE_INVOKE: try: decoded = decode_amf(packet.body) except AMFError: continue try: method, transaction_id_, obj = decoded[:3] args = decoded[3:] except ValueError: continue if method == "_result": if len(args) > 0: result = args[0] else: result = None self._invoke_results[transaction_id_] = result else: handler = self._invoke_handlers.get(method) if handler: res = handler(*args) if res is not None: self.call("_result", res, transaction_id=transaction_id_) if method == invoked_method: self._invoke_args[invoked_method] = args break if transaction_id_ == 1.0: self._connect_result = packet else: self.handle_packet(packet) else: self.handle_packet(packet) if transaction_id: result = self._invoke_results.pop(transaction_id, None) return result if invoked_method: args = self._invoke_args.pop(invoked_method, None) return args
def process_packets(self, transaction_id=None, invoked_method=None, timeout=None)
Wait for packets and process them as needed. :param transaction_id: int, Wait until the result of this transaction ID is received. :param invoked_method: str, Wait until this method is invoked by the server. :param timeout: int, The time to wait for a result from the server. Note: This is the timeout used by this method only, the connection timeout is still used when reading packets. Raises :exc:`RTMPError` on error. Raises :exc:`RTMPTimeoutError` on timeout. Usage:: >>> @conn.invoke_handler ... def add(x, y): ... return x + y >>> conn.process_packets()
2.95861
2.968245
0.996754
transaction_id = params.get("transaction_id") if not transaction_id: self.transaction_id += 1 transaction_id = self.transaction_id obj = params.get("obj") args = [method, transaction_id, obj] + list(args) args_encoded = map(lambda x: encode_amf(x), args) body = b"".join(args_encoded) format = params.get("format", PACKET_SIZE_MEDIUM) channel = params.get("channel", 0x03) packet = RTMPPacket(type=PACKET_TYPE_INVOKE, format=format, channel=channel, body=body) self.send_packet(packet) return RTMPCall(self, transaction_id)
def call(self, method, *args, **params)
Calls a method on the server.
3.621212
3.542217
1.022301
def func(*args): call = self.call(method, *args, **params) if block: return call.result() return call func.__name__ = method return func
def remote_method(self, method, block=False, **params)
Creates a Python function that will attempt to call a remote method when used. :param method: str, Method name on the server to call :param block: bool, Whether to wait for result or not Usage:: >>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True) >>> send_usher_token("some token") 'Token Accepted'
4.056325
6.267825
0.647166
if self.done: return self._result result = self.conn.process_packets(transaction_id=self.transaction_id, timeout=timeout) self._result = result self.done = True return result
def result(self, timeout=None)
Retrieves the result of the call. :param timeout: The time to wait for a result from the server. Raises :exc:`RTMPTimeoutError` on timeout.
5.048227
5.287117
0.954817
import signal def handler(sig, frame): if sig == signal.SIGINT: librtmp.RTMP_UserInterrupt() raise KeyboardInterrupt signal.signal(signal.SIGINT, handler)
def add_signal_handler()
Adds a signal handler to handle KeyboardInterrupt.
5.099237
4.759511
1.071378
self.normals = self.data['normals'] self.vectors = numpy.ones(( self.data['vectors'].shape[0], self.data['vectors'].shape[1], self.data['vectors'].shape[2] + 1 )) self.vectors[:, :, :-1] = self.data['vectors'] self.attr = self.data['attr'] return
def set_initial_values(self)
Set initial values from existing self.data value :return: None
3.023185
2.930367
1.031675
rad = math.radians(deg) mat = numpy.array([ [1, 0, 0, 0], [0, math.cos(rad), math.sin(rad), 0], [0, -math.sin(rad), math.cos(rad), 0], [0, 0, 0, 1] ]) self.vectors = self.vectors.dot(mat) return self
def rotate_x(self, deg)
Rotate mesh around x-axis :param float deg: Rotation angle (degree) :return: self
1.772161
1.897344
0.934022
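Because the mesh stores homogeneous row vectors and multiplies with `vectors.dot(mat)`, the rotation matrix above is the transpose of the usual column-vector form. A quick sanity check that +y maps to +z under a 90-degree rotation about x:

import math
import numpy

rad = math.radians(90)
mat = numpy.array([
    [1, 0, 0, 0],
    [0, math.cos(rad), math.sin(rad), 0],
    [0, -math.sin(rad), math.cos(rad), 0],
    [0, 0, 0, 1],
])
v = numpy.array([0.0, 1.0, 0.0, 1.0])  # homogeneous point on +y
print(v.dot(mat).round(6))             # [0. 0. 1. 1.] -- +y rotated onto +z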
mat = numpy.array([ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [d, 0, 0, 1] ]) self.vectors = self.vectors.dot(mat) return self
def translate_x(self, d)
Translate mesh in the x-direction :param float d: Amount to translate
2.107421
2.428434
0.867811
mat = numpy.array([ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, d, 0, 1] ]) self.vectors = self.vectors.dot(mat) return self
def translate_y(self, d)
Translate mesh in the y-direction :param float d: Amount to translate
2.184994
2.504569
0.872403
mat = numpy.array([ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, d, 1] ]) self.vectors = self.vectors.dot(mat) return self
def translate_z(self, d)
Translate mesh in the z-direction :param float d: Amount to translate
2.103216
2.425765
0.867032
mat = numpy.array([ [sx, 0, 0, 0], [0, sy, 0, 0], [0, 0, sz, 0], [0, 0, 0, 1] ]) self.vectors = self.vectors.dot(mat) return self
def scale(self, sx, sy, sz)
Scale mesh :param float sx: Amount to scale for x-direction :param float sy: Amount to scale for y-direction :param float sz: Amount to scale for z-direction
1.949204
2.308752
0.844268
v321 = triangle[2][0] * triangle[1][1] * triangle[0][2] v231 = triangle[1][0] * triangle[2][1] * triangle[0][2] v312 = triangle[2][0] * triangle[0][1] * triangle[1][2] v132 = triangle[0][0] * triangle[2][1] * triangle[1][2] v213 = triangle[1][0] * triangle[0][1] * triangle[2][2] v123 = triangle[0][0] * triangle[1][1] * triangle[2][2] signed_volume = (-v321 + v231 + v312 - v132 - v213 + v123) / 6.0 return signed_volume
def __calc_signed_volume(triangle)
Calculate signed volume of given triangle :param list of list triangle: :rtype: float
1.442501
1.493761
0.965684
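The six products above are the expansion of a 3x3 determinant: the signed volume of the tetrahedron spanned by the origin and the triangle is det([v0, v1, v2]) / 6, so summing it over a closed mesh gives the enclosed volume. An equivalent check with numpy:

import numpy

def signed_volume(triangle):
    # det([v0, v1, v2]) / 6 expands to exactly the six products above.
    return numpy.linalg.det(numpy.asarray(triangle)) / 6.0

tri = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
print(signed_volume(tri))  # 0.1666... == 1/6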
if update_normals: self.update_normals() filename = os.path.split(path)[-1] if mode is MODE_STL_AUTO: if self.mode == MODE_STL_BINARY: save_func = self.__save_stl_binary elif self.mode == MODE_STL_ASCII: save_func = self.__save_stl_ascii else: raise ValueError("Mode %r is invalid" % mode) elif mode is MODE_STL_BINARY: save_func = self.__save_stl_binary elif mode is MODE_STL_ASCII: save_func = self.__save_stl_ascii else: raise ValueError("Mode %r is invalid" % mode) with open(path, 'wb') as fh: save_func(fh, filename)
def save_stl(self, path, mode=MODE_STL_AUTO, update_normals=True)
Save data in STL format :param str path: :param int mode: :param bool update_normals:
2.049126
2.238227
0.915513
if update_normals: self.update_normals() # Create triangle_list vectors_key_list = [] vectors_list = [] normals_key_list = [] normals_list = [] triangle_list = [] for i, vector in enumerate(self.vectors): one_triangle = [] for j in range(3): v_key = ",".join(map(str, self.vectors[i][j][:3])) if v_key in vectors_key_list: v_index = vectors_key_list.index(v_key) else: v_index = len(vectors_key_list) vectors_key_list.append(v_key) vectors_list.append(self.vectors[i][j][:3]) one_triangle.append(v_index + 1) n_key = ",".join(map(str, self.normals[i][:3])) if n_key in normals_key_list: n_index = normals_key_list.index(n_key) else: n_index = len(normals_key_list) normals_key_list.append(n_key) normals_list.append(self.normals[i][:3]) # print(normals_list) triangle_list.append((one_triangle, n_index + 1)) with open(path, "wb") as fh: print("# {} {}".format(__title__, __version__), file=fh) print("# {}".format(datetime.datetime.now()), file=fh) print("# {}".format(__url__), file=fh) print("", file=fh) for v in vectors_list: print("v {} {} {}".format(v[0], v[1], v[2]), file=fh) for vn in normals_list: print("vn {} {} {}".format(vn[0], vn[1], vn[2]), file=fh) for t in triangle_list: faces = t[0] normal = t[1] print("f {}//{} {}//{} {}//{}".format( faces[0], normal, faces[1], normal, faces[2], normal, ), file=fh)
def save_obj(self, path, update_normals=True)
Save data in OBJ format :param str path: :param bool update_normals:
1.807079
1.81484
0.995724
header = fh.read(Stl.HEADER_SIZE).lower() name = "" data = None if not header.strip(): return if mode in (Stl.MODE_AUTO, Stl.MODE_ASCII) and header.startswith('solid'): try: name = header.split('\n', 1)[0][5:].strip() data = Stl.__load_ascii(fh, header) mode = Stl.MODE_ASCII except: pass else: data = Stl.__load_binary(fh) mode = Stl.MODE_BINARY return name, data, mode
def __load(fh, mode=MODE_AUTO)
Load Mesh from STL file :param FileIO fh: The file handle to open :param int mode: The mode to open, default is :py:data:`AUTOMATIC`. :return:
3.503757
3.811338
0.919298
lines = header.split('\n') recoverable = [True] def get(prefix=''): if lines: line = lines.pop(0) else: raise RuntimeError(recoverable[0], 'Unable to find more lines') if not lines: recoverable[0] = False # Read more lines and make sure we prepend any old data lines[:] = fh.read(Stl.BUFFER_SIZE).split('\n') line += lines.pop(0) line = line.lower().strip() if prefix: if line.startswith(prefix): values = line.replace(prefix, '', 1).strip().split() elif line.startswith('endsolid'): raise StopIteration() else: raise RuntimeError(recoverable[0], '%r should start with %r' % (line, prefix)) if len(values) == 3: vertex = [float(v) for v in values] return vertex else: # pragma: no cover raise RuntimeError(recoverable[0], 'Incorrect value %r' % line) else: return line line = get() if not line.startswith('solid ') and line.startswith('solid'): print("Error") if not lines: raise RuntimeError(recoverable[0], 'No lines found, impossible to read') while True: # Read from the header lines first, until that point we can recover # and go to the binary option. After that we cannot due to # unseekable files such as sys.stdin # # Numpy doesn't support any non-file types so wrapping with a # buffer and/or StringIO does not work. try: normals = get('facet normal') assert get() == 'outer loop' v0 = get('vertex') v1 = get('vertex') v2 = get('vertex') assert get() == 'endloop' assert get() == 'endfacet' attrs = 0 yield (normals, (v0, v1, v2), attrs) except AssertionError as e: raise RuntimeError(recoverable[0], e) except StopIteration: if any(lines): # Seek back to where the next solid should begin fh.seek(-len('\n'.join(lines)), os.SEEK_CUR) raise
def __ascii_reader(fh, header)
:param fh: file handle to read from :param header: header data already read from the file :return: yields one (normals, (v0, v1, v2), attrs) tuple per facet
4.578618
4.609072
0.993393
logger = logging.getLogger('steppy') logger.setLevel(logging.INFO) message_format = logging.Formatter(fmt='%(asctime)s %(name)s >>> %(message)s', datefmt='%Y-%m-%d %H:%M:%S') # console handler console_handler = logging.StreamHandler(sys.stdout) console_handler.setLevel(logging.INFO) console_handler.setFormatter(fmt=message_format) # add the handlers to the logger logger.addHandler(console_handler) return logger
def initialize_logger()
Initialize steppy logger. This logger is used throughout the steppy library to report computation progress. Example: Simple use of steppy logger: .. code-block:: python initialize_logger() logger = get_logger() logger.info('My message inside pipeline') result looks like this: .. code:: 2018-06-02 12:33:48 steppy >>> My message inside pipeline Returns: logging.Logger: logger object formatted in the steppy style
1.941516
2.002499
0.969547
graph = _create_graph(structure_dict) plt = Image(graph.create_png()) display(plt)
def display_upstream_structure(structure_dict)
Displays pipeline structure in the jupyter notebook. Args: structure_dict (dict): dict returned by :func:`~steppy.base.Step.upstream_structure`.
7.048835
10.952924
0.643557
graph = _create_graph(structure_dict) graph.write(filepath, format='png')
def persist_as_png(structure_dict, filepath)
Saves pipeline diagram to disk as png file. Args: structure_dict (dict): dict returned by :func:`~steppy.base.Step.upstream_structure` filepath (str): filepath to which the png with pipeline visualization should be persisted
4.575902
8.282942
0.552449
graph = pydot.Dot() for node in structure_dict['nodes']: graph.add_node(pydot.Node(node)) for node1, node2 in structure_dict['edges']: graph.add_edge(pydot.Edge(node1, node2)) return graph
def _create_graph(structure_dict)
Creates pydot graph from the pipeline structure dict. Args: structure_dict (dict): dict returned by step.upstream_structure Returns: graph (pydot.Dot): object representing upstream pipeline structure (with regard to the current Step).
1.943077
2.103857
0.923578
adapted = {} for name, recipe in self.adapting_recipes.items(): adapted[name] = self._construct(all_ouputs, recipe) return adapted
def adapt(self, all_ouputs: AllOutputs) -> DataPacket
Adapt inputs for the transformer included in the step. Args: all_ouputs: Dict of outputs from parent steps. The keys should match the names of these steps and the values should be their respective outputs. Returns: Dictionary with the same keys as `adapting_recipes` and values constructed according to the respective recipes.
6.514915
4.244461
1.534922
structure_dict = {'edges': set(), 'nodes': set()} structure_dict = self._build_structure_dict(structure_dict) return structure_dict
def upstream_structure(self)
Build dictionary with entire upstream pipeline structure (with regard to the current Step). Returns: dict: dictionary describing the upstream pipeline structure. It has two keys: ``'edges'`` and ``'nodes'``, where: - value of ``'edges'`` is set of tuples ``(input_step.name, self.name)`` - value of ``'nodes'`` is set of all step names upstream to this Step
5.340276
5.617101
0.950717
if data: assert isinstance(data, dict), 'Step {}, "data" argument in the "fit_transform()" method must be dict, ' \ 'got {} instead.'.format(self.name, type(data)) logger.info('Step {}, working in "{}" mode'.format(self.name, self._mode)) if self._mode == 'inference': raise ValueError('Step {}, you are in "{}" mode, where you cannot run "fit". ' 'Please change mode to "train" to enable fitting. ' 'Use: "step.set_mode_train()" then "step.fit_transform()"'.format(self.name, self._mode)) if self.output_is_cached and not self.force_fitting: logger.info('Step {} using cached output'.format(self.name)) step_output_data = self.output elif self.output_is_persisted and self.load_persisted_output and not self.force_fitting: logger.info('Step {} loading persisted output from {}'.format(self.name, self.experiment_directory_output_step)) step_output_data = self._load_output(self.experiment_directory_output_step) else: step_inputs = {} if self.input_data is not None: for input_data_part in self.input_data: step_inputs[input_data_part] = data[input_data_part] for input_step in self.input_steps: step_inputs[input_step.name] = input_step.fit_transform(data) if self.adapter: step_inputs = self._adapt(step_inputs) else: step_inputs = self._unpack(step_inputs) step_output_data = self._fit_transform_operation(step_inputs) logger.info('Step {}, fit and transform completed'.format(self.name)) return step_output_data
def fit_transform(self, data)
Fit the model and transform data or load already processed data. Loads cached or persisted output or adapts data for the current transformer and executes ``transformer.fit_transform``. Args: data (dict): data dictionary with keys as input names and values as dictionaries of key-value pairs that can be passed to the ``self.transformer.fit_transform`` method. Example: .. code-block:: python data = {'input_1': {'X': X, 'y': y}, 'input_2': {'X': X, 'y': y} } Returns: dict: Step output from the ``self.transformer.fit_transform`` method
3.221887
3.154536
1.021351
self.clean_cache_upstream() self.set_mode_train() for step_obj in self.all_upstream_steps.values(): step_obj.is_fittable = DEFAULT_TRAINING_SETUP['is_fittable'] step_obj.force_fitting = DEFAULT_TRAINING_SETUP['force_fitting'] step_obj.persist_output = DEFAULT_TRAINING_SETUP['persist_output'] step_obj.cache_output = DEFAULT_TRAINING_SETUP['cache_output'] step_obj.load_persisted_output = DEFAULT_TRAINING_SETUP['load_persisted_output'] logger.info('Step {}, reset all upstream Steps to default training parameters, ' 'including this Step'.format(self.name)) return self
def reset(self)
Reset all upstream Steps to the default training parameters and cleans cache for all upstream Steps including this Step. Defaults are: 'mode': 'train', 'is_fittable': True, 'force_fitting': True, 'persist_output': False, 'cache_output': False, 'load_persisted_output': False
4.669091
2.159538
2.162078
assert isinstance(parameters, dict), 'parameters must be dict, got {} instead'.format(type(parameters)) for step_obj in self.all_upstream_steps.values(): for key in step_obj.__dict__.keys(): if key in list(parameters.keys()): step_obj.__dict__[key] = parameters[key] if key == 'experiment_directory': step_obj._prepare_experiment_directories() logger.info('set new values to all upstream Steps including this Step.') return self
def set_parameters_upstream(self, parameters)
Set parameters to all upstream Steps including this Step. ``parameters`` is a dict where each key is a Step attribute and each value is the new value to set.
4.597817
3.269534
1.406261
logger.info('Step {}, cleaning cache'.format(self.name)) self.output = None return self
def clean_cache_step(self)
Clean cache for current step.
9.336915
8.494073
1.099227
logger.info('Cleaning cache for the entire upstream pipeline') for step in self.all_upstream_steps.values(): logger.info('Step {}, cleaning cache'.format(step.name)) step.output = None return self
def clean_cache_upstream(self)
Clean cache for all steps that are upstream to `self`.
6.372764
4.990633
1.276945
self._validate_step_name(name) name = str(name) try: return self.all_upstream_steps[name] except KeyError as e: msg = 'No Step with name "{}" found. ' \ 'You have following Steps: {}'.format(name, list(self.all_upstream_steps.keys())) raise StepError(msg) from e
def get_step_by_name(self, name)
Extracts step by name from the pipeline. Extracted Step is a fully functional pipeline as well. All upstream Steps are already defined. Args: name (str): name of the step to be fetched Returns: Step (obj): extracted step
3.743946
3.688065
1.015152
persist_dir = os.path.join(self.experiment_directory, '{}_upstream_structure.json'.format(self.name)) logger.info('Step {}, saving upstream pipeline structure to {}'.format(self.name, persist_dir)) joblib.dump(self.upstream_structure, persist_dir)
def persist_upstream_structure(self)
Persist json file with the upstream steps structure, that is step names and their connections.
3.949616
3.25669
1.21277
assert isinstance(filepath, str),\ 'Step {} error, filepath must be str. Got {} instead'.format(self.name, type(filepath)) persist_as_png(self.upstream_structure, filepath)
def persist_upstream_diagram(self, filepath)
Creates upstream steps diagram and persists it to disk as png file. Pydot graph is created and persisted to disk as png file under the filepath directory. Args: filepath (str): filepath to which the png with steps visualization should be persisted
7.310869
8.644835
0.845692
self.fit(*args, **kwargs) return self.transform(*args, **kwargs)
def fit_transform(self, *args, **kwargs)
Performs fit followed by transform. This method simply combines fit and transform. Args: args: positional arguments (can be anything) kwargs: keyword arguments (can be anything) Returns: dict: output
2.583186
4.528889
0.57038
return self.api_url.format(version=self.api_version, endpoint=self.api_endpoints[endpoint])
def url(self, endpoint)
Returns full URL for specified API endpoint >>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e") >>> translate.url("langs") 'https://translate.yandex.net/api/v1.5/tr.json/getLangs' >>> translate.url("detect") 'https://translate.yandex.net/api/v1.5/tr.json/detect' >>> translate.url("translate") 'https://translate.yandex.net/api/v1.5/tr.json/translate'
4.804023
5.550974
0.865438
try: response = requests.get(self.url("langs"), params={"key": self.api_key}, proxies=proxies) except requests.exceptions.ConnectionError: raise YandexTranslateException(self.error_codes[503]) else: response = response.json() status_code = response.get("code", 200) if status_code != 200: raise YandexTranslateException(status_code) return response.get("dirs")
def directions(self, proxies=None)
Returns a list of available translation directions >>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e") >>> directions = translate.directions >>> len(directions) > 0 True
3.23662
3.231739
1.00151
data = { "text": text, "format": format, "key": self.api_key, } try: response = requests.post(self.url("detect"), data=data, proxies=proxies) except ConnectionError: raise YandexTranslateException(self.error_codes[503]) except ValueError: raise YandexTranslateException(response) else: response = response.json() language = response.get("lang", None) status_code = response.get("code", 200) if status_code != 200: raise YandexTranslateException(status_code) elif not language: raise YandexTranslateException(501) return language
def detect(self, text, proxies=None, format="plain")
Detects the language of the given text >>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e") >>> result = translate.detect(text="Hello world!") >>> result == "en" True
2.581352
2.549197
1.012614
data = { "text": text, "format": format, "lang": lang, "key": self.api_key } try: response = requests.post(self.url("translate"), data=data, proxies=proxies) except ConnectionError: raise YandexTranslateException(503) else: response = response.json() status_code = response.get("code", 200) if status_code != 200: raise YandexTranslateException(status_code) return response
def translate(self, text, lang, proxies=None, format="plain")
Translates text into the given language >>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e") >>> result = translate.translate(lang="ru", text="Hello, world!") >>> result["code"] == 200 True >>> result["lang"] == "en-ru" True
2.321945
2.457481
0.944848
# https://tools.ietf.org/html/rfc4226 msg = struct.pack('>Q', counter) digest = hmac.new(to_bytes(secret), msg, hashlib.sha1).digest() ob = digest[19] if PY2: ob = ord(ob) pos = ob & 15 base = struct.unpack('>I', digest[pos:pos + 4])[0] & 0x7fffffff token = base % 1000000 return token
def generate_hotp(secret, counter=4)
Generate a HOTP code. :param secret: A secret token for the authentication. :param counter: HOTP is a counter-based algorithm.
2.856081
3.116546
0.916425
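A self-contained Python 3 restatement of the RFC 4226 steps above (HMAC-SHA1, dynamic truncation on the low nibble of the last byte, modulo 10^digits), checked against the RFC's Appendix D test vectors:

import hashlib
import hmac
import struct

def hotp(secret, counter, digits=6):
    msg = struct.pack('>Q', counter)
    digest = hmac.new(secret, msg, hashlib.sha1).digest()
    pos = digest[19] & 15                     # dynamic truncation offset
    base = struct.unpack('>I', digest[pos:pos + 4])[0] & 0x7fffffff
    return base % (10 ** digits)

assert hotp(b'12345678901234567890', 0) == 755224  # RFC 4226 Appendix D
assert hotp(b'12345678901234567890', 1) == 287082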
if timestamp is None: timestamp = time.time() counter = int(timestamp) // period return generate_hotp(secret, counter)
def generate_totp(secret, period=30, timestamp=None)
Generate a TOTP code. A TOTP code is an extension of the HOTP algorithm. :param secret: A secret token for the authentication. :param period: A period that a TOTP code is valid in seconds :param timestamp: Current time stamp.
3.761553
5.312366
0.708075
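TOTP is just HOTP with the counter derived from wall-clock time, as the record shows. A standalone Python 3 check against the RFC 6238 SHA-1 test vector (t = 59 s, 30-second period, 8 digits):

import hashlib
import hmac
import struct

def totp(secret, period=30, timestamp=0, digits=6):
    counter = int(timestamp) // period        # elapsed periods since epoch
    msg = struct.pack('>Q', counter)
    digest = hmac.new(secret, msg, hashlib.sha1).digest()
    pos = digest[19] & 15
    base = struct.unpack('>I', digest[pos:pos + 4])[0] & 0x7fffffff
    return base % (10 ** digits)

assert totp(b'12345678901234567890', timestamp=59, digits=8) == 94287082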
if not valid_code(code): return False code = bytes(int(code)) for i in range(last + 1, last + trials + 1): if compare_digest(bytes(self.hotp(counter=i)), code): return i return False
def valid_hotp(self, code, last=0, trials=100)
Validate a HOTP code. :param code: A number that is less than 6 characters. :param last: Guess the HOTP code starting from last + 1. :param trials: Guessing ends at last + trials + 1.
4.022855
4.978909
0.807979
if not valid_code(code): return False return compare_digest( bytes(self.totp(period, timestamp)), bytes(int(code)) )
def valid_totp(self, code, period=30, timestamp=None)
Validate a TOTP code. :param code: A number that is less than 6 characters. :param period: A period that a TOTP code is valid in seconds :param timestamp: Validate TOTP at this given timestamp
5.348812
8.657154
0.617849
type = type.lower() if type not in ('hotp', 'totp'): raise ValueError('type must be hotp or totp') if type == 'hotp' and not counter: raise ValueError('HOTP type authentication need counter') # https://code.google.com/p/google-authenticator/wiki/KeyUriFormat url = ('otpauth://%(type)s/%(label)s?secret=%(secret)s' '&issuer=%(issuer)s') dct = dict( type=type, label=label, issuer=issuer, secret=self.encoded_secret, counter=counter ) ret = url % dct if type == 'hotp': ret = '%s&counter=%s' % (ret, counter) return ret
def to_uri(self, type, label, issuer, counter=None)
Generate the otpauth protocol string. :param type: Algorithm type, hotp or totp. :param label: Label of the identifier. :param issuer: The company, the organization or something else. :param counter: Counter of the HOTP algorithm.
2.648979
2.541363
1.042346
warnings.warn('deprecated, use to_uri instead', DeprecationWarning) return self.to_uri(type, label, issuer, counter)
def to_google(self, type, label, issuer, counter=None)
Generate the otpauth protocol string for Google Authenticator. .. deprecated:: 0.2.0 Use :func:`to_uri` instead.
3.604256
3.271245
1.1018
if not first or not second: raise JaroDistanceException("Cannot calculate distance from NoneType ({0}, {1})".format( first.__class__.__name__, second.__class__.__name__)) jaro = _score(first, second) cl = min(len(_get_prefix(first, second)), 4) if all([winkler, winkler_ajustment]): # 0.1 as scaling factor return round((jaro + (scaling * cl * (1.0 - jaro))) * 100.0) / 100.0 return jaro
def get_jaro_distance(first, second, winkler=True, winkler_ajustment=True, scaling=0.1)
:param first: word to calculate distance for :param second: word to calculate distance with :param winkler: same as winkler_ajustment :param winkler_ajustment: add an adjustment factor to the Jaro distance :param scaling: scaling factor for the Winkler adjustment :return: Jaro distance, adjusted (or not)
4.601777
4.668673
0.985671
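The Winkler adjustment computed above is jw = j + l * p * (1 - j), with l the common-prefix length capped at 4 and p the scaling factor. Worked through for the classic MARTHA / MARHTA pair (6 matches, 1 transposition, prefix 'MAR'):

jaro = (1.0 + 1.0 + 5.0 / 6.0) / 3.0  # m/|s1| + m/|s2| + (m - t)/m, over 3
cl, scaling = 3, 0.1                  # len('MAR'), default scaling factor
print(round((jaro + scaling * cl * (1.0 - jaro)) * 100.0) / 100.0)  # 0.96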
# RADAR: Python2 if isinstance(backend, money.six.string_types): path, name = backend.rsplit('.', 1) module = importlib.import_module(path) backend = getattr(module, name)() elif isinstance(backend, type): backend = backend() if not isinstance(backend, BackendBase): raise TypeError("backend '{}' is not a subclass of " "money.xrates.BackendBase".format(backend)) self._backend = backend
def install(self, backend='money.exchange.SimpleBackend')
Install an exchange rates backend using a Python path string
4.150821
3.953743
1.049846
if not self._backend: raise ExchangeBackendNotInstalled() return self._backend.rate(currency)
def rate(self, currency)
Return quotation between the base and another currency
8.223121
7.557373
1.088092
if not self._backend: raise ExchangeBackendNotInstalled() return self._backend.quotation(origin, target)
def quotation(self, origin, target)
Return quotation between two currencies (origin, target)
7.512267
5.933221
1.266136
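A hedged sketch of what a quotation typically means under a simple backend: the cross rate of two currencies through a common base currency. The rate table and helper below are hypothetical; the real backend may store and derive rates differently.

from decimal import Decimal

# Hypothetical rates quoted against a EUR base.
rates = {'EUR': Decimal('1'), 'USD': Decimal('1.08'), 'GBP': Decimal('0.85')}

def quotation(origin, target):
    # Units of target currency per one unit of origin currency.
    return rates[target] / rates[origin]

print(quotation('USD', 'GBP'))  # Decimal('0.7870370370...')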
@functools.wraps(PathCollection.__init__) def __init__(self, *args, **kwargs): _nonscatter_pathcollections.add(self) return __init__.__wrapped__(self, *args, **kwargs) PathCollection.__init__ = __init__ @functools.wraps(Axes.scatter) def scatter(*args, **kwargs): paths = scatter.__wrapped__(*args, **kwargs) with suppress(KeyError): _nonscatter_pathcollections.remove(paths) return paths Axes.scatter = scatter
def _register_scatter()
Patch `PathCollection` and `scatter` to register their return values. This registration allows us to distinguish `PathCollection`s created by `Axes.scatter`, which should use point-like picking, from others, which should use path-like picking. The former is more common, so we store the latter instead; this also lets us guess the type better if this module is imported late.
2.856919
2.447173
1.167436
transform = artist.get_transform().frozen() tpath = (path.cleaned(transform) if transform.is_affine # `cleaned` only handles affine transforms. else transform.transform_path(path).cleaned()) # `cleaned` should return a path where the first element is `MOVETO`, the # following are `LINETO` or `CLOSEPOLY`, and the last one is `STOP`, i.e. # codes = path.codes # assert (codes[0], codes[-1]) == (path.MOVETO, path.STOP) # assert np.in1d(codes[1:-1], [path.LINETO, path.CLOSEPOLY]).all() vertices = tpath.vertices[:-1] codes = tpath.codes[:-1] vertices[codes == tpath.CLOSEPOLY] = vertices[0] # Unit vectors for each segment. us = vertices[1:] - vertices[:-1] ls = np.hypot(*us.T) with np.errstate(invalid="ignore"): # Results in 0/0 for repeated consecutive points. us /= ls[:, None] # Vectors from each vertex to the event (overwritten below). vs = xy - vertices[:-1] # Clipped dot products -- `einsum` cannot be done in place, `clip` can. # `clip` can trigger invalid comparisons if there are nan points. with np.errstate(invalid="ignore"): dot = np.clip(np.einsum("ij,ij->i", vs, us), 0, ls, out=vs[:, 0]) # Projections. projs = vertices[:-1] + dot[:, None] * us ds = np.hypot(*(xy - projs).T, out=vs[:, 1]) try: argmin = np.nanargmin(ds) dmin = ds[argmin] except (ValueError, IndexError): # See above re: exceptions caught. return else: target = AttrArray( artist.axes.transData.inverted().transform_point(projs[argmin])) target.index = ( (argmin + dot[argmin] / ls[argmin]) / (path._interpolation_steps / tpath._interpolation_steps)) return Selection(artist, target, dmin, None, None)
def _compute_projection_pick(artist, path, xy)
Project *xy* on *path* to obtain a `Selection` for *artist*. *path* is first transformed to screen coordinates using the artist transform, and the target of the returned `Selection` is transformed back to data coordinates using the artist *axes* inverse transform. The `Selection` `index` is returned as a float. This function returns ``None`` for degenerate inputs. The caller is responsible for converting the index to the proper class if needed.
5.083385
4.923317
1.032512
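The core geometry in `_compute_projection_pick` is point-to-segment projection: dot (p - a) with the segment's unit vector, clip to [0, segment length], and offset from a. A minimal 2-D version with hypothetical points:

import numpy as np

a, b = np.array([0.0, 0.0]), np.array([4.0, 0.0])  # segment endpoints
p = np.array([1.0, 2.0])                           # event position

length = np.hypot(*(b - a))
u = (b - a) / length                               # unit direction
dot = np.clip(np.dot(p - a, u), 0, length)         # clamped projection
proj = a + dot * u
print(proj, np.hypot(*(p - proj)))                 # [1. 0.] 2.0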
tr_xy = ax.transData.transform(orig_xy) return ( orig_xy if ((tr_xy == screen_xy) | np.isnan(tr_xy) & np.isnan(screen_xy)).all() else ax.transData.inverted().transform(screen_xy))
def _untransform(orig_xy, screen_xy, ax)
Return data coordinates to place an annotation at screen coordinates *screen_xy* in axes *ax*. *orig_xy* are the "original" coordinates as stored by the artist; they are transformed to *screen_xy* by whatever transform the artist uses. If the artist uses ``ax.transData``, just return *orig_xy*; else, apply ``ax.transData.inverse()`` to *screen_xy*. (The first case is more accurate than always applying ``ax.transData.inverse()``.)
3.71154
3.932102
0.943907
wrapped_kwonly_params = [ param for param in inspect.signature(func).parameters.values() if param.kind == param.KEYWORD_ONLY] sel_sig = inspect.signature(Selection) default_sel_sig = sel_sig.replace( parameters=[param.replace(default=None) if param.default is param.empty else param for param in sel_sig.parameters.values()]) @functools.wraps(func) def wrapper(*args, **kwargs): extra_kw = {param.name: kwargs.pop(param.name) for param in wrapped_kwonly_params if param.name in kwargs} ba = default_sel_sig.bind(*args, **kwargs) # apply_defaults ba.arguments = ChainMap( ba.arguments, {name: param.default for name, param in default_sel_sig.parameters.items() if param.default is not param.empty}) sel = Selection(*ba.args, **ba.kwargs) return func(sel, **extra_kw) wrapper.__signature__ = Signature( list(sel_sig.parameters.values()) + wrapped_kwonly_params) return wrapper
def _call_with_selection(func)
Decorator that passes a `Selection` built from the non-kwonly args.
2.461912
2.33536
1.054189
artist.set(**{k: kwargs[k] for k in kwargs if hasattr(artist, "set_" + k)}) return artist
def _set_valid_props(artist, kwargs)
Set valid properties for the artist, dropping the others.
4.590794
4.128487
1.11198
if currency == self._currency: return self rate = xrates.quotation(self._currency, currency) if rate is None: raise ExchangeRateNotFound(xrates.backend_name, self._currency, currency) amount = self._amount * rate return self.__class__(amount, currency)
def to(self, currency)
Return equivalent money object in another currency
5.165535
4.99798
1.033525
if BABEL_AVAILABLE: if BABEL_VERSION < StrictVersion('2.2'): raise Exception('Babel {} is unsupported. ' 'Please upgrade to 2.2 or higher.'.format(BABEL_VERSION)) return babel.numbers.format_currency( self._amount, self._currency, format=pattern, locale=locale, currency_digits=currency_digits, format_type=format_type) else: raise NotImplementedError("formatting requires Babel " "(https://pypi.python.org/pypi/Babel)")
def format(self, locale=LC_NUMERIC, pattern=None, currency_digits=True, format_type='standard')
Return a locale-aware, currency-formatted string. This method emulates babel.numbers.format_currency(). A specific locale identifier (language[_territory]) can be passed, otherwise the system's default locale will be used. A custom formatting pattern of the form "¤#,##0.00;(¤#,##0.00)" (positive[;negative]) can also be passed, otherwise it will be determined from the locale and the CLDR (Unicode Common Locale Data Repository) included with Babel. >>> m = Money('1234.567', 'EUR') >>> m.format() # assuming the system's locale is 'en_US' €1,234.57 >>> m.format('de_DE') # German formatting 1.234,57 € >>> m.format('de', '#,##0 ¤') # German formatting (short), no cents 1.235 € >>> m.format(pattern='#,##0.00 ¤¤¤') # Default locale, full name 1,234.57 euro Learn more about this formatting syntax at: http://www.unicode.org/reports/tr35/tr35-numbers.html
3.411249
3.822494
0.892414
try: currency, amount = s.strip().split(' ') return cls(amount, currency) except ValueError as err: # RADAR: Python2 money.six.raise_from(ValueError("failed to parse string " "'{}': {}".format(s, err)), None)
def loads(cls, s)
Parse from a string representation (repr)
8.531014
8.539536
0.999002
# The rounding allows sorting areas without floating point issues. bbox = bbox_1.intersection(bbox_1, bbox_2) return round(bbox.width * bbox.height, 8) if bbox else 0
def _get_rounded_intersection_area(bbox_1, bbox_2)
Compute the intersection area between two bboxes rounded to 8 digits.
8.843627
6.432897
1.37475
yield from ax.collections yield from ax.images yield from ax.lines yield from ax.patches yield from ax.texts
def _iter_axes_subartists(ax)
r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*.
4.61578
4.879838
0.945888
return bool(artist and artist.axes and (artist.container in artist.axes.containers if isinstance(artist, _pick_info.ContainerArtist) else artist in _iter_axes_subartists(artist.axes)))
def _is_alive(artist)
Check whether *artist* is still present on its parent axes.
14.962186
9.758525
1.533243
event = copy.copy(event) event.xdata, event.ydata = ( ax.transData.inverted().transform_point((event.x, event.y))) return event
def _reassigned_axes_event(event, ax)
Reassign *event* to *ax*.
3.053477
2.938114
1.039264
if pickables is None: # Do not import pyplot ourselves to avoid forcing the backend. plt = sys.modules.get("matplotlib.pyplot") pickables = [ plt.figure(num) for num in plt.get_fignums()] if plt else [] elif (isinstance(pickables, Container) or not isinstance(pickables, Iterable)): pickables = [pickables] def iter_unpack_figures(pickables): for entry in pickables: if isinstance(entry, Figure): yield from entry.axes else: yield entry def iter_unpack_axes(pickables): for entry in pickables: if isinstance(entry, Axes): yield from _iter_axes_subartists(entry) containers.extend(entry.containers) elif isinstance(entry, Container): containers.append(entry) else: yield entry containers = [] artists = list(iter_unpack_axes(iter_unpack_figures(pickables))) for container in containers: contained = list(filter(None, container.get_children())) for artist in contained: with suppress(ValueError): artists.remove(artist) if contained: artists.append(_pick_info.ContainerArtist(container)) return Cursor(artists, **kwargs)
def cursor(pickables=None, **kwargs)
Create a `Cursor` for a list of artists, containers, and axes. Parameters ---------- pickables : Optional[List[Union[Artist, Container, Axes, Figure]]] All artists and containers in the list or on any of the axes or figures passed in the list are selectable by the constructed `Cursor`. Defaults to all artists and containers on any of the figures that :mod:`~matplotlib.pyplot` is tracking. Note that the latter will only work when relying on pyplot, not when figures are directly instantiated (e.g., when manually embedding Matplotlib in a GUI toolkit). **kwargs Keyword arguments are passed to the `Cursor` constructor.
3.733862
3.378994
1.105022
for sel in self._selections: if sel.annotation.axes is None: raise RuntimeError("Annotation unexpectedly removed; " "use 'cursor.remove_selection' instead") return tuple(self._selections)
def selections(self)
r"""The tuple of current `Selection`\s.
14.896916
12.144608
1.226628
# pi: "pick_info", i.e. an incomplete selection. # Pre-fetch the figure and axes, as callbacks may actually unset them. figure = pi.artist.figure axes = pi.artist.axes if axes.get_renderer_cache() is None: figure.canvas.draw() # Needed by draw_artist below anyways. renderer = pi.artist.axes.get_renderer_cache() ann = pi.artist.axes.annotate( _pick_info.get_ann_text(*pi), xy=pi.target, xytext=(np.nan, np.nan), ha=_MarkedStr("center"), va=_MarkedStr("center"), visible=self.visible, **self.annotation_kwargs) ann.draggable(use_blit=not self._multiple) extras = [] if self._highlight: hl = self.add_highlight(*pi) if hl: extras.append(hl) sel = pi._replace(annotation=ann, extras=extras) self._selections.append(sel) for cb in self._callbacks["add"]: cb(sel) # Check that `ann.axes` is still set, as callbacks may have removed the # annotation. if ann.axes and ann.xyann == (np.nan, np.nan): fig_bbox = figure.get_window_extent() ax_bbox = axes.get_window_extent() overlaps = [] for idx, annotation_position in enumerate( self.annotation_positions): ann.set(**annotation_position) # Work around matplotlib/matplotlib#7614: position update is # missing. ann.update_positions(renderer) bbox = ann.get_window_extent(renderer) overlaps.append( (_get_rounded_intersection_area(fig_bbox, bbox), _get_rounded_intersection_area(ax_bbox, bbox), # Avoid needlessly jumping around by breaking ties using # the last used position as default. idx == self._last_auto_position)) auto_position = max(range(len(overlaps)), key=overlaps.__getitem__) ann.set(**self.annotation_positions[auto_position]) self._last_auto_position = auto_position else: if isinstance(ann.get_ha(), _MarkedStr): ann.set_ha({-1: "right", 0: "center", 1: "left"}[ np.sign(np.nan_to_num(ann.xyann[0]))]) if isinstance(ann.get_va(), _MarkedStr): ann.set_va({-1: "top", 0: "center", 1: "bottom"}[ np.sign(np.nan_to_num(ann.xyann[1]))]) if (extras or len(self.selections) > 1 and not self._multiple or not figure.canvas.supports_blit): # Either: # - there may be more things to draw, or # - annotation removal will make a full redraw necessary, or # - blitting is not (yet) supported. figure.canvas.draw_idle() elif ann.axes: # Fast path, only needed if the annotation has not been immediately # removed. figure.draw_artist(ann) # Explicit argument needed on MacOSX backend. figure.canvas.blit(figure.bbox) # Removal comes after addition so that the fast blitting path works. if not self._multiple: for sel in self.selections[:-1]: self.remove_selection(sel) return sel
def add_selection(self, pi)
Create an annotation for a `Selection` and register it. Returns a new `Selection`, that has been registered by the `Cursor`, with the added annotation set in the :attr:`annotation` field and, if applicable, the highlighting artist in the :attr:`extras` field. Emits the ``"add"`` event with the new `Selection` as argument. When the event is emitted, the position of the annotation is temporarily set to ``(nan, nan)``; if this position is not explicitly set by a callback, then a suitable position will be automatically computed. Likewise, if the text alignment is not explicitly set but the position is, then a suitable alignment will be automatically computed.
5.051974
4.921668
1.026476
hl = _pick_info.make_highlight( artist, *args, **ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs)) if hl: artist.axes.add_artist(hl) return hl
def add_highlight(self, artist, *args, **kwargs)
Create, add, and return a highlighting artist. This method should be called with an "unpacked" `Selection`, possibly with some fields set to None. It is up to the caller to register the artist with the proper `Selection` (by calling ``sel.extras.append`` on the result of this method) in order to ensure cleanup upon deselection.
7.880658
10.233662
0.770072
if event not in self._callbacks: raise ValueError("{!r} is not a valid cursor event".format(event)) if func is None: return partial(self.connect, event) self._callbacks[event].append(func) return func
def connect(self, event, func=None)
Connect a callback to a `Cursor` event; return the callback. Two events can be connected to: - callbacks connected to the ``"add"`` event are called when a `Selection` is added, with that selection as only argument; - callbacks connected to the ``"remove"`` event are called when a `Selection` is removed, with that selection as only argument. This method can also be used as a decorator:: @cursor.connect("add") def on_add(sel): ... Examples of callbacks:: # Change the annotation text and alignment: lambda sel: sel.annotation.set( text=sel.artist.get_label(), # or use e.g. sel.target.index ha="center", va="bottom") # Make label non-draggable: lambda sel: sel.draggable(False)
3.261155
3.594187
0.907342
try: self._callbacks[event].remove(cb) except KeyError: raise ValueError("{!r} is not a valid cursor event".format(event)) except ValueError: raise ValueError("Callback {} is not registered".format(cb))
def disconnect(self, event, cb)
Disconnect a previously connected callback. If a callback is connected multiple times, only one connection is removed.
4.134219
4.458672
0.927231
for disconnectors in self._disconnectors: disconnectors() for sel in self.selections: self.remove_selection(sel) for s in type(self)._keep_alive.values(): with suppress(KeyError): s.remove(self)
def remove(self)
Remove a cursor. Remove all `Selection`\\s, disconnect all callbacks, and allow the cursor to be garbage collected.
9.112571
7.237389
1.259097
self._selections.remove(sel) # <artist>.figure will be unset so we save them first. figures = {artist.figure for artist in [sel.annotation] + sel.extras} # ValueError is raised if the artist has already been removed. with suppress(ValueError): sel.annotation.remove() for artist in sel.extras: with suppress(ValueError): artist.remove() for cb in self._callbacks["remove"]: cb(sel) for figure in figures: figure.canvas.draw_idle()
def remove_selection(self, sel)
Remove a `Selection`.
5.730512
5.325259
1.0761
if args is None: raise_error("Couldn't extract GitHub authentication code " "from response") # TODO: Is there a case where the length of the error will be < 0? error = args.get("error_description", None) if error is not None: if len(error) >= 0: raise_github_error(error) else: raise_error("Something went wrong") access_code = args.get("code", None) # access_code is supposed to be a list with 1 thing in it if not isinstance(access_code, list) or access_code[0] is None or \ len(access_code) != 1 or len(access_code[0]) <= 0: raise_error("Couldn't extract GitHub authentication code from " "response") # If we get here, everything was good - no errors access_code = access_code[0].decode('ascii') return access_code
def extract_code_from_args(args)
Extracts the access code from the arguments dictionary (given back from GitHub)
4.014211
3.757026
1.068454
"Request access token from GitHub" token_response = request_session.post( "https://github.com/login/oauth/access_token", data={ "client_id": self.oauth_client_id, "client_secret": self.oauth_client_secret, "code": access_code }, headers={"Accept": "application/json"}, ) return helper_request_access_token(token_response.json())
def request_access_token(self, access_code)
Request access token from GitHub
2.584154
2.421165
1.067319
if not recs: return None if len(recs) == 1: return recs.pop() new_rec = {} for rec in recs: for k, v in rec.iteritems(): if k in new_rec: new_rec[k] = "%s, %s" % (new_rec[k], v) else: new_rec[k] = v return new_rec
def combine_dicts(recs)
Combine a list of recs, appending values to matching keys
1.763397
1.716975
1.027037
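For illustration, a Python 3 restatement of the merge above (`items()` in place of `iteritems()`; the empty-list and single-item short-circuits are dropped for brevity):

def combine_dicts(recs):
    new_rec = {}
    for rec in recs:
        for k, v in rec.items():
            # Append to an existing key, otherwise take the value as-is.
            new_rec[k] = "%s, %s" % (new_rec[k], v) if k in new_rec else v
    return new_rec

print(combine_dicts([{'name': 'eth0'}, {'name': 'eth1', 'mac': 'aa:bb'}]))
# {'name': 'eth0, eth1', 'mac': 'aa:bb'}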
final_recs = {} for rec in rec_list: rec_key = rec[key] if rec_key in final_recs: for k, v in rec.iteritems(): if k in final_recs[rec_key] and final_recs[rec_key][k] != v: raise Exception("Mis-match for key '%s'" % k) final_recs[rec_key][k] = v else: final_recs[rec_key] = rec return final_recs.values()
def combine_recs(rec_list, key)
Use a common key to combine a list of recs
1.869721
1.82141
1.026524
parser = ArgumentParser(prog="hwinfo") filter_choices = ['bios', 'nic', 'storage', 'gpu', 'cpu'] parser.add_argument("-f", "--filter", choices=filter_choices, help="Query a specific class.") parser.add_argument("-m", "--machine", default='localhost', help="Remote host address.") parser.add_argument("-u", "--username", help="Username for remote host.") parser.add_argument("-p", "--password", help="Password for remote host.") parser.add_argument("-l", "--logs", help="Path to the directory with the logfiles.") parser.add_argument("-e", "--export", action="store_true", help="Export result in JSON format.") args = parser.parse_args() validate_args(args) if args.logs: if ".tar" in args.logs: host = HostFromTarball(args.logs) else: host = HostFromLogs(args.logs) else: host = Host(args.machine, args.username, args.password) options = [] if args.filter: filter_args = args.filter.split(',') for arg in filter_args: options.append(arg.strip()) else: options = filter_choices if args.export: print export_system_info(host, options) else: print system_info(host, options)
def main()
Entry Point
2.724721
2.707278
1.006443
if filename in self.fdata: return self.fdata[filename] else: filepath = find_in_tarball(self.tarloc, filename) return read_from_tarball(self.tarloc, filepath)
def _load_from_file(self, filename)
Find filename in tar, and load it
5.258688
4.055693
1.296619
tag = normalize_characters(tag) if tag in EXCEPTIONS: return [('grandfathered', tag)] else: # The first subtag is always either the language code, or 'x' to mark # the entire tag as private-use. Other subtags are distinguished # by their length and format, but the language code is distinguished # entirely by the fact that it is required to come first. subtags = tag.split('-') if subtags[0] == 'x': if len(subtags) == 1: raise LanguageTagError("'x' is not a language tag on its own") else: # the entire language tag is private use, but we know that, # whatever it is, it fills the "language" slot return [('language', tag)] elif len(subtags[0]) >= 2: return [('language', subtags[0])] + parse_subtags(subtags[1:]) else: subtag_error(subtags[0], 'a language code')
def parse_tag(tag)
Parse the syntax of a language tag, without looking up anything in the registry, yet. Returns a list of (type, value) tuples indicating what information will need to be looked up.
5.745822
5.558221
1.033752
index = 0 parsed = [] while index < len(subtags) and len(subtags[index]) == 3 and index < 3: parsed.append(('extlang', subtags[index])) index += 1 return parsed + parse_subtags(subtags[index:], SCRIPT)
def parse_extlang(subtags)
Parse an 'extended language' tag, which consists of 1 to 3 three-letter language codes. Extended languages are used for distinguishing dialects/sublanguages (depending on your view) of macrolanguages such as Arabic, Bahasa Malay, and Chinese. It's supposed to also be acceptable to just use the sublanguage as the primary language code, and your code should know what's a macrolanguage of what. For example, 'zh-yue' and 'yue' are the same language (Cantonese), and differ only in whether they explicitly spell out that Cantonese is a kind of Chinese.
3.802976
4.400855
0.864145
subtag = subtags[0] if len(subtags) == 1: raise LanguageTagError( "The subtag %r must be followed by something" % subtag ) if subtag == 'x': # Private use. Everything after this is arbitrary codes that we # can't look up. return [('private', '-'.join(subtags))] else: # Look for the next singleton, if there is one. boundary = 1 while boundary < len(subtags) and len(subtags[boundary]) != 1: boundary += 1 # We've parsed a complete extension subtag. Return to the main # parse_subtags function, but expect to find nothing but more # extensions at this point. return ([('extension', '-'.join(subtags[:boundary]))] + parse_subtags(subtags[boundary:], EXTENSION))
def parse_extension(subtags)
An extension tag consists of a 'singleton' -- a one-character subtag -- followed by other subtags. Extension tags are in the BCP 47 syntax, but their meaning is outside the scope of the standard. For example, there's the u- extension, which is used for setting Unicode properties in some context I'm not aware of. If the singleton is 'x', it's a private use extension, and consumes the rest of the tag. Otherwise, it stops at the next singleton.
5.622087
5.208914
1.07932
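The boundary scan in `parse_extension` simply advances to the next one-character subtag. Isolated below, with a hypothetical Unicode-extension tail:

subtags = ['u', 'co', 'phonebk', 't', 'ja']  # hypothetical extension subtags
boundary = 1
while boundary < len(subtags) and len(subtags[boundary]) != 1:
    boundary += 1
print('-'.join(subtags[:boundary]))  # 'u-co-phonebk' -- one full extension
print(subtags[boundary:])            # ['t', 'ja'] -- handed back to the parser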
options = SUBTAG_TYPES[expected:] if len(options) == 1: expect_str = options[0] elif len(options) == 2: expect_str = '%s or %s' % (options[0], options[1]) else: expect_str = '%s, or %s' % (', '.join(options[:-1]), options[-1]) got_str = SUBTAG_TYPES[got] raise LanguageTagError("This %s subtag, %r, is out of place. " "Expected %s." % (got_str, subtag, expect_str))
def order_error(subtag, got, expected)
Output an error indicating that tags were out of order.
3.047347
2.959776
1.029587