code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def forward(self, hidden: torch.Tensor):
    """Apply the pre-norm MLP (optionally gated) with a residual connection.

    Args:
        hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`):
            Input tensor to the layer.

    Returns:
        `torch.Tensor`: Transformed tensor of the same shape.
    """
    skip = hidden
    transformed = self.norm(hidden)
    transformed = self.mlp(transformed)
    if self.gated_attn:
        # Optional gating applied after the MLP.
        transformed = self.gating_block(transformed)
    return transformed + skip
Args: hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`): Input tensor to the layer. Returns: `torch.Tensor`: Transformed tensor.
github-repos
def loop(self, xy, yx):
    """Decide whether the sampling loop should continue.

    Runs Welch's t-test on the two result lists; the loop continues while
    the difference is not yet significant and the iteration budget allows.

    Args:
        xy (list): list containing all the results for one set of samples.
        yx (list): list containing all the results for the other set.

    Returns:
        bool: True if the loop has to continue, False otherwise.
    """
    if len(xy) > 0:
        self.iter += self.runs_per_iter
    # Too few iterations for a meaningful test yet: keep looping.
    if self.iter < 2:
        return True
    _, self.p_value = ttest_ind(xy, yx, equal_var=False)
    if self.p_value > self.threshold and self.iter < self.max_iter:
        return True
    return False
Tests the loop condition based on the new results and the parameters. Args: xy (list): list containing all the results for one set of samples yx (list): list containing all the results for the other set. Returns: bool: True if the loop has to continue, False otherwise.
juraj-google-style
def visit_boolean_op(self, boolean_logic: _evaluation.BooleanOperatorNode) -> _sql_data_types.Select:
    """Translates a FHIRPath Boolean logic operation to Spark SQL.

    Non-Boolean operands are coerced to Booleans via IS NOT NULL checks.
    Evaluation for Boolean logic is only supported for Boolean operands of
    scalar cardinality.

    Args:
        boolean_logic: The FHIRPath AST `BooleanLogic` node.

    Returns:
        A compiled Spark SQL expression.
    """
    left = self.visit(boolean_logic.left)
    right = self.visit(boolean_logic.right)
    # Coerce non-Boolean operands into existence checks.
    if left.sql_data_type != _sql_data_types.Boolean:
        left = left.is_not_null()
    if right.sql_data_type != _sql_data_types.Boolean:
        right = right.is_not_null()
    lhs = left.as_operand()
    rhs = right.as_operand()
    op = boolean_logic.op
    if op == _ast.BooleanLogic.Op.IMPLIES:
        sql_value = f'NOT {lhs} OR {rhs}'
    elif op == _ast.BooleanLogic.Op.XOR:
        # XOR has no Spark SQL keyword; inequality of Booleans is equivalent.
        sql_value = f'{lhs} <> {rhs}'
    else:
        sql_value = f'{lhs} {op.upper()} {rhs}'
    return _sql_data_types.Select(
        select_part=_sql_data_types.RawExpression(
            sql_value,
            _sql_data_type=_sql_data_types.Boolean,
            _sql_alias='logic_'),
        from_part=None,
        sql_dialect=_sql_data_types.SqlDialect.SPARK)
Translates a FHIRPath Boolean logic operation to Spark SQL. Note that evaluation for Boolean logic is only supported for Boolean operands of scalar cardinality. Args: boolean_logic: The FHIRPath AST `BooleanLogic` node. Returns: A compiled Spark SQL expression.
github-repos
def swd_read32(self, offset):
    """Gets a unit of ``32`` bits from the input buffer.

    Args:
        self (JLink): the ``JLink`` instance.
        offset (int): the offset (in bits) from which to start reading.

    Returns:
        int: the unsigned 32-bit integer read from the input buffer.
    """
    raw = self._dll.JLINK_SWD_GetU32(offset)
    # Reinterpret the DLL return value as an unsigned 32-bit integer.
    return ctypes.c_uint32(raw).value
Gets a unit of ``32`` bits from the input buffer. Args: self (JLink): the ``JLink`` instance offset (int): the offset (in bits) from which to start reading Returns: The integer read from the input buffer.
juraj-google-style
def restore_state(self, state, name=None):
    """Restore a reader to a previously saved state.

    Not all Readers support being restored, so this can produce an
    Unimplemented error.

    Args:
        state: A string Tensor. Result of a SerializeState of a Reader with
            matching type.
        name: A name for the operation (optional).

    Returns:
        The created Operation.
    """
    # Resource-based readers use the V2 kernel; legacy refs use the V1 kernel.
    if self._reader_ref.dtype == dtypes.resource:
        restore_op = gen_io_ops.reader_restore_state_v2
    else:
        restore_op = gen_io_ops.reader_restore_state
    return restore_op(self._reader_ref, state, name=name)
Restore a reader to a previously saved state. Not all Readers support being restored, so this can produce an Unimplemented error. Args: state: A string Tensor. Result of a SerializeState of a Reader with matching type. name: A name for the operation (optional). Returns: The created Operation.
github-repos
def getsource(classorfunc):
    """Return the source code for a class or function.

    Notes:
        Returned source will not include any decorators for the object; only
        the explicit declaration of the object is returned, not any
        dependencies.

    Args:
        classorfunc (type or function): the object to get the source code for.

    Returns:
        str: text of source code (without any decorators). Note: in
        Python 2, this returns unicode.
    """
    if _isbuiltin(classorfunc):
        return ''
    try:
        source = inspect.getsource(classorfunc)
    except TypeError:
        source = getsourcefallback(classorfunc)

    declaration = []
    lines = source.splitlines()
    if PY2 and not isinstance(source, unicode):
        # Python 2: decode the raw source using its declared encoding.
        encoding = detect_encoding(iter(lines).next)[0]
        sourcelines = (raw.decode(encoding) for raw in lines)
    else:
        sourcelines = iter(lines)

    # Collect the (possibly multi-line) declaration up to the closing colon.
    found_keyword = False
    for line in sourcelines:
        words = line.split()
        if not words:
            continue
        if words[0] in ('def', 'class'):
            found_keyword = True
        if found_keyword:
            colon_idx = line.find(':')
            if colon_idx > 0:
                declaration.append(line[:colon_idx + 1])
                after_decl = line[colon_idx + 1:].strip()
                break
            declaration.append(line)

    bodylines = list(sourcelines)
    if type(classorfunc) == type:
        cls = classorfunc
        # Emit import statements for non-builtin, non-__main__ base classes.
        base_imports = {}
        for base in cls.__bases__:
            if base.__name__ == 'object' and base.__module__ == 'builtins':
                continue
            if base in base_imports:
                continue
            if base.__module__ == '__main__':
                continue
            base_imports[base] = 'from %s import %s' % (base.__module__, base.__name__)
        colon_idx = declaration[0].index('class ')
        declstring = declaration[0][:colon_idx] + ('class %s(%s):%s' % (
            cls.__name__,
            ','.join([base.__name__ for base in cls.__bases__]),
            after_decl))
        declaration = [impstring for (base, impstring) in base_imports.items()
                       if base.__module__ != '__builtin__']
        declaration.append(declstring)
    else:
        declaration[-1] += after_decl
    return '\n'.join(declaration + bodylines)
Return the source code for a class or function. Notes: Returned source will not include any decorators for the object. This will only return the explicit declaration of the object, not any dependencies Args: classorfunc (type or function): the object to get the source code for Returns: str: text of source code (without any decorators). Note: in python 2, this returns unicode
codesearchnet
def __init__(self, sdat):
    """Initialize the snapshot accessor.

    Args:
        sdat (:class:`StagyyData`): the StagyyData instance owning this
            :class:`_Snaps` instance (also stored as the ``sdat`` attribute).
    """
    # Lazily filled cache of snapshot-index -> time-step-index mappings.
    self._isteps = {}
    self._all_isteps_known = False
    super().__init__(sdat)
Initialization of instances: Args: sdat (:class:`StagyyData`): the StagyyData instance owning the :class:`_Snaps` instance. Attributes: sdat (:class:`StagyyData`): the StagyyData instance owning the :class:`_Snaps` instance.
juraj-google-style
def get_path(self, origX, origY, destX, destY):
    """Get the shortest path from (origX, origY) to (destX, destY).

    Returns:
        List[Tuple[int, int]]: the path walked from orig to dest, excluding
        the starting point and including the destination. Empty if no path
        is found.
    """
    # Delegate to the base pathfinder implementation.
    return super(AStar, self).get_path(origX, origY, destX, destY)
Get the shortest path from origXY to destXY. Returns: List[Tuple[int, int]]: Returns a list walking the path from orig to dest. This excludes the starting point and includes the destination. If no path is found then an empty list is returned.
codesearchnet
def train_model(self, train_op, cost_to_log, num_steps, feed_vars=(), feed_data=None, print_every=100):
    """Trains the given model.

    Args:
        train_op: The training operation.
        cost_to_log: A cost (or sequence of costs) to log.
        num_steps: Number of batches to run.
        feed_vars: A list or tuple of the variables that will be fed.
        feed_data: A generator that produces tuples of the same length as
            feed_vars.
        print_every: Print and save every so many steps.

    Returns:
        `cost_to_log` from the final step.
    """
    import collections.abc
    costs = [train_op]
    # A single cost may itself be a sequence of costs; strings are treated
    # as a single cost even though they are sequences.
    # (collections.Sequence / six.string_types were removed/3rd-party;
    # collections.abc.Sequence and str are the supported spellings.)
    if isinstance(cost_to_log, collections.abc.Sequence) and not isinstance(cost_to_log, str):
        costs.extend(cost_to_log)
    else:
        costs.append(cost_to_log)
    # Strip the train_op result (and bookkeeping) from the returned values.
    return self.run_model(costs, num_steps, feed_vars=feed_vars,
                          feed_data=feed_data, print_every=print_every)[2:]
Trains the given model. Args: train_op: The training operation. cost_to_log: A cost to log. num_steps: Number of batches to run. feed_vars: A list or tuple of the variables that will be fed. feed_data: A generator that produces tuples of the same length as feed_vars. print_every: Print and save every so many steps. Returns: `cost_to_log` from the final step.
codesearchnet
def _on_status_message(self, sequence, topic, message):
    """Process a received status message.

    Args:
        sequence (int): The sequence number of the packet received.
        topic (string): The topic this message was received on.
        message (dict): The message itself.
    """
    self._logger.debug("Received message on (topic=%s): %s" % (topic, message))
    try:
        conn_key = self._find_connection(topic)
    except ArgumentError:
        self._logger.warn("Dropping message that does not correspond with a known connection, message=%s", message)
        return
    if not messages.ConnectionResponse.matches(message):
        self._logger.warn("Dropping message that did not correspond with a known schema, message=%s", message)
        return
    # Responses addressed to other clients are ignored.
    if self.name != message['client']:
        self._logger.debug("Connection response received for a different client, client=%s, name=%s", message['client'], self.name)
        return
    self.conns.finish_connection(conn_key, message['success'], message.get('failure_reason', None))
Process a status message received Args: sequence (int): The sequence number of the packet received topic (string): The topic this message was received on message (dict): The message itself
juraj-google-style
def get_variables_in_scope(scope, collection=tf.GraphKeys.TRAINABLE_VARIABLES):
    """Returns a tuple of `tf.Variable`s in a scope for a given collection.

    Args:
        scope: `tf.VariableScope` or string to retrieve variables from.
        collection: Collection to restrict query to. By default this is
            `tf.GraphKeys.TRAINABLE_VARIABLES`, which doesn't include
            non-trainable variables such as moving averages.

    Returns:
        A tuple of `tf.Variable` objects.
    """
    scope_name = get_variable_scope_name(scope)
    if scope_name:
        # Escape regex-special characters so the name matches literally, and
        # anchor with a trailing slash to avoid matching prefix-sharing scopes.
        scope_name = re.escape(scope_name) + '/'
    return tuple(tf.get_collection(collection, scope_name))
Returns a tuple `tf.Variable`s in a scope for a given collection. Args: scope: `tf.VariableScope` or string to retrieve variables from. collection: Collection to restrict query to. By default this is `tf.Graphkeys.TRAINABLE_VARIABLES`, which doesn't include non-trainable variables such as moving averages. Returns: A tuple of `tf.Variable` objects.
codesearchnet
def decode_body(headers: MutableMapping, body: bytes) -> dict:
    """Decode the response body.

    For an 'application/json' content-type the body is loaded as a
    dictionary; otherwise a small ``{'ok': ...}`` wrapper is returned.

    Args:
        headers: Response headers.
        body: Response body.

    Returns:
        dict: decoded body.
    """
    content_type, encoding = parse_content_type(headers)
    text = body.decode(encoding)
    if content_type == 'application/json':
        return json.loads(text)
    # Slack-style plain-text responses: the literal 'ok' means success.
    if text == 'ok':
        return {'ok': True}
    return {'ok': False, 'data': text}
Decode the response body For 'application/json' content-type load the body as a dictionary Args: headers: Response headers body: Response body Returns: decoded body
codesearchnet
def get_model(self, model, model_id):
    """Get a single model from the server.

    Args:
        model (string): The class as a string.
        model_id (string): The integer ID as a string.

    Returns:
        :class:`cinder_data.model.CinderModel`: An instance of the model.
    """
    model_class = self._get_model_class(model)
    return self._store.find_record(model_class, int(model_id))
Get a single model from the server. Args: model (string): The class as a string. model_id (string): The integer ID as a string. Returns: :class:`cinder_data.model.CinderModel`: An instance of the model.
juraj-google-style
def replace_all(self, replacements):
    """Dynamically replaces PTransforms in the currently populated hierarchy.

    Currently this only works for replacements where input and output types
    are exactly the same.

    Args:
        replacements (List[~apache_beam.pipeline.PTransformOverride]): a
            list of :class:`~apache_beam.pipeline.PTransformOverride`
            objects.
    """
    for override in replacements:
        assert isinstance(override, PTransformOverride)
        self._replace(override)
    # Verify only after every override has been applied.
    for override in replacements:
        self._check_replacement(override)
Dynamically replaces PTransforms in the currently populated hierarchy. Currently this only works for replacements where input and output types are exactly the same. TODO: Update this to also work for transform overrides where input and output types are different. Args: replacements (List[~apache_beam.pipeline.PTransformOverride]): a list of :class:`~apache_beam.pipeline.PTransformOverride` objects.
github-repos
def make_fout(fout='./tmp', fmt='pcap'):
    """Make root path for output.

    Args:
        fout (str): root path for output.
        fmt (str): output format.

    Returns:
        tuple: (dumper class of the specified format, normalised format name).
    """
    if fmt == 'pcap':
        from pcapkit.dumpkit import PCAP as output
    elif fmt == 'plist':
        from dictdumper import PLIST as output
    elif fmt == 'json':
        from dictdumper import JSON as output
    elif fmt == 'tree':
        from dictdumper import Tree as output
        fmt = 'txt'
    elif fmt == 'html':
        from dictdumper import JavaScript as output
        fmt = 'js'
    elif fmt == 'xml':
        from dictdumper import XML as output
    else:
        # Unknown format: fall back to a no-op dumper and disable file output.
        from pcapkit.dumpkit import NotImplementedIO as output
        if fmt is not None:
            warnings.warn(f'Unsupported output format: {fmt}; disabled file output feature',
                          FormatWarning, stacklevel=stacklevel())
        return (output, '')
    try:
        pathlib.Path(fout).mkdir(parents=True, exist_ok=True)
    except FileExistsError as error:
        if fmt is None:
            warnings.warn(error.strerror, FileWarning, stacklevel=stacklevel())
        else:
            raise FileExists(*error.args) from None
    return (output, fmt)
Make root path for output. Positional arguments: * fout -- str, root path for output * fmt -- str, output format Returns: * output -- dumper of specified format
codesearchnet
def find_template_filename(self, template_name):
    """Searches for a file matching the given template name.

    Args:
        template_name (str): Name of the template, with or without a file
            extension.

    Returns:
        pathlib.Path: Path to the matching file, or None if nothing matched.
    """
    def candidates():
        base = self.path / template_name
        yield base
        try:
            extensions = self.default_file_extensions
        except AttributeError:
            # Loader defines no default extensions; only the bare name applies.
            return
        base_str = str(base)
        for extension in extensions:
            yield Path(base_str + extension)

    for candidate in candidates():
        if candidate.is_file():
            return candidate
Searches for a file matching the given template name. If found, this method returns the pathlib.Path object of the found template file. Args: template_name (str): Name of the template, with or without a file extension. Returns: pathlib.Path: Path to the matching filename.
codesearchnet
def swd_sync(self, pad=False):
    """Causes a flush to write all data remaining in output buffers to the
    SWD device.

    Args:
        self (JLink): the ``JLink`` instance.
        pad (bool): ``True`` if the data should be padded to full byte size.

    Returns:
        ``None``
    """
    sync = self._dll.JLINK_SWD_SyncBytes if pad else self._dll.JLINK_SWD_SyncBits
    sync()
    return None
Causes a flush to write all data remaining in output buffers to SWD device. Args: self (JLink): the ``JLink`` instance pad (bool): ``True`` if should pad the data to full byte size Returns: ``None``
codesearchnet
def set_viewbox(self, x, y, w, h):
    """Sets the origin and size of the viewbox, describing a virtual view area.

    Args:
        x (int): x coordinate of the viewbox origin.
        y (int): y coordinate of the viewbox origin.
        w (int): width of the viewbox.
        h (int): height of the viewbox.
    """
    self.attributes['viewBox'] = ' '.join(str(value) for value in (x, y, w, h))
    # Stretch content to fill the viewport regardless of aspect ratio.
    self.attributes['preserveAspectRatio'] = 'none'
Sets the origin and size of the viewbox, describing a virtual view area. Args: x (int): x coordinate of the viewbox origin y (int): y coordinate of the viewbox origin w (int): width of the viewbox h (int): height of the viewbox
juraj-google-style
def dropout(x, keep_prob, noise_shape=None, name=None):
    """Dropout layer.

    Args:
        x: a Tensor.
        keep_prob: a float between 0.0 and 1.0; probability of keeping a unit.
        noise_shape: an optional Shape (a subset of x.shape) over which the
            keep/drop decision is sampled; broadcast over remaining dims.
        name: an optional string.

    Returns:
        a Tensor.
    """
    noise_shape = convert_to_shape(noise_shape)
    if noise_shape is None:
        noise_shape = x.shape
    with tf.variable_scope(name, default_name="dropout"):
        if keep_prob == 1.0:
            # Nothing dropped: identity.
            return x
        keep_mask = cast(
            less(random_uniform(x.mesh, noise_shape, dtype=x.dtype), keep_prob),
            x.dtype)
        # Scale the kept units up so the expected value is unchanged.
        keep_mask /= keep_prob
        return x * keep_mask
Dropout layer. Args: x: a Tensor keep_prob: a float between 0.0 and 1.0 noise_shape: an optional Shape (a subset of x.shape) name: an optional string Returns: a Tensor
juraj-google-style
def create_chapter_from_url(self, url, title=None):
    """Creates a Chapter object from a url.

    Pulls the webpage from the given url, sanitizes it using the
    clean_function method, and saves it as the content of the created
    chapter. Basic webpage loaded before any javascript executed.

    Args:
        url (string): The url to pull the content of the created Chapter
            from.
        title (Option[string]): The title of the created Chapter. By
            default, this is None, in which case the title will try to be
            inferred from the webpage at the url.

    Returns:
        Chapter: A chapter object whose content is the webpage at the given
        url and whose title is that provided or inferred from the url.

    Raises:
        ValueError: Raised if unable to connect to the url supplied.
    """
    try:
        # Redirects are not followed so the chapter maps to the exact url.
        response = requests.get(url, headers=self.request_headers, allow_redirects=False)
    except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError):
        raise ValueError('%s is an invalid url or no network connection' % url)
    except requests.exceptions.SSLError:
        raise ValueError("Url %s doesn't have valid SSL certificate" % url)
    return self.create_chapter_from_string(response.text, url, title)
Creates a Chapter object from a url. Pulls the webpage from the given url, sanitizes it using the clean_function method, and saves it as the content of the created chapter. Basic webpage loaded before any javascript executed. Args: url (string): The url to pull the content of the created Chapter from title (Option[string]): The title of the created Chapter. By default, this is None, in which case the title will try to be inferred from the webpage at the url. Returns: Chapter: A chapter object whose content is the webpage at the given url and whose title is that provided or inferred from the url Raises: ValueError: Raised if unable to connect to url supplied
codesearchnet
def _verify_and_get_subgroup_size(self, group_assignment, num_shards): if not group_assignment: return None if not (isinstance(group_assignment, list) and all((isinstance(i, list) for i in group_assignment))): raise ValueError(f'Argument `group_assignment` must be a list of lists. Received: {group_assignment}') replica_ids = set() for g in group_assignment: for i in g: replica_ids.add(i) if set(range(num_shards)) != replica_ids: raise ValueError(f'Argument `group_assignment` must be a permutation of range({num_shards}). Received: {group_assignment}') subgroup_size_list = [len(group) for group in group_assignment] if all((subgroup_size_list[0] == size for size in subgroup_size_list)): return subgroup_size_list[0] else: raise ValueError(f'The size of each subgroup in `group_assignment` must be equal. Received: {group_assignment}')
Verify group_assignment and get the subgroup size". Args: group_assignment: list of group ids for applying the optimizer to subgroups. num_shards: The number of TPU shards. Returns: The size of one subgroup in group_assignment. Raises: ValueError: If group_assignment is invalid.
github-repos
def mtr_tr_dense(sz):
    """Series of machine translation models.

    All models are trained on sequences of 256 tokens. You can use the
    dataset translate_enfr_wmt32k_packed. 154000 steps = 3 epochs.

    Args:
        sz: an integer scale exponent.

    Returns:
        a hparams object.
    """
    scale = 2 ** sz
    hparams = mtf_bitransformer_base()
    hparams.d_model = 1024
    hparams.max_length = 256
    hparams.batch_size = 128
    # Feed-forward width and head counts grow with the scale factor.
    hparams.d_ff = int(4096 * scale)
    hparams.d_kv = 128
    hparams.encoder_num_heads = int(8 * scale)
    hparams.decoder_num_heads = int(8 * scale)
    hparams.learning_rate_decay_steps = 51400
    hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
    hparams.mesh_shape = "batch:32"
    hparams.label_smoothing = 0.1
    hparams.layer_prepostprocess_dropout = 0.1
    hparams.attention_dropout = 0.1
    hparams.relu_dropout = 0.1
    return hparams
Series of machine translation models. All models are trained on sequences of 256 tokens. You can use the dataset translate_enfr_wmt32k_packed. 154000 steps = 3 epochs. Args: sz: an integer Returns: a hparams
juraj-google-style
def sample_id(self, lon):
    """Return the sample index corresponding to a longitude.

    Args:
        lon (int): longitude in degrees.

    Returns:
        Corresponding sample index (after clamping by ``_control_sample``).
    """
    offset = float(self.SAMPLE_PROJECTION_OFFSET)
    center_lon = float(self.CENTER_LONGITUDE)
    if self.grid == 'WAC':
        # Projection scaled by the body radius at the center latitude.
        sample = np.rint(offset + 1.0 +
                         (lon * np.pi / 180.0 - center_lon) * self.A_AXIS_RADIUS *
                         np.cos(self.CENTER_LATITUDE * np.pi / 180.0) /
                         (self.MAP_SCALE * 1e-3))
    else:
        sample = np.rint(offset + float(self.MAP_RESOLUTION) * (lon - center_lon)) + 1
    return self._control_sample(sample)
Return the corresponding sample Args: lon (int): longitude in degrees Returns: Corresponding sample
juraj-google-style
def shutdown(cluster_info, queues=None):
    """Stops all TensorFlow nodes by feeding ``None`` into the
    multiprocessing.Queues.

    Args:
        :cluster_info: node reservation information for the cluster (e.g.
            host, executor_id, pid, ports, etc).
        :queues: *INTERNAL_USE*; defaults to ``['input']``.

    Returns:
        A nodeRDD.mapPartitions() function.
    """
    # Fix: mutable default argument (queues=['input']) is shared across
    # calls; use a None sentinel instead.
    if queues is None:
        queues = ['input']

    def _shutdown(iter):
        host = util.get_ip_address()
        executor_id = util.read_executor_id()
        mgr = _get_manager(cluster_info, host, executor_id)
        # Stop any tensorboard process started on this node.
        for node in cluster_info:
            if node['host'] == host and node['executor_id'] == executor_id:
                tb_pid = node['tb_pid']
                if tb_pid != 0:
                    logging.info("Stopping tensorboard (pid={0})".format(tb_pid))
                    subprocess.Popen(["kill", str(tb_pid)])
        logging.info("Stopping all queues")
        for q in queues:
            try:
                queue = mgr.get_queue(q)
                logging.info("Feeding None into {0} queue".format(q))
                # None is the sentinel that tells the consumers to stop.
                queue.put(None, block=True)
            except (AttributeError, KeyError):
                msg = "Queue '{}' not found on this node, check for exceptions on other nodes.".format(q)
                raise Exception(msg)
        logging.info("Setting mgr.state to 'stopped'")
        mgr.set('state', 'stopped')
        return [True]
    return _shutdown
Stops all TensorFlow nodes by feeding ``None`` into the multiprocessing.Queues. Args: :cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc). :queues: *INTERNAL_USE* Returns: A nodeRDD.mapPartitions() function
juraj-google-style
def from_string(cls, key, key_id=None):
    """Construct a Signer instance from a private key in PEM format.

    Args:
        key (str): Private key in PEM format.
        key_id (str): An optional key id used to identify the private key.

    Returns:
        google.auth.crypt.Signer: The constructed signer.

    Raises:
        ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in PEM
            format.
    """
    key = _helpers.from_bytes(key)
    marker_id, key_bytes = pem.readPemBlocksFromFile(
        six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
    if marker_id == 0:
        # PKCS#1: the PEM payload is the RSA key itself.
        private_key = rsa.key.PrivateKey.load_pkcs1(key_bytes, format='DER')
    elif marker_id == 1:
        # PKCS#8: unwrap the PrivateKeyInfo structure first.
        key_info, remaining = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC)
        if remaining != b'':
            raise ValueError('Unused bytes', remaining)
        private_key_info = key_info.getComponentByName('privateKey')
        private_key = rsa.key.PrivateKey.load_pkcs1(
            private_key_info.asOctets(), format='DER')
    else:
        raise ValueError('No key could be detected.')
    return cls(private_key, key_id=key_id)
Construct an Signer instance from a private key in PEM format. Args: key (str): Private key in PEM format. key_id (str): An optional key id used to identify the private key. Returns: google.auth.crypt.Signer: The constructed signer. Raises: ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in PEM format.
codesearchnet
def AsCode(self, indent_per_depth=2):
    """Return a "code" representation of this line.

    The code representation shows how the line would be printed out as
    code (note: not all tokens have spaces around them, so this is
    rudimentary and mainly useful for debugging).

    Arguments:
        indent_per_depth: how many spaces to indent per depth level.

    Returns:
        A string representing the line as code.
    """
    prefix = ' ' * (indent_per_depth * self.depth)
    return prefix + ' '.join(token.value for token in self._tokens)
Return a "code" representation of this line. The code representation shows how the line would be printed out as code. TODO(eliben): for now this is rudimentary for debugging - once we add formatting capabilities, this method will have other uses (not all tokens have spaces around them, for example). Arguments: indent_per_depth: how much spaces to indent per depth level. Returns: A string representing the line as code.
github-repos
def real(input, name=None):
    """Returns the real part of a complex (or real) tensor.

    Given a tensor `input`, this operation returns a tensor of type `float`
    that is the real part of each element in `input` considered as a
    complex number. If `input` is already real, it is returned unchanged.

    Args:
        input: A `Tensor`. Must have numeric type.
        name: A name for the operation (optional).

    Returns:
        A `Tensor` of type `float32` or `float64`.
    """
    with ops.name_scope(name, 'Real', [input]) as name:
        input = ops.convert_to_tensor(input, name='input')
        if input.dtype.is_complex:
            real_dtype = input.dtype.real_dtype
            return gen_math_ops.real(input, Tout=real_dtype, name=name)
        if input.dtype.is_numeric:
            # Already real: nothing to extract.
            return input
        raise TypeError('input must be a numeric tensor, but got tensor with dtype {}'.format(input.dtype))
Returns the real part of a complex (or real) tensor. Given a tensor `input`, this operation returns a tensor of type `float` that is the real part of each element in `input` considered as a complex number. For example: ```python x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j]) tf.math.real(x) # [-2.25, 3.25] ``` If `input` is already real, it is returned unchanged. Args: input: A `Tensor`. Must have numeric type. name: A name for the operation (optional). Returns: A `Tensor` of type `float32` or `float64`.
github-repos
def _gen_condition(cls, initial, new_public_keys):
    """Generates ThresholdSha256 conditions from a list of new owners.

    Note:
        This method is intended only to be used with a reduce function. For
        a description on how to use this method, see
        :meth:`~.Output.generate`.

    Args:
        initial (:class:`cryptoconditions.ThresholdSha256`): A Condition
            representing the overall root.
        new_public_keys (:obj:`list` of :obj:`str`|str): A list of new
            owners or a single new owner.

    Returns:
        :class:`cryptoconditions.ThresholdSha256`:
    """
    try:
        threshold = len(new_public_keys)
    except TypeError:
        threshold = None
    if isinstance(new_public_keys, list) and len(new_public_keys) > 1:
        # Nested owner list: build a sub-threshold condition recursively.
        ffill = ThresholdSha256(threshold=threshold)
        reduce(cls._gen_condition, new_public_keys, ffill)
    elif isinstance(new_public_keys, list) and len(new_public_keys) <= 1:
        raise ValueError('Sublist cannot contain single owner')
    else:
        try:
            new_public_keys = new_public_keys.pop()
        except AttributeError:
            pass
        if isinstance(new_public_keys, Fulfillment):
            ffill = new_public_keys
        else:
            ffill = Ed25519Sha256(public_key=base58.b58decode(new_public_keys))
    initial.add_subfulfillment(ffill)
    return initial
Generates ThresholdSha256 conditions from a list of new owners. Note: This method is intended only to be used with a reduce function. For a description on how to use this method, see :meth:`~.Output.generate`. Args: initial (:class:`cryptoconditions.ThresholdSha256`): A Condition representing the overall root. new_public_keys (:obj:`list` of :obj:`str`|str): A list of new owners or a single new owner. Returns: :class:`cryptoconditions.ThresholdSha256`:
codesearchnet
def ParseText(self, text, eof=True):
    """Passes CLI output through the FSM and returns a list of tuples.

    The first tuple is the header; every subsequent tuple is a row.

    Args:
        text: (str), Text to parse with embedded newlines.
        eof: (boolean), Set to False if we are parsing only part of the
            file. Suppresses triggering the EOF state.

    Raises:
        TextFSMError: An error occurred within the FSM.

    Returns:
        List of Lists.
    """
    for line in (text.splitlines() if text else []):
        self._CheckLine(line)
        if self._cur_state_name in ('End', 'EOF'):
            break
    # If parsing ran to completion without an explicit EOF state, flush the
    # record still being assembled.
    if self._cur_state_name != 'End' and 'EOF' not in self.states and eof:
        self._AppendRecord()
    return self._result
Passes CLI output through FSM and returns list of tuples. First tuple is the header, every subsequent tuple is a row. Args: text: (str), Text to parse with embedded newlines. eof: (boolean), Set to False if we are parsing only part of the file. Suppresses triggering EOF state. Raises: TextFSMError: An error occurred within the FSM. Returns: List of Lists.
codesearchnet
def outer(vector1, vector2=None):
    """Construct the outer product of two vectors.

    The second vector argument is optional; if absent, the projector of the
    first vector (|v1><v1|) is returned.

    Args:
        vector1 (ndarray): the first vector.
        vector2 (ndarray): the (optional) second vector.

    Returns:
        np.array: The matrix |v1><v2|.
    """
    # The bra side is always conjugated.
    bra = np.array(vector1 if vector2 is None else vector2).conj()
    return np.outer(vector1, bra)
Construct the outer product of two vectors. The second vector argument is optional, if absent the projector of the first vector will be returned. Args: vector1 (ndarray): the first vector. vector2 (ndarray): the (optional) second vector. Returns: np.array: The matrix |v1><v2|.
codesearchnet
def truncate(text, length=255):
    """Split `text` into a list of chunks of at most `length` characters.

    Args:
        text (str): The text to be divided.
        length (int, optional): The maximum length of each chunk.
            Defaults to 255.

    Returns:
        list: Text divided into chunks of length `length` (the final chunk
        may be shorter).
    """
    # Fix: the original `while i < len(text) - 1` bound dropped the final
    # chunk when the remainder was a single character and returned [] for
    # one-character input; the try/except IndexError was dead code since
    # slicing never raises IndexError.
    return [text[i:i + length] for i in range(0, len(text), length)]
Splits the message into a list of strings of length `length` Args: text (str): The text to be divided length (int, optional): The length of the chunks of text. \ Defaults to 255. Returns: list: Text divided into chunks of length `length`
juraj-google-style
def close(self, suppress_warning: bool = False) -> None:
    """Close the connection. After this, the connection object becomes invalid.

    Warns the user if called after closing.

    Args:
        suppress_warning: Suppresses the "already closed" warning if True
            (defaults to False).
    """
    if self._file is None:
        # Already closed; warn unless asked not to.
        if not suppress_warning:
            logging.warn("Connection to %s is already closed", self.filename)
    else:
        self._file.close()
        self._file = None
    # Invalidate all cached views so any further use fails fast.
    self.layers = None
    self.ra = None
    self.row_attrs = None
    self.ca = None
    self.col_attrs = None
    self.row_graphs = None
    self.col_graphs = None
    self.shape = (0, 0)
    self._closed = True
Close the connection. After this, the connection object becomes invalid. Warns user if called after closing. Args: suppress_warning: Suppresses warning message if True (defaults to false)
juraj-google-style
def ProduceExtractionWarning(self, message, path_spec=None):
    """Produces an extraction warning.

    Args:
        message (str): message of the warning.
        path_spec (Optional[dfvfs.PathSpec]): path specification, where
            None will use the path specification of the current file entry
            set in the mediator.

    Raises:
        RuntimeError: when storage writer is not set.
    """
    if not self._storage_writer:
        raise RuntimeError('Storage writer not set.')
    if not path_spec and self._file_entry:
        path_spec = self._file_entry.path_spec
    warning = warnings.ExtractionWarning(
        message=message, parser_chain=self.GetParserChain(), path_spec=path_spec)
    self._storage_writer.AddWarning(warning)
    self._number_of_warnings += 1
    # Record activity so watchdogs know this worker is still alive.
    self.last_activity_timestamp = time.time()
Produces an extraction warning. Args: message (str): message of the warning. path_spec (Optional[dfvfs.PathSpec]): path specification, where None will use the path specification of current file entry set in the mediator. Raises: RuntimeError: when storage writer is not set.
codesearchnet
def from_timestamp_pb(cls, stamp):
    """Parse an RFC 3339-compliant timestamp, preserving nanoseconds.

    Args:
        stamp (:class:`~google.protobuf.timestamp_pb2.Timestamp`):
            timestamp message.

    Returns:
        :class:`DatetimeWithNanoseconds`: an instance matching the
        timestamp message.
    """
    # Seconds carry the microsecond-resolution part; nanos are kept whole.
    bare = from_microseconds(int(stamp.seconds * 1e6))
    return cls(bare.year, bare.month, bare.day,
               bare.hour, bare.minute, bare.second,
               nanosecond=stamp.nanos, tzinfo=pytz.UTC)
Parse RFC 3339-compliant timestamp, preserving nanoseconds. Args: stamp (:class:`~google.protobuf.timestamp_pb2.Timestamp`): timestamp message Returns: :class:`DatetimeWithNanoseconds`: an instance matching the timestamp message
juraj-google-style
def exists(self, path=None, client_kwargs=None, assume_exists=None):
    """Return True if path refers to an existing path.

    Args:
        path (str): Path or URL.
        client_kwargs (dict): Client arguments.
        assume_exists (bool or None): The value to return when there is not
            enough permission to determine the existing status of the file.
            If None, the permission exception is re-raised (default
            behavior).

    Returns:
        bool: True if exists.
    """
    try:
        self.head(path, client_kwargs)
    except ObjectNotFoundError:
        return False
    except ObjectPermissionError:
        # Cannot know: use the caller-provided assumption, or re-raise.
        if assume_exists is None:
            raise
        return assume_exists
    return True
Return True if path refers to an existing path. Args: path (str): Path or URL. client_kwargs (dict): Client arguments. assume_exists (bool or None): This value define the value to return in the case there is no enough permission to determinate the existing status of the file. If set to None, the permission exception is reraised (Default behavior). if set to True or False, return this value. Returns: bool: True if exists.
codesearchnet
def _maybe_init_tags(self, run_id, tag_to_metadata): cursor = self._db.cursor() cursor.execute('SELECT tag_name, tag_id FROM Tags WHERE run_id = ?', (run_id,)) tag_to_id = {row[0]: row[1] for row in cursor.fetchall() if row[0] in tag_to_metadata} new_tag_data = [] for tag, metadata in six.iteritems(tag_to_metadata): if tag not in tag_to_id: tag_id = self._create_id() tag_to_id[tag] = tag_id new_tag_data.append((run_id, tag_id, tag, time.time(), metadata.display_name, metadata.plugin_data.plugin_name, self._make_blob(metadata.plugin_data.content))) cursor.executemany( , new_tag_data) return tag_to_id
Returns a tag-to-ID map for the given tags, creating rows if needed. Args: run_id: the ID of the run to which these tags belong. tag_to_metadata: map of tag name to SummaryMetadata for the tag.
juraj-google-style
def run_in_v1_v2(device_to_use: Optional[str]=None, assert_no_eager_garbage: bool=False) -> Callable[[Callable[..., Any]], Callable[..., None]]:
    """Execute the decorated test in v1 and v2 modes.

    The overall execution is similar to that of
    `run_in_graph_and_eager_mode`.

    Args:
        device_to_use: A string in the following format: "/device:CPU:0".
        assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
            collector and asserts that no extra garbage has been created
            when running the test with eager execution enabled.

    Returns:
        A decorator that runs a given test in v1 and v2 modes.
    """
    def decorator(f: Callable[..., Any]) -> Callable[..., None]:
        decorator_tag = 'wrapped_with_v1_v2_decorator'
        # Avoid double-wrapping an already-decorated test.
        if hasattr(f, decorator_tag):
            return f

        def decorated(self: 'TensorFlowTestCase', *args, **kwargs) -> None:
            logging.info('Running %s in V1 mode.', f.__name__)
            try:
                with self.subTest('V1_mode'):
                    v2_compat.disable_v2_behavior()
                    f(self, *args, **kwargs)
            except unittest.case.SkipTest:
                pass

            def run_v2(self: 'TensorFlowTestCase', **kwargs) -> None:
                logging.info('Running %s in V2 mode.', f.__name__)
                if device_to_use:
                    with ops.device(device_to_use):
                        f(self, *args, **kwargs)
                else:
                    f(self, *args, **kwargs)

            if assert_no_eager_garbage:
                ops.reset_default_graph()
                run_v2 = assert_no_new_tensors(assert_no_garbage_created(run_v2))
            # Reset test-case state between the V1 and V2 runs.
            self.tearDown()
            self._tempdir = None
            ops.reset_default_graph()
            v2_compat.enable_v2_behavior()
            with self.subTest('V2_mode'):
                self.setUp()
                run_v2(self, **kwargs)

        tf_decorated = tf_decorator.make_decorator(f, decorated)
        tf_decorated.__dict__[decorator_tag] = True
        return tf_decorated
    return decorator
Execute the decorated test in v1 and v2 modes. The overall execution is similar to that of `run_in_graph_and_eager_mode`. Args: device_to_use: A string in the following format: "/device:CPU:0". assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage collector and asserts that no extra garbage has been created when running the test with eager execution enabled. This will fail if there are reference cycles (e.g. a = []; a.append(a)). Off by default because some tests may create garbage for legitimate reasons (e.g. they define a class which inherits from `object`), and because DEBUG_SAVEALL is sticky in some Python interpreters (meaning that tests which rely on objects being collected elsewhere in the unit test file will not work). Additionally, checks that nothing still has a reference to Tensors that the test allocated. Returns: A decorator that runs a given test in v1 and v2 modes.
github-repos
def symlink(self, link_target, path, dir_fd=None):
    """Creates the specified symlink, pointed at the specified link target.

    Args:
        link_target: The target of the symlink.
        path: Path to the symlink to create.
        dir_fd: If not `None`, the file descriptor of a directory, with
            `link_target` being relative to this directory. New in
            Python 3.3.

    Raises:
        OSError: if the file already exists.
    """
    resolved_target = self._path_with_dir_fd(link_target, self.symlink, dir_fd)
    self.filesystem.create_symlink(path, resolved_target, create_missing_dirs=False)
Creates the specified symlink, pointed at the specified link target. Args: link_target: The target of the symlink. path: Path to the symlink to create. dir_fd: If not `None`, the file descriptor of a directory, with `link_target` being relative to this directory. New in Python 3.3. Raises: OSError: if the file already exists.
codesearchnet
def convert_saved_model_v1(saved_model_path, exported_names, tags, lift_variables, include_variables_in_initializers, upgrade_legacy=True, show_debug_info=False):
    """Converts a v1 SavedModel to an MLIR module.

    Args:
        saved_model_path: Path to SavedModel.
        exported_names: Names to export.
        tags: MetaGraphDef to be loaded is identified by the supplied tags.
        lift_variables: Whether to promote tf.VarHandleOp to resource
            arguments.
        include_variables_in_initializers: Keeps the variables in
            initializers before lifting variables.
        upgrade_legacy: Functionalize the input graph before importing.
        show_debug_info: Whether to include locations in the emitted textual
            form.

    Returns:
        A textual representation of the MLIR module corresponding to the
        SavedModel.
    """
    # Thin wrapper over the pybind-exposed converter.
    return pywrap_mlir.experimental_convert_saved_model_v1_to_mlir(
        saved_model_path, exported_names, tags, lift_variables,
        include_variables_in_initializers, upgrade_legacy, show_debug_info)
Converts a v1 SavedModel to MLIR module. Args: saved_model_path: Path to SavedModel. exported_names: Names to export. tags: MetaGraphDef to be loaded is identified by the supplied tags. lift_variables: Whether to promote tf.VarHandleOp to resource arguments. include_variables_in_initializers: Keeps the variables in initializers before lifting variables. upgrade_legacy: Functionalize the input graph before importing. show_debug_info: Whether to include locations in the emitted textual form. Returns: A textual representation of the MLIR module corresponding to the SavedModule.
github-repos
def _find_extraneous_saver_nodes(graph_def, saver_def):
    """Identifies any nodes in the graph_def related to unused Savers.

    This approach assumes that each Saver is cleanly isolated in its own
    name scope, so we need only identify the scopes associated with
    extraneous Savers and return all the nodes in those scopes.

    Args:
        graph_def: a GraphDef proto to evaluate.
        saver_def: a SaverDef proto referencing Save/Restore ops to be
            retained.

    Returns:
        An iterable of node names that may be safely omitted.
    """
    nodes = {
        node_def.name: (set(tensor.get_op_name(x) for x in node_def.input), node_def.op)
        for node_def in graph_def.node
    }
    retain_scope_save = None
    retain_scope_restore = None
    if saver_def is not None:
        save_op_name = tensor.get_op_name(saver_def.save_tensor_name)
        restore_op_name = tensor.get_op_name(saver_def.restore_op_name)
        # Keep everything under the retained Saver's scopes.
        retain_scope_restore = _get_scope(restore_op_name) + '/'
        retain_scope_save = _get_scope(save_op_name) + '/'
    all_saver_node_names = set(
        name for name, (_, op) in nodes.items() if op in SAVE_AND_RESTORE_OPS)
    all_saver_scopes = (
        set(_get_scope(x) for x in all_saver_node_names) - all_saver_node_names)
    all_saver_scopes = set(x + '/' for x in all_saver_scopes)
    extraneous_scopes = all_saver_scopes - set([retain_scope_save, retain_scope_restore])
    extraneous_node_names = set()
    for name in nodes:
        for extraneous_scope in extraneous_scopes:
            if name.startswith(extraneous_scope):
                extraneous_node_names.add(name)
                break
    return extraneous_node_names
Identifies any nodes in the graph_def related to unused Savers. This approach assumes that each Saver is cleanly isolated in its own name scope, so we need only identify the scopes associated with extraneous Savers and return all the nodes in those scopes. Args: graph_def: a GraphDef proto to evaluate. saver_def: a SaverDef proto referencing Save/Restore ops to be retained. Returns: An iterable of node names that may be safely omitted.
github-repos
def get_data_for_sensors(macs=None, search_duratio_sec=5, bt_device=''):
    """Get latest data for sensors in the MACs list.

    Args:
        macs (array): MAC addresses to listen for; None/empty means all.
        search_duratio_sec (int): Search duration in seconds. Default 5.
        bt_device (string): Bluetooth device id.

    Returns:
        dict: MAC and state of found sensors.
    """
    # Fix: mutable default argument (macs=[]) is shared across calls; use a
    # None sentinel instead.
    if macs is None:
        macs = []
    log.info('Get latest data for sensors. Stop with Ctrl+C.')
    log.info('Stops automatically in %ss', search_duratio_sec)
    log.info('MACs: %s', macs)
    datas = dict()
    # Later readings for the same MAC overwrite earlier ones, so the dict
    # ends up holding the latest state per sensor.
    for new_data in RuuviTagSensor._get_ruuvitag_datas(macs, search_duratio_sec, bt_device=bt_device):
        datas[new_data[0]] = new_data[1]
    return datas
Get latest data for sensors in the MAC's list. Args: macs (array): MAC addresses search_duratio_sec (int): Search duration in seconds. Default 5 bt_device (string): Bluetooth device id Returns: dict: MAC and state of found sensors
juraj-google-style
def discover(names=None, pattern=['*.py'], skip='efp', dry_run=False, blacklist=None, name_greps=None, manual_reset=False, delete_history=False, max_devices=0, continue_from=None, result_file='./result.json', auto_reboot=False, keep_explorer=False, add_all_devices=False):
    """Discover all test cases and skip those passed.

    Args:
        names: explicit list of case names to run; also required (non-None)
            when continue_from is given.
        pattern (str): Pattern to match case modules, refer to python's
            unittest documentation for more details.
        skip (str): types of cases to skip: 'p' passed, 'f' failed, 'e' error.
        dry_run: collect and log cases but do not run them.
        blacklist: path of a file listing case names to exclude.
        name_greps: fnmatch patterns a case name must match to be included.
        manual_reset: use the manual PDU controller.
        delete_history: remove history.json before running.
        max_devices: skip cases needing more golden devices than this (0 = no limit).
        continue_from: case name to resume from.
        result_file: JSON file where results are stored/read.
        auto_reboot: build resume arguments so the run restarts after reboot.
        keep_explorer: passed through to SimpleTestResult.
        add_all_devices: passed through to SimpleTestResult.

    Returns:
        SimpleTestResult of the run, or None for a dry run.
    """
    if not os.path.exists(settings.OUTPUT_PATH):
        os.mkdir(settings.OUTPUT_PATH)

    if delete_history:
        # NOTE(review): 'del' is a Windows shell command; this module appears
        # Windows-only (start.bat below) -- confirm.
        os.system('del history.json')

    if blacklist:
        try:
            # NOTE(review): the comprehension's filter was truncated in the
            # source; "startswith('#')" (skip comment lines) is reconstructed
            # from context -- confirm against upstream.
            excludes = [line.strip('\n') for line in open(blacklist, 'r').readlines() if not line.startswith('#')]
        except:
            logger.exception('Failed to open test case black list file')
            raise
    else:
        excludes = []

    # Load previous results so already-run cases can be skipped.
    log = None
    if os.path.isfile(result_file):
        try:
            log = json.load(open(result_file, 'r'))
        except:
            logger.exception('Failed to open result file')

    if not log:
        log = {}
        json.dump(log, open(result_file, 'w'), indent=2)

    suite = unittest.TestSuite()
    discovered = unittest.defaultTestLoader.discover('cases', pattern)

    if names and continue_from:
        # Drop every name before the resume point.
        names = names[names.index(continue_from):]

    for s1 in discovered:
        for s2 in s1:
            for case in s2:
                if case.__class__ is HarnessCase:
                    continue
                # NOTE(review): unicode() is Python 2 only.
                case_name = unicode(case.__class__.__name__)

                # grep name
                if name_greps and not any(fnmatch.fnmatch(case_name, name_grep) for name_grep in name_greps):
                    logger.info('case[%s] skipped by name greps', case_name)
                    continue

                # whitelist; NOTE(review): len(None) raises if names is None
                # and no explicit list was given -- presumably callers always
                # pass a list; confirm.
                if len(names) and case_name not in names:
                    logger.info('case[%s] skipped', case_name)
                    continue

                # skip cases whose previous status matches the skip flags
                if case_name in log.keys():
                    if (log[case_name]['passed'] and ('p' in skip)) \
                            or (log[case_name]['passed'] is False and ('f' in skip)) \
                            or (log[case_name]['passed'] is None and ('e' in skip)):
                        logger.warning('case[%s] skipped for its status[%s]', case_name, log[case_name]['passed'])
                        continue

                # continue from a specific case: skip until it is reached
                if continue_from:
                    if continue_from != case_name:
                        logger.warning('case[%s] skipped for continue from[%s]', case_name, continue_from)
                        continue
                    else:
                        continue_from = None

                # blacklist
                if case_name in excludes:
                    logger.warning('case[%s] skipped for blacklist', case_name)
                    continue

                # max golden devices
                if max_devices and case.golden_devices_required > max_devices:
                    logger.warning('case[%s] skipped for exceeding max golden devices allowed[%d]', case_name, max_devices)
                    continue

                suite.addTest(case)
                logger.info('case[%s] added', case_name)

    if auto_reboot:
        # Build the command line used to resume this run after a reboot.
        argv = []
        argv.append('"%s"' % os.sep.join([os.getcwd(), 'start.bat']))
        argv.extend(['-p', pattern])
        argv.extend(['-k', skip])
        argv.extend(['-o', result_file])
        argv.append('-a')
        if manual_reset:
            argv.append('-m')
        if delete_history:
            argv.append('-d')
        auto_reboot_args = argv + names
    else:
        auto_reboot_args = None
        # NOTE(review): placement inside the else-branch is inferred from the
        # collapsed source ordering -- confirm against upstream.
        os.system('del "%s"' % RESUME_SCRIPT_PATH)

    # manual reset: use the manual PDU controller instead of a real PDU
    if manual_reset:
        settings.PDU_CONTROLLER_TYPE = 'MANUAL_PDU_CONTROLLER'
        settings.PDU_CONTROLLER_OPEN_PARAMS = {}
        settings.PDU_CONTROLLER_REBOOT_PARAMS = {}

    result = SimpleTestResult(result_file, auto_reboot_args, keep_explorer, add_all_devices)
    for case in suite:
        logger.info(case.__class__.__name__)

    if dry_run:
        return

    suite.run(result)
    return result
Discover all test cases and skip those passed Args: pattern (str): Pattern to match case modules, refer python's unittest documentation for more details skip (str): types cases to skip
juraj-google-style
def wait_at_barrier(self, barrier_id, timeout_in_ms):
    """Blocks until all coordinated tasks are at the barrier.

    The barrier may fail if it times out or if one of the tasks is unhealthy.

    Args:
      barrier_id: Unique string identifying the barrier.
      timeout_in_ms: Duration before the barrier times out and fails.
    """
    # Make sure the eager context exists before touching the native handle.
    ensure_initialized()
    handle = self._context_handle
    pywrap_tfe.TFE_WaitAtBarrier(handle, barrier_id, timeout_in_ms)
Blocks until all coordinated tasks are at the barrier. The barrier may fail if it times out or if one of the tasks is unhealthy. Args: barrier_id: Unique string identifying the barrier. timeout_in_ms: Duration before the barrier times out and fails.
github-repos
def enableSync(self, url, definition=None):
    """Enables Sync capability for an AGOL feature service.

    Args:
        url (str): The URL of the feature service.
        definition (dict): A dictionary containing valid definition values.
            Defaults to ``None``. (Currently unused by this method.)

    Returns:
        The string 'Sync is already enabled' when nothing needs to change,
        the error object when the update fails, otherwise the result from
        ``AdminFeatureService.updateDefinition``.
    """
    adminFS = AdminFeatureService(url=url, securityHandler=self._securityHandler)
    cap = str(adminFS.capabilities)
    if 'Sync' in cap:
        return 'Sync is already enabled'
    # Append Sync to the existing capability list and push the new definition.
    capItems = cap.split(',')
    capItems.append('Sync')
    enableResults = adminFS.updateDefinition(
        json_dict={'capabilities': ','.join(capItems)})
    # CLEANUP: removed dead code from the original ('enableResults = "skipped"'
    # initializer and 'adminFS = None; del adminFS'), which had no effect.
    if 'error' in enableResults:
        return enableResults['error']
    return enableResults
Enables Sync capability for an AGOL feature service. Args: url (str): The URL of the feature service. definition (dict): A dictionary containing valid definition values. Defaults to ``None``. Returns: dict: The result from :py:func:`arcrest.hostedservice.service.AdminFeatureService.updateDefinition`.
codesearchnet
def _get(self, url, params=None):
    """Used by every other method; makes a GET request with the given params.

    Args:
        url (str): relative path of a specific service (account_info, ...).
        params (dict, optional): parameters to be sent in the GET request.

    Returns:
        dict: results of the response of the GET request.
    """
    query = params or {}
    # Authentication credentials ride along with every request.
    query.update({'login': self.login, 'key': self.key})
    response = requests.get(self.api_url + url, query)
    return self._process_response(response.json())
Used by every other method, it makes a GET request with the given params. Args: url (str): relative path of a specific service (account_info, ...). params (:obj:`dict`, optional): contains parameters to be sent in the GET request. Returns: dict: results of the response of the GET request.
juraj-google-style
def __init__(self, tcex):
    """Initialize the Class properties.

    Args:
        tcex (obj): An instance of TcEx object.
    """
    self._tcex = tcex
    # Notification defaults: user-level (not organization), no type or
    # recipients chosen yet, lowest priority.
    self._is_organization = False
    self._notification_type = None
    self._priority = 'Low'
    self._recipients = None
Initialize the Class properties. Args: tcex (obj): An instance of TcEx object.
juraj-google-style
def find_nearest_color_hexstr(hexdigits, color_table=None, method='euclid'):
    """Given a three or six-character hex digit string, return the nearest
    color index.

    Arguments:
        hexdigits: a three/six digit hex string, e.g. 'b0b', '123456'

    Returns:
        int, None: index, or None on error.
    """
    try:
        if len(hexdigits) == 3:
            # Expand CSS-style shorthand: 'b' -> 0xbb (d*16 + d == d*17).
            channels = [int(digit, 16) * 17 for digit in hexdigits]
        elif len(hexdigits) == 6:
            channels = [int(hexdigits[i:i + 2], 16) for i in (0, 2, 4)]
        else:
            raise ValueError('wrong length: %r' % hexdigits)
    except ValueError:
        # Covers both a bad length and non-hex characters.
        return None

    return find_nearest_color_index(*channels,
                                    color_table=color_table,
                                    method=method)
Given a three or six-character hex digit string, return the nearest color index. Arguments: hexdigits: a three/6 digit hex string, e.g. 'b0b', '123456' Returns: int, None: index, or None on error.
juraj-google-style
def from_values_indices(cls, values, indices, populate=False, structure=None, voigt_rank=None, vsym=True, verbose=False):
    """Creates a tensor from values and indices, with options for populating
    the remainder of the tensor.

    Args:
        values (floats): numbers to place at indices
        indices (array-likes): indices to place values at
        populate (bool): whether to populate the tensor
        structure (Structure): structure to base population or
            fit_to_structure on
        voigt_rank (int): full tensor rank to indicate the shape of the
            resulting tensor; necessary if the provided indices are more
            minimal than the desired tensor shape
        vsym (bool): whether to voigt symmetrize during the optimization
            procedure
        verbose (bool): whether to populate verbosely
    """
    indices = np.array(indices)
    if voigt_rank:
        # Voigt shape: one 3-axis for an odd rank, one 6-axis per rank pair.
        # NOTE(review): the source was truncated here; '(voigt_rank // 2)' is
        # reconstructed from the Voigt-shape convention -- confirm upstream.
        # NOTE(review): this branch yields a plain list, so the
        # shape.astype(int) below would fail for it -- verify.
        shape = ([3] * (voigt_rank % 2)) + ([6] * (voigt_rank // 2))
    else:
        # Infer shape from the largest index, rounded up to a multiple of 3.
        shape = (np.ceil((np.max((indices + 1), axis=0) / 3.0)) * 3)
    base = np.zeros(shape.astype(int))
    for (v, idx) in zip(values, indices):
        base[tuple(idx)] = v
    # A 6 in the shape means the array is in Voigt notation.
    if (6 in shape):
        obj = cls.from_voigt(base)
    else:
        obj = cls(base)
    if populate:
        assert structure, 'Populate option must include structure input'
        obj = obj.populate(structure, vsym=vsym, verbose=verbose)
    elif structure:
        obj = obj.fit_to_structure(structure)
    return obj
Creates a tensor from values and indices, with options for populating the remainder of the tensor. Args: values (floats): numbers to place at indices indices (array-likes): indices to place values at populate (bool): whether to populate the tensor structure (Structure): structure to base population or fit_to_structure on voigt_rank (int): full tensor rank to indicate the shape of the resulting tensor. This is necessary if one provides a set of indices more minimal than the shape of the tensor they want, e.g. Tensor.from_values_indices((0, 0), 100) vsym (bool): whether to voigt symmetrize during the optimization procedure verbose (bool): whether to populate verbosely
codesearchnet
def env_valid(env):
    """Given an env, determine if it's valid.

    Args:
        env: the env to check

    Returns:
        True if the env is valid

    Raises:
        ValueError with message if the env is not valid
    """
    if env in EFConfig.ENV_LIST:
        return True
    allowed = ", ".join(EFConfig.ENV_LIST)
    raise ValueError("unknown env: {}; env must be one of: ".format(env) + allowed)
Given an env, determine if it's valid Args: env: the env to check Returns: True if the env is valid Raises: ValueError with message if the env is not valid
juraj-google-style
def proj_path(*path_parts):
    """Return absolute path to the repo dir (root project directory).

    Args:
        path_parts (str): path segments relative to the project root
            (pelconf.yaml); defaults to the current directory.

    Returns:
        str: The given path converted to an absolute path.
    """
    parts = list(path_parts) if path_parts else ['.']
    if not os.path.isabs(parts[0]):
        # Relative paths are anchored at the project root, when one exists.
        root = _find_proj_root()
        if root is not None:
            parts = [root] + parts
    return os.path.normpath(os.path.join(*parts))
Return absolute path to the repo dir (root project directory). Args: path (str): The path relative to the project root (pelconf.yaml). Returns: str: The given path converted to an absolute path.
juraj-google-style
def fix_report(self, report, errors='drop', prefer='before'):
    """Perform utc assignment on all readings in a report.

    The returned report will have all reading timestamps in UTC.  This only
    works on SignedListReport objects.  Note that the report should typically
    have previously been added to the UTC assigner using add_report or no
    reference points from the report will be used.

    Args:
        report (SignedListReport): The report that we should fix.
        errors (str): The behavior that we should have when we can't fix a
            given reading.  The only currently supported behavior is drop,
            which means that the reading will be dropped and not included in
            the new report.
        prefer (str): Whether to prefer fixing readings by looking for
            reference points after the reading or before, all other things
            being equal.  See the description of ``assign_utc``.

    Returns:
        SignedListReport: The report with UTC timestamps.
    """
    if (not isinstance(report, SignedListReport)):
        raise ArgumentError('Report must be a SignedListReport', report=report)
    if (errors not in ('drop',)):
        raise ArgumentError("Unknown errors handler: {}, supported=['drop']".format(errors))
    self.ensure_prepared()
    fixed_readings = []
    dropped_readings = 0
    for reading in report.visible_readings:
        assignment = self.assign_utc(reading.reading_id, reading.raw_time, prefer=prefer)
        if (assignment is None):
            # errors == 'drop': unfixable readings are silently dropped
            # (counted and reported once below).
            dropped_readings += 1
            continue
        # Rebuild the reading with the RTC value and its assigned UTC time.
        fixed_reading = IOTileReading(assignment.rtc_value, reading.stream, reading.value, reading_time=assignment.utc, reading_id=reading.reading_id)
        fixed_readings.append(fixed_reading)
    # Recreate the report with identical metadata but the fixed readings.
    fixed_report = SignedListReport.FromReadings(report.origin, fixed_readings, report_id=report.report_id, selector=report.streamer_selector, streamer=report.origin_streamer, sent_timestamp=report.sent_timestamp)
    fixed_report.received_time = report.received_time
    if (dropped_readings > 0):
        self._logger.warning('Dropped %d readings of %d when fixing UTC timestamps in report 0x%08X for device 0x%08X', dropped_readings, len(report.visible_readings), report.report_id, report.origin)
    return fixed_report
Perform utc assignment on all readings in a report. The returned report will have all reading timestamps in UTC. This only works on SignedListReport objects. Note that the report should typically have previously been added to the UTC assigner using add_report or no reference points from the report will be used. Args: report (SignedListReport): The report that we should fix. errors (str): The behavior that we should have when we can't fix a given reading. The only currently support behavior is drop, which means that the reading will be dropped and not included in the new report. prefer (str): Whether to prefer fixing readings by looking for reference points after the reading or before, all other things being equal. See the description of ``assign_utc``. Returns: SignedListReport: The report with UTC timestamps.
codesearchnet
def render_html(root, options=0, extensions=None):
    """Render a given syntax tree as HTML.

    Args:
        root (Any): The reference to the root node of the syntax tree.
        options (int): The cmark options.
        extensions (Any): The reference to the syntax extensions, generally
            from :func:`parser_get_syntax_extensions`

    Returns:
        str: The rendered HTML.
    """
    # The C API expects a NULL pointer when no extensions are supplied.
    ext = _cmark.ffi.NULL if extensions is None else extensions
    raw = _cmark.lib.cmark_render_html(root, options, ext)
    return _cmark.ffi.string(raw).decode('utf-8')
Render a given syntax tree as HTML. Args: root (Any): The reference to the root node of the syntax tree. options (int): The cmark options. extensions (Any): The reference to the syntax extensions, generally from :func:`parser_get_syntax_extensions` Returns: str: The rendered HTML.
juraj-google-style
def _find_metric_value(session_or_group, metric_name):
    """Returns the metric_value for a given metric in a session or session group.

    Args:
      session_or_group: A Session protobuffer or SessionGroup protobuffer.
      metric_name: A MetricName protobuffer. The metric to search for.

    Returns:
      A MetricValue protobuffer representing the value of the given metric or
      None if no such metric was found in session_or_group.
    """
    for candidate in session_or_group.metric_values:
        name = candidate.name
        # A metric matches when both its tag and its group agree.
        if name.tag == metric_name.tag and name.group == metric_name.group:
            return candidate
    return None
Returns the metric_value for a given metric in a session or session group. Args: session_or_group: A Session protobuffer or SessionGroup protobuffer. metric_name: A MetricName protobuffer. The metric to search for. Returns: A MetricValue protobuffer representing the value of the given metric or None if no such metric was found in session_or_group.
juraj-google-style
def retry(self, **kwargs):
    """Retry the job.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabJobRetryError: If the job could not be retried
    """
    endpoint = '{}/{}/retry'.format(self.manager.path, self.get_id())
    self.manager.gitlab.http_post(endpoint)
Retry the job. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabJobRetryError: If the job could not be retried
juraj-google-style
def __init__(self, agent, environment, repeat_actions=1, history=None, id_=0):
    """Initialize a single Runner object (one Agent/one Environment).

    Args:
        id_ (int): The ID of this Runner (for distributed TF runs).
    """
    super(ParallelRunner, self).__init__(agent, environment, repeat_actions, history)
    self.id = id_
    self.current_timestep = None
    self.episode_actions = []
    # Number of parallel buffers comes from the agent's execution config.
    num_parallel = self.agent.execution['num_parallel']
    self.num_parallel = num_parallel
    print('ParallelRunner with {} parallel buffers.'.format(num_parallel))
Initialize a single Runner object (one Agent/one Environment). Args: id_ (int): The ID of this Runner (for distributed TF runs).
juraj-google-style
def setDirname(self, dirname):
    """Set a new directory name for the sequence.

    Args:
        dirname (str): the new directory name
    """
    sep = utils._getPathSep(dirname)
    # Guarantee a trailing path separator before storing.
    if not dirname.endswith(sep):
        dirname = dirname + sep
    self._dir = utils.asString(dirname)
Set a new directory name for the sequence. Args: dirname (str): the new directory name
juraj-google-style
def slice_batch_indices(indices):
    """Convert a Tensor of indices into a dataset of batched indices.

    Coercing the Tensor into a shape divisible by the batch size (handling the
    last partial batch separately) allows a much more favorable memory access
    pattern than slicing in a Dataset map.

    Args:
        indices: Tensor which determines the data order for an entire epoch.

    Returns:
        A Dataset of batched indices.
    """
    # NOTE(review): this is a closure -- num_full_batches, batch_size,
    # shuffle and epochs come from the enclosing scope not visible here.
    num_in_full_batch = num_full_batches * batch_size
    # Take the largest prefix divisible by batch_size and reshape so each
    # row is exactly one batch of indices.
    first_k_indices = array_ops.slice(indices, [0], [num_in_full_batch])
    first_k_indices = array_ops.reshape(first_k_indices, [num_full_batches, batch_size])
    flat_dataset = dataset_ops.DatasetV2.from_tensor_slices(first_k_indices)
    if self._partial_batch_size:
        # Append the remaining indices as a single (shorter) final batch.
        index_remainder = dataset_ops.DatasetV2.from_tensors(array_ops.slice(indices, [num_in_full_batch], [self._partial_batch_size]))
        flat_dataset = flat_dataset.concatenate(index_remainder)
    if shuffle == 'batch':
        # 1024 is the shuffle buffer size, measured in batches.
        flat_dataset = flat_dataset.shuffle(1024).repeat(epochs)
    return flat_dataset
Convert a Tensor of indices into a dataset of batched indices. This step can be accomplished in several ways. The most natural is to slice the Tensor in a Dataset map. (With a condition on the upper index to handle the partial batch.) However it turns out that coercing the Tensor into a shape which is divisible by the batch size (and handling the last partial batch separately) allows for a much more favorable memory access pattern and improved performance. Args: indices: Tensor which determines the data order for an entire epoch. Returns: A Dataset of batched indices.
github-repos
def WriteEventBody(self, event):
    """Writes the body of an event object to the output.

    Args:
      event (EventObject): event.
    """
    # Ensure the event carries an inode attribute; default to 0 when missing.
    if getattr(event, 'inode', None) is None:
        event.inode = 0

    try:
        message, _ = self._output_mediator.GetFormattedMessages(event)
    except errors.WrongFormatter:
        message = None
    if message:
        event.message = message

    serialized = self._JSON_SERIALIZER.WriteSerializedDict(event)
    output = json.dumps(serialized, sort_keys=True)
    if py2to3.PY_2:
        # Force a unicode string on Python 2.
        output = codecs.decode(output, 'ascii')

    writer = self._output_writer
    writer.Write(output)
    writer.Write('\n')
Writes the body of an event object to the output. Args: event (EventObject): event.
juraj-google-style
def getVariable(self, name):
    """Get the variable with the corresponding name.

    Args:
        name: Name of the variable to be found.

    Raises:
        TypeError: if the specified variable does not exist.
    """
    # Wrap the lookup so it executes under the instance lock.
    def _lookup():
        return Variable(self._impl.getVariable(name))
    return lock_and_call(_lookup, self._lock)
Get the variable with the corresponding name. Args: name: Name of the variable to be found. Raises: TypeError: if the specified variable does not exist.
codesearchnet
def convert_gemm(params, w_name, scope_name, inputs, layers, weights, names):
    """Convert Linear.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        names: use short names for keras layers
    """
    print('Converting Linear ...')

    # Pick the keras layer name according to the naming policy.
    if names == 'short':
        tf_name = 'FC' + random_string(6)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())

    # PyTorch stores Linear weights as (out, in); keras expects (in, out).
    weight_matrix = weights['{0}.weight'.format(w_name)].numpy().transpose()
    input_channels, output_channels = weight_matrix.shape

    bias_key = '{0}.bias'.format(w_name)
    has_bias = bias_key in weights
    if has_bias:
        keras_weights = [weight_matrix, weights[bias_key].numpy()]
    else:
        keras_weights = [weight_matrix]

    dense = keras.layers.Dense(
        output_channels, weights=keras_weights, use_bias=has_bias,
        name=tf_name, bias_initializer='zeros', kernel_initializer='zeros')
    layers[scope_name] = dense(layers[inputs[0]])
Convert Linear. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
codesearchnet
def getUserSid(username):
    """Get the Security ID for the user

    Args:
        username (str): The user name for which to look up the SID

    Returns:
        str: The user SID

    CLI Example:

    .. code-block:: bash

        salt '*' user.getUserSid jsnuffy
    """
    if six.PY2:
        username = _to_unicode(username)

    domain = win32api.GetComputerName()
    if '\\' in username:
        # A 'DOMAIN\\user' form overrides the local computer name.
        pieces = username.split('\\')
        domain = pieces[0]
        username = pieces[-1]
    domain = domain.upper()

    account_sid = win32security.LookupAccountName(None, domain + '\\' + username)[0]
    return win32security.ConvertSidToStringSid(account_sid)
Get the Security ID for the user Args: username (str): The user name for which to look up the SID Returns: str: The user SID CLI Example: .. code-block:: bash salt '*' user.getUserSid jsnuffy
juraj-google-style
def imdb(limit=None, shuffle=True):
    """Downloads (and caches) IMDB Movie Reviews. 25k training data, 25k test data.

    Args:
        limit: get only first N items for each class

    Returns:
        [X_train, X_test, y_train, y_test]
    """
    # NOTE(review): the URL literal was truncated in the source; the canonical
    # Stanford dataset URL is reconstructed here -- confirm against upstream.
    movie_review_url = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
    # get_file extracts the archive; strip the trailing '.tar.gz' (7 chars)
    # to obtain the extraction directory path.
    path = keras.utils.get_file(
        'aclImdb.tar.gz', movie_review_url, extract=True)[:-7]
    X_train, y_train = read_pos_neg_data(path, 'train', limit)
    X_test, y_test = read_pos_neg_data(path, 'test', limit)
    if shuffle:
        # Shuffle features and labels together so pairs stay aligned.
        X_train, y_train = sklearn.utils.shuffle(X_train, y_train)
        X_test, y_test = sklearn.utils.shuffle(X_test, y_test)
    return X_train, X_test, y_train, y_test
Downloads (and caches) IMDB Movie Reviews. 25k training data, 25k test data Args: limit: get only first N items for each class Returns: [X_train, X_test, y_train, y_test]
juraj-google-style
def _run(self, data, store, signal, context, *, success_callback=None, stop_callback=None, abort_callback=None):
    """The internal run method that decorates the public run method.

    This method makes sure data is being passed to and from the task.

    Args:
        data (MultiTaskData): The data object that has been passed from the
            predecessor task.
        store (DataStoreDocument): The persistent data store object that
            allows the task to store data for access across the current
            workflow run.
        signal (TaskSignal): The signal object for tasks.  It wraps the
            construction and sending of signals into easy to use methods.
        context (TaskContext): The context in which the tasks runs.
        success_callback: This function is called when the task completed
            successfully.
        stop_callback: This function is called when a StopTask exception was
            raised.
        abort_callback: This function is called when an AbortWorkflow
            exception was raised.

    Raises:
        TaskReturnActionInvalid: If the return value of the task is not an
            Action object.

    Returns:
        Action: An Action object containing the data that should be passed on
            to the next task and optionally a list of successor tasks that
            should be executed.
    """
    if (data is None):
        # First task in the chain: start with a fresh dataset for this task.
        data = MultiTaskData()
        data.add_dataset(self._name)
    try:
        if (self._callback_init is not None):
            self._callback_init(data, store, signal, context)
        result = self.run(data, store, signal, context)
        if (self._callback_finally is not None):
            self._callback_finally(TaskStatus.Success, data, store, signal, context)
        if (success_callback is not None):
            success_callback()
    except StopTask as err:
        # Task requested a stop; optionally suppress its successors.
        if (self._callback_finally is not None):
            self._callback_finally(TaskStatus.Stopped, data, store, signal, context)
        if (stop_callback is not None):
            stop_callback(exc=err)
        # limit=[] passes the data on but schedules no successor tasks.
        result = (Action(data, limit=[]) if err.skip_successors else None)
    except AbortWorkflow as err:
        # Task requested the whole workflow to abort.
        if (self._callback_finally is not None):
            self._callback_finally(TaskStatus.Aborted, data, store, signal, context)
        if (abort_callback is not None):
            abort_callback(exc=err)
        result = None
        signal.stop_workflow()
    except:
        # Any other exception stops the workflow and is re-raised as-is.
        if (self._callback_finally is not None):
            self._callback_finally(TaskStatus.Error, data, store, signal, context)
        signal.stop_workflow()
        raise
    if (result is None):
        data.flatten(in_place=True)
        data.add_task_history(self.name)
        return Action(data)
    else:
        if (not isinstance(result, Action)):
            raise TaskReturnActionInvalid()
        result.data.flatten(in_place=True)
        result.data.add_task_history(self.name)
        return result
The internal run method that decorates the public run method. This method makes sure data is being passed to and from the task. Args: data (MultiTaskData): The data object that has been passed from the predecessor task. store (DataStoreDocument): The persistent data store object that allows the task to store data for access across the current workflow run. signal (TaskSignal): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. context (TaskContext): The context in which the tasks runs. success_callback: This function is called when the task completed successfully stop_callback: This function is called when a StopTask exception was raised. abort_callback: This function is called when an AbortWorkflow exception was raised. Raises: TaskReturnActionInvalid: If the return value of the task is not an Action object. Returns: Action: An Action object containing the data that should be passed on to the next task and optionally a list of successor tasks that should be executed.
codesearchnet
def __readCommissioningLogs(self, durationInSeconds):
    """Read logs during the commissioning process.

    Args:
        durationInSeconds: time duration for reading commissioning logs

    Returns:
        Queue: commissioning logs collected during the window.
    """
    self.logThreadStatus = self.logStatus['running']
    logs = Queue()
    t_end = time.time() + durationInSeconds
    while time.time() < t_end:
        time.sleep(0.3)

        # Honor pause requests; only read while in the running state.
        if self.logThreadStatus == self.logStatus['pauseReq']:
            self.logThreadStatus = self.logStatus['paused']
        if self.logThreadStatus != self.logStatus['running']:
            continue

        try:
            line = self._readline()
            if line:
                # BUGFIX: 'print line' is Python 2-only statement syntax (a
                # SyntaxError on Python 3); use the function form, which is
                # valid on both.
                print(line)
                logs.put(line)

                if "Join success" in line:
                    self.joinCommissionedStatus = self.joinStatus['succeed']
                    break
                elif "Join failed" in line:
                    self.joinCommissionedStatus = self.joinStatus['failed']
                    break
        except Exception:
            # Best-effort read: transient serial errors are ignored and the
            # loop retries on the next iteration.
            pass

    self.logThreadStatus = self.logStatus['stop']
    return logs
read logs during the commissioning process Args: durationInSeconds: time duration for reading commissioning logs Returns: Commissioning logs
juraj-google-style
def random(length: int = 8, chars: str = digits + ascii_lowercase) -> Iterator[str]:
    """A random string generator.

    Values are not unique, but have around 1 in a million chance of collision
    (with the default 8 character length), e.g. 'fubui5e6'.

    Args:
        length: Length of the random string.
        chars: The characters to randomly choose from.
    """
    while True:
        picked = (choice(chars) for _ in range(length))
        yield "".join(picked)
A random string. Not unique, but has around 1 in a million chance of collision (with the default 8 character length). e.g. 'fubui5e6' Args: length: Length of the random string. chars: The characters to randomly choose from.
juraj-google-style
def incoming_args(self, nodeid):
    """Return the arguments that target *nodeid*.

    Valid arguments include regular variable arguments and scopal
    (label-selecting or HCONS) arguments.  MOD/EQ links and intrinsic
    arguments are not included.

    Args:
        nodeid: the nodeid of the EP that is the arguments' target

    Returns:
        dict: `{source_nodeid: {rargname: value}}`
    """
    _vars = self._vars
    ep = self._eps[nodeid]
    # NOTE(review): ep appears to be a tuple with the label at index 2 and
    # the argument dict at index 3 -- confirm against the EP representation.
    lbl = ep[2]
    iv = ep[3].get(IVARG_ROLE)
    in_args_list = []
    # variable args: other EPs whose argument value is this EP's intrinsic
    # variable (excluding the intrinsic argument itself)
    if iv in _vars:
        for role, nids in _vars[iv]['refs'].items():
            if role != IVARG_ROLE:
                in_args_list.append((nids, role, iv))
    if lbl in _vars:
        # label-selecting scopal args (excluding the LBL role itself)
        for role, nids in _vars[lbl]['refs'].items():
            if role != 'LBL':
                in_args_list.append((nids, role, lbl))
        # scopal args that reach this label through an HCONS (hcrefs)
        for nid, role, hi in _vars[lbl].get('hcrefs', []):
            in_args_list.append(([nid], role, hi))
    # Regroup the flat (nodeids, role, target) triples by source nodeid.
    in_args = {}
    for nids, role, tgt in in_args_list:
        for nid in nids:
            if nid not in in_args:
                in_args[nid] = {}
            in_args[nid][role] = tgt
    return in_args
Return the arguments that target *nodeid*. Valid arguments include regular variable arguments and scopal (label-selecting or HCONS) arguments. MOD/EQ links and intrinsic arguments are not included. Args: nodeid: the nodeid of the EP that is the arguments' target Returns: dict: `{source_nodeid: {rargname: value}}`
juraj-google-style
def setSingleStep(self, singleStep):
    """Setter for _singleStep; converts negative values to positive ones.

    Args:
        singleStep (int): new _singleStep value; negative values are
            converted to positive ones.

    Raises:
        TypeError: If the given argument is not an integer.

    Returns:
        int: the absolute value of the given argument.
    """
    if isinstance(singleStep, int):
        self._singleStep = abs(singleStep)
        return self._singleStep
    raise TypeError('Argument is not of type int')
setter to _singleStep. converts negative values to positive ones. Args: singleStep (int): new _singleStep value. converts negative values to positive ones. Raises: TypeError: If the given argument is not an integer. Returns: int or long: the absolute value of the given argument.
codesearchnet
def __init__(self, parent=None, iconSize=None):
    """Constructs the object with the given parent.

    Args:
        parent (QObject, optional): Causes the objected to be owned by
            `parent` instead of Qt. Defaults to `None`.
        iconSize (QSize, optional): Size of edit buttons.
            Defaults to QSize(36, 36).
    """
    super(DataTableWidget, self).__init__(parent)
    # BUGFIX: the original used 'iconSize=QtCore.QSize(36, 36)' as a default
    # argument; defaults are evaluated once, so a single mutable QSize was
    # shared by every instance.  Create a fresh QSize per instance instead.
    self._iconSize = QtCore.QSize(36, 36) if iconSize is None else iconSize
    self.initUi()
Constructs the object with the given parent. Args: parent (QObject, optional): Causes the objected to be owned by `parent` instead of Qt. Defaults to `None`. iconSize (QSize, optional): Size of edit buttons. Defaults to QSize(36, 36).
juraj-google-style
def PushEvent(self, timestamp, event_data):
    """Pushes a serialized event onto the heap.

    Args:
      timestamp (int): event timestamp, which contains the number of micro
          seconds since January 1, 1970, 00:00:00 UTC.
      event_data (bytes): serialized event.
    """
    # The heap is ordered by timestamp (first tuple element).
    heapq.heappush(self._heap, (timestamp, event_data))
    self.data_size += len(event_data)
Pushes a serialized event onto the heap. Args: timestamp (int): event timestamp, which contains the number of micro seconds since January 1, 1970, 00:00:00 UTC. event_data (bytes): serialized event.
juraj-google-style
def connected_client(self):
    """Returns a ContextManagerFuture to be yielded in a with statement.

    Returns:
        A ContextManagerFuture object.

    Examples:
        >>> with (yield pool.connected_client()) as client:
                # client is a connected tornadis.Client instance
                # it will be automatically released to the pool thanks to
                # the "with" keyword
                reply = yield client.call("PING")
    """
    future = self.get_connected_client()
    # When the "with" block exits, release the client back to the pool.
    release = functools.partial(self._connected_client_release_cb, future)
    return ContextManagerFuture(future, release)
Returns a ContextManagerFuture to be yielded in a with statement. Returns: A ContextManagerFuture object. Examples: >>> with (yield pool.connected_client()) as client: # client is a connected tornadis.Client instance # it will be automatically released to the pool thanks to # the "with" keyword reply = yield client.call("PING")
codesearchnet
def _record_purchase(sailthru_client, email, item, purchase_incomplete, message_id, options):
    """Record a purchase in Sailthru.

    Arguments:
        sailthru_client (object): SailthruClient
        email (str): user's email address
        item (dict): Sailthru required information about the course
        purchase_incomplete (boolean): True if adding item to shopping cart
        message_id (str): Cookie used to identify marketing campaign
        options (dict): Sailthru purchase API options (e.g. template name)

    Returns:
        False if retryable error, else True
    """
    try:
        response = sailthru_client.purchase(email, [item],
                                            incomplete=purchase_incomplete,
                                            message_id=message_id,
                                            options=options)
        if not response.is_ok():
            error = response.get_error()
            logger.error("Error attempting to record purchase in Sailthru: %s", error.get_message())
            # A retryable error reports False so the caller can retry.
            return not can_retry_sailthru_request(error)
    except SailthruClientError as exc:
        logger.exception("Exception attempting to record purchase for %s in Sailthru - %s", email, text_type(exc))
        return False
    return True
Record a purchase in Sailthru Arguments: sailthru_client (object): SailthruClient email (str): user's email address item (dict): Sailthru required information about the course purchase_incomplete (boolean): True if adding item to shopping cart message_id (str): Cookie used to identify marketing campaign options (dict): Sailthru purchase API options (e.g. template name) Returns: False if retryable error, else True
juraj-google-style
def generate_enum_doc(enum_descriptor, locations, path, name_prefix=''):
    """Generate doc for an enum.

    Args:
      enum_descriptor: descriptor_pb2.EnumDescriptorProto instance for enum
        to generate docs for.
      locations: Dictionary of location paths tuples to
        descriptor_pb2.SourceCodeInfo.Location instances.
      path: Path tuple to the enum definition.
      name_prefix: Optional prefix for this enum's name.
    """
    print(make_subsection(name_prefix + enum_descriptor.name))
    location = locations[path]
    if location.HasField('leading_comments'):
        print(textwrap.dedent(location.leading_comments))

    rows = []
    for value_index, value in enumerate(enum_descriptor.value):
        # Field 2 of an EnumDescriptorProto holds its values.
        value_location = locations[path + (2, value_index)]
        description = textwrap.fill(
            get_comment_from_location(value_location), INFINITY)
        rows.append((make_code(value.name), value.number, description))
    print_table(('Name', 'Number', 'Description'), rows)
Generate doc for an enum. Args: enum_descriptor: descriptor_pb2.EnumDescriptorProto instance for enum to generate docs for. locations: Dictionary of location paths tuples to descriptor_pb2.SourceCodeInfo.Location instances. path: Path tuple to the enum definition. name_prefix: Optional prefix for this enum's name.
codesearchnet
def remove_variable(self, v):
    """Remove variable v and all its interactions from a binary quadratic model.

    Args:
        v (variable): The variable to be removed from the binary quadratic
            model.

    Notes:
        If the specified variable is not in the binary quadratic model, this
        function does nothing.

    Examples:
        This example creates an Ising model and then removes one variable.

        >>> import dimod
        ...
        >>> bqm = dimod.BinaryQuadraticModel({'a': 0.0, 'b': 1.0, 'c': 2.0},
        ...                                  {('a', 'b'): 0.25, ('a','c'): 0.5, ('b','c'): 0.75},
        ...                                  -0.5, dimod.SPIN)
        >>> bqm.remove_variable('a')
        >>> 'a' in bqm.linear
        False
        >>> ('b','c') in bqm.quadratic
        True
    """
    if (v not in self):
        # Missing variable: silently do nothing (documented behavior).
        return
    adj = self.adj
    # remove_interaction mutates the adjacency, so keep removing one
    # neighbor at a time until v has no interactions left.
    while adj[v]:
        self.remove_interaction(v, next(iter(adj[v])))
    del self.linear[v]
    try:
        # Invalidate cached views, which are now stale.
        del self._counterpart
        if ((self.vartype is not Vartype.BINARY) and hasattr(self, '_binary')):
            del self._binary
        elif ((self.vartype is not Vartype.SPIN) and hasattr(self, '_spin')):
            del self._spin
    except AttributeError:
        # No cached counterpart existed; nothing to invalidate.
        pass
Remove variable v and all its interactions from a binary quadratic model. Args: v (variable): The variable to be removed from the binary quadratic model. Notes: If the specified variable is not in the binary quadratic model, this function does nothing. Examples: This example creates an Ising model and then removes one variable. >>> import dimod ... >>> bqm = dimod.BinaryQuadraticModel({'a': 0.0, 'b': 1.0, 'c': 2.0}, ... {('a', 'b'): 0.25, ('a','c'): 0.5, ('b','c'): 0.75}, ... -0.5, dimod.SPIN) >>> bqm.remove_variable('a') >>> 'a' in bqm.linear False >>> ('b','c') in bqm.quadratic True
codesearchnet
def _events(self, using_url, filters=None, limit=None):
    """A long-polling method that queries Syncthing for events.

    Args:
        using_url (str): REST HTTP endpoint
        filters (List[str]): Creates an "event group" in Syncthing to only
            receive events that have been subscribed to.
        limit (int): The number of events to query in the history to catch
            up to the current state.

    Returns:
        generator[dict]
    """
    # Coerce invalid limit values to "no limit".
    if (not isinstance(limit, (int, NoneType))):
        limit = None
    if (filters is None):
        filters = []
    # Accept a comma-separated string as well as a list of filter names.
    if isinstance(filters, string_types):
        filters = filters.split(',')
    if (not self.blocking):
        self.blocking = True
    # Long-poll loop: runs until self.blocking is cleared externally.
    while self.blocking:
        params = {'since': self._last_seen_id, 'limit': limit}
        if filters:
            params['events'] = ','.join(map(str, filters))
        try:
            data = self.get(using_url, params=params, raw_exceptions=True)
        except (ConnectTimeout, ConnectionError) as e:
            # Timeouts are expected during long-polling; just poll again.
            data = None
        except Exception as e:
            reraise('', e)
        if data:
            # Remember the newest event id so the next poll resumes there.
            self._last_seen_id = data[(- 1)]['id']
            for event in data:
                self._count += 1
                (yield event)
A long-polling method that queries Syncthing for events.. Args: using_url (str): REST HTTP endpoint filters (List[str]): Creates an "event group" in Syncthing to only receive events that have been subscribed to. limit (int): The number of events to query in the history to catch up to the current state. Returns: generator[dict]
codesearchnet
def build_or_reuse_placeholder(tensor_spec):
    """Build a tf.placeholder from the metadata in the given tensor spec,
    or return an existing one.

    Args:
        tensor_spec (tf.TensorSpec):

    Returns:
        tf.Tensor:
    """
    graph = tfv1.get_default_graph()
    name = tensor_spec.name
    try:
        existing = graph.get_tensor_by_name(name + ':0')
    except KeyError:
        # No tensor with that name yet: create a placeholder at root scope.
        with tfv1.name_scope(None):
            return tfv1.placeholder(
                tensor_spec.dtype, shape=tensor_spec.shape,
                name=tensor_spec.name)
    # A tensor already exists; it must be a compatible placeholder.
    assert 'Placeholder' in existing.op.type, 'Tensor {} exists but is not a placeholder!'.format(name)
    assert tensor_spec.is_compatible_with(existing), 'Tensor {} exists but is not compatible with the signature!'.format(existing)
    return existing
Build a tf.placeholder from the metadata in the given tensor spec, or return an existing one. Args: tensor_spec (tf.TensorSpec): Returns: tf.Tensor:
codesearchnet
def eval_rs(gains, losses):
    """Evaluates the RS variable in RSI algorithm.

    Args:
        gains: List of price gains.
        losses: List of prices losses.

    Returns:
        Float of average gains over average losses.
    """
    count = len(gains) + len(losses)
    # An empty side contributes a neutral average of 1.
    if gains:
        avg_gains = stats.avg(gains, count=count)
    else:
        avg_gains = 1
    if losses:
        avg_losses = stats.avg(losses, count=count)
    else:
        avg_losses = 1
    # Guard against division by zero when there were no losses on average.
    return avg_gains if avg_losses == 0 else avg_gains / avg_losses
Evaluates the RS variable in RSI algorithm Args: gains: List of price gains. losses: List of prices losses. Returns: Float of average gains over average losses.
juraj-google-style
def Parse(self, conditions, host_data):
    """Runs methods that evaluate whether collected host_data has an issue.

    Args:
      conditions: A list of conditions to determine which Methods to trigger.
      host_data: A map of artifacts and rdf data.

    Returns:
      A CheckResult populated with Anomalies if an issue exists.
    """
    result = CheckResult(check_id=self.check_id)
    # Only the methods selected by the conditions are evaluated.
    anomalies = [method.Parse(conditions, host_data)
                 for method in self.SelectChecks(conditions)]
    result.ExtendAnomalies(anomalies)
    return result
Runs methods that evaluate whether collected host_data has an issue. Args: conditions: A list of conditions to determine which Methods to trigger. host_data: A map of artifacts and rdf data. Returns: A CheckResult populated with Anomalies if an issue exists.
codesearchnet
def _get_degree(num_nodes):
    """Get the degree of the current surface.

    Args:
        num_nodes (int): The number of control points for a
            B\u00e9zier surface.

    Returns:
        int: The degree ``d`` such that ``(d + 1)(d + 2)/2``
        equals ``num_nodes``.

    Raises:
        ValueError: If ``num_nodes`` isn't a triangular number.
    """
    # Invert num_nodes = (d + 1)(d + 2) / 2 for d, then round to the
    # nearest integer and verify it reproduces num_nodes exactly.
    approx = 0.5 * (np.sqrt(8.0 * num_nodes + 1.0) - 3.0)
    degree = int(np.round(approx))
    if (degree + 1) * (degree + 2) != 2 * num_nodes:
        raise ValueError(num_nodes, 'not a triangular number')
    return degree
Get the degree of the current surface. Args: num_nodes (int): The number of control points for a Bézier surface. Returns: int: The degree :math:`d` such that :math:`(d + 1)(d + 2)/2` equals ``num_nodes``. Raises: ValueError: If ``num_nodes`` isn't a triangular number.
codesearchnet
def build(cls, name: str, param_names: tuple[str, ...], posonly_count: int, varargs_name: str | None, kwonly_params: tuple[str, ...], kwargs_name: str | None, defaults: 'dict[str, cfg.Variable]', annotations: dict[str, Any], ctx: 'context.Context') -> 'SimpleFunction':
    """Returns a SimpleFunction.

    Args:
      name: Name of the function as a string.
      param_names: Tuple of parameter names as strings. This DOES include
        positional-only parameters and does NOT include keyword-only
        parameters.
      posonly_count: Number of positional-only parameters.
      varargs_name: The "args" in "*args". String or None.
      kwonly_params: Tuple of keyword-only parameters as strings.
      kwargs_name: The "kwargs" in "**kwargs". String or None.
      defaults: Dictionary of string names to values of default arguments.
      annotations: Dictionary of string names to annotations (strings or
        types).
      ctx: The abstract context for this function.
    """
    # Fill in an 'unsolvable' annotation for every unannotated parameter.
    filled_annotations = dict(annotations)
    every_param = itertools.chain(
        param_names, [varargs_name, kwargs_name], kwonly_params)
    for param in every_param:
        if param and param not in filled_annotations:
            filled_annotations[param] = ctx.convert.unsolvable
    if not isinstance(defaults, dict):
        # A sequence of defaults aligns with the trailing parameters.
        defaults = dict(zip(param_names[-len(defaults):], defaults))
    signature = function.Signature(
        name, param_names, posonly_count, varargs_name, kwonly_params,
        kwargs_name, defaults, filled_annotations)
    return cls(signature, ctx)
Returns a SimpleFunction. Args: name: Name of the function as a string param_names: Tuple of parameter names as strings. This DOES include positional-only parameters and does NOT include keyword-only parameters. posonly_count: Number of positional-only parameters. varargs_name: The "args" in "*args". String or None. kwonly_params: Tuple of keyword-only parameters as strings. kwargs_name: The "kwargs" in "**kwargs". String or None. defaults: Dictionary of string names to values of default arguments. annotations: Dictionary of string names to annotations (strings or types). ctx: The abstract context for this function.
github-repos
def _compile_internal(computation, inputs=None):
    """Builds graph operators that compile and symbolically execute `computation`.

    Args:
        computation: A Python function that builds the computation to compile
            and execute.
        inputs: A list of inputs or `None` (equivalent to an empty list). Each
            input can be a nested structure containing values that are
            convertible to tensors.

    Returns:
        Same data structure as if computation(*inputs) is called directly,
        with exceptions for None outputs, single-value outputs, and
        operation-only outputs.

    Raises:
        TypeError: If `inputs` is not a list or tuple.
    """
    if inputs is None:
        inputs = []
    if not isinstance(inputs, collections_abc.Sequence):
        raise TypeError('inputs must be a list')
    # Flatten the (possibly nested) inputs so each leaf can be converted to a
    # tensor individually; the structure is restored below.
    flat_inputs = nest.flatten(inputs)
    flat_inputs = [ops.convert_to_tensor(x) for x in flat_inputs]
    cluster_name = ops.get_default_graph().unique_name('cluster')
    # The pivot op anchors control dependencies for the compile cluster.
    pivot = control_flow_ops.no_op(name=cluster_name + '/pivot')
    context = XLACompileContext(name=cluster_name, pivot=pivot)
    try:
        context.Enter()
        # Identity ops give each input a stable, predictable name inside the
        # cluster.
        flat_inputs = [array_ops.identity(x, name='input_{}'.format(i)) for i, x in enumerate(flat_inputs)]
        computation_inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_inputs)
        # Force resource variables inside the compiled computation, restoring
        # the caller's setting afterwards.
        vscope = variable_scope.get_variable_scope()
        saved_use_resource = vscope.use_resource
        vscope.set_use_resource(True)
        with _disable_summary_context():
            outputs = computation(*computation_inputs)
        vscope.set_use_resource(saved_use_resource)
        outputs_is_flat = is_flat(outputs)
        if outputs_is_flat:
            output_tensors, control_deps = _postprocess_flat_outputs(outputs)
        else:
            output_tensors, control_deps = _postprocess_non_flat_outputs(outputs)
        context.ExitResult(output_tensors)
    finally:
        # Always surface unsupported-op diagnostics and close the context,
        # even if the computation raised.
        context.report_unsupported_operations()
        context.Exit()
    if not output_tensors:
        # Operation-only computation: return a grouping op for the side
        # effects.
        return control_flow_ops.group(control_deps, name='output_0')
    output_tensors = [xla_ops.xla_cluster_output(o, name='output{}'.format(i)) for i, o in enumerate(output_tensors)]
    # Tie each output to the computation's control dependencies so side
    # effects run before outputs are consumed.
    with ops.control_dependencies(control_deps):
        output_tensors = [array_ops.identity(o, name='output_%d' % i) for i, o in enumerate(output_tensors)]
    if not outputs_is_flat:
        output_tensors = nest.pack_sequence_as(structure=outputs, flat_sequence=output_tensors)
    return output_tensors
Builds graph operators that compiles and symbolically executes computation. Args: computation: A Python function that builds the computation to compile and execute. inputs: A list of inputs or `None` (equivalent to an empty list). Each input can be a nested structure containing values that are convertible to tensors. Note that passing an N-dimension list of compatible values will result in a N-dimension list of scalar tensors rather than a single Rank-N tensors. If you need different behavior, convert part of inputs to tensors with `tf.convert_to_tensor`. Returns: Same data structure as if computation(*inputs) is called directly with some exceptions for correctness. Exceptions include: 1) None output 2) Single value output 3) Operation-only outputs Raises: ValueError: If any element in computation outputs is neither an operations or a value that can be converted to tensor. ValueError: If computation outputs is non-flat and contains any Operations. TypeError: If `inputs` is not a list or tuple.
github-repos
def delete_file(self, file_id):
    """Delete a file.

    Args:
        file_id (str): The UUID of the file to delete.

    Returns:
        None

    Raises:
        StorageArgumentException: Invalid arguments
        StorageForbiddenException: Server response code 403
        StorageNotFoundException: Server response code 404
        StorageException: other 400-600 error codes
    """
    if not is_valid_uuid(file_id):
        raise StorageArgumentException('Invalid UUID for file_id: {0}'.format(file_id))
    request = self._authenticated_request.to_endpoint('file/{}/'.format(file_id))
    request.delete()
Delete a file. Args: file_id (str): The UUID of the file to delete. Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
juraj-google-style
async def getTempCoreProx(mods=None):
    """Get a Telepath Proxy to a Cortex instance backed by a temporary Cortex.

    Args:
        mods (list): A list of additional CoreModules to load in the Cortex.

    Notes:
        The Proxy returned by this should be fini()'d to tear down the
        temporary Cortex.

    Returns:
        s_telepath.Proxy
    """
    acm = genTempCoreProxy(mods)
    # Manually enter the async context manager; its exit is deferred to the
    # proxy's fini hook below.
    prox = (await acm.__aenter__())
    # Stash the context manager on the proxy. object.__setattr__ is used to
    # bypass any custom attribute handling on the Proxy class.
    object.__setattr__(prox, '_acm', acm)

    async def onfini():
        # Tear down the temporary Cortex when the proxy is fini()'d.
        (await prox._acm.__aexit__(None, None, None))
    prox.onfini(onfini)
    return prox
Get a Telepath Proxy to a Cortex instance which is backed by a temporary Cortex. Args: mods (list): A list of additional CoreModules to load in the Cortex. Notes: The Proxy returned by this should be fini()'d to tear down the temporary Cortex. Returns: s_telepath.Proxy
codesearchnet
def unpack_rpc_payload(resp_format, payload):
    """Unpack an RPC payload according to resp_format.

    Args:
        resp_format (str): a struct format code (without the <) for the
            parameter format for this RPC. This format code may include the
            final character V, which means that it expects a variable length
            bytearray.
        payload (bytes): The binary payload that should be unpacked.

    Returns:
        list: A list of the unpacked payload items.
    """
    # Build the concrete struct code for this payload, then decode in one go.
    return struct.unpack(_create_argcode(resp_format, payload), payload)
Unpack an RPC payload according to resp_format. Args: resp_format (str): a struct format code (without the <) for the parameter format for this RPC. This format code may include the final character V, which means that it expects a variable length bytearray. payload (bytes): The binary payload that should be unpacked. Returns: list: A list of the unpacked payload items.
juraj-google-style
def output_shapes(self):
    """Returns the shape of each component of an element of this dataset.

    Returns:
        A (nested) structure of `tf.TensorShape` objects corresponding to
        each component of an element of this dataset.
    """
    # Each component spec converts itself to a legacy `tf.TensorShape`; map
    # that conversion over the element structure.
    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), self.element_spec)
Returns the shape of each component of an element of this dataset. Returns: A (nested) structure of `tf.TensorShape` objects corresponding to each component of an element of this dataset.
github-repos
def GetFilter(cls, filter_name):
    """Return an initialized filter.

    Only initialize filters once.

    Args:
        filter_name: The name of the filter, as a string.

    Returns:
        an initialized instance of the filter.

    Raises:
        DefinitionError: if the type of filter has not been defined.
    """
    try:
        filt_cls = cls.GetPlugin(filter_name)
    except KeyError as err:
        # Chain the lookup failure (PEP 3134) so the original KeyError is
        # preserved in the traceback instead of being silently discarded.
        raise DefinitionError("Filter %s does not exist." % filter_name) from err
    return filt_cls()
Return an initialized filter. Only initialize filters once. Args: filter_name: The name of the filter, as a string. Returns: an initialized instance of the filter. Raises: DefinitionError if the type of filter has not been defined.
juraj-google-style
def fts_match_any(self, fts, inv):
    """Return `True` if any segment in `inv` matches the features in `fts`.

    Args:
        fts (list): a collection of (value, feature) tuples
        inv (list): a collection of IPA segments represented as Unicode
            strings

    Returns:
        bool: `True` if any segment in `inv` matches the features in `fts`
    """
    # Use a generator expression so `any` can short-circuit on the first
    # matching segment instead of evaluating fts_match for the whole
    # inventory up front.
    return any(self.fts_match(fts, s) for s in inv)
Return `True` if any segment in `inv` matches the features in `fts` Args: fts (list): a collection of (value, feature) tuples inv (list): a collection of IPA segments represented as Unicode strings Returns: bool: `True` if any segment in `inv` matches the features in `fts`
codesearchnet
async def verify_scriptworker_task(chain, obj):
    """Verify the signing trust object.

    Currently the only check is to make sure it was run on a scriptworker.

    Args:
        chain (ChainOfTrust): the chain we're operating on
        obj (ChainOfTrust or LinkOfTrust): the trust object for the signing
            task.
    """
    if obj.worker_impl == "scriptworker":
        errors = []
    else:
        errors = ["{} {} must be run from scriptworker!".format(obj.name, obj.task_id)]
    raise_on_errors(errors)
Verify the signing trust object. Currently the only check is to make sure it was run on a scriptworker. Args: chain (ChainOfTrust): the chain we're operating on obj (ChainOfTrust or LinkOfTrust): the trust object for the signing task.
juraj-google-style
def _process_tensor_event_in_chunks(self, event, tensor_chunks):
    """Possibly reassemble event chunks.

    Due to gRPC's message size limit, a large tensor can be encapsulated in
    multiple Event proto chunks sent through the debugger stream. This
    method tracks the chunks that have arrived and reassembles all chunks
    for a tensor once they are complete.

    Args:
        event: The single Event proto that has arrived.
        tensor_chunks: A dict used to keep track of the Event protos that
            have arrived but haven't been reassembled.

    Returns:
        If all Event protos corresponding to a tensor have arrived, returns
        the reassembled Event proto. Otherwise, returns None.
    """
    value = event.summary.value[0]
    debugger_plugin_metadata = json.loads(compat.as_text(value.metadata.plugin_data.content))
    device_name = debugger_plugin_metadata['device']
    num_chunks = debugger_plugin_metadata['numChunks']
    chunk_index = debugger_plugin_metadata['chunkIndex']
    # Single-chunk tensors need no reassembly.
    if num_chunks <= 1:
        return event
    debug_node_name = value.node_name
    timestamp = int(event.wall_time)
    # Key uniquely identifies the tensor this chunk belongs to.
    tensor_key = '%s_%s_%d' % (device_name, debug_node_name, timestamp)
    if tensor_key not in tensor_chunks:
        # Pre-size the slot list; None marks a chunk not yet received.
        tensor_chunks[tensor_key] = [None] * num_chunks
    chunks = tensor_chunks[tensor_key]
    # Numeric tensors carry raw bytes in tensor_content; string tensors
    # carry values in string_val (the whole event is kept for those).
    if value.tensor.tensor_content:
        chunks[chunk_index] = value.tensor
    elif value.tensor.string_val:
        chunks[chunk_index] = event
    if None not in chunks:
        # All chunks have arrived; merge them in order.
        if value.tensor.tensor_content:
            event.summary.value[0].tensor.tensor_content = b''.join((chunk.tensor_content for chunk in chunks))
            del tensor_chunks[tensor_key]
            return event
        elif value.tensor.string_val:
            merged_event = chunks[0]
            for chunk in chunks[1:]:
                merged_event.summary.value[0].tensor.string_val.extend(list(chunk.summary.value[0].tensor.string_val))
            # NOTE(review): this path does not remove tensor_key from
            # tensor_chunks, unlike the tensor_content path — confirm whether
            # that is intentional or a slow leak.
            return merged_event
Possibly reassemble event chunks. Due to gRPC's message size limit, a large tensor can be encapsulated in multiple Event proto chunks to be sent through the debugger stream. This method keeps track of the chunks that have arrived, reassemble all chunks corresponding to a tensor when they have arrived and return the reassembled Event proto. Args: event: The single Event proto that has arrived. tensor_chunks: A dict used to keep track of the Event protos that have arrived but haven't been reassembled. Returns: If all Event protos corresponding to a tensor have arrived, returns the reassembled Event proto. Otherwise, return None.
github-repos
def _ParseDataStreamWithParser(self, parser_mediator, parser, file_entry, data_stream_name):
    """Parses a data stream of a file entry with a specific parser.

    Args:
        parser_mediator (ParserMediator): parser mediator.
        parser (BaseParser): parser.
        file_entry (dfvfs.FileEntry): file entry.
        data_stream_name (str): data stream name.

    Raises:
        RuntimeError: if the file-like object is missing.
    """
    data_stream_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
    if not data_stream_object:
        raise RuntimeError('Unable to retrieve file-like object from file entry.')
    try:
        self._ParseFileEntryWithParser(parser_mediator, parser, file_entry, file_object=data_stream_object)
    finally:
        # Always release the file-like object, even if parsing raised.
        data_stream_object.close()
Parses a data stream of a file entry with a specific parser. Args: parser_mediator (ParserMediator): parser mediator. parser (BaseParser): parser. file_entry (dfvfs.FileEntry): file entry. data_stream_name (str): data stream name. Raises: RuntimeError: if the file-like object is missing.
codesearchnet
def options(self, section):
    """Returns list of configuration options for the named section.

    Args:
        section (str): name of section

    Returns:
        list: list of option names

    Raises:
        NoSectionError: if the named section does not exist.
    """
    if self.has_section(section):
        return self[section].options()
    raise NoSectionError(section) from None
Returns list of configuration options for the named section. Args: section (str): name of section Returns: list: list of option names
codesearchnet
def pipelines(self):
    """Returns a set of all pipelines from the last response.

    Returns:
        set: all the pipelines available in the response on success; an
            empty set when there is no response.
    """
    if not self.response:
        return set()
    if self._pipelines is None:
        # Cache the names so repeated lookups don't re-walk the payload.
        names = set()
        for group in self.response.payload:
            names.update(pipeline['name'] for pipeline in group['pipelines'])
        self._pipelines = names
    return self._pipelines
Returns a set of all pipelines from the last response Returns: set: Response success: all the pipelines available in the response Response failure: an empty set
codesearchnet
def commutes(m1: np.ndarray, m2: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08) -> bool:
    """Determines if two matrices approximately commute.

    Two matrices A and B commute if they are square, have the same size,
    and AB = BA.

    Args:
        m1: One of the matrices.
        m2: The other matrix.
        rtol: The per-matrix-entry relative tolerance on equality.
        atol: The per-matrix-entry absolute tolerance on equality.

    Returns:
        Whether the two matrices have compatible sizes and a commutator
        equal to zero within tolerance.
    """
    # Shape checks first: only same-sized square matrices can commute.
    if m1.shape[0] != m1.shape[1]:
        return False
    if m1.shape != m2.shape:
        return False
    return np.allclose(m1.dot(m2), m2.dot(m1), rtol=rtol, atol=atol)
Determines if two matrices approximately commute. Two matrices A and B commute if they are square and have the same size and AB = BA. Args: m1: One of the matrices. m2: The other matrix. rtol: The per-matrix-entry relative tolerance on equality. atol: The per-matrix-entry absolute tolerance on equality. Returns: Whether the two matrices have compatible sizes and a commutator equal to zero within tolerance.
codesearchnet
def read_label_file(path):
    """Read the labels from an audacity label file.

    Args:
        path (str): Path to the label file.

    Returns:
        list: List of labels (start [sec], end [sec], label)

    Example::

        >>> read_label_file('/path/to/label/file.txt')
        [
            [0.0, 0.2, 'sie'],
            [0.2, 2.2, 'hallo']
        ]
    """
    records = textfile.read_separated_lines_generator(path, separator='\t', max_columns=3)
    # The third column (the label text) is optional; default to ''.
    return [
        [float(_clean_time(rec[0])),
         float(_clean_time(rec[1])),
         str(rec[2]) if len(rec) > 2 else '']
        for rec in records
    ]
Read the labels from an audacity label file. Args: path (str): Path to the label file. Returns: list: List of labels (start [sec], end [sec], label) Example:: >>> read_label_file('/path/to/label/file.txt') [ [0.0, 0.2, 'sie'], [0.2, 2.2, 'hallo'] ]
juraj-google-style
def dumps(self, with_defaults=False):
    """Generate a string representing all the configuration values.

    Args:
        with_defaults (bool): if ``True``, values of items with no custom
            values will be included in the output if they have a default
            value set.

    Returns:
        str: the serialized configuration.
    """
    # Delegate serialization to the reader/writer bound to this config.
    return self._rw.dump_config_to_string(self._config, with_defaults=with_defaults)
Generate a string representing all the configuration values. Args: with_defaults (bool): if ``True``, values of items with no custom values will be included in the output if they have a default value set.
codesearchnet
def join(self, other):
    """Try to join a rectangle to this one.

    If the union of the two rectangles is itself a rectangle, this rectangle
    is modified in place to that union.

    Arguments:
        other (Rectangle): Rectangle to join

    Returns:
        bool: True when successfully joined, False otherwise
    """
    # Containment cases: the union is simply the larger rectangle.
    if self.contains(other):
        return True
    if other.contains(self):
        # Adopt the containing rectangle's geometry.
        self.x = other.x
        self.y = other.y
        self.width = other.width
        self.height = other.height
        return True
    # Disjoint rectangles (not even touching at edges) cannot be joined.
    if (not self.intersects(other, edges=True)):
        return False
    # Vertically stacked: same left edge and width -> union spans combined
    # vertical extent.
    if ((self.left == other.left) and (self.width == other.width)):
        y_min = min(self.bottom, other.bottom)
        y_max = max(self.top, other.top)
        self.y = y_min
        self.height = (y_max - y_min)
        return True
    # Horizontally adjacent: same bottom edge and height -> union spans
    # combined horizontal extent.
    if ((self.bottom == other.bottom) and (self.height == other.height)):
        x_min = min(self.left, other.left)
        x_max = max(self.right, other.right)
        self.x = x_min
        self.width = (x_max - x_min)
        return True
    # Overlapping but the union is not a rectangle.
    return False
Try to join a rectangle to this one, if the result is also a rectangle and the operation is successful and this rectangle is modified to the union. Arguments: other (Rectangle): Rectangle to join Returns: bool: True when successfully joined, False otherwise
codesearchnet
def find_elements_by_id(self, id_, update=False) -> Elements:
    """Finds multiple elements by id.

    Args:
        id_: The id of the elements to be found.
        update: If the interface has changed, this option should be True.

    Returns:
        A list with elements if any was found. An empty list if not.

    Usage:
        elements = driver.find_elements_by_id('foo')
    """
    # Thin convenience wrapper over the generic locator-based lookup.
    return self.find_elements(by=By.ID, value=id_, update=update)
Finds multiple elements by id. Args: id_: The id of the elements to be found. update: If the interface has changed, this option should be True. Returns: A list with elements if any was found. An empty list if not. Raises: NoSuchElementException - If the element wasn't found. Usage: elements = driver.find_elements_by_id('foo')
juraj-google-style
def Record(self, obj):
    """Records the object as visited.

    Args:
        obj: visited object.

    Returns:
        True if the object hasn't been previously visited or False if it has
        already been recorded or the quota has been exhausted.
    """
    recorded = self._visit_recorder_objects
    # Refuse new entries once the quota is exhausted, and skip objects
    # (keyed by identity) that were already recorded.
    if len(recorded) >= _MAX_VISIT_OBJECTS or id(obj) in recorded:
        return False
    recorded[id(obj)] = obj
    return True
Records the object as visited. Args: obj: visited object. Returns: True if the object hasn't been previously visited or False if it has already been recorded or the quota has been exhausted.
codesearchnet
class GroupViTTextEncoder(nn.Module):
    """Transformer encoder consisting of `config.num_hidden_layers` self-attention
    layers. Each layer is a [`GroupViTEncoderLayer`].

    Args:
        config: GroupViTTextConfig
    """

    def __init__(self, config: GroupViTTextConfig):
        super().__init__()
        self.config = config
        # One encoder layer per configured hidden layer.
        self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        # Toggled externally to trade compute for memory during training.
        self.gradient_checkpointing = False

    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
        """Runs the layer stack over `inputs_embeds`.

        Returns hidden states (and optionally per-layer hidden states and
        attentions) either as a tuple or as a `BaseModelOutput`.
        """
        # Fall back to the config defaults when flags are not given explicitly.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                # Record the states *entering* each layer.
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                # Recompute activations in the backward pass to save memory.
                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)
            else:
                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            # Also record the final layer's output states.
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a [`GroupViTEncoderLayer`]. Args: config: GroupViTTextConfig
github-repos
def index_in_block(self, channel_index: int) -> 'tuple[int, int]':
    """Return the index a channel has within the subblock it belongs to.

    I.e., only for reducible circuits, this gives a result different from
    the argument itself.

    Args:
        channel_index (int): The index of the external channel

    Returns:
        tuple: ``(index_in_block, block_index)`` where ``index_in_block`` is
            the channel's position within its subblock and ``block_index``
            identifies that subblock.

    Raises:
        ValueError: for an invalid `channel_index`
    """
    # Bare ValueError() gave no diagnostics; include the offending value.
    if channel_index < 0 or channel_index >= self.cdim:
        raise ValueError('channel_index must be in [0, %d), got %d' % (self.cdim, channel_index))
    struct = self.block_structure
    if len(struct) == 1:
        # Irreducible circuit: the single block spans every channel.
        return channel_index, 0
    i = 1
    # Advance until the cumulative size of the first `i` blocks exceeds
    # `channel_index`; the channel then lives in block `i - 1`.
    while sum(struct[:i]) <= channel_index and i < self.cdim:
        i += 1
    block_index = i - 1
    index_in_block = channel_index - sum(struct[:block_index])
    return index_in_block, block_index
Return the index a channel has within the subblock it belongs to. I.e., only for reducible circuits, this gives a result different from the argument itself. Args: channel_index (int): The index of the external channel Raises: ValueError: for an invalid `channel_index`
juraj-google-style
def get_weights_of_nn_sites(self, structure, n):
    """Get weight associated with each near neighbor of site with index n
    in structure.

    Args:
        structure (Structure): input structure.
        n (integer): index of site for which to determine the weights.

    Returns:
        weights (list of floats): near-neighbor weights.
    """
    neighbors = self.get_nn_info(structure, n)
    return [neighbor['weight'] for neighbor in neighbors]
Get weight associated with each near neighbor of site with index n in structure. Args: structure (Structure): input structure. n (integer): index of site for which to determine the weights. Returns: weights (list of floats): near-neighbor weights.
codesearchnet