code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def _build(self, input_, prev_state):
    """Connects the VanillaRNN module into the graph.

    If this is not the first time the module has been connected to the graph,
    the Tensors provided as input_ and state must have the same final
    dimension, in order for the existing variables to be the correct size for
    their corresponding multiplications. The batch size may differ for each
    connection.

    Args:
      input_: a 2D Tensor of size [batch_size, input_size].
      prev_state: a 2D Tensor of size [batch_size, hidden_size].

    Returns:
      output: a 2D Tensor of size [batch_size, hidden_size].
      next_state: a Tensor of size [batch_size, hidden_size].

    Raises:
      ValueError: if connecting the module into the graph any time after the
        first time, and the inferred size of the inputs does not match previous
        invocations.
    """
    # Two affine transforms, each configured from the module-level
    # initializers/partitioners/regularizers keyed by layer name.
    self._in_to_hidden_linear = basic.Linear(
        self._hidden_size, name='in_to_hidden',
        initializers=self._initializers.get('in_to_hidden'),
        partitioners=self._partitioners.get('in_to_hidden'),
        regularizers=self._regularizers.get('in_to_hidden'))
    self._hidden_to_hidden_linear = basic.Linear(
        self._hidden_size, name='hidden_to_hidden',
        initializers=self._initializers.get('hidden_to_hidden'),
        partitioners=self._partitioners.get('hidden_to_hidden'),
        regularizers=self._regularizers.get('hidden_to_hidden'))
    in_to_hidden = self._in_to_hidden_linear(input_)
    hidden_to_hidden = self._hidden_to_hidden_linear(prev_state)
    output = self._activation((in_to_hidden + hidden_to_hidden))
    # Vanilla RNN: the output doubles as the next state.
    return (output, output)
def CopyNoFail(src, root=None):
    """Copy `src` into the current working directory, if it exists.

    No action is executed if the file does not exist; no hash is checked.

    Args:
        src: The filename we want to copy to '.'.
        root: The optional source dir we should pull `src` from. Defaults
            to benchbuild.settings.CFG["tmp_dir"].

    Returns:
        True, if we copied something.
    """
    base_dir = str(CFG['tmp_dir']) if root is None else root
    candidate = local.path(base_dir) / src
    if not candidate.exists():
        return False
    Copy(candidate, '.')
    return True
def set_icon_data(self, base64_data, mimetype="image/png", rel="icon"):
    """Allows to define an icon for the App.

    Args:
        base64_data (str): base64 encoded image data
            (ie. "data:image/x-icon;base64,AAABAAEAEBA....")
        mimetype (str): mimetype of the image ("image/png" or "image/x-icon"...)
        rel (str): leave it unchanged (standard "icon")
    """
    link_tag = '<link rel="%s" href="%s" type="%s" />' % (rel, base64_data, mimetype)
    self.add_child("favicon", link_tag)
def _OpenFileObject(self, path_spec):
    """Opens the file-like object defined by path specification.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      pyvmdk.handle: a file-like object.

    Raises:
      IOError: if the file-like object could not be opened.
      OSError: if the file-like object could not be opened.
      PathSpecError: if the path specification is incorrect.
    """
    if not path_spec.HasParent():
      raise errors.PathSpecError(
          'Unsupported path specification without parent.')
    parent_path_spec = path_spec.parent
    parent_location = getattr(parent_path_spec, 'location', None)
    if not parent_location:
      raise errors.PathSpecError(
          'Unsupported parent path specification without location.')
    file_system = resolver.Resolver.OpenFileSystem(
        parent_path_spec, resolver_context=self._resolver_context)
    file_object = resolver.Resolver.OpenFileObject(
        parent_path_spec, resolver_context=self._resolver_context)
    vmdk_handle = pyvmdk.handle()
    vmdk_handle.open_file_object(file_object)
    parent_location_path_segments = file_system.SplitPath(parent_location)
    extent_data_files = []
    for extent_descriptor in iter(vmdk_handle.extent_descriptors):
      extent_data_filename = extent_descriptor.filename
      # Extent filenames may use either '/' or '\\' as the path separator;
      # fall back to the raw filename when neither is present.
      _, path_separator, filename = extent_data_filename.rpartition('/')
      if not path_separator:
        _, path_separator, filename = extent_data_filename.rpartition('\\')
      if not path_separator:
        filename = extent_data_filename
      # Look for the extent data file next to the VMDK descriptor file.
      parent_location_path_segments.pop()
      parent_location_path_segments.append(filename)
      extent_data_file_location = file_system.JoinPath(
          parent_location_path_segments)
      # Build a path specification of the same type as the parent that
      # points at the extent data file.
      kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)
      kwargs['location'] = extent_data_file_location
      if parent_path_spec.parent is not None:
        kwargs['parent'] = parent_path_spec.parent
      extent_data_file_path_spec = path_spec_factory.Factory.NewPathSpec(
          parent_path_spec.type_indicator, **kwargs)
      if not file_system.FileEntryExistsByPathSpec(extent_data_file_path_spec):
        break
      extent_data_files.append(extent_data_file_path_spec)
    # Every extent the VMDK declares must have been found on disk.
    if len(extent_data_files) != vmdk_handle.number_of_extents:
      raise IOError('Unable to locate all extent data files.')
    file_objects = []
    for extent_data_file_path_spec in extent_data_files:
      file_object = resolver.Resolver.OpenFileObject(
          extent_data_file_path_spec, resolver_context=self._resolver_context)
      file_objects.append(file_object)
    vmdk_handle.open_extent_data_files_file_objects(file_objects)
    return vmdk_handle
def _FractionalMaxPoolGrad(op: ops.Operation, grad_0, unused_grad_1, unused_grad_2):
    """Returns gradient for FractionalMaxPool.

    Since FractionalMaxPool has three outputs, there are three gradients
    passed in for each of the outputs. Only the first one is useful; the
    other two gradients are empty.

    Args:
        op: The FractionalMaxPoolOp.
        grad_0: Gradient with respect to op.outputs[0]
        unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
        unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.

    Returns:
        Input backprop for FractionalMaxPool op.
    """
    orig_input = op.inputs[0]
    orig_output = op.outputs[0]
    row_seq = op.outputs[1]
    col_seq = op.outputs[2]
    overlapping = op.get_attr('overlapping')
    return gen_nn_ops.fractional_max_pool_grad(
        orig_input, orig_output, grad_0, row_seq, col_seq, overlapping)
def send_magic_packet(*macs, **kwargs):
    """Wake up computers having any of the given mac addresses.

    Wake on lan must be enabled on the host device.

    Args:
        macs (str): One or more macaddresses of machines to wake.

    Keyword Args:
        ip_address (str): the ip address of the host to send the magic packet
            to (default "255.255.255.255")
        port (int): the port of the host to send the magic packet to
            (default 9)

    Raises:
        TypeError: if an unexpected keyword argument is supplied.
    """
    ip = kwargs.pop('ip_address', BROADCAST_IP)
    port = kwargs.pop('port', DEFAULT_PORT)
    # Reject unknown keyword arguments before doing any network work.
    for k in kwargs:
        raise TypeError(
            'send_magic_packet() got an unexpected keyword argument {!r}'.format(k))
    packets = [create_magic_packet(mac) for mac in macs]
    # Use the socket as a context manager so it is closed even when
    # connect()/send() raises (the original leaked the socket on error).
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.connect((ip, port))
        for packet in packets:
            sock.send(packet)
def index_bgen(fn, legacy=False):
    """Indexes a BGEN file.

    Args:
        fn (str): The name of the BGEN file.
        legacy (bool): If True, build the index '-with-rowid' for
            compatibility with older bgenix consumers.
    """
    logger.info("Indexing {} (BGEN) using 'bgenix'{}".format(
        fn, " (legacy mode)" if legacy else "",
    ))
    command = ["bgenix", "-g", fn, "-index"]
    if legacy:
        command.append("-with-rowid")
    try:
        logger.info("Executing '{}'".format(" ".join(command)))
        # subprocess.run waits for completion and exposes the exit status;
        # the original ignored bgenix failures and always logged success.
        completed = subprocess.run(command)
    except FileNotFoundError:
        logger.error("Cannot find 'bgenix', impossible to index {}".format(fn))
        sys.exit(1)
    if completed.returncode != 0:
        logger.error("bgenix exited with code {}, index for {} may be "
                     "missing or incomplete".format(completed.returncode, fn))
        sys.exit(1)
    logger.info("Index generated")
def _GetAPFSVolumeIdentifiers(self, scan_node):
    """Determines the APFS volume identifiers.

    Args:
      scan_node (dfvfs.SourceScanNode): scan node.

    Returns:
      list[str]: APFS volume identifiers.

    Raises:
      SourceScannerError: if the format of or within the source is not
          supported or the the scan node is invalid.
      UserAbort: if the user requested to abort.
    """
    if not scan_node or not scan_node.path_spec:
      raise errors.SourceScannerError('Invalid scan node.')
    volume_system = apfs_volume_system.APFSVolumeSystem()
    volume_system.Open(scan_node.path_spec)
    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
        volume_system)
    if not volume_identifiers:
      return []
    # Honor volumes pre-selected via self._volumes when they all exist.
    if self._volumes:
      if self._volumes == 'all':
        volumes = range(1, volume_system.number_of_volumes + 1)
      else:
        volumes = self._volumes
      selected_volume_identifiers = self._NormalizedVolumeIdentifiers(
          volume_system, volumes, prefix='apfs')
      if not set(selected_volume_identifiers).difference(volume_identifiers):
        return selected_volume_identifiers
    # Fall back to prompting the user when more than one volume exists.
    if len(volume_identifiers) > 1:
      try:
        volume_identifiers = self._PromptUserForAPFSVolumeIdentifiers(
            volume_system, volume_identifiers)
      except KeyboardInterrupt:
        raise errors.UserAbort('File system scan aborted.')
    return self._NormalizedVolumeIdentifiers(
        volume_system, volume_identifiers, prefix='apfs')
class LayoutLMv3FastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
    """
    Args:
        apply_ocr (`bool`, *optional*, defaults to `True`):
            Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by
            the `apply_ocr` parameter in the `preprocess` method.
        ocr_lang (`str`, *optional*):
            The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is
            used. Can be overridden by the `ocr_lang` parameter in the `preprocess` method.
        tesseract_config (`str`, *optional*):
            Any additional custom configuration flags that are forwarded to the `config` parameter when calling
            Tesseract. For example: '--psm 6'. Can be overridden by the `tesseract_config` parameter in the
            `preprocess` method.
    """

    apply_ocr: Optional[bool]
    ocr_lang: Optional[str]
    tesseract_config: Optional[str]
def tpu_devices(devices=None):
    """Gets TPU devices out of `devices`.

    Args:
      devices: A device list (as a list of strings). If None, the list of all
        available devices will be used for it.

    Returns:
      Those in `devices` that are TPUs.
    """
    return find_devices('TPU', devices)
def _FormatArgToken(self, token_data):
return {'string': token_data.argument_value.rstrip('\x00'), 'num_arg': token_data.argument_index, 'is': token_data.argument_name} | Formats an argument token as a dictionary of values.
Args:
token_data (bsm_token_data_arg32|bsm_token_data_arg64): AUT_ARG32 or
AUT_ARG64 token data.
Returns:
dict[str, str]: token values. | codesearchnet |
def _get_args(typ):
try:
if typ.__args__ is None:
return ()
return typ.__args__
except AttributeError:
if isinstance(typ, typing.TypeVar):
return (typ.__name__,)
return () | Returns a list of arguments to the given type.
Args:
typ: A typing module typing type.
Returns:
A tuple of args. | github-repos |
def headers_present(self, headers):
    """Defines a list of headers that must be present in the outgoing
    request in order to satisfy the matcher, no matter what value the
    headers host.

    Header keys are case insensitive.

    Arguments:
        headers (list|tuple): header keys to match.

    Returns:
        None. NOTE(review): upstream docs describe this as returning `self`
        for chaining — confirm whether a `return self` is intended.

    Example::

        (pook.get('server.com/api')
         .headers_present(['content-type', 'Authorization']))
    """
    # Any value is acceptable: match every header against a wildcard.
    wildcard = re.compile('(.*)')
    expectations = {name: wildcard for name in headers}
    self.add_matcher(matcher('HeadersMatcher', expectations))
def date_proc(func):
    """Decorator checking whether a date parameter was passed in.

    If no ``date`` query parameter is present, today's date is used.
    Otherwise the parameter is parsed as ``year[-month[-day]]``; missing
    parts default to 1 (January / the 1st).

    Args:
        func: view function to decorate; it is called as ``func(request, date)``.

    Returns:
        The wrapped view function.

    Raises:
        Http404: if the ``date`` parameter is empty or malformed.
    """
    @wraps(func)
    def wrapped(request, *args, **kwargs):
        if 'date' not in request.GET:
            return func(request, datetime.today())
        raw = request.GET['date']
        if raw == '':
            raise Http404("api does not exist")
        try:
            parts = tuple(int(value) for value in raw.split('-'))
            if len(parts) == 3:
                date = datetime(*parts)
            elif len(parts) == 2:
                date = datetime(*parts, day=1)
            elif len(parts) == 1:
                date = datetime(parts[0], month=1, day=1)
            else:
                raise ValueError(raw)
        except ValueError:
            # Malformed dates (non-numeric parts, too many parts,
            # out-of-range values) previously escaped as an unhandled
            # ValueError (HTTP 500); report a 404 instead.
            raise Http404("api does not exist")
        return func(request, date)
    return wrapped
def to_base_10_int(n, input_base):
    """Converts an integer in any base into it's decimal representation.

    Args:
        n - An integer represented as a tuple of digits in the specified base.
        input_base - the base of the input number.

    Returns:
        integer converted into base 10.

    Example:
        >>> to_base_10_int((8, 1), 16)
        129
    """
    # Horner's rule: fold digits most-significant first.
    result = 0
    for digit in n:
        result = result * input_base + digit
    return result
def min(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':
    """Returns a TensorFluent for the minimum function.

    Args:
        x: The first operand.
        y: The second operand.

    Returns:
        A TensorFluent wrapping the minimum function.
    """
    # tf.float32 is forwarded to _binary_op — presumably the result dtype;
    # confirm against _binary_op's signature.
    return cls._binary_op(x, y, tf.minimum, tf.float32)
def _skip_op(self, op_id, op, ops_in_exec_path, report_handler):
    """Returns True if we should not trace Op.

    Args:
      op_id: Topological index of the op.
      op: tf.Operation
      ops_in_exec_path: Set of operations that are in the execution path.
      report_handler: An instance of tensor_tracer_report.TTReportHandle.

    Returns:
      True if the op should not be traced, false otherwise.
    """
    # Structural / unsafe ops are always skipped; every rejection is
    # recorded with its reason via the report handler.
    if TensorTracer.while_loop_op(op):
        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_WHILELOOP_OP))
        return True
    if TensorTracer.control_flow_op(op):
        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_CONTROLFLOW_OP))
        return True
    if TensorTracer.unsafe_op(op):
        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_UNSAFE_OP))
        return True
    if TensorTracer.device_mismatch(self._tt_config.device_type, op):
        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_DEVICE_MISMATCH))
        return True
    if op not in ops_in_exec_path:
        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_NOT_EXECUTED))
        return True
    # Ops inside control flow (or nested while loops) are only traced when
    # explicitly enabled.
    if self._is_in_control_flow(op) or not self._is_in_outmost_while_loop(op):
        if not self._should_trace_in_control_flow():
            report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_IN_CONTROL_FLOW))
            return True
    # User inclusion wins over the remaining filters (range/interest).
    if self._is_user_included_op(op):
        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
        if tensor_tracer_flags.TT_CHECK_FILTER.value:
            logging.info('USER_INCLUDED op %s', op.name)
        return False
    if not self._inside_op_range(op_id):
        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_OUTSIDE_OP_RANGE))
        return True
    if not self._is_interesting_op(op):
        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_LESS_INTERESTING_OP))
        return True
    if self._is_user_excluded_op(op):
        report_handler.instrument_op(op, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
        if tensor_tracer_flags.TT_CHECK_FILTER.value:
            logging.info('USER_EXCLUDED op %s', op.name)
        return True
    return False
def query(self, rank):
    """Retrieves the value estimate for the requested quantile rank.

    The requested quantile rank must be registered in the estimator's
    invariants a priori!

    Args:
      rank: A floating point quantile rank along the interval [0, 1].

    Returns:
      A numeric value for the quantile estimate.
    """
    self._flush()
    current = self._head
    if not current:
        return 0
    # Convert the fractional rank into an absolute observation rank, then
    # compute the maximum rank tolerated by the estimator's invariant.
    mid_rank = math.floor(rank * self._observations)
    max_rank = mid_rank + math.floor(
        self._invariant(mid_rank, self._observations) / 2)
    # Walk the sample linked list, accumulating ranks; stop at the last
    # sample whose successor would exceed the tolerated maximum.
    rank = 0.0
    while current._successor:
        rank += current._rank
        if rank + current._successor._rank + current._successor._delta > max_rank:
            return current._value
        current = current._successor
    return current._value
def EscapeWildcards(string):
    """Escapes wildcard characters for strings intended to be used with `LIKE`.

    Databases don't automatically escape wildcard characters ('%', '_'), so any
    non-literal string that is passed to `LIKE` and is expected to match
    literally has to be manually escaped.

    Args:
        string: A string to escape.

    Returns:
        An escaped string.
    """
    precondition.AssertType(string, Text)
    # Escape the escape character itself first; otherwise a literal
    # backslash in the input would combine with a following escaped
    # '%'/'_' and change the pattern's meaning.
    string = string.replace('\\', '\\\\')
    return string.replace('%', '\\%').replace('_', '\\_')
def _estimate_step_duration(self, current, now):
if current:
if self._time_after_first_step is not None and current > 1:
time_per_unit = (now - self._time_after_first_step) / (current - 1)
else:
time_per_unit = (now - self._start) / current
if current == 1:
self._time_after_first_step = now
return time_per_unit
else:
return 0 | Estimate the duration of a single step.
Given the step number `current` and the corresponding time `now` this
function returns an estimate for how long a single step takes. If this
is called before one step has been completed (i.e. `current == 0`) then
zero is given as an estimate. The duration estimate ignores the duration
of the (assumed to be non-representative) first step for estimates when
more steps are available (i.e. `current>1`).
Args:
current: Index of current step.
now: The current time.
Returns: Estimate of the duration of a single step. | github-repos |
def _get_input_to_checker_function(self, flag_values):
return dict(([key, flag_values[key].value] for key in self.flag_names)) | Given flag values, returns the input to be given to checker.
Args:
flag_values: flags.FlagValues, the FlagValues instance to get flags from.
Returns:
dict, with keys() being self.lag_names, and value for each key
being the value of the corresponding flag (string, boolean, etc). | codesearchnet |
def open_pspsfile(self, ecut=20, pawecutdg=None):
    """Calls Abinit to compute the internal tables for the application of the
    pseudopotential part. Returns :class:`PspsFile` object providing methods
    to plot and analyze the data or None if file is not found or it's not
    readable.

    Args:
        ecut: Cutoff energy in Hartree.
        pawecutdg: Cutoff energy for the PAW double grid.
    """
    from pymatgen.io.abinit.tasks import AbinitTask
    from abipy.core.structure import Structure
    from abipy.abio.factories import gs_input
    from abipy.electrons.psps import PspsFile
    # Build a throwaway structure: one atom of this pseudo's element in a
    # large cubic box — only the pseudopotential tables matter here.
    lattice = 10 * np.eye(3)
    structure = Structure(lattice, [self.element], coords=[[0, 0, 0]])
    if self.ispaw and pawecutdg is None: pawecutdg = ecut * 4
    inp = gs_input(structure, pseudos=[self], ecut=ecut, pawecutdg=pawecutdg,
                   spin_mode="unpolarized", kppa=1)
    # prtpsps = -1 asks Abinit to write the PSPS.nc tables and stop.
    inp["prtpsps"] = -1
    task = AbinitTask.temp_shell_task(inp)
    task.start_and_wait()
    filepath = task.outdir.has_abiext("_PSPS.nc")
    if not filepath:
        logger.critical("Cannot find PSPS.nc file in %s" % task.outdir)
        return None
    try:
        return PspsFile(filepath)
    except Exception as exc:
        logger.critical("Exception while reading PSPS file at %s:\n%s" % (filepath, str(exc)))
        return None
def get_relevant_lyric_tokens(full_tokens, max_n_lyric_tokens, total_length, offset, duration):
    """
    Extract only the relevant tokens based on the character position. A total of `max_n_lyric_tokens` tokens will be
    returned. If the provided token sequence is smaller, it will be padded, otherwise, only characters ranging from the
    midpoint - `max_n_lyric_tokens//2` to the midpoint + `max_n_lyric_tokens//2` will be returned. This *focuses* on
    the most relevant tokens (in time) for the sequence.

    Args:
        full_tokens (`List[int]`):
            List containing the token ids of the entire lyrics.
        max_n_lyric_tokens (`int`):
            Maximum number of lyric tokens to return.
        total_length (`int`):
            Total expected length of the music (not all of it is generated, see duration), in samples.
        offset (`int`):
            Starting sample in the music. If the offset is greater than 0, the lyrics will be shifted take that into
            account
        duration (`int`):
            Expected duration of the generated music, in samples. The duration has to be smaller than the total
            length, which represent the overall length of the signal.
    """
    full_tokens = full_tokens[0]
    if len(full_tokens) < max_n_lyric_tokens:
        # Left-pad with zeros up to max_n_lyric_tokens; padded positions get
        # index -1 so callers can tell them from real lyric positions.
        tokens = torch.cat(
            [torch.zeros(max_n_lyric_tokens - len(full_tokens), dtype=torch.long).to(full_tokens.device), full_tokens]
        )
        indices = [-1] * (max_n_lyric_tokens - len(full_tokens)) + list(range(0, len(full_tokens)))
    else:
        # Center the window on the time midpoint of the generated segment,
        # clamped so the window stays within the lyrics.
        # NOTE: the `// 2` sub-expressions were truncated in the corrupted
        # original; reconstructed to a symmetric window around the midpoint.
        midpoint = int(len(full_tokens) * (offset + duration / 2.0) / total_length)
        midpoint = min(max(midpoint, max_n_lyric_tokens // 2), len(full_tokens) - max_n_lyric_tokens // 2)
        tokens = full_tokens[midpoint - max_n_lyric_tokens // 2 : midpoint + max_n_lyric_tokens // 2]
        indices = list(range(midpoint - max_n_lyric_tokens // 2, midpoint + max_n_lyric_tokens // 2))
    return (tokens.unsqueeze(dim=0), indices)
def get(self, config_id):
    """Get a config.

    Args:
        config_id (str): Config ID.

    Returns:
        (:py:class:`Config`): The config.

    Raises:
        :py:class:`docker.errors.NotFound`
            If the config does not exist.
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    raw = self.client.api.inspect_config(config_id)
    return self.prepare_model(raw)
def whois_nameservers(self, nameservers):
    """Calls WHOIS Nameserver end point.

    Args:
        nameservers: An enumerable of nameservers.

    Returns:
        A dict of {nameserver: domain_result}.
    """
    endpoint_name = 'opendns-whois-nameservers'
    url_template = u'whois/nameservers/{0}'
    return self._multi_get(endpoint_name, url_template, nameservers)
def _add_example(self, example):
if self.has_enumerated_subtypes():
self._add_example_enumerated_subtypes_helper(example)
else:
self._add_example_helper(example) | Adds a "raw example" for this type.
This does basic sanity checking to ensure that the example is valid
(required fields specified, no unknown fields, correct types, ...).
The example is not available via :meth:`get_examples` until
:meth:`_compute_examples` is called.
Args:
example (stone.frontend.ast.AstExample): An example of this type. | juraj-google-style |
def __init__(self, feature_dict):
    """Initialize the features.

    Args:
        feature_dict (dict): Dictionary containing the feature connectors of
            a example. The keys should correspond to the data dict as
            returned by tf.data.Dataset(). Types (tf.int32,...) and dicts
            will automatically be converted into FeatureConnector.

    Raises:
        ValueError: If one of the given features is not recognized.
    """
    super(FeaturesDict, self).__init__()
    converted = {}
    for key, value in feature_dict.items():
        converted[key] = to_feature(value)
    self._feature_dict = converted
def find_all(pcoll, regex, group=0, outputEmpty=True):
    """Returns the matches if a portion of the line matches the Regex.

    By default, list of group 0 will return with empty items. To get all
    groups, pass the `Regex.ALL` flag in the `group` parameter which returns
    all the groups in the tuple format.

    Args:
      pcoll: the input PCollection of text lines.
      regex: the regular expression string or (re.compile) pattern.
      group: (optional) name of the group, it can be integer or a string
        value.
      outputEmpty: (optional) Should empty be output. True to output empties
        and false if not.
    """
    regex = Regex._regex_compile(regex)
    def _process(element):
        matches = regex.finditer(element)
        if group == Regex.ALL:
            # NOTE(review): only the first capturing group is paired with
            # the full match here — confirm this matches the documented
            # "all the groups" contract upstream.
            yield [(m.group(), m.groups()[0]) for m in matches if outputEmpty or m.groups()[0]]
        else:
            yield [m.group(group) for m in matches if outputEmpty or m.group(group)]
    return pcoll | FlatMap(_process)
def _ParseAndValidateRecord(self, parser_mediator, text_file_object):
    """Parses and validates an Opera global history record.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      text_file_object (dfvfs.TextFile): text file.

    Returns:
      bool: True if the record was successfully parsed.
    """
    # A record is four consecutive lines: title, URL, timestamp and
    # popularity index.
    try:
        title = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
        url = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
        timestamp = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
        popularity_index = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
    except UnicodeDecodeError:
        return False
    # A line that reached the maximum read size without a trailing newline
    # was truncated, hence the record cannot be valid.
    if ((len(title) == self._MAXIMUM_LINE_SIZE) and (title[(- 1)] != '\n')):
        return False
    if ((len(url) == self._MAXIMUM_LINE_SIZE) and (url[(- 1)] != '\n')):
        return False
    if ((len(timestamp) == self._MAXIMUM_LINE_SIZE) and (timestamp[(- 1)] != '\n')):
        return False
    if ((len(popularity_index) == self._MAXIMUM_LINE_SIZE) and (popularity_index[(- 1)] != '\n')):
        return False
    title = title.strip()
    url = url.strip()
    timestamp = timestamp.strip()
    popularity_index = popularity_index.strip()
    if ((not title) or (not url) or (not timestamp) or (not popularity_index)):
        return False
    event_data = OperaGlobalHistoryEventData()
    if (not self._IsValidUrl(url)):
        return False
    event_data.url = url
    # Opera stores the URL as the title when no title is available.
    if (title != url):
        event_data.title = title
    try:
        event_data.popularity_index = int(popularity_index, 10)
        timestamp = int(timestamp, 10)
    except ValueError:
        return False
    if (event_data.popularity_index < 0):
        event_data.description = 'First and Only Visit'
    else:
        event_data.description = 'Last Visit'
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
    return True
def unzip(x, split_dim, current_length, num_splits=2, name=None):
    """Splits a tensor by unzipping along the split_dim.

    For example the following array split into 2 would be:
        [1, 2, 3, 4, 5, 6] -> [1, 3, 5], [2, 4, 6]
    and by 3:
        [1, 2, 3, 4] -> [1, 4], [2], [3]

    Args:
        x: The tensor to split.
        split_dim: The dimension to split along.
        current_length: Current length along the split_dim.
        num_splits: The number of splits.
        name: Optional name for this op.

    Returns:
        A length num_splits sequence.
    """
    with tf.name_scope(name, 'unzip', [x]) as scope:
        x = tf.convert_to_tensor(x, name='x')
        all_splits = tf.split(
            value=x, num_or_size_splits=current_length, axis=split_dim, name=scope)
        # `range` instead of Python-2-only `xrange`: this function raised
        # NameError on Python 3.
        splits = [[] for _ in range(num_splits)]
        # Deal slices round-robin so element i lands in bucket i % num_splits.
        for i in range(current_length):
            splits[i % num_splits].append(all_splits[i])
        return [tf.concat(s, split_dim) for s in splits]
def _hash_bucket_tensors(self, input_ids, num_hashes: int, num_buckets: int):
    """Converts ids to hash bucket ids via multiple hashing.

    Args:
        input_ids: The codepoints or other IDs to be hashed.
        num_hashes: The number of hash functions to use.
        num_buckets: The number of hash buckets (i.e. embeddings in each table).

    Returns:
        A list of tensors, each of which is the hash bucket IDs from one hash
        function.

    Raises:
        ValueError: If `num_hashes` exceeds the number of available primes.
    """
    if num_hashes > len(_PRIMES):
        raise ValueError(f'`num_hashes` must be <= {len(_PRIMES)}')
    # One multiplicative hash per prime; +1 avoids mapping id 0 to bucket 0
    # for every prime.
    return [(input_ids + 1) * prime % num_buckets for prime in _PRIMES[:num_hashes]]
def from_service_account_info(cls, info, **kwargs):
    """Creates a Credentials instance from parsed service account info.

    Args:
        info (Mapping[str, str]): The service account info in Google
            format.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        google.auth.service_account.Credentials: The constructed
        credentials.

    Raises:
        ValueError: If the info is not in the expected format.
    """
    required_keys = ['client_email', 'token_uri']
    signer = _service_account_info.from_dict(info, require=required_keys)
    return cls._from_signer_and_info(signer, info, **kwargs)
def inter_data_operation(self, axis, func, other):
    """Apply a function that requires two BaseFrameManager objects.

    Args:
        axis: The axis to apply the function over (0 - rows, 1 - columns)
        func: The function to apply
        other: The other BaseFrameManager object to apply func to.

    Returns:
        A new BaseFrameManager object, the type of object that called this.
    """
    # Pair up partitions of `self` and `other` along the requested axis.
    if axis:
        partitions = self.row_partitions
        other_partitions = other.row_partitions
    else:
        partitions = self.column_partitions
        other_partitions = other.column_partitions
    func = self.preprocess_func(func)
    result = np.array([partitions[i].apply(func, num_splits=self._compute_num_partitions(), other_axis_partition=other_partitions[i]) for i in range(len(partitions))])
    # Column-wise results are transposed back into row-major layout.
    return (self.__constructor__(result) if axis else self.__constructor__(result.T))
def __init__(self, shape, num_actions, probabilities=None, scope='categorical', summary_labels=()):
    """Categorical distribution.

    Args:
        shape: Action shape.
        num_actions: Number of discrete action alternatives.
        probabilities: Optional distribution bias.
        scope: Variable scope name for this distribution.
        summary_labels: Labels of summaries to record.
    """
    self.num_actions = num_actions
    # One logit per discrete alternative for every action component.
    action_size = util.prod(shape) * self.num_actions
    if probabilities is None:
        logits = 0.0
    else:
        # Seed the linear layer's bias with log-probabilities so the
        # initial distribution matches `probabilities`.
        logits = [log(prob) for _ in range(util.prod(shape)) for prob in probabilities]
    self.logits = Linear(size=action_size, bias=logits, scope='logits', summary_labels=summary_labels)
    super(Categorical, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)
def _merge_call(self, fn, args, kwargs):
    """`merge_call()` implementation for synchronized replica.

    This pauses the current replica thread and passes `fn` and its arguments
    to the main thread. The main thread will wait until all replicas pause,
    then invoke `fn` with grouped arguments. The current replica thread will
    continue after `fn` completes.

    See `_call_for_each_replica` for the logic in the main thread.

    Args:
      fn: a function that is called in cross replica context with grouped
        arguments from each replica. `fn` should returns grouped values.
      args: positional arguments to `fn`.
      kwargs: keyword arguments to `fn`.

    Returns:
      Return value of `fn` for the current replica.

    Raises:
      RuntimeError: when merge_call happens in a different graph, e.g. in a
        different tf.function, which is not supported now.
      _RequestedStop: when stop is requested.
    """
    t = threading.current_thread()
    assert isinstance(t, _MirroredReplicaThread)
    # Stash the merge function and the replica's context (name scope,
    # variable scope, control deps) on the thread so the main thread can
    # reconstruct it when invoking `fn`.
    t.merge_fn = fn
    t.merge_args = args
    t.merge_kwargs = kwargs
    t.captured_name_scope = t.graph.get_name_scope()
    if t.captured_name_scope:
        t.captured_name_scope += '/'
    t.captured_var_scope = variable_scope.get_variable_scope()
    t.captured_control_deps = t.graph._current_control_dependencies()
    t.merge_call_entered_in_eager = context.context().executing_eagerly()
    if ops.get_default_graph() != t.graph:
        raise RuntimeError('`merge_call` called while defining a new graph or a tf.function. This can often happen if the function `fn` passed to `strategy.run()` contains a nested `@tf.function`, and the nested `@tf.function` contains a synchronization point, such as aggregating gradients (e.g, optimizer.apply_gradients), or if the function `fn` uses a control flow statement which contains a synchronization point in the body. Such behaviors are not yet supported. Instead, please avoid nested `tf.function`s or control flow statements that may potentially cross a synchronization boundary, for example, wrap the `fn` passed to `strategy.run` or the entire `strategy.run` inside a `tf.function` or move the control flow out of `fn`. If you are subclassing a `tf.keras.Model`, please avoid decorating overridden methods `test_step` and `train_step` in `tf.function`.')
    # Hand off to the main thread, then block until it tells us to resume.
    t.has_paused.set()
    t.should_run.wait()
    t.should_run.clear()
    if t.coord.should_stop():
        raise _RequestedStop()
    t.merge_call_entered_in_eager = None
    return t.merge_result
def get(self, attr: FetchAttribute) -> MaybeBytes:
    """Return the bytes representation of the given message attribute.

    Args:
        attr: The fetch attribute.

    Raises:
        :class:`NotFetchable`
    """
    # e.g. b'BODY.PEEK' dispatches to self._get_BODY_PEEK.
    method_name = '_get_' + attr.value.decode('ascii').replace('.', '_')
    handler = getattr(self, method_name)
    return handler(attr)
def save(self, data):
    """Save the config data.

    Args:
        data: any serializable config data.

    Raises:
        ConfigLoaderException: if ConfigLoader.load was not called yet (so
            there is no config file name), if the data is not serializable,
            or if the loader is nested.
    """
    if self.__nested:
        raise ConfigLoaderException("Cannot save the config if the 'nested' paramter is True!")
    if self.__loaded_config_file is None:
        raise ConfigLoaderException('Load not called yet!')
    try:
        with open(self.__loaded_config_file, 'w') as config_file:
            config_file.write(self.__formatter.encode(data))
    except Exception as e:
        raise ConfigLoaderException('Config data is not serializable: %s' % e)
def _snake_to_camel(name, strict=False):
if strict:
name = name.lower()
terms = name.split('_')
return terms[0] + ''.join([term.capitalize() for term in terms[1:]]) | Converts parameter names from snake_case to camelCase.
Args:
name, str. Snake case.
strict: bool, default True. If True, will set name to lowercase before
converting, otherwise assumes original name is proper camel case.
Set to False if name may already be in camelCase.
Returns:
str: CamelCase. | juraj-google-style |
def get_users(self, sort=True):
    """Get list of users in the room.

    Kwargs:
        sort (bool): If True, sort rooms by name

    Returns:
        array. List of users
    """
    self._load()
    if sort:
        # Sort in place so self.users itself stays ordered.
        self.users.sort(key=operator.itemgetter('name'))
    return self.users
def configure(self, cfg, handler, path=''):
    """Start the configuration process for the provided handler.

    Runs two passes over the handler's attributes: first, plain
    (non-list/dict) attributes are set directly; second, attributes
    with a dedicated ``configure_<name>`` method on the handler are
    delegated to that method.

    Args:
        cfg (dict): config container to fill in.
        handler (config.Handler class): config handler to use.
        path (str): current path in the configuration progress.
    """
    # First pass: set plain attributes; record required list/dict
    # attributes that have neither a default nor a custom hook as
    # actions the user must take manually.
    for (name, attr) in handler.attributes():
        if (cfg.get(name) is not None):
            continue
        if (attr.expected_type not in [list, dict]):
            cfg[name] = self.set(handler, attr, name, path, cfg)
        elif ((attr.default is None) and (not hasattr(handler, ('configure_%s' % name)))):
            self.action_required.append(('%s.%s: %s' % (path, name, attr.help_text)).strip('.'))
    # Second pass: delegate to the handler's configure_<name> hooks.
    for (name, attr) in handler.attributes():
        if (cfg.get(name) is not None):
            continue
        if hasattr(handler, ('configure_%s' % name)):
            fn = getattr(handler, ('configure_%s' % name))
            fn(self, cfg, ('%s.%s' % (path, name)))
        if ((attr.expected_type in [list, dict]) and (not cfg.get(name))):
            # Drop empty list/dict entries so they don't pollute the
            # resulting config.
            try:
                del cfg[name]
            except KeyError:
                pass
Args:
cfg (dict): config container
handler (config.Handler class): config handler to use
path (str): current path in the configuration progress | codesearchnet |
def start(self, **kwargs):
    """Start chrome if it's not running, and set up the devtools session.

    Opens a websocket to chrome's debug port, starts the receiver
    thread, waits for the connection to open, then enables the devtools
    domains this session relies on and blocks analytics URLs.

    Args:
        **kwargs: arguments for self.chrome.start(...)
    """
    if (not self.is_running()):
        self.websock_url = self.chrome.start(**kwargs)
        self.websock = websocket.WebSocketApp(self.websock_url)
        self.websock_thread = WebsockReceiverThread(self.websock, name=('WebsockThread:%s' % self.chrome.port))
        self.websock_thread.start()
        # Block until the websocket is actually open (or time out).
        self._wait_for((lambda : self.websock_thread.is_open), timeout=30)
        # Enable the devtools domains used by this session.
        self.send_to_chrome(method='Network.enable')
        self.send_to_chrome(method='Page.enable')
        self.send_to_chrome(method='Console.enable')
        self.send_to_chrome(method='Runtime.enable')
        self.send_to_chrome(method='ServiceWorker.enable')
        self.send_to_chrome(method='ServiceWorker.setForceUpdateOnPageLoad')
        # Keep crawls quiet by blocking google analytics requests.
        self.send_to_chrome(method='Network.setBlockedURLs', params={'urls': ['*google-analytics.com/analytics.js', '*google-analytics.com/ga.js']})
Args:
**kwargs: arguments for self.chrome.start(...) | codesearchnet |
def disassemble(self, annotate=False, blocks=False):
    """Disassemble this code object's bytecode into opcodes and labels.

    Can also annotate the opcodes and group them into blocks based on
    the labels.

    Arguments:
        annotate(bool): Whether to annotate the operations.
        blocks(bool): Whether to group the operations into blocks.

    Returns:
        list: A list of :class:`Op` (or :class:`AnnotatedOp`) instances
        and labels, or the grouped blocks when ``blocks`` is True.
    """
    result = disassemble(self.co_code, self.internals)
    if annotate:
        result = [self.annotate_op(op) for op in result]
    return blocks_from_ops(result) if blocks else result
opcodes and labels. Can also annotate the opcodes and group
the opcodes into blocks based on the labels.
Arguments:
annotate(bool): Whether to annotate the operations.
blocks(bool): Whether to group the operations into blocks.
Returns:
list: A list of :class:`Op` (or :class:`AnnotatedOp`) instances
and labels. | juraj-google-style |
def zone_compare(timezone):
    """Compare the given timezone with the machine timezone.

    Mostly useful for running state checks.

    Args:
        timezone (str): The timezone to compare, in Windows or Unix
            format; can be any value returned by the ``timezone.list``
            function.

    Returns:
        bool: ``True`` if they match, otherwise ``False``.

    Example:

    .. code-block:: bash

        salt '*' timezone.zone_compare 'America/Denver'
    """
    lowered = timezone.lower()
    if lowered in mapper.win_to_unix:
        check_zone = timezone
    elif lowered in mapper.unix_to_win:
        check_zone = mapper.get_win(timezone)
    else:
        raise CommandExecutionError('Invalid timezone passed: {0}'.format(timezone))
    return get_zone() == mapper.get_unix(check_zone, 'Unknown')
return (get_zone() == mapper.get_unix(check_zone, 'Unknown')) | Compares the given timezone with the machine timezone. Mostly useful for
running state checks.
Args:
timezone (str):
The timezone to compare. This can be in Windows or Unix format. Can
be any of the values returned by the ``timezone.list`` function
Returns:
bool: ``True`` if they match, otherwise ``False``
Example:
.. code-block:: bash
salt '*' timezone.zone_compare 'America/Denver' | codesearchnet |
def get_commits(self, since_sha=None):
    """Return Commit objects for the repo's first-parent history.

    Args:
        since_sha: (optional) a sha to search from; when given, that
            commit is returned first and history is taken from it to
            HEAD.

    Returns:
        list: Commit objects, oldest first.
    """
    assert self.tempdir
    cmd = ['git', 'log', '--first-parent', '--reverse', COMMIT_FORMAT]
    if since_sha:
        commits = [self.get_commit(since_sha)]
        cmd.append('{}..HEAD'.format(since_sha))
    else:
        commits = []
        cmd.append('HEAD')
    output = cmd_output(*cmd, cwd=self.tempdir)
    commits.extend(
        Commit(sha, int(date))
        for sha, date in chunk_iter(output.splitlines(), 2))
    return commits
return commits | Returns a list of Commit objects.
Args:
since_sha - (optional) A sha to search from | juraj-google-style |
def ChunkedCausalMultiHeadedAttention(feature_depth, num_heads=8, dropout=0.0, chunk_selector=None, mode='train'):
    """Transformer-style causal multi-headed attention operating on chunks.

    Accepts inputs that are a list of chunks and applies causal attention.

    Args:
        feature_depth: int: depth of embedding
        num_heads: int: number of attention heads
        dropout: float: dropout rate
        chunk_selector: a function from chunk number to list of chunks to attend.
        mode: str: 'train' or 'eval'

    Returns:
        Multi-headed self-attention layer.
    """
    # Per-chunk preprocessing: project each chunk into query/key/value
    # (three Dense heads) and pair it with a causal mask.
    prepare_attention_input = combinators.Serial(combinators.Branch(), combinators.Parallel(combinators.Branch(num_branches=3), CausalMask(axis=(- 2))), combinators.Parallel(combinators.Parallel(core.Dense(feature_depth), core.Dense(feature_depth), core.Dense(feature_depth)), combinators.Identity()))
    # Map the preprocessing over chunks, gather the chunks each position may
    # attend to, run pure multi-head attention per chunk, then project back.
    return combinators.Serial(combinators.Map(prepare_attention_input), ChunkedAttentionSelector(selector=chunk_selector), combinators.Map(PureMultiHeadedAttention(feature_depth=feature_depth, num_heads=num_heads, dropout=dropout, mode=mode), check_shapes=False), combinators.Map(core.Dense(feature_depth)))
Accepts inputs that are a list of chunks and applies causal attention.
Args:
feature_depth: int: depth of embedding
num_heads: int: number of attention heads
dropout: float: dropout rate
chunk_selector: a function from chunk number to list of chunks to attend.
mode: str: 'train' or 'eval'
Returns:
Multi-headed self-attention layer. | codesearchnet |
def getAll(self, event_name):
    """Return all events of a given name received so far (non-blocking).

    Args:
        event_name: string, the name of the event to get.

    Returns:
        A list of SnippetEvent, each representing an event from the
        Java side.
    """
    raw_events = self._event_client.eventGetAll(self._id, event_name)
    return list(map(snippet_event.from_dict, raw_events))
return [snippet_event.from_dict(msg) for msg in raw_events] | Gets all the events of a certain name that have been received so
far. This is a non-blocking call.
Args:
    event_name: string, the name of the event to get.
Returns:
A list of SnippetEvent, each representing an event from the Java
side. | codesearchnet |
def delete_object_from_file(file_name, save_key, file_location):
    """Delete a stored object from a shelve file.

    Args:
        file_name: Shelve storage file name.
        save_key: The name of the key the item is stored in.
        file_location: The location of the file, derived from the os
            module.
    """
    target = __os.path.join(file_location, file_name)
    store = __shelve.open(target)
    del store[save_key]
    store.close()
Args:
file_name: Shelve storage file name
save_key: The name of the key the item is stored in
file_location: The location of the file, derive from the os module
Returns:
    None.
def confirm_cw_log(self, account, region, vpcname):
    """Create a CloudWatch log group based on the VPC name if none exists.

    Args:
        account (:obj:`Account`): Account to create the log group in.
        region (`str`): Region to create the log group in.
        vpcname (`str`): Name of the VPC the log group is for.

    Returns:
        `bool`: True on success; None if an exception occurred (the
        error is logged).
    """
    try:
        cw = self.session.client('logs', region)
        token = None
        log_groups = []
        # Page through every existing log group in the region.
        while True:
            result = (cw.describe_log_groups() if (not token) else cw.describe_log_groups(nextToken=token))
            token = result.get('nextToken')
            log_groups.extend([x['logGroupName'] for x in result.get('logGroups', [])])
            if (not token):
                break
        if (vpcname not in log_groups):
            # Missing: create the group and record it on the VPC object.
            cw.create_log_group(logGroupName=vpcname)
            cw_vpc = VPC.get(vpcname)
            cw_vpc.set_property('vpc_flow_logs_log_group', vpcname)
            self.log.info('Created log group {}/{}/{}'.format(account.account_name, region, vpcname))
            auditlog(event='vpc_flow_logs.create_cw_log_group', actor=self.ns, data={'account': account.account_name, 'region': region, 'log_group_name': vpcname, 'vpc': vpcname})
        return True
    except Exception:
        self.log.exception('Failed creating log group for {}/{}/{}.'.format(account, region, vpcname))
Args:
account (:obj:`Account`): Account to create the log group in
region (`str`): Region to create the log group in
vpcname (`str`): Name of the VPC the log group is fow
Returns:
`bool` | codesearchnet |
def compute_dtype(self):
    """The compute dtype of this policy.

    This is the dtype layers will do their computations in, and
    typically the dtype of the tensors they output. Note that hardware
    may still perform some fundamental operations (adds, multiplies) or
    internal intermediate calculations in a higher-precision format such
    as float32 for numeric stability, even when the compute dtype is
    float16 or bfloat16.

    Returns:
        The compute dtype of this policy, as a string.
    """
    return self._compute_dtype
This is the dtype layers will do their computations in. Typically layers
output tensors with the compute dtype as well.
Note that even if the compute dtype is float16 or bfloat16, hardware
devices may not do individual adds, multiplies, and other fundamental
operations in float16 or bfloat16, but instead may do some of them in
float32 for numeric stability. The compute dtype is the dtype of the
inputs and outputs of the ops that the layer executes.
Internally, many ops will do certain internal calculations in
float32 or some other device-internal intermediate format with higher
precision than float16/bfloat16, to increase numeric stability.
Returns:
The compute dtype of this policy, as a string. | github-repos |
def _find_current_phase(self, global_step):
epoch_size = sum(phase.steps for phase in self._phases)
epoch = int(global_step
steps_in = global_step % epoch_size
for phase in self._phases:
if steps_in < phase.steps:
return phase, epoch, steps_in
steps_in -= phase.steps | Determine the current phase based on the global step.
This ensures continuing the correct phase after restoring checkoints.
Args:
global_step: The global number of steps performed across all phases.
Returns:
Tuple of phase object, epoch number, and phase steps within the epoch. | juraj-google-style |
def _get_document_path(client, path):
    """Convert a document path tuple into a fully-qualified path string.

    Of the form:
    ``projects/{project_id}/databases/{database_id}/documents/{document_path}``

    Args:
        client (~.firestore_v1beta1.client.Client): The client that
            holds configuration details and a GAPIC client object.
        path (Tuple[str, ...]): The components in a document path.

    Returns:
        str: The fully-qualified document path.
    """
    components = (client._database_string, 'documents') + path
    return _helpers.DOCUMENT_PATH_DELIMITER.join(components)
return _helpers.DOCUMENT_PATH_DELIMITER.join(parts) | Convert a path tuple into a full path string.
Of the form:
``projects/{project_id}/databases/{database_id}/...
documents/{document_path}``
Args:
client (~.firestore_v1beta1.client.Client): The client that holds
configuration details and a GAPIC client object.
path (Tuple[str, ...]): The components in a document path.
Returns:
str: The fully-qualified document path. | juraj-google-style |
def __init__(self, id, type=None, **kwargs):
    """Create a catalog object (get a catalog by ID, or get-or-create
    one given by name and type).

    Args:
        id (str): A catalog id or name.

    Kwargs:
        type (str): 'song' or 'artist', specifying the catalog type.

    Example:
        >>> c = catalog.Catalog('my_songs', type='song')
        >>> c.id
        u'CAVKUPC12BCA792120'
        >>> c.name
        u'my_songs'
    """
    super(Catalog, self).__init__(id, type, **kwargs)
Args:
id (str): A catalog id or name
Kwargs:
type (str): 'song' or 'artist', specifying the catalog type
Returns:
A catalog object
Example:
>>> c = catalog.Catalog('my_songs', type='song')
>>> c.id
u'CAVKUPC12BCA792120'
>>> c.name
u'my_songs'
>>> | juraj-google-style |
def _convert_fields(fields, field_values, context):
    """Type-check and convert each field in ``field_values`` (in place).

    Args:
        fields: A list of `ExtensionTypeField` objects.
        field_values: A `dict` mapping field names to values. Must
            contain exactly one entry per field in ``fields``.
        context: _ConversionContext, indicates what kind of value we are
            converting.

    Raises:
        ValueError: If the keys of `field_values` do not match the names
            of the fields in `fields`.
        TypeError: If any value in `field_values` does not have the type
            indicated by the corresponding `ExtensionTypeField` object.
    """
    converted = {}
    if len(fields) != len(field_values):
        _report_field_mismatches(fields, field_values)
    for field in fields:
        if field.name not in field_values:
            _report_field_mismatches(fields, field_values)
        converted[field.name] = _convert_value(
            field_values[field.name], field.value_type, (field.name,), context)
    field_values.update(converted)
Args:
fields: A list of `ExtensionTypeField` objects.
field_values: A `dict` mapping field names to values. Must contain an entry
for each field. I.e., `set(field_values.keys())` must be equal to
`set([f.name for f in fields])`.
context: _ConversionContext, indicates what kind of value we are converting.
Raises:
ValueError: If the keys of `field_values` do not match the names of
the fields in `fields`.
TypeError: If any value in `field_values` does not have the type indicated
by the corresponding `ExtensionTypeField` object. | github-repos |
def calc_timestep_statistic(self, statistic, time):
    """Calculate a statistic from the primary attribute of the STObject
    at a single timestep, restricted to the masked (in-object) pixels.

    Args:
        statistic: name of the statistic being calculated. Supported:
            'mean', 'max', 'min', 'std', 'ptp', 'median',
            'percentile_<p>', and '<stat>_dt' (difference of <stat>
            between this timestep and the previous one).
        time: timestep being investigated.

    Returns:
        Value of the statistic, or NaN if the name is unrecognized.
    """
    # Index of this time value in the object's timestep list.
    ti = np.where((self.times == time))[0][0]
    # Only consider pixels inside the object mask at this timestep.
    ma = np.where((self.masks[ti].ravel() == 1))
    if (statistic in ['mean', 'max', 'min', 'std', 'ptp']):
        # These are ndarray methods; call them directly on the masked data.
        stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)()
    elif (statistic == 'median'):
        stat_val = np.median(self.timesteps[ti].ravel()[ma])
    elif ('percentile' in statistic):
        # 'percentile_<p>': the percentile value is encoded in the name.
        per = int(statistic.split('_')[1])
        stat_val = np.percentile(self.timesteps[ti].ravel()[ma], per)
    elif ('dt' in statistic):
        # '<stat>_dt': time derivative of <stat>; 0 at the first timestep.
        stat_name = statistic[:(- 3)]
        if (ti == 0):
            stat_val = 0
        else:
            stat_val = (self.calc_timestep_statistic(stat_name, time) - self.calc_timestep_statistic(stat_name, (time - 1)))
    else:
        stat_val = np.nan
    return stat_val
return stat_val | Calculate statistics from the primary attribute of the StObject.
Args:
statistic: statistic being calculated
time: Timestep being investigated
Returns:
Value of the statistic | codesearchnet |
def rpc_name(rpc_id):
    """Map an RPC id to a string name.

    Looks the RPC up in a map of all globally declared RPCs and returns
    a nice name string; if the RPC is not found in the global name map,
    returns a generic name string such as 'RPC 0x%04X'.

    Args:
        rpc_id (int): The id of the RPC that we wish to look up.

    Returns:
        str: The nice name of the RPC.
    """
    mapped = _RPC_NAME_MAP.get(rpc_id)
    if mapped is not None:
        return mapped
    return 'RPC 0x%04X' % rpc_id
return name | Map an RPC id to a string name.
This function looks the RPC up in a map of all globally declared RPCs,
and returns a nice name string. if the RPC is not found in the global
name map, returns a generic name string such as 'rpc 0x%04X'.
Args:
rpc_id (int): The id of the RPC that we wish to look up.
Returns:
str: The nice name of the RPC. | codesearchnet |
def json_to_params(fn=None, return_json=True):
    """Decorator: convert JSON in the request body into parameters for
    the wrapped function.

    If the JSON is a list, it is appended to ``*args``.
    If a dict, it is merged into ``**kwargs`` in non-rewrite mode (no
    existing key in ``**kwargs`` is overwritten).
    If a single primitive value, it is appended to ``*args``.

    Args:
        return_json (bool, default True): Should the decorator
            automatically convert the returned value to JSON?
    """
    def json_to_params_decorator(fn):
        @handle_type_error
        @wraps(fn)
        def json_to_params_wrapper(*args, **kwargs):
            data = decode_json_body()
            if (type(data) in [tuple, list]):
                args = (list(args) + data)
            elif (type(data) == dict):
                # Only fill in keys the caller didn't already supply.
                allowed_keys = (set(data.keys()) - set(kwargs.keys()))
                for key in allowed_keys:
                    kwargs[key] = data[key]
            elif (type(data) in PRIMITIVE_TYPES):
                args = list(args)
                args.append(data)
            if (not return_json):
                return fn(*args, **kwargs)
            return encode_json_body(fn(*args, **kwargs))
        return json_to_params_wrapper
    # Support use both as @json_to_params and @json_to_params(...).
    if fn:
        return json_to_params_decorator(fn)
    return json_to_params_decorator
return json_to_params_decorator | Convert JSON in the body of the request to the parameters for the wrapped
function.
If the JSON is list, add it to ``*args``.
If dict, add it to ``**kwargs`` in non-rewrite mode (no key in ``**kwargs``
will be overwritten).
If single value, add it to ``*args``.
Args:
return_json (bool, default True): Should the decorator automatically
convert returned value to JSON? | codesearchnet |
def relative_to_contrib(diff, project):
    """Compute the path of a changed file relative to the contrib dir.

    Args:
        diff (git.diff.Diff): file diff; its ``b_path`` names the file.
        project (Project): project providing ``contrib_module_path``.

    Returns:
        Path: ``diff.b_path`` relative to the project's contrib dir.
    """
    changed_file = pathlib.Path(diff.b_path)
    return changed_file.relative_to(project.contrib_module_path)
return path.relative_to(contrib_path) | Compute relative path of changed file to contrib dir
Args:
diff (git.diff.Diff): file diff
project (Project): project
Returns:
Path | juraj-google-style |
def info(name):
    """Get information about a service on the system.

    Args:
        name (str): The name of the service. This is not the display
            name. Use ``get_service_name`` to find the service name.

    Returns:
        dict: A dictionary containing information about the service.

    Raises:
        CommandExecutionError: if the SCM or the service can't be opened.

    CLI Example:

    .. code-block:: bash

        salt '*' service.info spooler
    """
    # Connect to the Service Control Manager.
    try:
        handle_scm = win32service.OpenSCManager(
            None, None, win32service.SC_MANAGER_CONNECT)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed to connect to the SCM: {0}'.format(exc.strerror))
    # Open the service with just the access rights needed for queries.
    try:
        handle_svc = win32service.OpenService(
            handle_scm, name,
            win32service.SERVICE_ENUMERATE_DEPENDENTS |
            win32service.SERVICE_INTERROGATE |
            win32service.SERVICE_QUERY_CONFIG |
            win32service.SERVICE_QUERY_STATUS)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed To Open {0}: {1}'.format(name, exc.strerror))
    try:
        config_info = win32service.QueryServiceConfig(handle_svc)
        status_info = win32service.QueryServiceStatusEx(handle_svc)
        try:
            description = win32service.QueryServiceConfig2(
                handle_svc, win32service.SERVICE_CONFIG_DESCRIPTION)
        except pywintypes.error:
            description = 'Failed to get description'
        delayed_start = win32service.QueryServiceConfig2(
            handle_svc, win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO)
    finally:
        # Always release the SCM and service handles.
        win32service.CloseServiceHandle(handle_scm)
        win32service.CloseServiceHandle(handle_svc)
    ret = dict()
    try:
        sid = win32security.LookupAccountName(
            '', 'NT Service\\{0}'.format(name))[0]
        ret['sid'] = win32security.ConvertSidToStringSid(sid)
    except pywintypes.error:
        ret['sid'] = 'Failed to get SID'
    # QueryServiceConfig returns a positional tuple; indices follow the
    # QUERY_SERVICE_CONFIG structure layout.
    ret['BinaryPath'] = config_info[3]
    ret['LoadOrderGroup'] = config_info[4]
    ret['TagID'] = config_info[5]
    ret['Dependencies'] = config_info[6]
    ret['ServiceAccount'] = config_info[7]
    ret['DisplayName'] = config_info[8]
    ret['Description'] = description
    ret['Status_ServiceCode'] = status_info['ServiceSpecificExitCode']
    ret['Status_CheckPoint'] = status_info['CheckPoint']
    ret['Status_WaitHint'] = status_info['WaitHint']
    ret['StartTypeDelayed'] = delayed_start
    # Decode the ServiceType bitmask into flag names.
    flags = list()
    for bit in SERVICE_TYPE:
        if isinstance(bit, int):
            if config_info[0] & bit:
                flags.append(SERVICE_TYPE[bit])
    ret['ServiceType'] = flags if flags else config_info[0]
    # Decode the ControlsAccepted bitmask into flag names.
    flags = list()
    for bit in SERVICE_CONTROLS:
        if status_info['ControlsAccepted'] & bit:
            flags.append(SERVICE_CONTROLS[bit])
    ret['ControlsAccepted'] = flags if flags else status_info['ControlsAccepted']
    # For each coded value, fall back to the raw code if unmapped.
    try:
        ret['Status_ExitCode'] = SERVICE_ERRORS[status_info['Win32ExitCode']]
    except KeyError:
        ret['Status_ExitCode'] = status_info['Win32ExitCode']
    try:
        ret['StartType'] = SERVICE_START_TYPE[config_info[1]]
    except KeyError:
        ret['StartType'] = config_info[1]
    try:
        ret['ErrorControl'] = SERVICE_ERROR_CONTROL[config_info[2]]
    except KeyError:
        ret['ErrorControl'] = config_info[2]
    try:
        ret['Status'] = SERVICE_STATE[status_info['CurrentState']]
    except KeyError:
        ret['Status'] = status_info['CurrentState']
    return ret
Args:
name (str): The name of the service. This is not the display name. Use
``get_service_name`` to find the service name.
Returns:
dict: A dictionary containing information about the service.
CLI Example:
.. code-block:: bash
salt '*' service.info spooler | juraj-google-style |
def _get_beamer_page(self):
PIL_limit = 40000
beamer_limit = 550
aspect_ratio = (self.sum_row_heights / self.sum_column_widths)
margin_factor = 1.5
height = min((self.sum_row_heights * margin_factor), beamer_limit)
width = min((self.sum_column_widths * margin_factor), beamer_limit)
if ((height * width) > PIL_limit):
height = min(np.sqrt((PIL_limit * aspect_ratio)), beamer_limit)
width = min(np.sqrt((PIL_limit / aspect_ratio)), beamer_limit)
height = max(height, 10)
width = max(width, 10)
return (height, width, self.scale) | Get height, width & scale attributes for the beamer page.
Returns:
tuple: (height, width, scale) desirable page attributes | codesearchnet |
def ScanForVolumeSystem(self, source_path_spec):
    """Scans the path specification for a supported volume system format.

    Args:
        source_path_spec (PathSpec): source path specification.

    Returns:
        PathSpec: volume system path specification, or None if no
        supported volume system type was found.

    Raises:
        BackEndError: if the source cannot be scanned or more than one
        volume system type is found.
    """
    # A VSS store has no volume system of its own.
    if (source_path_spec.type_indicator == definitions.TYPE_INDICATOR_VSHADOW):
        return None
    if source_path_spec.IsVolumeSystemRoot():
        return source_path_spec
    if (source_path_spec.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER):
        return None
    try:
        type_indicators = analyzer.Analyzer.GetVolumeSystemTypeIndicators(source_path_spec, resolver_context=self._resolver_context)
    except (IOError, RuntimeError) as exception:
        raise errors.BackEndError('Unable to process source path specification with error: {0!s}'.format(exception))
    if (not type_indicators):
        return None
    if (len(type_indicators) > 1):
        raise errors.BackEndError('Unsupported source found more than one volume system types.')
    # Don't nest a TSK partition volume system inside another one.
    if ((type_indicators[0] == definitions.TYPE_INDICATOR_TSK_PARTITION) and (source_path_spec.type_indicator in [definitions.TYPE_INDICATOR_TSK_PARTITION])):
        return None
    if (type_indicators[0] in definitions.VOLUME_SYSTEM_TYPE_INDICATORS):
        return path_spec_factory.Factory.NewPathSpec(type_indicators[0], location='/', parent=source_path_spec)
    return path_spec_factory.Factory.NewPathSpec(type_indicators[0], parent=source_path_spec)
Args:
source_path_spec (PathSpec): source path specification.
Returns:
PathSpec: volume system path specification or None if no supported volume
system type was found.
Raises:
BackEndError: if the source cannot be scanned or more than one volume
system type is found. | codesearchnet |
def filter(self, field_name, operand, value):
    """Add a filter to the report.

    Notes:
        All filters are currently AND'ed together.

    Args:
        field_name (str): Target field name to filter on.
        operand (str): Operand used in comparison. See
            `swimlane.core.search` for options.
        value: Target value used in comparison.
    """
    if operand not in self._FILTER_OPERANDS:
        raise ValueError('Operand must be one of {}'.format(', '.join(self._FILTER_OPERANDS)))
    # Resolve the field via a stub record so values can be serialized
    # the same way the report API expects.
    field = record_factory(self._app).get_field(field_name)
    self._raw['filters'].append({
        'fieldId': field.id,
        'filterType': operand,
        'value': field.get_report(value),
    })
Notes:
All filters are currently AND'ed together
Args:
field_name (str): Target field name to filter on
operand (str): Operand used in comparison. See `swimlane.core.search` for options
value: Target value used in comparision | juraj-google-style |
def __init__(self, shape, probability=0.5, scope='bernoulli', summary_labels=()):
    """Bernoulli distribution.

    Args:
        shape: Action shape.
        probability: Optional distribution bias (used to initialize the
            logit layer's bias).
    """
    self.shape = shape
    num_actions = util.prod(self.shape)
    self.logit = Linear(size=num_actions, bias=log(probability), scope='logit', summary_labels=summary_labels)
    super(Bernoulli, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)
Args:
shape: Action shape.
probability: Optional distribution bias. | juraj-google-style |
def auth(self, token):
    """Refresh an existing Skype token, extending its expiry time
    without other credentials.

    Args:
        token (str): existing Skype token

    Returns:
        (str, datetime.datetime) tuple: Skype token, and associated
        expiry if known

    Raises:
        .SkypeAuthException: if the login request is rejected
        .SkypeApiException: if the login form can't be processed
    """
    refreshed = self.sendToken(token)
    return self.getToken(refreshed)
Args:
token (str): existing Skype token
Returns:
(str, datetime.datetime) tuple: Skype token, and associated expiry if known
Raises:
.SkypeAuthException: if the login request is rejected
.SkypeApiException: if the login form can't be processed | juraj-google-style |
def get_simulated_data(nmr_problems):
    """Simulate some data.

    Returns the simulated tank observations and the corresponding
    ground truth maximum number of tanks.

    Args:
        nmr_problems (int): the number of problems

    Returns:
        tuple: (observations, nmr_tanks_ground_truth)
    """
    observed_tanks_per_problem = 10
    nmr_tanks_ground_truth = normal(nmr_problems, 1, mean=250, std=30, ctype='uint')
    observations = uniform(nmr_problems, observed_tanks_per_problem,
                           low=0, high=nmr_tanks_ground_truth, ctype='uint')
    return (observations, nmr_tanks_ground_truth)
return (observations, nmr_tanks_ground_truth) | Simulate some data.
This returns the simulated tank observations and the corresponding ground truth maximum number of tanks.
Args:
nmr_problems (int): the number of problems
Returns:
tuple: (observations, nmr_tanks_ground_truth) | codesearchnet |
def create_inference_graph(wanted_words, sample_rate, clip_duration_ms, clip_stride_ms, window_size_ms, window_stride_ms, feature_bin_count, model_architecture, preprocess):
    """Creates an audio model with the nodes needed for inference.

    Uses the supplied arguments to create a model, and inserts the input
    and output nodes that are needed to use the graph for inference.

    Args:
        wanted_words: Comma-separated list of the words we're trying to
            recognize.
        sample_rate: How many samples per second are in the input audio files.
        clip_duration_ms: How many samples to analyze for the audio pattern.
        clip_stride_ms: How often to run recognition. Useful for models
            with cache.
        window_size_ms: Time slice duration to estimate frequencies from.
        window_stride_ms: How far apart time slices should be.
        feature_bin_count: Number of frequency bands to analyze.
        model_architecture: Name of the kind of model to generate.
        preprocess: How the spectrogram is processed to produce features,
            for example 'mfcc', 'average', or 'micro'.

    Returns:
        Input and output tensor objects.

    Raises:
        Exception: If the preprocessing mode isn't recognized.
    """
    words_list = input_data.prepare_words_list(wanted_words.split(','))
    model_settings = models.prepare_model_settings(len(words_list), sample_rate, clip_duration_ms, window_size_ms, window_stride_ms, feature_bin_count, preprocess)
    runtime_settings = {'clip_stride_ms': clip_stride_ms}
    # Graph input: raw WAV data, decoded to mono samples of fixed length.
    wav_data_placeholder = tf.compat.v1.placeholder(tf.string, [], name='wav_data')
    decoded_sample_data = tf.audio.decode_wav(wav_data_placeholder, desired_channels=1, desired_samples=model_settings['desired_samples'], name='decoded_sample_data')
    spectrogram = audio_ops.audio_spectrogram(decoded_sample_data.audio, window_size=model_settings['window_size_samples'], stride=model_settings['window_stride_samples'], magnitude_squared=True)
    # Convert the spectrogram to the fingerprint features the model expects.
    if preprocess == 'average':
        fingerprint_input = tf.nn.pool(input=tf.expand_dims(spectrogram, -1), window_shape=[1, model_settings['average_window_width']], strides=[1, model_settings['average_window_width']], pooling_type='AVG', padding='SAME')
    elif preprocess == 'mfcc':
        fingerprint_input = audio_ops.mfcc(spectrogram, sample_rate, dct_coefficient_count=model_settings['fingerprint_width'])
    elif preprocess == 'micro':
        if not frontend_op:
            raise Exception('Micro frontend op is currently not available when running TensorFlow directly from Python, you need to build and run through Bazel, for example `bazel run tensorflow/examples/speech_commands:freeze_graph`')
        sample_rate = model_settings['sample_rate']
        window_size_ms = model_settings['window_size_samples'] * 1000 / sample_rate
        window_step_ms = model_settings['window_stride_samples'] * 1000 / sample_rate
        # The micro frontend operates on int16 audio.
        int16_input = tf.cast(tf.multiply(decoded_sample_data.audio, 32767), tf.int16)
        micro_frontend = frontend_op.audio_microfrontend(int16_input, sample_rate=sample_rate, window_size=window_size_ms, window_step=window_step_ms, num_channels=model_settings['fingerprint_width'], out_scale=1, out_type=tf.float32)
        fingerprint_input = tf.multiply(micro_frontend, 10.0 / 256.0)
    else:
        raise Exception('Unknown preprocess mode "%s" (should be "mfcc", "average", or "micro")' % preprocess)
    fingerprint_size = model_settings['fingerprint_size']
    reshaped_input = tf.reshape(fingerprint_input, [-1, fingerprint_size])
    logits = models.create_model(reshaped_input, model_settings, model_architecture, is_training=False, runtime_settings=runtime_settings)
    softmax = tf.nn.softmax(logits, name='labels_softmax')
    return (reshaped_input, softmax)
Uses the supplied arguments to create a model, and inserts the input and
output nodes that are needed to use the graph for inference.
Args:
wanted_words: Comma-separated list of the words we're trying to recognize.
sample_rate: How many samples per second are in the input audio files.
clip_duration_ms: How many samples to analyze for the audio pattern.
clip_stride_ms: How often to run recognition. Useful for models with cache.
window_size_ms: Time slice duration to estimate frequencies from.
window_stride_ms: How far apart time slices should be.
feature_bin_count: Number of frequency bands to analyze.
model_architecture: Name of the kind of model to generate.
preprocess: How the spectrogram is processed to produce features, for
example 'mfcc', 'average', or 'micro'.
Returns:
Input and output tensor objects.
Raises:
Exception: If the preprocessing mode isn't recognized. | github-repos |
def counter(self, counter_name, default=0):
    """Get the current counter value.

    Args:
        counter_name: name of the counter in string.
        default: default value in int if one doesn't exist.

    Returns:
        Current value of the counter.
    """
    counters = self._state.counters_map
    return counters.get(counter_name, default)
Args:
counter_name: name of the counter in string.
default: default value in int if one doesn't exist.
Returns:
Current value of the counter. | codesearchnet |
def create_statement_inspection_table(sts: List[Influence]):
    """Display an HTML representation of a table with INDRA statements to
    manually inspect for validity.

    Args:
        sts: A list of INDRA statements to be manually inspected for
            validity.

    Returns:
        pandas Styler for the pivoted evidence table, with hover
        highlighting applied.
    """
    columns = ['un_groundings', 'subj_polarity', 'obj_polarity', 'Sentence', 'Source API']
    rows = []
    for s in sts:
        # Use the last path segment of the UN grounding as a short label.
        subj_un_grounding = s.subj.db_refs['UN'][0][0].split('/')[-1]
        obj_un_grounding = s.obj.db_refs['UN'][0][0].split('/')[-1]
        subj_polarity = s.subj_delta['polarity']
        obj_polarity = s.obj_delta['polarity']
        # One table row per piece of evidence.
        for e in s.evidence:
            rows.append(((subj_un_grounding, obj_un_grounding), subj_polarity, obj_polarity, e.text, e.source_api))
    df = pd.DataFrame(rows, columns=columns)
    df = df.pivot_table(index=['un_groundings', 'Source API', 'Sentence'])

    def hover(hover_color='#ffff99'):
        # Highlight the hovered table row. The default color was restored
        # from the pandas table-styling example; the original literal was
        # truncated in this source.
        return dict(selector='tr:hover', props=[('background-color', '%s' % hover_color)])

    styles = [hover(), dict(props=[('font-size', '100%'), ('font-family', 'Gill Sans')])]
    return df.style.set_table_styles(styles)
return df.style.set_table_styles(styles) | Display an HTML representation of a table with INDRA statements to
manually inspect for validity.
Args:
sts: A list of INDRA statements to be manually inspected for validity. | codesearchnet |
def valid(self, value, level=[]):
    """Check whether a value is valid based on this array node's rules.

    Arguments:
        value {mixed} -- The value to validate; expected to be a list.
        level {list} -- Path components used when recording validation
            failure locations. NOTE(review): mutable default argument;
            it appears to be only read here (copied via ``level[:]``),
            never mutated — confirm before changing.

    Returns:
        bool -- True when valid; otherwise False, with (path, message)
        tuples appended to ``self.validation_failures``.
    """
    # Reset failures from any previous validation run.
    self.validation_failures = []
    # Optional nodes accept a missing value outright.
    if value is None and self._optional:
        return True
    if not isinstance(value, list):
        self.validation_failures.append(('.'.join(level), str(value)))
        return False
    bRet = True
    # For 'unique' arrays, track previously seen items to flag duplicates.
    if self._type == 'unique':
        lItems = []
    for i in range(len(value)):
        lLevel = level[:]
        lLevel.append('[%d]' % i)
        # Validate each element against the child node.
        if not self._node.valid(value[i], lLevel):
            self.validation_failures.extend(self._node.validation_failures[:])
            bRet = False;
            continue;
        if self._type == 'unique':
            # index() raising ValueError means the item is new (not a
            # duplicate); record it for later comparisons.
            try:
                iIndex = lItems.index(value[i])
                self.validation_failures.append(('.'.join(lLevel), 'duplicate of %s[%d]' % ('.'.join(level), iIndex)))
                bRet = False
                continue
            except ValueError:
                lItems.append(value[i])
    # Enforce minimum/maximum length constraints when configured.
    if self._minimum is not None:
        if len(value) < self._minimum:
            self.validation_failures.append(('.'.join(level), 'did not meet minimum'))
            bRet = False
    if self._maximum is not None:
        if len(value) > self._maximum:
            self.validation_failures.append(('.'.join(level), 'exceeds maximum'))
            bRet = False
    return bRet
return bRet | Valid
Checks if a value is valid based on the instance's values
Arguments:
value {mixed} -- The value to validate
Returns:
bool | juraj-google-style |
def validate_sqs_policy(self, accounts):
    """Ensure the SQS policy allows all given accounts to write to the queue.

    Given a list of accounts, checks the queue policy's SourceArn
    condition and appends any missing account SNS topic ARNs.

    Args:
        accounts (`list` of :obj:`Account`): List of accounts

    Returns:
        `None`
    """
    sqs_queue_name = self.dbconfig.get('sqs_queue_name', self.ns)
    sqs_queue_region = self.dbconfig.get('sqs_queue_region', self.ns)
    sqs_account = AWSAccount.get(self.dbconfig.get('sqs_queue_account', self.ns))
    session = get_aws_session(sqs_account)
    sqs = session.client('sqs', region_name=sqs_queue_region)
    sqs_queue_url = sqs.get_queue_url(QueueName=sqs_queue_name, QueueOwnerAWSAccountId=sqs_account.account_number)
    sqs_attribs = sqs.get_queue_attributes(QueueUrl=sqs_queue_url['QueueUrl'], AttributeNames=['Policy'])
    policy = json.loads(sqs_attribs['Attributes']['Policy'])
    for account in accounts:
        # Each account publishes from an SNS topic of the same name.
        arn = 'arn:aws:sns:*:{}:{}'.format(account.account_number, sqs_queue_name)
        if (arn not in policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn']):
            # Add the missing ARN to the policy condition and push the
            # updated policy back to the queue.
            self.log.warning('SQS policy is missing condition for ARN {}'.format(arn))
            policy['Statement'][0]['Condition']['ForAnyValue:ArnEquals']['aws:SourceArn'].append(arn)
            sqs.set_queue_attributes(QueueUrl=sqs_queue_url['QueueUrl'], Attributes={'Policy': json.dumps(policy)})
Args:
accounts (`list` of :obj:`Account`): List of accounts
Returns:
`None` | codesearchnet |
def close_log(log_file):
    """Close the open log file and restore :py:class:`sys.stdout` to the
    default (i.e., console output).

    Args:
        log_file (file): The file object to close, or None if logging to
            a file was never enabled.
    """
    sys.stdout = sys.__stdout__
    if log_file is not None:
        log_file.close()
    # NOTE: the original also did `del log_file`, which only unbinds the
    # local name and has no effect on the caller's reference; removed as
    # dead code.
Args:
log_file (file): The file object to close. | juraj-google-style |
def validate_addr(self, address, id=None, endpoint=None):
    """Return whether or not an address string is valid.

    Args:
        address: (str) address to lookup (in format
            'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK').
        id: (int, optional) id to use for response tracking.
        endpoint: (RPCEndpoint, optional) endpoint to specify to use.

    Returns:
        json object of the result or the error encountered in the RPC
        call.
    """
    params = [address]
    return self._call_endpoint(VALIDATE_ADDR, params=params, id=id, endpoint=endpoint)
Args:
address: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK')
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call | juraj-google-style |
def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64):
    """Initializes a RaggedTensorType object.

    Args:
        dtype: data type of the `RaggedTensor`'s inner values.
        ragged_rank: ragged_rank of the declared `RaggedTensor`.
        row_splits_dtype: data type for the `RaggedTensor`'s row splits.
            One of: `tf.int32` or `tf.int64`.
    """
    self._dtype = dtype
    self._ragged_rank = ragged_rank
    # Normalize to a DType instance regardless of what was passed in.
    self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
Args:
dtype: data type of the `RaggedTensor`'s inner values.
ragged_rank: ragged_rank of the declared `RaggedTensor`.
row_splits_dtype: data type for the `RaggedTensor`'s row splits.
One of: `tf.int32` or `tf.int64`. | github-repos |
def serialize_sparse_v2(sp_input, out_type=dtypes.string, name=None):
    """Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.

    Args:
        sp_input: The input `SparseTensor`.
        out_type: The `dtype` to use for serialization.
        name: A name prefix for the returned tensors (optional).

    Returns:
        A 3-vector (1-D `Tensor`), with each column representing the
        serialized `SparseTensor`'s indices, values, and shape
        (respectively).

    Raises:
        TypeError: If `sp_input` is not a `SparseTensor`.
    """
    sparse = _convert_to_sparse_tensor(sp_input)
    return gen_sparse_ops.serialize_sparse(
        sparse.indices, sparse.values, sparse.dense_shape,
        name=name, out_type=out_type)
Args:
sp_input: The input `SparseTensor`.
out_type: The `dtype` to use for serialization.
name: A name prefix for the returned tensors (optional).
Returns:
A 3-vector (1-D `Tensor`), with each column representing the serialized
`SparseTensor`'s indices, values, and shape (respectively).
Raises:
TypeError: If `sp_input` is not a `SparseTensor`. | github-repos |
def _GetSignatureScanner(cls, specification_store):
    """Initializes a signature scanner based on a specification store.

    Args:
        specification_store (FormatSpecificationStore): specification store.

    Returns:
        pysigscan.scanner: signature scanner.
    """
    signature_scanner = pysigscan.scanner()
    signature_scanner.set_scan_buffer_size(cls._SCAN_BUFFER_SIZE)
    for format_specification in specification_store.specifications:
        for signature in format_specification.signatures:
            pattern_offset = signature.offset
            if (pattern_offset is None):
                # No fixed offset: scan the whole buffer for the pattern.
                signature_flags = pysigscan.signature_flags.NO_OFFSET
            elif (pattern_offset < 0):
                # Negative offsets are relative to the end of the data.
                pattern_offset *= (- 1)
                signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END
            else:
                signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START
            signature_scanner.add_signature(signature.identifier, pattern_offset, signature.pattern, signature_flags)
    return signature_scanner
return signature_scanner | Initializes a signature scanner based on a specification store.
Args:
specification_store (FormatSpecificationStore): specification store.
Returns:
pysigscan.scanner: signature scanner. | codesearchnet |
def list_as_sub(access_token, subscription_id):
    """List availability sets in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON body of the list of availability set
        properties.
    """
    endpoint = '{}/subscriptions/{}/providers/Microsoft.Compute/availabilitySets?api-version={}'.format(
        get_rm_endpoint(), subscription_id, COMP_API)
    return do_get_next(endpoint, access_token)
return do_get_next(endpoint, access_token) | List availability sets in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of the list of availability set properties. | codesearchnet |
def _build_kernel(self, kernel_source, compile_flags=()):
    """Build the CL kernel for this worker.

    Args:
        kernel_source (str): the kernel source to use for building the
            kernel.
        compile_flags: iterable of extra compiler flag strings.

    Returns:
        cl.Program: a compiled CL kernel.
    """
    flags = ' '.join(compile_flags)
    return cl.Program(self._cl_context, kernel_source).build(flags)
Args:
kernel_source (str): the kernel source to use for building the kernel
Returns:
cl.Program: a compiled CL kernel | codesearchnet |
def _apply_unary_to_chunks(f, chunks_by_dev):
    """Apply a unary op to each tensor in chunks_by_dev, on same device.

    Args:
        f: a unary function over `tf.Tensor`.
        chunks_by_dev: list of lists of `tf.Tensor`.

    Returns:
        new list of lists of `tf.Tensor` with the same structure as
        chunks_by_dev containing the derived tensors.
    """
    result = []
    for chunk in chunks_by_dev:
        # Pin the derived tensors to the device of the chunk's first tensor.
        with ops.colocate_with(chunk[0]):
            result.append([f(tensor) for tensor in chunk])
    return result
return output | Apply a unary op to each tensor in chunks_by_dev, on same device.
Args:
f: a unary function over `tf.Tensor`.
chunks_by_dev: list of lists of `tf.Tensor`.
Returns:
new list of lists of `tf.Tensor` with the same structure as
chunks_by_dev containing the derived tensors. | github-repos |
def raise_for_status(response):
    """Raise an appropriate error for a given response.

    Arguments:
        response (:py:class:`aiohttp.ClientResponse`): The API response.

    Raises:
        :py:class:`aiohttp.web_exceptions.HTTPException`: The appropriate
            error for the response's status.
    """
    for err_name in web_exceptions.__all__:
        err = getattr(web_exceptions, err_name)
        if err.status_code != response.status:
            continue
        payload = dict(
            headers=response.headers,
            reason=response.reason,
        )
        # Redirect-style exceptions require the Location header as the
        # first positional argument.
        if issubclass(err, web_exceptions._HTTPMove):
            raise err(response.headers['Location'], **payload)
        raise err(**payload)
def wait_for_vacancy(self, processor_type):
    """Waits for a particular processor type to have the capacity to
    handle additional transactions or until is_cancelled is True.

    Args:
        processor_type (ProcessorType): The family, and version of
            the transaction processor.

    Returns:
        Processor

    Raises:
        WaitCancelledException: If the wait is cancelled before a
            processor becomes available.
    """
    with self._condition:
        # Wake up when either a processor frees up or the wait is cancelled.
        self._condition.wait_for(
            lambda: self._processor_available(processor_type)
            or self._cancelled_event.is_set())
        if self._cancelled_event.is_set():
            raise WaitCancelledException()
        return self[processor_type].next_processor()
def make_x(self, operator, adjoint, with_batch=True):
    """Make an 'x' appropriate for calling operator.matmul(x).

    Args:
        operator: A `LinearOperator`
        adjoint: Python `bool`. If `True`, we are making an 'x' value for the
            adjoint operator.
        with_batch: Python `bool`. If `True`, create `x` with the same batch
            shape as operator, and otherwise create a matrix without any batch
            shape.

    Returns:
        A `Tensor`
    """
    # Abstract hook: concrete test harnesses must override this.
    raise NotImplementedError('make_x is not defined.')
def get(self):
    """Get an rgb color tuple according to the probability distribution.

    Returns:
        tuple(int, int, int): A ``(red, green, blue)`` tuple.

    Example:
        >>> color = SoftColor(([(0, 1), (255, 10)],),
        ...                   ([(0, 1), (255, 10)],),
        ...                   ([(0, 1), (255, 10)],))
        >>> color.get()  # doctest: +SKIP
        (234, 201, 243)
    """
    # The original repeated the same isinstance branch for all three
    # channels; a single local helper removes the triplication.
    def _resolve(channel):
        # A channel is either a fixed int or a SoftInt sampler.
        return channel.get() if isinstance(channel, SoftInt) else channel

    return (_resolve(self.red), _resolve(self.green), _resolve(self.blue))
def wv45(msg):
    """Wake vortex.

    Args:
        msg (String): 28 bytes hexadecimal message string

    Returns:
        int: Wake vortex level. 0=NIL, 1=Light, 2=Moderate, 3=Severe.
        None when the status bit indicates the field is unavailable.
    """
    bits = hex2bin(data(msg))
    # Bit 12 is the status flag; '0' means no wake vortex data present.
    if bits[12] == '0':
        return None
    return bin2int(bits[13:15])
def scanJoiner(self, xEUI='*', strPSKd='threadjpaketest'):
    """scan Joiner

    Args:
        xEUI: Joiner's EUI-64
        strPSKd: Joiner's PSKd for commissioning

    Returns:
        True: successful to add Joiner's steering data
        False: fail to add Joiner's steering data
    """
    # FIX: the original used Python-2-only `print` statements; the
    # single-argument parenthesized form below behaves identically on
    # Python 2 and is valid Python 3.
    print('%s call scanJoiner' % self.port)
    timeout = 500
    if not isinstance(xEUI, str):
        eui64 = self.__convertLongToString(xEUI)
        # EUI-64 must be a 16-hex-digit string; left-pad with zeros.
        if len(eui64) < 16:
            eui64 = eui64.zfill(16)
        print(eui64)
    else:
        eui64 = xEUI
    cmd = 'commissioner joiner add %s %s %s' % (eui64, strPSKd, str(timeout))
    print(cmd)
    if self.__sendCommand(cmd)[0] == 'Done':
        # Start capturing commissioning logs once, if not already running.
        if self.logThreadStatus == self.logStatus['stop']:
            self.logThread = ThreadRunner.run(target=self.__readCommissioningLogs, args=(120,))
        return True
    else:
        return False
def _count_op_with_name_and_attribute(self, nodes: Iterable[node_def_pb2.NodeDef], op_name: str, attr_name: str, attr_val: _AttrValType, get_op_name: bool=False) -> int:
if get_op_name:
return len([node.attr.get(attr_name) == attr_val for node in nodes if node.name == op_name])
else:
return len([node.attr.get(attr_name) == attr_val for node in nodes if node.op == op_name]) | Determine the number of nodes whose operation name matches `op_name`.
If `attr_name` is given, additionally check if the `attr_val` matches with
the attribute value of the op.
Args:
nodes: Iterable of NodeDefs.
op_name: Name of the op to match.
attr_name: Name of the attribute of the op to match.
attr_val: Value of the attr_name to check.
get_op_name: If set True, checks node.name rather than node.op.
Returns:
The number of occurrences of nodes whose name match `op_name` and
'attr_val' if 'attr_name' is given. | github-repos |
def timestamp_d_b_Y_H_M_S(value):
    """Convert timestamp string to time in seconds since epoch.

    Timestamp strings like '18 Jun 2013 12:00:00 GMT' are able to be
    converted by this function.

    Args:
        value: A timestamp string in the format '%d %b %Y %H:%M:%S GMT'.

    Returns:
        The time in seconds since epoch as an integer.

    Raises:
        ValueError: If timestamp is invalid.
        KeyError: If the abbreviated month is invalid.

    Note: The timezone is ignored; it is simply assumed to be UTC/GMT.
    """
    # The month table is inlined (the original depended on a module-level
    # `_months` dict) so the function is self-contained and locale-independent.
    months = {
        'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
        'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
    }
    day, month, year, clock, _tz = value.split()
    hours, minutes, seconds = clock.split(':')
    # calendar.timegm interprets the struct_time tuple as UTC, matching
    # the GMT suffix that the format promises.
    return int(calendar.timegm((
        int(year), months[month.lower()], int(day),
        int(hours), int(minutes), int(seconds), 0, 0, 0)))
def _get_control_flow_context(self):
    """Returns the control flow context of this op.

    Returns:
        A context object.
    """
    # Plain accessor over the private backing attribute.
    return self._control_flow_context
def run(self, args):
    """Flashes the device connected to the J-Link.

    Args:
        self (FlashCommand): the ``FlashCommand`` instance
        args (Namespace): the arguments passed on the command-line

    Returns:
        ``None``
    """
    # Collect the flash parameters up front, then hand them to pylink.
    flash_args = {
        'path': args.file[0],
        'addr': args.addr,
        'on_progress': pylink.util.flash_progress_callback,
    }
    jlink = self.create_jlink(args)
    jlink.flash_file(**flash_args)
    print('Flashed device successfully.')
def inputs(num_devices, dataset_name, data_dir=None, input_name=None, num_chunks=0, append_targets=False):
# Builds train / train-eval / eval input streams for a named dataset and
# returns them bundled in an Inputs tuple.
assert data_dir, 'Must provide a data directory'
data_dir = os.path.expanduser(data_dir)
(train_batches, train_eval_batches, eval_batches, input_name, input_shape) = _train_and_eval_batches(dataset_name, data_dir, input_name, num_devices)
# Wrap a TF dataset as a numpy generator, optionally chunking inputs and
# appending targets (see dataset_to_stream).
def numpy_stream(dataset):
return dataset_to_stream(dataset, input_name, num_chunks=num_chunks, append_targets=append_targets)
if (num_chunks > 0):
length = input_shape[0]
# NOTE(review): the next line is truncated/garbled in this copy (unbalanced
# parentheses). It presumably recomputes input_shape for the chunked case —
# recover the original expression from the upstream source before relying
# on this code.
input_shape = tuple(([tuple(([(length
return Inputs(train_stream=(lambda : numpy_stream(train_batches)), train_eval_stream=(lambda : numpy_stream(train_eval_batches)), eval_stream=(lambda : numpy_stream(eval_batches)), input_shape=input_shape) | Make Inputs for built-in datasets.
Args:
num_devices: how many devices to build the inputs for.
dataset_name: a TFDS or T2T dataset name. If it's a T2T dataset name, prefix
with "t2t_".
data_dir: data directory.
input_name: optional, name of the inputs from the dictionary.
num_chunks: optional, into how many pieces should we chunk (large inputs).
append_targets: optional, instead of inputs return a pair (inputs, targets)
which is useful for autoregressive models.
Returns:
trax.inputs.Inputs | codesearchnet |
def is_valid_package_name(name, raise_error=False):
    """Test the validity of a package name string.

    Args:
        name (str): Name to test.
        raise_error (bool): If True, raise an exception on failure

    Returns:
        bool.

    Raises:
        PackageRequestError: If `raise_error` is True and the name is invalid.
    """
    # FIX: the original returned the raw `re.Match`-or-None despite the
    # documented bool return; coerce so callers get exactly True/False.
    is_valid = PACKAGE_NAME_REGEX.match(name) is not None
    if raise_error and not is_valid:
        raise PackageRequestError("Not a valid package name: %r" % name)
    return is_valid
def fulltypes_for_flat_tensors(element_spec):
    """Convert the element_spec for a dataset to a list of FullTypeDef.

    Note that "flat" here is a nickname for the "batchable tensor list"
    encoding used by datasets and map_fn. The FullTypeDef created corresponds
    to this encoding (e.g. it uses variants, not the default "component"
    encoding). Intended for temporary internal use; see the limitations of
    `_translate_to_fulltype_for_flat_tensors`.

    Args:
        element_spec: A nest of TypeSpec describing the elements of a dataset
            (or map_fn).

    Returns:
        A list of FullTypeDef corresponding to ELEMENT_SPEC. The items in this
        list correspond to the items in `_flat_tensor_specs`.
    """
    per_spec_types = [
        _translate_to_fulltype_for_flat_tensors(spec)
        for spec in _specs_for_flat_tensors(element_spec)
    ]
    # Flatten the per-spec lists into one flat list matching _flat_tensor_specs.
    return nest.flatten(per_spec_types)
def set_smartplug_state(self, device_label, state):
    """Turn on or off smartplug

    Args:
        device_label (str): Smartplug device label
        state (boolean): new status, 'True' or 'False'
    """
    headers = {
        'Content-Type': 'application/json',
        'Cookie': 'vid={}'.format(self._vid)}
    payload = json.dumps([{
        "deviceLabel": device_label,
        "state": state}])
    response = None
    try:
        response = requests.post(
            urls.smartplug(self._giid), headers=headers, data=payload)
    except requests.exceptions.RequestException as ex:
        # Normalize transport failures into the library's own error type.
        raise RequestError(ex)
    _validate_response(response)
def __init__(self,
             html_id=None,
             name=None,
             content=None,
             template=None,
             classes=None,
             **kwargs):
    """Init method.

    Args:
        html_id (str): an ID to set on the HTML item.
        name (str): the name of the item, displayed in HTML.
        content (): suitable content according to chosen display.
        template (str): the template responsible for display.
        classes (str): additional classes to pass to the HTML item.
    """
    # The original repeated the same try/except assignment five times;
    # drive the identical pattern from a table instead. Each value is
    # assigned through the public attribute first so subclass properties can
    # intercept it; read-only properties (raising AttributeError) fall back
    # to the private backing attribute.
    optional_attrs = (
        ('html_id', html_id),
        ('name', name),
        ('content', content),
        ('template', template),
        ('classes', classes),
    )
    for attr, value in optional_attrs:
        if value is None:
            continue
        try:
            setattr(self, attr, value)
        except AttributeError:
            setattr(self, '_' + attr, value)
    if not hasattr(self, 'template'):
        raise AttributeError('template is a required widget attribute')
    for kw, arg in kwargs.items():
        setattr(self, kw, arg)
def expect_output(self, pattern, timeout=(- 1)):
    """Wait until the running program performs some given output, or terminates.

    The pattern argument may be a string, a compiled regular expression,
    or a list of any of those types. Strings will be compiled into regular
    expressions.

    Args:
        pattern: The pattern the output should be checked for.
        timeout (int): How many seconds should be waited for the output.

    Returns:
        int: The index into the pattern list. If the pattern was not a list,
        it returns 0 on a successful match.

    Raises:
        TimeoutException: The output did not match within the given time frame.
        TerminationException: The program terminated before producing the output.
        NestedException: An internal problem occured while waiting for the output.
    """
    logger.debug("Expecting output '{0}' from '{1}'".format(pattern, self.name))
    try:
        return self._spawn.expect(pattern, timeout)
    except pexpect.exceptions.EOF as exc:
        # The child exited before the pattern showed up.
        logger.debug('Raising termination exception.')
        raise TerminationException(instance=self, real_exception=exc, output=self.get_output())
    except pexpect.exceptions.TIMEOUT as exc:
        logger.debug('Raising timeout exception.')
        raise TimeoutException(instance=self, real_exception=exc, output=self.get_output())
    except Exception as exc:
        # Anything else is wrapped so callers get the captured output too.
        logger.debug('Expecting output failed: ' + str(exc))
        raise NestedException(instance=self, real_exception=exc, output=self.get_output())
def ReadFromFile(self, artifacts_reader, filename):
    """Reads artifact definitions into the registry from a file.

    Args:
        artifacts_reader (ArtifactsReader): an artifacts reader.
        filename (str): name of the file to read from.
    """
    # Register every definition the reader yields, in file order.
    for definition in artifacts_reader.ReadFile(filename):
        self.RegisterDefinition(definition)
def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
    """Resize an image to (size["height"], size["width"]).

    Args:
        image (`np.ndarray`):
            Image to resize.
        size (`Dict[str, int]`):
            Size of the output image.
        resample (`PILImageResampling`, *optional*, defaults to `PIL.Image.BICUBIC`):
            Resampling filter to use when resizing the image.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the image. If not provided, it will be the same as the input image.
        input_data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the input image. If not provided, it will be inferred.
    """
    size = get_size_dict(size, default_to_square=True, param_name='size')
    # Both dimensions must be present after normalization.
    if not {'height', 'width'} <= set(size):
        raise ValueError(f'The `size` argument must contain `height` and `width` keys. Got {size.keys()}')
    target_size = (size['height'], size['width'])
    return resize(image, size=target_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def valid_config_exists(config_path=CONFIG_PATH):
    """Verify that a valid config file exists.

    Args:
        config_path (str): Path to the config file.

    Returns:
        boolean: True if there is a valid config file, false if not.
    """
    # Guard clause: no file means no valid config.
    if not os.path.isfile(config_path):
        return False
    try:
        check_config(read_config(config_path))
    except (ConfigurationError, IOError):
        # Unreadable or semantically invalid config counts as "no config".
        return False
    return True
def interactive_update_stack(self, fqn, template, old_parameters, parameters, stack_policy, tags, **kwargs):
# Interactive update flow: create a change set, summarize the diff for the
# operator, ask for approval, then execute the change set.
logger.debug('Using interactive provider mode for %s.', fqn)
(changes, change_set_id) = create_change_set(self.cloudformation, fqn, template, parameters, tags, 'UPDATE', service_role=self.service_role, **kwargs)
old_parameters_as_dict = self.params_as_dict(old_parameters)
# Parameters without an explicit ParameterValue reuse the value from the
# existing stack (the "use previous value" case).
new_parameters_as_dict = self.params_as_dict([(x if ('ParameterValue' in x) else {'ParameterKey': x['ParameterKey'], 'ParameterValue': old_parameters_as_dict[x['ParameterKey']]}) for x in parameters])
params_diff = diff_parameters(old_parameters_as_dict, new_parameters_as_dict)
action = ('replacements' if self.replacements_only else 'changes')
# Keep the unfiltered change set for the verbose approval prompt.
full_changeset = changes
if self.replacements_only:
# Only surface changes that force resource replacement.
changes = requires_replacement(changes)
if (changes or params_diff):
# Lock the UI so the summary and approval prompt are not interleaved
# with other concurrent output; always unlock, even on rejection.
ui.lock()
try:
output_summary(fqn, action, changes, params_diff, replacements_only=self.replacements_only)
ask_for_approval(full_changeset=full_changeset, params_diff=params_diff, include_verbose=True)
finally:
ui.unlock()
self.deal_with_changeset_stack_policy(fqn, stack_policy)
self.cloudformation.execute_change_set(ChangeSetName=change_set_id) | Update a Cloudformation stack in interactive mode.
Args:
fqn (str): The fully qualified name of the Cloudformation stack.
template (:class:`stacker.providers.base.Template`): A Template
object to use when updating the stack.
old_parameters (list): A list of dictionaries that defines the
parameter list on the existing Cloudformation stack.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack. | codesearchnet |
def blend_rgba(self, image: ImageInput) -> ImageInput:
    """Convert image to RGB by blending the transparency layer if it's in RGBA format.

    If image is not `PIL.Image`, it is simply returned without modifications.

    Args:
        image (`ImageInput`):
            Image to convert.
    """
    if not isinstance(image, PIL.Image.Image):
        return image
    if image.mode == 'RGB':
        return image
    rgba = np.array(image.convert('RGBA'))
    alpha_channel = rgba[:, :, 3]
    if not (alpha_channel < 255).any():
        # Fully opaque: a plain mode conversion is enough.
        return image.convert('RGB')
    # Blend against a white background using the normalized alpha.
    alpha = alpha_channel / 255.0
    blended = (1 - alpha[:, :, np.newaxis]) * 255 + alpha[:, :, np.newaxis] * rgba[:, :, :3]
    return PIL.Image.fromarray(blended.astype('uint8'), 'RGB')
def find_all(self, model_class, params=None):
    """Return a list of models from the API and caches the result.

    Args:
        model_class (:class:`cinder_data.model.CinderModel`): A subclass of
            :class:`cinder_data.model.CinderModel` of your chosen model.
        params (dict, optional): Query parameters appended to the request URL.

    Returns:
        list: A list of instances of your model_class or an empty list.
    """
    # FIX: the original used a mutable default argument (`params={}`);
    # default to None and substitute a fresh dict per call.
    params = {} if params is None else params
    url = '{host}/{namespace}/{model}{params}'.format(
        host=self._host,
        namespace=self._namespace,
        model=self._translate_name(model_class.__name__),
        params=self._build_param_string(params)
    )
    data = self._get_json(url)['data']
    fresh_models = []
    for item in data:
        fresh_model = model_class(item['attributes'])
        fresh_model.id = item['id']
        fresh_model.validate()
        fresh_models.append(fresh_model)
        # Cache each validated record when a cache backend is configured.
        if self._cache is not None:
            self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model)
    return fresh_models
def reset(self, state):
    """Resets the generator by a new state.

    See `__init__` for the meaning of "state".

    Args:
        state: the new state.
    """
    new_state = _convert_to_state_tensor(state)
    # Validate the shape against the size required by this RNG algorithm
    # before overwriting the state variable.
    expected_size = _get_state_size(self.algorithm)
    new_state.shape.assert_is_compatible_with([expected_size])
    self._state_var.assign(new_state)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.