code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def forward(self, g_values: torch.Tensor, mask: torch.Tensor, labels: Optional[torch.Tensor]=None, loss_batch_weight=1, return_dict=False) -> BayesianWatermarkDetectorModelOutput:
    """Computes the watermarked posterior P(watermarked|g_values).

    Args:
        g_values: g-values (with values 0 or 1) of shape
            (batch_size, seq_len, watermarking_depth, ...).
        mask: binary tensor of shape [batch_size, seq_len]; g-values with
            mask value 0 are discarded.
        labels: optional ground-truth labels. When given, a BCE training
            loss (plus an L2 penalty on the learned deltas) is computed.
        loss_batch_weight: scalar multiplier applied to the delta penalty.
        return_dict: if False, return a tuple instead of a model output.

    Returns:
        p(watermarked | g_values), of shape [batch_size] (tuple also
        carries the loss when labels are provided).
    """
    likelihoods_watermarked = self.likelihood_model_watermarked(g_values)
    # Unwatermarked g-values are modeled as uniform Bernoulli(0.5).
    likelihoods_unwatermarked = 0.5 * torch.ones_like(g_values)
    out = self._compute_posterior(likelihoods_watermarked=likelihoods_watermarked, likelihoods_unwatermarked=likelihoods_unwatermarked, mask=mask, prior=self.prior)
    loss = None
    if labels is not None:
        loss_fct = BCELoss()
        # L2 penalty on the watermarked model's delta parameters.
        loss_unwweight = torch.sum(self.likelihood_model_watermarked.delta ** 2)
        loss_weight = loss_unwweight * loss_batch_weight
        # Clamp to avoid log(0) inside BCE.
        loss = loss_fct(torch.clamp(out, 1e-05, 1 - 1e-05), labels) + loss_weight
    if not return_dict:
        return (out,) if loss is None else (out, loss)
    return BayesianWatermarkDetectorModelOutput(loss=loss, posterior_probabilities=out)
def _prepare_babi_data(tmp_dir, data_dir):
    """Downloads and extracts the dataset.

    Args:
        tmp_dir: temp directory to download and extract the dataset.
        data_dir: the base directory where data and vocab files are stored.

    Returns:
        The temp directory containing the raw data.
    """
    if not tf.gfile.Exists(data_dir):
        tf.gfile.MakeDirs(data_dir)
    archive_path = os.path.join(tmp_dir, _TAR)
    # Some hosts reject requests without a browser-like User-Agent.
    user_agent = ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) "
                  "AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/63.0.3239.132 Safari/537.36")
    response = requests.get(_URL, headers={"User-Agent": user_agent})
    with open(archive_path, "wb") as archive_file:
        archive_file.write(response.content)
    archive = tarfile.open(archive_path)
    archive.extractall(tmp_dir)
    archive.close()
    return tmp_dir
def extractUnits(self, inp):
    """Collects all the valid units from an input string.

    Works by appending consecutive unit-like words (and '/') from the
    string and cross-referencing them with a set of valid units.

    Args:
        inp (str): Some text which hopefully contains descriptions
            of different units.

    Returns:
        A list of strings, each entry in which is a valid quantities
        unit.
    """
    cleaned = self._preprocess(inp)
    found = []
    current = []
    for word in cleaned.split(' '):
        if self.isValidUnit(word) or word == '/':
            current.append(word)
        elif current:
            # A non-unit word ends the current unit description.
            found.append(' '.join(current))
            current = []
    if current:
        found.append(' '.join(current))
    return found
def get_servo_position(self):
    """Gets the current calibrated position of the Herkulex servo.

    Sends a RAM read request for the calibrated-position register and
    parses the two-byte little-endian value from the reply packet.

    Args:
        none

    Returns:
        int: position of the servo - 0 to 1023 (10-bit models); models
        0x06 and 0x04 use the full high byte.

    Raises:
        SerialException: Error occured while opening serial port
    """
    data = []
    data.append(0x09)                        # packet size
    data.append(self.servoid)
    data.append(RAM_READ_REQ)
    data.append(CALIBRATED_POSITION_RAM)
    data.append(BYTE2)
    send_data(data)
    rxdata = []
    try:
        rxdata = SERPORT.read(13)
        if (self.servomodel == 0x06) or (self.servomodel == 0x04):
            # These models report a wider position value: keep all 8 high bits.
            return ((ord(rxdata[10]) & 0xff) << 8) | (ord(rxdata[9]) & 0xFF)
        else:
            # 10-bit position: only the low 2 bits of the high byte count.
            return ((ord(rxdata[10]) & 0x03) << 8) | (ord(rxdata[9]) & 0xFF)
    except HerkulexError:
        # Fix: use the print() function form so the module also parses
        # under Python 3 (the Python 2 print statement is a syntax error).
        print("Could not read from the servos. Check connection")
def broadcast_recv(shape, dtype, group_size, group_key, instance_key, communication_hint='auto', timeout=0):
    """Receives a broadcast tensor, across devices.

    Args:
        shape: Shape of the tensor to be received.
        dtype: Type of the tensor to be received.
        group_size: one plus the number of receiving tensors, i.e. the total
            number of devices participating. Each tensor must reside on a
            different device.
        group_key: an integer identifying the group of devices.
        instance_key: an integer identifying the participating group of Ops.
        communication_hint: preferred collective communication. The
            implementation may fall back to another mechanism. Options include
            `auto`, `ring`, and `nccl`.
        timeout: If set to a non zero, set a completion timeout to detect
            staleness. If the timer goes off, a DeadlineExceededError is
            raised. The timeout value in seconds. This feature is
            experimental.

    Returns:
        An Op implementing the broadcast receive.

    Raises:
        ValueError: if any of the input parameter constraints are not met.
    """
    if group_size <= 1:
        # Fix: the message previously referred to broadcast_send; this
        # function is broadcast_recv.
        raise ValueError(f'Parameter `group_size` to broadcast_recv must be at least 2. Received: {group_size}.')
    return gen_collective_ops.collective_bcast_recv(shape=shape, T=dtype, group_size=group_size, group_key=group_key, instance_key=instance_key, communication_hint=communication_hint.lower(), timeout_seconds=timeout)
def make_full_document(text, title=None, preamp_decl=None, preamb_extra=None):
    r"""Wraps a latex fragment in a dummy preamble and document.

    Args:
        text (str): the latex body fragment.
        title (str): optional title; when given it is added to
            ``preamp_decl`` and ``\maketitle`` is emitted.
        preamp_decl (dict): mapping of preamble declaration names to values
            (e.g. ``{'title': 'foo'}``). Defaults to an empty dict.
        preamb_extra (str or list): extra preamble lines.

    Returns:
        str: text_

    CommandLine:
        python -m utool.util_latex --test-make_full_document

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_latex import *  # NOQA
        >>> text = 'foo'
        >>> title = 'title'
        >>> text_ = make_full_document(text, title)
        >>> result = str(text_)
        >>> print(result)
    """
    import utool as ut
    # Fix: avoid a mutable default argument -- the dict is mutated below
    # (preamp_decl['title'] = title), so a shared default would leak state
    # across calls.
    if preamp_decl is None:
        preamp_decl = {}
    doc_preamb = ut.codeblock('\n %\\documentclass{article}\n \\documentclass[10pt,twocolumn,letterpaper]{article}\n % \\usepackage[utf8]{inputenc}\n \\usepackage[T1]{fontenc}\n\n \\usepackage{times}\n \\usepackage{epsfig}\n \\usepackage{graphicx}\n \\usepackage{amsmath,amsthm,amssymb}\n \\usepackage[usenames,dvipsnames,svgnames,table]{xcolor}\n \\usepackage{multirow}\n \\usepackage{subcaption}\n \\usepackage{booktabs}\n\n %\\pagenumbering{gobble}\n ')
    if preamb_extra is not None:
        if isinstance(preamb_extra, (list, tuple)):
            preamb_extra = '\n'.join(preamb_extra)
        doc_preamb += ('\n' + preamb_extra) + '\n'
    if title is not None:
        preamp_decl['title'] = title
    decl_lines = ['\\{key}{{{val}}}'.format(key=key, val=val)
                  for (key, val) in preamp_decl.items()]
    doc_decllines = '\n'.join(decl_lines)
    doc_header = ut.codeblock('\n \\begin{document}\n ')
    if preamp_decl.get('title') is not None:
        doc_header += '\\maketitle'
    doc_footer = ut.codeblock('\n \\end{document}\n ')
    text_ = '\n'.join((doc_preamb, doc_decllines, doc_header, text, doc_footer))
    return text_
def define_batch_env(constructor, num_agents, env_processes):
    """Create environments and apply all desired wrappers.

    Args:
        constructor: Constructor of an OpenAI gym environment.
        num_agents: Number of environments to combine in the batch.
        env_processes: Whether to step environments in external processes.

    Returns:
        In-graph environments object.
    """
    with tf.variable_scope('environments'):
        if env_processes:
            # Run each environment in its own external process.
            envs = [tools.wrappers.ExternalProcess(constructor)
                    for _ in range(num_agents)]
        else:
            envs = [constructor() for _ in range(num_agents)]
        batched = tools.BatchEnv(envs, blocking=not env_processes)
        return tools.InGraphBatchEnv(batched)
def _load_methods(package):
    """Loads the mappings from method call result to analysis.

    Args:
        package (str): name of the package to load for.
    """
    global _methods
    # Record the package even when no config is found, so we don't retry.
    _methods[package] = None
    from acorn.config import settings
    from acorn.logging.descriptors import _obj_getattr
    config = settings(package)
    if config is not None and config.has_section("analysis.methods"):
        from importlib import import_module
        _methods[package] = {}
        for fqdn, target in dict(config.items("analysis.methods")).items():
            # Import the root module of the target and resolve the full
            # dotted path to the callable.
            root_module = import_module(target.split('.')[0])
            _methods[package][fqdn] = _obj_getattr(root_module, target)
def _binary_2d_label_to_1d_sparse_value(labels):
    """Convert dense 2D binary indicator to sparse ID.

    Only 1 values in `labels` are included in result.

    Args:
        labels: Dense 2D binary indicator, shape [batch_size, num_classes].
            Each row must contain exactly 1 `1` value.

    Returns:
        `SparseTensorValue` of shape [batch_size]. Values are indices of `1`
        values along the last dimension of `labels`.

    Raises:
        ValueError: if there is not exactly 1 `1` value per row of `labels`.
    """
    indices = []
    values = []
    for batch, row in enumerate(labels):
        for column, entry in enumerate(row):
            if entry == 1:
                indices.append([batch])
                values.append(column)
            else:
                assert entry == 0
    # Exactly one `1` per row means exactly one index entry per batch item.
    if indices != [[i] for i in range(len(labels))]:
        raise ValueError('Expected 1 label/example, got %s.' % indices)
    shape = [len(labels)]
    return sparse_tensor.SparseTensorValue(np.array(indices, np.int64), np.array(values, np.int64), np.array(shape, np.int64))
def allreduce_ring(xs, devices, reduction_fn_string="SUM"):
  """Compute the reduction of all Tensors and put the result everywhere.

  Performance-optimized for a ring of devices.

  Args:
    xs: a list of n tf.Tensors
    devices: a list of strings
    reduction_fn_string: "SUM" or "MAX"

  Returns:
    a list of n Tensors

  Raises:
    ValueError: if devices is not a list of n strings
  """
  n = len(xs)
  if len(devices) != n:
    raise ValueError("devices must be a list of length len(xs)")
  if n == 1:
    return xs
  shape = xs[0].shape.as_list()
  # size is None when the static shape is not fully known.
  size = None if None in shape else mtf.list_product(shape)
  if size is None or size < 1024 or size % n != 0:
    # Too small (or not evenly splittable into n pieces): reduce unsharded.
    return allreduce_ring_single_shard(xs, devices, reduction_fn_string)

  def _circular_shift(l, n):
    # Rotate list l right by n positions.
    n %= len(l)
    return l[-n:] + l[:-n]

  def _flatten_and_split(x):
    # Flatten to 1-D and split evenly into n pieces.
    return tf.split(tf.reshape(x, [-1]), n)

  def _concat_and_reshape(xs):
    # Inverse of _flatten_and_split.
    return tf.reshape(tf.concat(xs, 0), shape)

  # [device, shard]
  x_split = mtf.parallel(devices, _flatten_and_split, xs)
  # [shard, device]
  x_split_t = mtf.transpose_list_of_lists(x_split)
  y_split_t = []
  for shard in xrange(n):
    # Rotate the device list per shard so each shard's reduction starts on
    # a different device, spreading the work around the ring.
    shard_xs = _circular_shift(x_split_t[shard], shard)
    shard_devices = _circular_shift(devices, shard)
    shard_ys = allreduce_ring_single_shard(
        shard_xs, shard_devices, reduction_fn_string)
    # Undo the rotation so results line up with the original device order.
    y_split_t.append(_circular_shift(shard_ys, -shard))
  y_split = mtf.transpose_list_of_lists(y_split_t)
  ys = mtf.parallel(devices, _concat_and_reshape, y_split)
  return ys
def to_view(self, view_name):
    """Create a View from this Query.

    Args:
        view_name: the name of the View either as a string or a 3-part
            tuple (projectid, datasetid, name).

    Returns:
        A View for the Query.
    """
    from . import _view
    view = _view.View(view_name, self._context)
    return view.create(self._sql)
def __init__(self, type, data):
    """Construct a new Ext object.

    Args:
        type: application-defined type integer
        data: application-defined data byte array (``bytes`` on Python 3,
            ``str`` on Python 2)

    Example:
        >>> foo = umsgpack.Ext(0x05, b"\x01\x02\x03")
        >>> umsgpack.packb({u"special stuff": foo, u"awesome": True})
        '\x82\xa7awesome\xc3\xadspecial stuff\xc7\x03\x05\x01\x02\x03'
        >>> bar = umsgpack.unpackb(_)
        >>> print(bar["special stuff"])
        Ext Object (Type: 0x05, Data: 01 02 03)
        >>>
    """
    if not isinstance(type, int):
        raise TypeError("ext type is not type integer")
    if sys.version_info[0] == 3 and not isinstance(data, bytes):
        raise TypeError("ext data is not type 'bytes'")
    if sys.version_info[0] == 2 and not isinstance(data, str):
        raise TypeError("ext data is not type 'str'")
    self.type = type
    self.data = data
def __init__(
        self, summary=AverageSummary, alpha=1,
        credibility=WeightedCredibility, reviewer=Reviewer, product=Product):
    """Construct a bipartite reviewer/product graph.

    Args:
        summary: summary type class (default: AverageSummary).
        alpha: weight used to compute anomalous scores (default: 1).
        credibility: credibility class used by this graph
            (default: WeightedCredibility).
        reviewer: class of reviewers.
        product: class of products.
    """
    self.alpha = alpha
    self.graph = nx.DiGraph()
    # Node registries for the two sides of the bipartite graph.
    self.reviewers = []
    self.products = []
    self._summary_cls = summary
    # The review class is derived from the chosen summary class.
    self._review_cls = summary.review_class()
    self.credibility = credibility(self)
    self._reviewer_cls = reviewer
    self._product_cls = product
def take_grad(self, num_required, name=None):
    """Attempts to extract the average gradient from the accumulator.

    The operation blocks until a sufficient number of gradients have been
    successfully applied to the accumulator.

    Once successful, the following actions are also triggered:
    - Counter of accumulated gradients is reset to 0.
    - Aggregated gradient is reset to 0 tensor.
    - Accumulator's internal time step is incremented by 1.

    Args:
        num_required: Number of gradients that needs to have been aggregated
        name: Optional name for the operation

    Returns:
        A tuple of indices, values, and shape representing the average
        gradient.

    Raises:
        InvalidArgumentError: If `num_required` < 1
    """
    return gen_data_flow_ops.sparse_accumulator_take_gradient(
        self._accumulator_ref,
        num_required,
        dtype=self._dtype,
        name=name)
def set_keras_mask(x, mask):
    """Sets the Keras mask attribute for the given tensor in-place.

    Args:
        x: Input tensor.
        mask: The mask tensor to be set. If `None`, the `_keras_mask`
            attribute will be cleared.
    """
    # Delegate to the generic tensor-attribute setter.
    set_tensor_attr(x, '_keras_mask', mask)
def _CreateCommentsFromPrefix(comment_prefix, comment_lineno, comment_column, standalone=False):
    """Create pytree nodes to represent the given comment prefix.

    Args:
        comment_prefix: (unicode) the text of the comment from the node's
            prefix.
        comment_lineno: (int) the line number for the start of the comment.
        comment_column: (int) the column for the start of the comment.
        standalone: (bool) determines if the comment is standalone or not.

    Returns:
        The simple_stmt nodes if this is a standalone comment, otherwise a
        list of new COMMENT leafs. The prefix may consist of multiple comment
        blocks, separated by blank lines. Each block gets its own leaf.
    """
    comments = []
    lines = comment_prefix.split('\n')
    index = 0
    while index < len(lines):
        comment_block = []
        # Fix: restore the '#' literal that was truncated; collect one
        # contiguous run of comment lines.
        while index < len(lines) and lines[index].lstrip().startswith('#'):
            comment_block.append(lines[index].strip())
            index += 1
        if comment_block:
            new_lineno = comment_lineno + index - 1
            comment_block[0] = comment_block[0].strip()
            comment_block[-1] = comment_block[-1].strip()
            comment_leaf = pytree.Leaf(type=token.COMMENT, value='\n'.join(comment_block), context=('', (new_lineno, comment_column)))
            # Standalone comments are wrapped in a simple_stmt node.
            comment_node = comment_leaf if not standalone else pytree.Node(pygram.python_symbols.simple_stmt, [comment_leaf])
            comments.append(comment_node)
        # Skip blank lines separating comment blocks.
        while index < len(lines) and (not lines[index].lstrip()):
            index += 1
    return comments
def set_symbols(self, symbols, functional=None, sym_potcar_map=None):
    """Initialize the POTCAR from a set of symbols.

    Currently, the POTCARs can be fetched from a location specified in
    .pmgrc.yaml. Use pmg config to add this setting.

    Args:
        symbols ([str]): A list of element symbols
        functional (str): The functional to use. If None, the setting
            PMG_DEFAULT_FUNCTIONAL in .pmgrc.yaml is used, or if this is
            not set, it will default to PBE.
        sym_potcar_map (dict): A map of symbol:raw POTCAR string. If
            sym_potcar_map is specified, POTCARs will be generated from
            the given map data rather than the config file location.
    """
    # Replace any existing entries in-place.
    del self[:]
    for symbol in symbols:
        if sym_potcar_map:
            single = PotcarSingle(sym_potcar_map[symbol])
        else:
            single = PotcarSingle.from_symbol_and_functional(symbol, functional)
        self.append(single)
def from_schema(cls, schema: class_schema.Schema, module_name: str, name: str, qualname: Optional[str]=None, is_method: bool=True) -> 'Signature':
    """Creates a signature from a schema object.

    Args:
        schema: A `pg.typing.Schema` object associated with a `pg.Object`.
        module_name: Module name for the signature.
        name: Function or method name of the signature.
        qualname: Qualname of the signature.
        is_method: If True, `self` will be added in the signature as the first
            argument.

    Returns:
        A signature object from the schema.
    """
    # 'init_arg_list' metadata lists the positional argument names; a
    # trailing '*name' entry denotes a *varargs parameter.
    arg_names = list(schema.metadata.get('init_arg_list', []))
    if arg_names and arg_names[-1].startswith('*'):
        vararg_name = arg_names[-1][1:]
        arg_names.pop(-1)
    else:
        vararg_name = None

    def get_arg_spec(arg_name):
        # Resolve the value spec of a named symbolic field.
        field = schema.get_field(arg_name)
        if not field:
            raise ValueError(f'Argument {arg_name!r} is not a symbolic field.')
        return field.value

    args = []
    if is_method:
        args.append(Argument.from_annotation('self', Argument.Kind.POSITIONAL_OR_KEYWORD))
    args.extend([Argument(n, Argument.Kind.POSITIONAL_OR_KEYWORD, get_arg_spec(n)) for n in arg_names])
    varargs = None
    if vararg_name:
        varargs = Argument(vararg_name, Argument.Kind.VAR_POSITIONAL, get_arg_spec(vararg_name))
    existing_names = set(arg_names)
    if vararg_name:
        existing_names.add(vararg_name)
    # Remaining non-frozen fields become keyword-only arguments (const
    # keys) or are folded into a **kwargs parameter (non-const keys).
    kwonlyargs = []
    varkw = None
    for key, field in schema.fields.items():
        if key not in existing_names and (not field.frozen):
            if key.is_const:
                kwonlyargs.append(Argument(str(key), Argument.Kind.KEYWORD_ONLY, field.value))
            else:
                varkw = Argument(schema.metadata.get('varkw_name', None) or 'kwargs', Argument.Kind.VAR_KEYWORD, class_schema.ValueSpec.DictType(field.value))
    return Signature(callable_type=CallableType.FUNCTION, name=name, module_name=module_name, qualname=qualname, description=schema.description, args=args, kwonlyargs=kwonlyargs, varargs=varargs, varkw=varkw, return_value=schema.metadata.get('returns', None))
def index(self, text, terms=None, **kwargs):
    """Index all term pair distances.

    Args:
        text (Text): The source text.
        terms (list): Terms to index; defaults to all terms in the text.
    """
    self.clear()
    if not terms:
        terms = text.terms.keys()
    # Total number of unordered pairs, used for the progress bar estimate.
    pair_count = comb(len(terms), 2)
    pair_iter = combinations(terms, 2)
    for term_a, term_b in bar(pair_iter, expected_size=pair_count, every=1000):
        distance = text.score_braycurtis(term_a, term_b, **kwargs)
        self.set_pair(term_a, term_b, distance)
def assert_call(self, expected, left, right):
    """Check that call() returned the desired results.

    Args:
        expected: A dict from values to source sets, where a source set is
            represented by the sorted binding names separated by spaces, for
            example "left:0 right:1" would indicate binding #0 of variable
            "left" and binding #1 of variable "right".
        left: A Variable to use as the first arg to call().
        right: A Variable to use as the second arg to call().
    """
    name_map = {left: 'left', right: 'right'}
    node, result = self._is_instance.call(self._node, None, function.Args((left, right), self.new_dict(), None, None))
    # The call must create a new CFG node reachable from the current one.
    self.assertIn(node, self._node.outgoing)
    result_map = {}
    for b in result.bindings:
        terms = set()
        for o in b.origins:
            # Every origin must be attributed to the node created by call().
            self.assertEqual(node, o.where)
            for sources in o.source_sets:
                # Render each source set as sorted "name:index" tokens.
                terms.add(' '.join(sorted(('%s:%d' % (name_map[b.variable], b.variable.bindings.index(b)) for b in sources))))
        result_map[b.data] = terms
    self.assertEqual(expected, result_map)
def upper_bound(fm, nr_subs=None, scale_factor=1):
    """Compute the inter-subject consistency upper bound for a fixmat.

    Input:
        fm : a fixmat instance
        nr_subs : the number of subjects used for the prediction. Defaults
            to the total number of subjects in the fixmat minus 1
        scale_factor : the scale factor of the FDMs. Default is 1.

    Returns:
        A list of scores; the list contains one dictionary for each measure.
        Each dictionary contains one key for each category and corresponding
        values is an array with scores for each subject.
    """
    nr_subs_total = len(np.unique(fm.SUBJECTINDEX))
    if (not nr_subs):
        nr_subs = (nr_subs_total - 1)
    assert (nr_subs < nr_subs_total)
    # One dict per measure: category -> NaN-initialized per-subject scores.
    intersub_scores = []
    for measure in range(len(measures.scores)):
        res_dict = {}
        result_vectors = [(np.empty(nr_subs_total) + np.nan) for _ in np.unique(fm.category)]
        res_dict.update(list(zip(np.unique(fm.category), result_vectors)))
        intersub_scores.append(res_dict)
    for fm_cat in fm.by_field('category'):
        cat = fm_cat.category[0]
        for (sub_counter, sub) in enumerate(np.unique(fm_cat.SUBJECTINDEX)):
            image_scores = []
            for fm_single in fm_cat.by_field('filenumber'):
                # Predict this subject's fixations on this image from a
                # random sample of nr_subs *other* subjects.
                predicting_subs = np.setdiff1d(np.unique(fm_single.SUBJECTINDEX), [sub])
                np.random.shuffle(predicting_subs)
                predicting_subs = predicting_subs[0:nr_subs]
                predicting_fm = fm_single[ismember(fm_single.SUBJECTINDEX, predicting_subs)]
                predicted_fm = fm_single[(fm_single.SUBJECTINDEX == sub)]
                try:
                    predicting_fdm = compute_fdm(predicting_fm, scale_factor=scale_factor)
                except RuntimeError:
                    # Not enough data to build an FDM for this image.
                    predicting_fdm = None
                image_scores.append(measures.prediction_scores(predicting_fdm, predicted_fm))
            # Average over images per measure, ignoring NaNs.
            for (measure, score) in enumerate(nanmean(image_scores, 0)):
                intersub_scores[measure][cat][sub_counter] = score
    return intersub_scores
def parse_line(self, line):
    """Parse a line of input.

    The input line is tokenized using the same rules as the way the bash
    shell tokenizes input. All quoting and escaping rules from the bash
    shell apply here too.

    The following cases are handled by __exec_line__():
    1. Empty line.
    2. The input line is completely made of whitespace characters.
    3. The input line is the EOF character.
    4. The first token, as tokenized by shlex.split(), is '!'.
    5. Internal commands, i.e., commands registered with internal = True.

    Arguments:
        The line to parse.

    Returns:
        A tuple (cmd, args). The first element cmd is a string holding the
        first token; the second is a (possibly empty) list of strings with
        the remaining tokens, as produced by shlex.split().

    How to overload parse_line():
    1. The signature of the method must be the same.
    2. The return value must be a tuple (cmd, args), where cmd is a string
       representing the first token, and args is a list of strings.
    """
    tokens = shlex.split(line)
    command = tokens[0]
    # tokens[1:] is already [] for a single-token line.
    arguments = tokens[1:]
    return (command, arguments)
def __get_favorites(self, favorite_type, start=0, max_items=100):
    """Helper method for `get_favorite_radio_*` methods.

    Args:
        favorite_type (str): Specify either `RADIO_STATIONS` or
            `RADIO_SHOWS`; anything else is treated as Sonos favorites.
        start (int): Which number to start the retrieval from. Used for
            paging.
        max_items (int): The total number of results to return.

    Returns:
        dict: with keys 'total', 'returned' and 'favorites' (a list of
        dicts with 'title', 'uri' and, for Sonos favorites, 'meta').
    """
    if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
        favorite_type = SONOS_FAVORITES
    response = self.contentDirectory.Browse([
        ('ObjectID',
         'FV:2' if favorite_type is SONOS_FAVORITES
         else 'R:0/{0}'.format(favorite_type)),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    result = {}
    favorites = []
    results_xml = response['Result']
    if results_xml != '':
        # Favorites are returned in DIDL-Lite format.
        metadata = XML.fromstring(really_utf8(results_xml))
        for item in metadata.findall(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container'
                if favorite_type == RADIO_SHOWS else
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'):
            favorite = {}
            # Fix: restore the Dublin Core namespace URI that was
            # truncated in the source dump.
            favorite['title'] = item.findtext(
                '{http://purl.org/dc/elements/1.1/}title')
            favorite['uri'] = item.findtext(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res')
            if favorite_type == SONOS_FAVORITES:
                favorite['meta'] = item.findtext(
                    '{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD')
            favorites.append(favorite)
    result['total'] = response['TotalMatches']
    result['returned'] = len(favorites)
    result['favorites'] = favorites
    return result
def Contains(self, value):
    """Sets the type of the WHERE clause as "contains".

    Args:
        value: The value to be used in the WHERE condition.

    Returns:
        The query builder that this WHERE builder links to.
    """
    condition = self._CreateSingleValueCondition(value, 'CONTAINS')
    self._awql = condition
    return self._query_builder
def ToDebugString(self, indentation_level=1):
    """Converts the path filter scan tree node into a debug string.

    Args:
        indentation_level: an integer containing the text indentation level.

    Returns:
        A string containing a debug representation of the path filter scan
        tree node.
    """
    indentation = ' ' * indentation_level
    lines = ['{0:s}path segment index: {1:d}\n'.format(
        indentation, self.path_segment_index)]
    for path_segment, scan_object in self._path_segments.items():
        lines.append('{0:s}path segment: {1:s}\n'.format(
            indentation, path_segment))
        # A child may be either a nested scan tree node or a final path.
        if isinstance(scan_object, PathFilterScanTreeNode):
            lines.append('{0:s}scan tree node:\n'.format(indentation))
            lines.append(scan_object.ToDebugString(indentation_level + 1))
        elif isinstance(scan_object, py2to3.STRING_TYPES):
            lines.append('{0:s}path: {1:s}\n'.format(
                indentation, scan_object))
    lines.append('{0:s}default value:\n'.format(indentation))
    default = self.default_value
    if isinstance(default, PathFilterScanTreeNode):
        lines.append('{0:s}scan tree node:\n'.format(indentation))
        lines.append(default.ToDebugString(indentation_level + 1))
    elif isinstance(default, py2to3.STRING_TYPES):
        lines.append('{0:s}pattern: {1:s}\n'.format(
            indentation, default))
    lines.append('\n')
    return ''.join(lines)
def path_is_ignored(self, path):
    """Given a path, check if the path would be ignored.

    Returns:
        boolean
    """
    try:
        # `git check-ignore` exits 0 when the path is ignored, 1 when not.
        self.run('check-ignore', '--quiet', path)
        return True
    except CommandError as e:
        if e.retcode == 1:
            return False
        raise e
def file_content(self, file_content, update_if_exists=True):
    """Updates the file content.

    Args:
        file_content: The file_content to upload.
        update_if_exists: Whether to update the file if it already exists.

    Returns:
        The result of the upload call.
    """
    if not self.can_update():
        # 910: not-updatable error for this object type.
        self._tcex.handle_error(910, [self.type])
    self._data['fileContent'] = file_content
    return self.tc_requests.upload(
        self.api_type, self.api_sub_type, self.unique_id, file_content,
        update_if_exists=update_if_exists)
def list(name, default=None, allow_none=False, fallback=None, separator=','):
    """Get a list of strings or the default.

    The individual list elements are whitespace-stripped.

    Args:
        name: The environment variable name
        default: The default value to use if no environment variable is found
        allow_none: If the return value can be `None` (i.e. optional)
        fallback: passed through to `read` (presumably an alternative
            variable name to try -- confirm against `read`'s contract)
        separator: The list item separator character or pattern
    """
    raw = read(name, default, allow_none, fallback=fallback)
    if isinstance(raw, builtins.list):
        return raw
    if isinstance(raw, builtins.str):
        return _str_to_list(raw, separator)
    if raw is None and allow_none:
        return None
    # Fall back to wrapping the scalar value as a one-element list.
    return [builtins.str(raw)]
def get_metric_by_name(self, metric_name, **kwargs):
    """Get a metric by name.

    Args:
        metric_name (string): name of metric

    Returns:
        dictionary of response
    """
    return self._get_object_by_name(
        self._METRIC_ENDPOINT_SUFFIX, metric_name, **kwargs)
def format_level_0_memory(memory):
    """Format an experiment result memory object for measurement level 0.

    Args:
        memory (list): Memory from experiment with `meas_level==0`. `avg` or
            `single` will be inferred from the shape of the result memory.

    Returns:
        np.ndarray: Measurement level 0 complex numpy array

    Raises:
        QiskitError: If the returned numpy array does not have 2 (avg) or 3
            (single) indices.
    """
    formatted = _list_to_complex_array(memory)
    # avg memory is 2-D, single-shot memory is 3-D.
    if len(formatted.shape) not in (2, 3):
        raise QiskitError('Level zero memory is not of correct shape.')
    return formatted
def run_one_step(self, eig_init_vec_val, eig_num_iter_val, smooth_val, penalty_val, learning_rate_val):
    """Run one step of gradient descent for optimization.

    Args:
        eig_init_vec_val: Start value for eigen value computations
        eig_num_iter_val: Number of iterations to run for eigen computations
        smooth_val: Value of smoothness parameter
        penalty_val: Value of penalty for the current step
        learning_rate_val: Value of learning rate

    Returns:
        found_cert: True if a negative certificate is found, False otherwise
    """
    step_feed_dict = {
        self.eig_init_vec_placeholder: eig_init_vec_val,
        self.eig_num_iter_placeholder: eig_num_iter_val,
        self.smooth_placeholder: smooth_val,
        self.penalty_placeholder: penalty_val,
        self.learning_rate: learning_rate_val}
    # Feed the current min-eigenvector estimate, depending on which eigen
    # solver is configured.
    if self.params['eig_type'] == 'SCIPY':
        (current_eig_vector, self.current_eig_val_estimate) = self.get_scipy_eig_vec()
        step_feed_dict.update({self.eig_vec_estimate: current_eig_vector})
    elif self.params['eig_type'] == 'LZS':
        step_feed_dict.update({self.dual_object.m_min_vec_ph: self.dual_object.m_min_vec_estimate})
    self.sess.run(self.train_step, feed_dict=step_feed_dict)
    # Project onto the feasible set and refresh the eigen estimates.
    [_, self.dual_object.m_min_vec_estimate, self.current_eig_val_estimate] = self.sess.run(
        [self.proj_step, self.eig_vec_estimate, self.eig_val_estimate],
        feed_dict=step_feed_dict)
    if self.current_step % self.params['print_stats_steps'] == 0:
        [self.current_total_objective, self.current_unconstrained_objective,
         self.dual_object.m_min_vec_estimate, self.current_eig_val_estimate,
         self.current_nu] = self.sess.run(
             [self.total_objective, self.dual_object.unconstrained_objective,
              self.eig_vec_estimate, self.eig_val_estimate, self.dual_object.nu],
             feed_dict=step_feed_dict)
        stats = {
            'total_objective': float(self.current_total_objective),
            'unconstrained_objective': float(self.current_unconstrained_objective),
            'min_eig_val_estimate': float(self.current_eig_val_estimate)}
        tf.logging.info('Current inner step: %d, optimization stats: %s', self.current_step, stats)
        if self.params['stats_folder'] is not None:
            stats = json.dumps(stats)
            filename = os.path.join(self.params['stats_folder'], str(self.current_step) + '.json')
            # Fix: open the stats file in write mode; the default mode is
            # read-only, which makes file_f.write fail.
            with tf.gfile.Open(filename, 'w') as file_f:
                file_f.write(stats)
    # Periodically try to project the dual solution and compute a
    # certificate, but only when the unconstrained objective is negative.
    if (self.current_step % self.params['projection_steps'] == 0
            and self.current_unconstrained_objective < 0):
        nu = self.sess.run(self.dual_object.nu)
        dual_feed_dict = {self.dual_object.h_min_vec_ph: self.dual_object.h_min_vec_estimate}
        (_, min_eig_val_h_lz) = self.dual_object.get_lanczos_eig(compute_m=False, feed_dict=dual_feed_dict)
        projected_dual_feed_dict = {
            self.dual_object.projected_dual.nu: nu,
            self.dual_object.projected_dual.min_eig_val_h: min_eig_val_h_lz}
        if self.dual_object.projected_dual.compute_certificate(self.current_step, projected_dual_feed_dict):
            return True
    return False
def print_error_messages_raylet(task_error_queue, threads_stopped):
    """Prints messages received in the given output queue.

    Periodically checks whether any un-raised errors occurred in the
    background and logs them once a grace period has elapsed.

    Args:
        task_error_queue (queue.Queue): A queue used to receive errors from
            the thread that listens to Redis.
        threads_stopped (threading.Event): A threading event used to signal
            to the thread that it should exit.
    """
    while not threads_stopped.is_set():
        try:
            error, t = task_error_queue.get(block=False)
        except queue.Empty:
            threads_stopped.wait(timeout=0.01)
            continue
        # Give the worker a grace period to raise the error itself before
        # reporting it here.
        while t + UNCAUGHT_ERROR_GRACE_PERIOD > time.time():
            threads_stopped.wait(timeout=1)
            if threads_stopped.is_set():
                break
        if t < last_task_error_raise_time + UNCAUGHT_ERROR_GRACE_PERIOD:
            # The worker raised the error itself; no need to duplicate it.
            logger.debug('Suppressing error from worker: {}'.format(error))
        else:
            logger.error('Possible unhandled error from worker: {}'.format(error))
def to_hdf5(ramon, hdf5=None):
    """Exports a RAMON object to an HDF5 file object.

    Arguments:
        ramon (RAMON): A subclass of RAMONBase
        hdf5: A named file-like object to write into; when omitted a
            NamedTemporaryFile is created.

    Returns:
        The file object holding the serialized HDF5 data, rewound to 0.

    Raises:
        InvalidRAMONError: if you pass a non-RAMON object
    """
    if issubclass(type(ramon), RAMONBase) is False:
        raise InvalidRAMONError("Invalid RAMON supplied to ramon.to_hdf5.")
    import h5py
    import numpy
    if hdf5 is None:
        tmpfile = tempfile.NamedTemporaryFile(delete=False)
    else:
        tmpfile = hdf5
    with h5py.File(tmpfile.name, "a") as hdf5:
        grp = hdf5.create_group(str(ramon.id))
        grp.create_dataset("ANNOTATION_TYPE", (1,),
                           numpy.uint32,
                           data=AnnotationType.get_int(type(ramon)))
        if hasattr(ramon, 'cutout'):
            if ramon.cutout is not None:
                # NOTE(review): RESOLUTION/XYZOFFSET are written together
                # with the cutout, since they describe its placement.
                grp.create_dataset('CUTOUT', ramon.cutout.shape,
                                   ramon.cutout.dtype, data=ramon.cutout)
                grp.create_dataset('RESOLUTION', (1,),
                                   numpy.uint32, data=ramon.resolution)
                grp.create_dataset('XYZOFFSET', (3,),
                                   numpy.uint32, data=ramon.xyz_offset)
        metadata = grp.create_group('METADATA')
        metadata.create_dataset('AUTHOR', (1,),
                                dtype=h5py.special_dtype(vlen=str),
                                data=ramon.author)
        # Serialize the key/value pairs as CSV text.
        fstring = StringIO()
        csvw = csv.writer(fstring, delimiter=',')
        csvw.writerows([r for r in six.iteritems(ramon.kvpairs)])
        metadata.create_dataset('KVPAIRS', (1,),
                                dtype=h5py.special_dtype(vlen=str),
                                data=fstring.getvalue())
        # Fix: numpy.float was a deprecated alias of builtin float and was
        # removed in NumPy 1.24; float64 is the equivalent dtype.
        metadata.create_dataset('CONFIDENCE', (1,), numpy.float64,
                                data=ramon.confidence)
        metadata.create_dataset('STATUS', (1,), numpy.uint32,
                                data=ramon.status)
        if hasattr(ramon, 'segments'):
            metadata.create_dataset('SEGMENTS',
                                    data=numpy.asarray(ramon.segments,
                                                       dtype=numpy.uint32))
        if hasattr(ramon, 'synapse_type'):
            metadata.create_dataset('SYNAPSE_TYPE', (1,), numpy.uint32,
                                    data=ramon.synapse_type)
        if hasattr(ramon, 'weight'):
            metadata.create_dataset('WEIGHT', (1,),
                                    numpy.float64, data=ramon.weight)
        if hasattr(ramon, 'neuron'):
            metadata.create_dataset('NEURON', (1,),
                                    numpy.uint32, data=ramon.neuron)
        if hasattr(ramon, 'segmentclass'):
            metadata.create_dataset('SEGMENTCLASS', (1,), numpy.uint32,
                                    data=ramon.segmentclass)
        if hasattr(ramon, 'synapses'):
            metadata.create_dataset('SYNAPSES', (len(ramon.synapses),),
                                    numpy.uint32, data=ramon.synapses)
        if hasattr(ramon, 'organelles'):
            metadata.create_dataset('ORGANELLES',
                                    (len(ramon.organelles),),
                                    numpy.uint32,
                                    data=ramon.organelles)
        if hasattr(ramon, 'organelle_class'):
            metadata.create_dataset('ORGANELLECLASS', (1,),
                                    numpy.uint32,
                                    data=ramon.organelle_class)
        hdf5.flush()
        tmpfile.seek(0)
        # Note: the original trailing `return False` was unreachable and
        # has been removed.
        return tmpfile
def sync_entities(*model_objs):
    """Syncs entities.

    Args:
        model_objs (List[Model]): The model objects to sync. If empty, all
            entities will be synced.

    Returns:
        False when syncing is currently deferred (the objects are only
        buffered for a later flush); otherwise None after syncing.
    """
    # When deferral is active, record what needs syncing instead of doing it now.
    if sync_entities.defer:
        if not model_objs:
            # A None key marks "sync everything" for the deferred flush.
            sync_entities.buffer[None] = None
        else:
            # Key by (model class, pk) so repeated saves of the same object
            # collapse into a single deferred sync.
            for model_obj in model_objs:
                sync_entities.buffer[(model_obj.__class__, model_obj.pk)] = model_obj
        return False
    EntitySyncer(*model_objs).sync()
Args:
model_objs (List[Model]): The model objects to sync. If empty, all entities will be synced | juraj-google-style |
def __or__(self, other: 'TensorFluent') -> 'TensorFluent':
    """Returns a TensorFluent for the or logical operator.

    Args:
        self: The first operand.
        other: The second operand.

    Returns:
        A TensorFluent wrapping the tf.logical_or output (dtype tf.bool).
    """
    return self._binary_op(self, other, tf.logical_or, tf.bool)
Args:
self: The first operand.
other: The second operand.
Returns:
A TensorFluent wrapping the operator's output. | juraj-google-style |
def wiki_2x2_base():
    """Set of architectural experiments - language model on wikipedia on a 2x2.

    1 epoch = ~180k steps at batch size 32 - we may never finish an epoch!

    Returns:
        an hparams object.
    """
    hparams = mtf_transformer.mtf_transformer_base_lm()
    hparams.shared_embedding_and_softmax_weights = False
    # No dropout: at this scale the model is unlikely to see a full epoch.
    hparams.attention_dropout = 0.0
    hparams.relu_dropout = 0.0
    hparams.layer_prepostprocess_dropout = 0.0
    hparams.max_length = 1024
    hparams.batch_size = 32
    hparams.learning_rate_schedule = 'rsqrt_decay'
    # Mesh-TensorFlow layout: a single 8-way mesh dimension shared by the
    # batch and experts dimensions.
    hparams.mesh_shape = 'all:8'
    hparams.layout = 'batch:all;experts:all'
    # Mixture-of-experts settings.
    moe.set_default_moe_hparams(hparams)
    hparams.moe_num_experts = 16
    hparams.moe_hidden_size = 8192
    # Alternating attention / dense-relu-dense layers, 12 total.
    hparams.decoder_layers = (['att', 'drd'] * 6)
    hparams.d_model = 1024
    hparams.d_ff = 2048
    hparams.d_kv = 128
    hparams.num_heads = 4
    return hparams
1 epoch = ~180k steps at batch size 32 - we may never finish an epoch!
Returns:
a hparams | codesearchnet |
class DetaHungarianMatcher(nn.Module):
    """This class computes an assignment between the targets and the predictions of the network.

    For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more
    predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are
    un-matched (and thus treated as non-objects).

    Args:
        class_cost:
            The relative weight of the classification error in the matching cost.
        bbox_cost:
            The relative weight of the L1 error of the bounding box coordinates in the matching cost.
        giou_cost:
            The relative weight of the giou loss of the bounding box in the matching cost.
    """

    def __init__(self, class_cost: float=1, bbox_cost: float=1, giou_cost: float=1):
        super().__init__()
        # scipy provides linear_sum_assignment used in forward().
        requires_backends(self, ['scipy'])
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # With all-zero weights every assignment would be equally good.
        if class_cost == 0 and bbox_cost == 0 and (giou_cost == 0):
            raise ValueError("All costs of the Matcher can't be 0")

    @torch.no_grad()
    def forward(self, outputs, targets):
        """Compute the optimal prediction/target assignment for each batch element.

        Args:
            outputs (dict): must contain "logits" and "pred_boxes" tensors.
            targets (list[dict]): one dict per batch element with
                "class_labels" and "boxes" entries.

        Returns:
            list of (index_i, index_j) int64 tensor pairs, where index_i
            selects predictions and index_j the matched targets.
        """
        batch_size, num_queries = outputs['logits'].shape[:2]
        # Flatten batch and query dims to build all cost matrices in one go.
        out_prob = outputs['logits'].flatten(0, 1).sigmoid()
        out_bbox = outputs['pred_boxes'].flatten(0, 1)
        target_ids = torch.cat([v['class_labels'] for v in targets])
        target_bbox = torch.cat([v['boxes'] for v in targets])
        # Focal-loss-style classification cost.
        alpha = 0.25
        gamma = 2.0
        neg_cost_class = (1 - alpha) * out_prob ** gamma * -(1 - out_prob + 1e-08).log()
        pos_cost_class = alpha * (1 - out_prob) ** gamma * -(out_prob + 1e-08).log()
        class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids]
        # L1 cost between predicted and target boxes.
        bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)
        # Negative generalized IoU cost; the helper expects corner format.
        giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))
        cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
        cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()
        # Solve one Hungarian assignment per image, restricted to its own targets.
        sizes = [len(v['boxes']) for v in targets]
        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more
predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are
un-matched (and thus treated as non-objects).
Args:
class_cost:
The relative weight of the classification error in the matching cost.
bbox_cost:
The relative weight of the L1 error of the bounding box coordinates in the matching cost.
giou_cost:
The relative weight of the giou loss of the bounding box in the matching cost. | github-repos |
def __init__(self, variable_name, inferred_type):
    """Construct a new Variable object for the given variable name.

    Args:
        variable_name: string, should start with '$' and then obey variable
            naming rules (see validate_safe_string()).
        inferred_type: GraphQL type object, specifying the inferred type of
            the variable.

    Returns:
        new Variable object
    """
    variable_name = ensure_unicode_string(variable_name)
    super(Variable, self).__init__(variable_name, inferred_type)
    # Keep named copies for readable attribute access in addition to the
    # superclass's positional storage.
    self.variable_name = variable_name
    self.inferred_type = inferred_type
    self.validate()
Args:
variable_name: string, should start with '$' and then obey variable naming rules
(see validate_safe_string())
inferred_type: GraphQL type object, specifying the inferred type of the variable
Returns:
new Variable object | juraj-google-style |
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3):
    """Read the data encoding the ProfileInformation structure and decode it
    into its constituent parts.

    Args:
        input_buffer (stream): A data stream containing encoded object
            data, supporting a read method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 1.3.

    Raises:
        InvalidKmipEncoding: Raised if the profile name is missing from
            the encoding.
        VersionNotSupported: Raised when a KMIP version is provided that
            does not support the ProfileInformation structure.
    """
    # The ProfileInformation structure only exists from KMIP 1.3 onwards.
    if (kmip_version < enums.KMIPVersion.KMIP_1_3):
        raise exceptions.VersionNotSupported('KMIP {} does not support the ProfileInformation object.'.format(kmip_version.value))
    super(ProfileInformation, self).read(input_buffer, kmip_version=kmip_version)
    # Restrict decoding to this structure's payload only.
    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
    # The profile name is a required field.
    if self.is_tag_next(enums.Tags.PROFILE_NAME, local_buffer):
        profile_name = primitives.Enumeration(enums.ProfileName, tag=enums.Tags.PROFILE_NAME)
        profile_name.read(local_buffer, kmip_version=kmip_version)
        self._profile_name = profile_name
    else:
        raise exceptions.InvalidKmipEncoding('The ProfileInformation encoding is missing the profile name.')
    # Server URI and server port are optional fields.
    if self.is_tag_next(enums.Tags.SERVER_URI, local_buffer):
        server_uri = primitives.TextString(tag=enums.Tags.SERVER_URI)
        server_uri.read(local_buffer, kmip_version=kmip_version)
        self._server_uri = server_uri
    if self.is_tag_next(enums.Tags.SERVER_PORT, local_buffer):
        server_port = primitives.Integer(tag=enums.Tags.SERVER_PORT)
        server_port.read(local_buffer, kmip_version=kmip_version)
        self._server_port = server_port
    self.is_oversized(local_buffer)
into its constituent parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.3.
Raises:
InvalidKmipEncoding: Raised if the profile name is missing from
the encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the ProfileInformation structure. | codesearchnet |
def ReadClientMetadata(self, client_id):
    """Reads the ClientMetadata record for a single client.

    Args:
        client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".

    Returns:
        An rdfvalues.object.ClientMetadata object.

    Raises:
        UnknownClientError: if no client with corresponding id was found.
    """
    metadata_by_id = self.MultiReadClientMetadata([client_id])
    if client_id not in metadata_by_id:
        raise UnknownClientError(client_id)
    return metadata_by_id[client_id]
Args:
client_id: A GRR client id string, e.g. "C.ea3b2b71840d6fa7".
Returns:
An rdfvalues.object.ClientMetadata object.
Raises:
UnknownClientError: if no client with corresponding id was found. | codesearchnet |
def _format_device(var):
if var.dtype.name.endswith('_ref'):
resource_var_annotation = '(legacy)'
else:
resource_var_annotation = '(resource)'
if var.device:
return '{} {}'.format(var.device, resource_var_annotation)
else:
return resource_var_annotation | Returns the device with an annotation specifying `ResourceVariable`.
"legacy" means a normal tf.Variable while "resource" means a ResourceVariable.
For example:
`(legacy)`
`(resource)`
`/job:learner/task:0/device:CPU:* (legacy)`
`/job:learner/task:0/device:CPU:* (resource)`
Args:
var: The Tensorflow Variable to print. | codesearchnet |
def rename_dimension(x, old_name, new_name):
    """Reshape a Tensor, renaming one dimension.

    Args:
        x: a Tensor
        old_name: a string, name of the dimension to rename
        new_name: a string, the new name

    Returns:
        a Tensor
    """
    # Renaming does not move data; reshape merely relabels the dimension.
    return reshape(x, x.shape.rename_dimension(old_name, new_name))
Args:
x: a Tensor
old_name: a string
new_name: a string
Returns:
a Tensor | codesearchnet |
def absolute_name(self, depth=0):
    """Return the absolute name of the node.

    Concatenates names from root to self within ``depth`` levels.

    Args:
        depth (int): maximum depth to go to; values < 1 mean the node's
            own full depth.

    Returns:
        str: absolute name of the node (until given depth is reached).
    """
    if depth < 1:
        depth = self.depth
    # Climb up until we are within the requested depth.
    node = self
    remaining = self.depth
    while remaining > depth and node.package is not None:
        node = node.package
        remaining -= 1
    # Collect names bottom-up, then reverse for root-to-leaf order.
    parts = []
    while node is not None:
        parts.append(node.name)
        node = node.package
    return '.'.join(reversed(parts))
Concatenate names from root to self within depth.
Args:
depth (int): maximum depth to go to.
Returns:
str: absolute name of the node (until given depth is reached). | juraj-google-style |
def economic_qs(K, epsilon=sqrt(finfo(float).eps)):
    r"""Economic eigen decomposition for symmetric matrices.

    A symmetric matrix ``K`` can be decomposed in
    :math:`\mathrm Q_0 \mathrm S_0 \mathrm Q_0^\intercal + \mathrm Q_1\
    \mathrm S_1 \mathrm Q_1^ \intercal`, where :math:`\mathrm S_1` is a zero
    matrix with size determined by ``K``'s rank deficiency.

    Args:
        K (array_like): Symmetric matrix.
        epsilon (float): Eigen value threshold. Default is
            ``sqrt(finfo(float).eps)``.

    Returns:
        tuple: ``((Q0, Q1), S0)``.
    """
    (S, Q) = eigh(K)
    # numpy's eigh can numerically fail and return an (almost) all-zero
    # spectrum for a clearly nonzero matrix; fall back to scipy's eigh in
    # that case. (Fixed: the check previously inspected Q[0], the first row
    # of the orthonormal eigenvector matrix, which is meaningless here.)
    nok = (abs(max(S.min(), S.max(), key=abs)) < epsilon)
    nok = (nok and (abs(max(K.min(), K.max(), key=abs)) >= epsilon))
    if nok:
        from scipy.linalg import eigh as sp_eigh
        (S, Q) = sp_eigh(K)
    # Split the spectrum: eigenpairs with eigenvalue >= epsilon go to
    # (Q0, S0); the (near-)null space goes to Q1.
    ok = (S >= epsilon)
    nok = logical_not(ok)
    S0 = S[ok]
    Q0 = Q[:, ok]  # fixed: ``Q[(:, ok)]`` was a syntax error
    Q1 = Q[:, nok]
    return ((Q0, Q1), S0)
return ((Q0, Q1), S0) | r"""Economic eigen decomposition for symmetric matrices.
A symmetric matrix ``K`` can be decomposed in
:math:`\mathrm Q_0 \mathrm S_0 \mathrm Q_0^\intercal + \mathrm Q_1\
\mathrm S_1 \mathrm Q_1^ \intercal`, where :math:`\mathrm S_1` is a zero
matrix with size determined by ``K``'s rank deficiency.
Args:
K (array_like): Symmetric matrix.
epsilon (float): Eigen value threshold. Default is
``sqrt(finfo(float).eps)``.
Returns:
tuple: ``((Q0, Q1), S0)``. | codesearchnet |
def generate_sample_set(self, tags=None):
    """Generate a sample_set that matches the tags, or all samples if no
    tags are specified.

    Args:
        tags: Match samples against this tag list (or all if not specified).
            A single string is treated as a one-element list.

    Returns:
        The sample_set of those samples matching the tags.
    """
    tag_list = [tags] if isinstance(tags, str) else tags
    matching_md5s = self.data_store.tag_match(tag_list)
    return self.store_sample_set(matching_md5s)
Args:
tags: Match samples against this tag list (or all if not specified)
Returns:
The sample_set of those samples matching the tags | codesearchnet |
def get_rbounds(step):
    """Radial or vertical position of boundaries.

    Args:
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.

    Returns:
        tuple of floats: radial or vertical positions of boundaries of the
        domain.
    """
    # Prefer the geometry attached to the step; fall back to the par file.
    geom = step.geom
    rcmb = geom.rcmb if geom is not None else step.sdat.par['geometry']['r_cmb']
    # Cartesian geometry has no core-mantle boundary radius.
    if step.sdat.par['geometry']['shape'].lower() == 'cartesian':
        rcmb = 0
    bottom = max(rcmb, 0)
    return bottom, bottom + 1
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of floats: radial or vertical positions of boundaries of the
domain. | juraj-google-style |
def ws004c(self, value=None):
    """Corresponds to IDD Field `ws004c` (unit: m/s).

    Args:
        value (float): value for IDD Field `ws004c`. If `value` is None it
            will not be checked against the specification and is assumed to
            be a missing value.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is None:
        self._ws004c = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float for field `ws004c`'.format(value))
    self._ws004c = converted
self._ws004c = value | Corresponds to IDD Field `ws004c`
Args:
value (float): value for IDD Field `ws004c`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | codesearchnet |
def read_cs_g0_contribution(self):
    """Parse the G=0 contribution of NMR chemical shielding.

    Stores the parsed matrix (list of lists of floats) on
    ``self.cs_g0_contribution`` via ``read_table_pattern``.
    """
    # Header preceding the "G=0 CONTRIBUTION TO CHEMICAL SHIFT" table.
    header_pattern = '^\\s+G\\=0 CONTRIBUTION TO CHEMICAL SHIFT \\(field along BDIR\\)\\s+$\\n^\\s+-{50,}$\\n^\\s+BDIR\\s+X\\s+Y\\s+Z\\s*$\\n^\\s+-{50,}\\s*$\\n'
    # Each data row: a row index followed by three signed floats (X, Y, Z).
    row_pattern = ('(?:\\d+)\\s+' + '\\s+'.join((['([-]?\\d+\\.\\d+)'] * 3)))
    footer_pattern = '\\s+-{50,}\\s*$'
    self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float, last_one_only=True, attribute_name='cs_g0_contribution')
Returns:
G0 contribution matrix as list of list. | codesearchnet |
def get_atoms(structure, **kwargs):
    """Returns ASE Atoms object from pymatgen structure.

    Args:
        structure: pymatgen.core.structure.Structure
        **kwargs: other keyword args to pass into the ASE Atoms constructor

    Returns:
        ASE Atoms object

    Raises:
        ValueError: if the structure is not fully ordered.
    """
    # ASE has no notion of partial occupancy, so only ordered structures
    # can be converted.
    if not structure.is_ordered:
        raise ValueError("ASE Atoms only supports ordered structures")
    symbols = [str(site.specie.symbol) for site in structure]
    positions = [site.coords for site in structure]
    cell = structure.lattice.matrix
    # pbc=True: the pymatgen Structure is always periodic.
    return Atoms(symbols=symbols, positions=positions, pbc=True,
                 cell=cell, **kwargs)
Args:
structure: pymatgen.core.structure.Structure
**kwargs: other keyword args to pass into the ASE Atoms constructor
Returns:
ASE Atoms object | juraj-google-style |
def get_redirect(paths):
    """Get a redirect from a path or list of paths.

    Arguments:
        paths -- either a single path string, or a list of paths to check.

    Returns: a flask.redirect() result for the first path that resolves,
    or None if nothing matched.
    """
    if isinstance(paths, str):
        paths = [paths]
    for path in paths:
        # Explicit aliases take priority over regex-based path rules.
        url, permanent = get_alias(path)
        if url:
            return redirect(url, 301 if permanent else 302)
        url, permanent = current_app.get_path_regex(path)
        if url:
            return redirect(url, 301 if permanent else 302)
    return None
Arguments:
paths -- either a single path string, or a list of paths to check
Returns: a flask.redirect() result | juraj-google-style |
def _recover_shape_information(self, inputs, outputs):
    """Recover output tensor shape value to enable shape inference.

    The batch size of `inputs` isn't preserved by the convolution op.
    Calculate what the proper output shape will be for `outputs`.

    Args:
        inputs: A Tensor of shape `data_format` and of type `tf.float16`,
            `tf.bfloat16` or `tf.float32`.
        outputs: A Tensor of shape `data_format` and of type `tf.float16`,
            `tf.bfloat16` or `tf.float32`. The output of `inputs` from a
            transpose convolution op.

    Returns:
        outputs: The passed-in `outputs` with all shape information filled in.
    """
    batch_size_value = inputs.get_shape()[0]
    # Channel placement depends on the data format: 'NC...' is
    # channels-first, 'N...C' is channels-last.
    if self._data_format.startswith('NC'):
        output_shape_value = ((batch_size_value, self.output_channels) + self.output_shape)
    elif (self._data_format.startswith('N') and self._data_format.endswith('C')):
        output_shape_value = (((batch_size_value,) + self.output_shape) + (self.output_channels,))
    outputs.set_shape(output_shape_value)
    return outputs
The batch size of `inputs` isn't preserved by the convolution op. Calculate
what the proper output shape will be for `outputs`.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`.
outputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`. The output of `inputs` from a transpose
convolution op.
Returns:
outputs: The passed-in `outputs` with all shape information filled in. | codesearchnet |
def to_string(cls, error_code):
    """Returns the string message for the given ``error_code``.

    Args:
        cls (JLinkWriteErrors): the ``JLinkWriteErrors`` class
        error_code (int): error code to convert

    Returns:
        An error string corresponding to the error code.

    Raises:
        ValueError: if the error code is invalid.
    """
    if error_code == cls.ZONE_NOT_FOUND_ERROR:
        return 'Zone not found'
    # Unknown codes are delegated to the parent error class.
    return super(JLinkWriteErrors, cls).to_string(error_code)
Args:
cls (JLinkWriteErrors): the ``JLinkWriteErrors`` class
error_code (int): error code to convert
Returns:
An error string corresponding to the error code.
Raises:
ValueError: if the error code is invalid. | juraj-google-style |
def GetPreviousNonBlankLine(clean_lines, linenum):
    """Return the most recent non-blank line and its line number.

    Args:
        clean_lines: A CleansedLines instance containing the file contents.
        linenum: The number of the line to check.

    Returns:
        A tuple with two elements. The first element is the contents of the
        last non-blank line before the current line, or the empty string if
        this is the first non-blank line. The second is the line number of
        that line, or -1 if this is the first non-blank line.
    """
    # Scan backwards from the line just above linenum.
    for candidate in range(linenum - 1, -1, -1):
        line = clean_lines.elided[candidate]
        if not IsBlankLine(line):
            return (line, candidate)
    return ('', -1)
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line. | codesearchnet |
def process(self, body, url=None, sig=None):
    """Process request body given skill logic.

    To validate a request, both ``url`` and ``sig`` are required.
    Attributes received through body will be automatically added to the
    response.

    Args:
        body: str. HTTP request body.
        url: str. SignatureCertChainUrl header value sent by request.
            PEM-encoded X.509 certificate chain that Alexa used to sign the
            message.
        sig: str. Signature header value sent by request. Base64-encoded
            signature of the request body.

    Return:
        str or bool: HTTP response body, or False if the request is invalid.
    """
    # Fresh request/response pair for every call; parsing populates request.
    self.request = RequestBody()
    self.response = ResponseBody()
    self.request.parse(body)
    app_id = self.request.session.application.application_id
    stamp = self.request.request.timestamp
    # Reject requests that fail application-id/signature/timestamp checks.
    if (not self.valid.request(app_id, body, stamp, url, sig)):
        return False
    self.pass_session_attributes()
    # Route the request to the handler registered for its type/intent.
    self.dispatch()
    if (self.request.request.type == 'SessionEndedRequest'):
        self.terminate()
    return self.response.to_json()
To validate a request, both, url and sig are required.
Attributes received through body will be automatically added to the
response.
Args:
body: str. HTTP request body.
url: str. SignatureCertChainUrl header value sent by request.
PEM-encoded X.509 certificate chain that Alexa used to sign the
message.
sig: str. Signature header value sent by request. Base64-encoded
signature of the request body.
Return:
str or bool: HTTP response body or False if the request is invalid. | codesearchnet |
def segment_to_vector(self, seg):
    """Given a Unicode IPA segment, return a list of feature specifications
    in canonical order.

    Args:
        seg (unicode): IPA consonant or vowel.

    Returns:
        list: feature specifications ('+'/'-'/'0') in the order from
        `FeatureTable.names`.
    """
    # fts() yields (value, feature) pairs; index them by feature name.
    spec_by_name = dict((name, val) for (val, name) in self.fts(seg))
    return [spec_by_name[feature] for feature in self.names]
in canonical order.
Args:
seg (unicode): IPA consonant or vowel
Returns:
list: feature specifications ('+'/'-'/'0') in the order from
`FeatureTable.names` | juraj-google-style |
def from_maybe_serialized(source: Union[Any, str], value_type: Optional[Type[Any]]=None) -> Any:
    """Load value from maybe serialized form (e.g. JSON file or JSON string).

    Args:
        source: Source of value. It can be the value (non-string type)
            itself, or a filepath ending in '.json', or a JSON string from
            where the value will be loaded.
        value_type: An optional type to constrain the value.

    Returns:
        Value from source.

    Raises:
        TypeError: if the loaded value is not an instance of ``value_type``.
    """
    if isinstance(source, str):
        # A '.json' suffix means a file path; anything else is inline JSON.
        if source.endswith('.json'):
            value = symbolic.load(source)
        else:
            value = symbolic.from_json_str(source)
    else:
        value = source
    if value_type is not None and (not isinstance(value, value_type)):
        raise TypeError(f'Loaded value {value!r} is not an instance of {value_type!r}.')
    return value
Args:
source: Source of value. It can be value (non-string type) itself, or a
filepath, or a JSON string from where the value will be loaded.
value_type: An optional type to constrain the value.
Returns:
Value from source. | github-repos |
def _CalculateHashDataStream(self, file_entry, data_stream_name):
    """Calculates a SHA-256 digest of the data of the file entry.

    Args:
        file_entry (dfvfs.FileEntry): file entry.
        data_stream_name (str): name of the data stream.

    Returns:
        str: hexadecimal digest hash, or None if the stream could not be
        opened or read.
    """
    hash_context = hashlib.sha256()
    try:
        file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
    except IOError as exception:
        logging.warning((
            'Unable to open path specification:\n{0:s}'
            'with error: {1!s}').format(
                file_entry.path_spec.comparable, exception))
        return None
    if not file_object:
        return None
    try:
        # Hash in fixed-size chunks so arbitrarily large files do not have
        # to be held in memory at once.
        data = file_object.read(self._READ_BUFFER_SIZE)
        while data:
            hash_context.update(data)
            data = file_object.read(self._READ_BUFFER_SIZE)
    except IOError as exception:
        logging.warning((
            'Unable to read from path specification:\n{0:s}'
            'with error: {1!s}').format(
                file_entry.path_spec.comparable, exception))
        return None
    finally:
        file_object.close()
    return hash_context.hexdigest()
Args:
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): name of the data stream.
Returns:
bytes: digest hash or None. | juraj-google-style |
def set_dataset_date(self, dataset_date, dataset_end_date=None, date_format=None):
    """Set dataset date from string using specified format.

    If no format is supplied, the function will guess. For unambiguous
    formats, this should be fine.

    Args:
        dataset_date (str): Dataset date string
        dataset_end_date (Optional[str]): Dataset end date string
        date_format (Optional[str]): Date format. If None is given, will
            attempt to guess. Defaults to None.

    Returns:
        None
    """
    parsed_date = self._parse_date(dataset_date, date_format)
    if (dataset_end_date is None):
        self.set_dataset_date_from_datetime(parsed_date)
    else:
        # Both endpoints are parsed with the same (possibly guessed) format.
        parsed_end_date = self._parse_date(dataset_end_date, date_format)
        self.set_dataset_date_from_datetime(parsed_date, parsed_end_date)
For unambiguous formats, this should be fine.
Args:
dataset_date (str): Dataset date string
dataset_end_date (Optional[str]): Dataset end date string
date_format (Optional[str]): Date format. If None is given, will attempt to guess. Defaults to None.
Returns:
None | codesearchnet |
def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: Optional[bool]=False) -> Tuple[tf.Tensor]:
    """Run one pre-norm transformer encoder layer.

    Args:
        hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
        attention_mask (`tf.Tensor`): attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
        causal_attention_mask (`tf.Tensor`): causal mask applied in addition
            to the padding mask.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under
            returned tensors for more detail.

    Returns:
        Tuple containing the layer output, plus the attention weights when
        `output_attentions` is True.
    """
    # Pre-norm self-attention sub-block with residual connection.
    residual = hidden_states
    hidden_states = self.layer_norm1(hidden_states)
    hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)
    hidden_states = residual + hidden_states
    # Pre-norm feed-forward (MLP) sub-block with residual connection.
    residual = hidden_states
    hidden_states = self.layer_norm2(hidden_states)
    hidden_states = self.mlp(hidden_states)
    hidden_states = residual + hidden_states
    outputs = (hidden_states,)
    if output_attentions:
        outputs += (attn_weights,)
    return outputs
hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail. | github-repos |
def past_stop_threshold(stop_threshold, eval_metric):
    """Return a boolean representing whether a model should be stopped.

    Args:
        stop_threshold: float, the threshold above which a model should stop
            training. None disables threshold checking.
        eval_metric: float, the current value of the relevant metric to check.

    Returns:
        True if training should stop, False otherwise.

    Raises:
        ValueError: if either stop_threshold or eval_metric is not a number.
    """
    if stop_threshold is None:
        return False
    if not isinstance(stop_threshold, numbers.Number):
        raise ValueError("Threshold for checking stop conditions must be a number.")
    if not isinstance(eval_metric, numbers.Number):
        raise ValueError("Eval metric being checked against stop conditions "
                        "must be a number.")
    # Note: a NaN metric never compares >= threshold, so training continues.
    should_stop = eval_metric >= stop_threshold
    if should_stop:
        tf.logging.info(
            "Stop threshold of {} was passed with metric value {}.".format(
                stop_threshold, eval_metric))
    return should_stop
Args:
stop_threshold: float, the threshold above which a model should stop
training.
eval_metric: float, the current value of the relevant metric to check.
Returns:
True if training should stop, False otherwise.
Raises:
ValueError: if either stop_threshold or eval_metric is not a number | juraj-google-style |
def __init__(self, value_set_codes_table: Optional[bigquery.TableReference]=None, value_set_codes_definitions: Optional[fhir_package.FhirPackageManager]=None) -> None:
    """Creates a BigQuerySqlInterpreter.

    Args:
        value_set_codes_table: The name of the database table containing
            value set code definitions. Used when building SQL for memberOf
            expressions. If given, value set definitions needed for memberOf
            expressions will be retrieved from this table if they can not be
            found in `value_set_codes_definitions`. If neither this nor
            `value_set_codes_definitions` is given, no memberOf SQL will be
            generated.
        value_set_codes_definitions: A package manager containing value set
            definitions which can be used to build SQL for memberOf
            expressions. These value set definitions can be consulted in
            favor of using an external `value_set_codes_table`. If neither
            this nor `value_set_codes_table` is given, no memberOf SQL will
            be generated.
    """
    self._value_set_codes_table = value_set_codes_table
    self._value_set_codes_definitions = value_set_codes_definitions
    # Determined later during SQL generation; None means "not yet decided".
    self._use_resource_alias = None
Args:
value_set_codes_table: The name of the database table containing value set
code definitions. Used when building SQL for memberOf expressions. If
given, value set definitions needed for memberOf expressions will be
retrieved from this table if they can not be found in
`value_set_codes_definitions`. If neither this nor
`value_set_codes_definitions` is given, no memberOf SQL will be
generated.
value_set_codes_definitions: A package manager containing value set
definitions which can be used to build SQL for memberOf expressions.
These value set definitions can be consulted in favor of using an
external `value_set_codes_table`. If neither this nor
`value_set_codes_definitions` is given, no memberOf SQL will be
generated. | github-repos |
def get(self, blocking=True):
    """Gets a connection.

    Args:
        blocking: Whether to block when max_size connections are already in
            use. If false, may return None.

    Returns:
        A connection to the database (wrapped in a _ConnectionProxy), or
        None when non-blocking and the pool is exhausted.

    Raises:
        PoolAlreadyClosedError: if close() method was already called on
            this pool.
    """
    if self.closed:
        raise PoolAlreadyClosedError('Connection pool is already closed.')
    # The limiter caps the number of simultaneously checked-out connections.
    if (not self.limiter.acquire(blocking=blocking)):
        return None
    c = None
    try:
        # Prefer reusing an idle connection over opening a new one.
        c = self.idle_conns.pop()
    except IndexError:
        try:
            c = self.connect_func()
        except Exception:
            # Give the limiter slot back if opening a connection failed.
            self.limiter.release()
            raise
    return _ConnectionProxy(self, c)
Args:
blocking: Whether to block when max_size connections are already in use.
If false, may return None.
Returns:
A connection to the database.
Raises:
PoolAlreadyClosedError: if close() method was already called on
this pool. | codesearchnet |
def create_multiple_expectations(df, columns, expectation_type, *args, **kwargs):
    """Creates an identical expectation for each of the given columns with
    the specified arguments, if any.

    Args:
        df (great_expectations.dataset): A great expectations dataset object.
        columns (list): A list of column names represented as strings.
        expectation_type (string): The expectation type.

    Raises:
        KeyError if the provided column does not exist.
        AttributeError if the provided expectation type does not exist or df
            is not a valid great expectations dataset.

    Returns:
        A list of expectation results.
    """
    expectation_method = getattr(df, expectation_type)
    # Apply the same expectation, with identical args, to every column.
    return [expectation_method(column, *args, **kwargs) for column in columns]
Args:
df (great_expectations.dataset): A great expectations dataset object.
columns (list): A list of column names represented as strings.
expectation_type (string): The expectation type.
Raises:
KeyError if the provided column does not exist.
AttributeError if the provided expectation type does not exist or df is not a valid great expectations dataset.
Returns:
A list of expectation results. | codesearchnet |
def load_model_from_hdf5(filepath, custom_objects=None, compile=True):
    """Loads a model saved via `save_model_to_hdf5`.

    Args:
        filepath: One of the following:
            - String, path to the saved model
            - `h5py.File` object from which to load the model
        custom_objects: Optional dictionary mapping names
            (strings) to custom classes or functions to be
            considered during deserialization.
        compile: Boolean, whether to compile the model
            after loading.

    Returns:
        A Keras model instance. If an optimizer was found
        as part of the saved model, the model is already
        compiled. Otherwise, the model is uncompiled and
        a warning will be displayed. When `compile` is set
        to False, the compilation is omitted without any
        warning.

    Raises:
        ImportError: if h5py is not available.
        ValueError: In case of an invalid savefile.
    """
    if h5py is None:
        raise ImportError('`load_model` requires h5py.')
    if not custom_objects:
        custom_objects = {}
    # Only close the file at the end if it was opened here.
    opened_new_file = not isinstance(filepath, h5py.File)
    if opened_new_file:
        f = h5py.File(filepath, mode='r')
    else:
        f = filepath
    model = None
    try:
        model_config = f.attrs.get('model_config')
        if model_config is None:
            raise ValueError('No model found in config file.')
        if hasattr(model_config, 'decode'):
            # Older h5py versions return attributes as bytes.
            model_config = model_config.decode('utf-8')
        model_config = json_utils.decode(model_config)
        # Rebuild the architecture, then restore the layer weights.
        model = model_config_lib.model_from_config(model_config, custom_objects=custom_objects)
        load_weights_from_hdf5_group(f['model_weights'], model.layers)
        if compile:
            training_config = f.attrs.get('training_config')
            if hasattr(training_config, 'decode'):
                training_config = training_config.decode('utf-8')
            if training_config is None:
                logging.warning('No training configuration found in the save file, so the model was *not* compiled. Compile it manually.')
                return model
            training_config = json_utils.decode(training_config)
            model.compile(**saving_utils.compile_args_from_training_config(training_config, custom_objects), from_serialized=True)
            saving_utils.try_build_compiled_arguments(model)
            # Restore optimizer state (slot variables) when it was saved.
            if 'optimizer_weights' in f:
                try:
                    model.optimizer._create_all_weights(model.trainable_variables)
                except (NotImplementedError, AttributeError):
                    logging.warning('Error when creating the weights of optimizer {}, making it impossible to restore the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer.')
                optimizer_weight_values = load_optimizer_weights_from_hdf5_group(f)
                try:
                    model.optimizer.set_weights(optimizer_weight_values)
                except ValueError:
                    logging.warning('Error in loading the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer.')
    finally:
        if opened_new_file:
            f.close()
    return model
Args:
filepath: One of the following:
- String, path to the saved model
- `h5py.File` object from which to load the model
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
compile: Boolean, whether to compile the model
after loading.
Returns:
A Keras model instance. If an optimizer was found
as part of the saved model, the model is already
compiled. Otherwise, the model is uncompiled and
a warning will be displayed. When `compile` is set
to False, the compilation is omitted without any
warning.
Raises:
ImportError: if h5py is not available.
ValueError: In case of an invalid savefile. | github-repos |
def _setup_transitions(tdef, states, prev=()):
    """Create a TransitionList object from a 'transitions' Workflow attribute.

    Args:
        tdef: list of transition definitions
        states (StateList): already parsed state definitions.
        prev (TransitionList): transition definitions from a parent.

    Returns:
        TransitionList: the list of transitions defined in the 'tdef'
        argument, with same-named parent transitions overridden in place.
    """
    trs = list(prev)
    for transition in tdef:
        if (len(transition) == 3):
            (name, source, target) = transition
            # A single source (state name or State) is normalized to a list.
            if (is_string(source) or isinstance(source, State)):
                source = [source]
            source = [states[src] for src in source]
            target = states[target]
            tr = Transition(name, source, target)
        else:
            raise TypeError(("Elements of the 'transition' attribute of a workflow should be three-tuples; got %r instead." % (transition,)))
        # A transition with the same name as a parent's replaces it in
        # place, preserving the original ordering; new names are appended.
        if any(((prev_tr.name == tr.name) for prev_tr in trs)):
            trs = [(tr if (prev_tr.name == tr.name) else prev_tr) for prev_tr in trs]
        else:
            trs.append(tr)
    return TransitionList(trs)
Args:
tdef: list of transition definitions
states (StateList): already parsed state definitions.
prev (TransitionList): transition definitions from a parent.
Returns:
TransitionList: the list of transitions defined in the 'tdef' argument. | codesearchnet |
def find_next_punctuation(text: str, start_idx=0):
    """Find the index of the next punctuation mark.

    Args:
        text (`str`):
            String to examine.
        start_idx (`int`, *optional*):
            Index where to start.

    Returns:
        The index of the first '.', '?', '!' or newline at or after
        ``start_idx``, or None if there is none.
    """
    sentence_stops = {'.', '?', '!', '\n'}
    for idx in range(start_idx, len(text)):
        if text[idx] in sentence_stops:
            return idx
    return None
Args:
text (`str`):
String to examine
start_idx (`int`, *optional*)
Index where to start | github-repos |
def from_sub_model_configs(cls, semantic_config: BarkSemanticConfig, coarse_acoustics_config: BarkCoarseConfig, fine_acoustics_config: BarkFineConfig, codec_config: PretrainedConfig, **kwargs):
    """Instantiate a [`BarkConfig`] (or a derived class) from bark sub-models configuration.

    Args:
        semantic_config: configuration of the semantic sub-model.
        coarse_acoustics_config: configuration of the coarse acoustics sub-model.
        fine_acoustics_config: configuration of the fine acoustics sub-model.
        codec_config: configuration of the audio codec sub-model.
        **kwargs: forwarded to the config constructor.

    Returns:
        [`BarkConfig`]: An instance of a configuration object
    """
    # Sub-configs are serialized to dicts so the parent config stores plain data.
    return cls(semantic_config=semantic_config.to_dict(), coarse_acoustics_config=coarse_acoustics_config.to_dict(), fine_acoustics_config=fine_acoustics_config.to_dict(), codec_config=codec_config.to_dict(), **kwargs)
Returns:
[`BarkConfig`]: An instance of a configuration object | github-repos |
def _FormatHostname(self, event):
hostname = self._output_mediator.GetHostname(event)
return self._SanitizeField(hostname) | Formats the hostname.
Args:
event (EventObject): event.
Returns:
str: formatted hostname field. | juraj-google-style |
def _replace_deferred(self, arg, context):
    """This replaces all deferred nodes (UnboundVariables and _DeferredLayers).

    If arg is a sequence or a dict, then its deferred values are also
    replaced.

    Args:
        arg: The argument to replace. If a list or a dict, then all items are
            also replaced.
        context: The context for this replacement.

    Returns:
        The replaced values, or arg if it is not a deferred node.
    """
    if isinstance(arg, UnboundVariable):
        return context[arg]
    elif isinstance(arg, _DeferredLayer):
        return arg._construct(context)
    elif isinstance(arg, tuple):
        # Tuples keep their type; other sequences become lists below.
        return tuple((self._replace_deferred(x, context) for x in arg))
    elif (isinstance(arg, collections.Sequence) and
          not isinstance(arg, six.string_types)):
        # Strings are sequences too, but must be passed through untouched.
        # NOTE(review): collections.Sequence/Mapping were removed in
        # Python 3.10; this should migrate to collections.abc once py2
        # support (via six) is dropped — confirm before changing.
        return [self._replace_deferred(x, context) for x in arg]
    elif isinstance(arg, collections.Mapping):
        return {k: self._replace_deferred(v, context)
                for k, v in six.iteritems(arg)}
    else:
        return arg
If arg is a sequence or a dict, then it's deferred values are also replaced.
Args:
arg: The argument to replace. If a list or a dict, then all items are also
replaced.
context: The context for this replacement.
Returns:
The replaced values or arg if it is not a deferred node. | juraj-google-style |
def from_moy(cls, moy, leap_year=False):
    """Create Ladybug Datetime from a minute of the year.

    Args:
        moy: An integer value 0 <= and < 525600 (or < 527040 for leap years).

    Raises:
        ValueError: when ``moy`` is out of range.
    """
    # Cumulative minute count at the start of each month (index 0..12).
    month_starts = [0, 44640, 84960, 129600, 172800, 217440, 260640,
                    305280, 349920, 393120, 437760, 480960, 525600]
    if leap_year:
        # Leap years gain one day (1440 minutes) from March onwards.
        month_starts = month_starts[:2] + [m + 1440 for m in month_starts[2:]]
    month = None
    for month_index in range(12):
        if int(moy) < month_starts[month_index + 1]:
            month = month_index + 1
            break
    if month is None:
        raise ValueError(
            "moy must be positive and smaller than 525600. Invalid input %d" % (moy)
        )
    day = int((moy - month_starts[month - 1]) / (60 * 24)) + 1
    hour = int((moy / 60) % 24)
    minute = int(moy % 60)
    return cls(month, day, hour, minute, leap_year)
Args:
moy: An integer value 0 <= and < 525600 | juraj-google-style |
def _get_config_instance(group_or_term, session, **kwargs):
    """Finds appropriate config instance and returns it.

    Args:
        group_or_term (Group or Term): node whose cached config may be reused.
        session (Sqlalchemy session): session used for lookup/creation.
        kwargs (dict): kwargs to pass to get_or_create.

    Returns:
        tuple of (Config, bool): the config instance and whether it was
        newly created.
    """
    path = group_or_term._get_path()
    # The top-level object caches configs by path; a hit avoids a database
    # round trip and is by definition not "created".
    cached = group_or_term._top._cached_configs.get(path)
    if cached:
        config = cached
        created = False
    else:
        (config, created) = get_or_create(session, Config, **kwargs)
    return (config, created)
Args:
group_or_term (Group or Term):
session (Sqlalchemy session):
kwargs (dict): kwargs to pass to get_or_create.
Returns:
tuple of (Config, bool): | codesearchnet |
def intersects(self, rect, edges=False):
    """Detect intersections between this rectangle and rect.

    Args:
        rect (Rectangle): Rectangle to test for intersections.
        edges (bool): Accept edge touching rectangles as intersects or not.

    Returns:
        bool: True if the rectangles intersect, False otherwise.
    """
    # Fully separated along either axis: no contact at all.
    if (self.bottom > rect.top or self.top < rect.bottom or
            self.left > rect.right or self.right < rect.left):
        return False
    # Touching along an edge only counts when ``edges`` is requested.
    if not edges:
        if (self.bottom == rect.top or self.top == rect.bottom or
                self.left == rect.right or self.right == rect.left):
            return False
    # Touching at a single corner point never counts as an intersection.
    if ((self.left == rect.right and self.bottom == rect.top) or
            (self.left == rect.right and rect.bottom == self.top) or
            (rect.left == self.right and self.bottom == rect.top) or
            (rect.left == self.right and rect.bottom == self.top)):
        return False
    return True
Args:
rect (Rectangle): Rectangle to test for intersections.
edges (bool): Accept edge touching rectangles as intersects or not
Returns:
bool: True if the rectangles intersect, False otherwise | codesearchnet |
def DeregisterMountPoint(cls, mount_point):
    """Deregisters a path specification mount point.

    Args:
        mount_point (str): mount point identifier.

    Raises:
        KeyError: if the corresponding mount point is not set.
    """
    try:
        del cls._mount_points[mount_point]
    except KeyError:
        raise KeyError('Mount point: {0:s} not set.'.format(mount_point))
Args:
mount_point (str): mount point identifier.
Raises:
KeyError: if the corresponding mount point is not set. | juraj-google-style |
def path_get_origin(p: tcod.path.AStar) -> Tuple[(int, int)]:
    """Get the current origin position.

    This point moves when :any:`path_walk` returns the next x,y step.

    Args:
        p (AStar): An AStar instance.

    Returns:
        Tuple[int, int]: An (x, y) point.
    """
    # Output parameters for the C API call.
    x = ffi.new('int *')
    y = ffi.new('int *')
    lib.TCOD_path_get_origin(p._path_c, x, y)
    return (x[0], y[0])
This point moves when :any:`path_walk` returns the next x,y step.
Args:
p (AStar): An AStar instance.
Returns:
Tuple[int, int]: An (x, y) point. | codesearchnet |
def check_required_tags_compliance(self, resource):
    """Check whether a resource is compliant with the required-tags policy.

    Args:
        resource: A single resource.

    Returns:
        `(list, list)`: a tuple containing the missing tags (if any) and
        explanatory notes.
    """
    missing_tags = []
    notes = []
    # Tag keys are compared case-insensitively.
    resource_tags = {tag.key.lower(): tag.value for tag in resource.tags}
    # Resource types may define their own alert scope; '*' is the fallback.
    if resource.resource_type in self.alert_schedule:
        target_accounts = self.alert_schedule[resource.resource_type]['scope']
    else:
        target_accounts = self.alert_schedule['*']['scope']
    if not (resource.account.account_name in target_accounts or '*' in target_accounts):
        return missing_tags, notes
    # An explicit ignore tag exempts the resource from auditing.
    if self.audit_ignore_tag.lower() in resource_tags:
        return missing_tags, notes
    required_tags = list(self.required_tags)
    # GDPR-scoped accounts additionally require the GDPR tag.
    if self.gdpr_enabled and resource.account.account_name in self.gdpr_accounts:
        required_tags.append(self.gdpr_tag)
    for key in [tag.lower() for tag in required_tags]:
        if key not in resource_tags:
            missing_tags.append(key)
        elif not self.validate_tag(key, resource_tags[key]):
            # A present-but-invalid value counts as missing, with a note.
            missing_tags.append(key)
            notes.append('{} tag is not valid'.format(key))
    return missing_tags, notes
Args:
resource: A single resource
Returns:
`(list, list)`
A tuple containing the missing tags (if any) and notes | juraj-google-style |
def is_date(v) -> (bool, date):
    """Check whether ``v`` is, or parses as, a date.

    Args:
        v: A ``date``/``datetime`` instance, or a string in the partial
            ISO-8601 form ``YYYY[-MM[-DD[THH[:MM[:SS]]]]]``. Any other type
            is simply reported as not-a-date.

    Returns:
        tuple: ``(True, parsed)`` where ``parsed`` is the original date
        object or the parsed ``datetime``; otherwise ``(False, v)`` with
        ``v`` unchanged.
    """
    if isinstance(v, date):
        return True, v
    reg = r'^([0-9]{4})(?:-(0[1-9]|1[0-2])(?:-(0[1-9]|[1-2][0-9]|3[0-1])(?:T' \
          r'([0-5][0-9])(?::([0-5][0-9])(?::([0-5][0-9]))?)?)?)?)?$'
    # Catch only the failures the parse can legitimately raise: TypeError for
    # non-string input, ValueError from strptime.  The original bare `except:`
    # also swallowed KeyboardInterrupt/SystemExit, which hid real problems.
    try:
        match = re.match(reg, v)
        if match:
            groups = match.groups()
            patterns = ['%Y', '%m', '%d', '%H', '%M', '%S']
            # Join only the components that were actually captured, and build
            # the matching strptime format string from the same positions.
            value = '-'.join(x for x in groups if x)
            fmt = '-'.join(patterns[i] for i in range(len(patterns)) if groups[i])
            return True, datetime.strptime(value, fmt)
    except (TypeError, ValueError):
        pass
    return False, v
Args:
v:
Returns: bool | juraj-google-style |
def clinvar_export(store, institute_id, case_name, variant_id):
    """Gather the data needed to pre-fill the ClinVar submission form.

    Args:
        store(scout.adapter.MongoAdapter)
        institute_id(str): Institute ID
        case_name(str): case ID
        variant_id(str): variant._id

    Returns:
        dict: case- and variant-level data required by the submission form.
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    # Resolve each pinned suspect to a variant document; keep the raw id when
    # the variant can no longer be found in the store.
    pinned_vars = []
    for suspect_id in case_obj.get('suspects', []):
        pinned_vars.append(store.variant(suspect_id) or suspect_id)
    return dict(
        today=str(date.today()),
        institute=institute_obj,
        case=case_obj,
        variant=store.variant(variant_id),
        pinned_vars=pinned_vars,
    )
Args:
store(scout.adapter.MongoAdapter)
institute_id(str): Institute ID
case_name(str): case ID
variant_id(str): variant._id
Returns:
a dictionary with all the required data (case and variant level) to pre-fill in fields in the clinvar submission form | codesearchnet |
class ZoeDepthPreActResidualLayer(nn.Module):
    """ResidualConvUnit: a pre-activation residual unit.

    Args:
        config (`ZoeDepthConfig`):
            Model configuration defining the fusion hidden size and
            batch-norm/bias usage in the fusion residual.
    """

    def __init__(self, config):
        super().__init__()
        self.use_batch_norm = config.use_batch_norm_in_fusion_residual
        # An explicit config value wins; otherwise the conv bias is redundant
        # whenever batch norm follows the convolution.
        if config.use_bias_in_fusion_residual is not None:
            use_bias = config.use_bias_in_fusion_residual
        else:
            use_bias = not self.use_batch_norm

        def make_conv():
            return nn.Conv2d(
                config.fusion_hidden_size,
                config.fusion_hidden_size,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=use_bias,
            )

        self.activation1 = nn.ReLU()
        self.convolution1 = make_conv()
        self.activation2 = nn.ReLU()
        self.convolution2 = make_conv()
        if self.use_batch_norm:
            self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size, eps=config.batch_norm_eps)
            self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size, eps=config.batch_norm_eps)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        """Apply two pre-activated conv(+norm) stages and add the input back."""
        residual = hidden_state
        stages = (
            (self.activation1, self.convolution1, getattr(self, 'batch_norm1', None)),
            (self.activation2, self.convolution2, getattr(self, 'batch_norm2', None)),
        )
        for activation, convolution, norm in stages:
            hidden_state = convolution(activation(hidden_state))
            if self.use_batch_norm:
                hidden_state = norm(hidden_state)
        return hidden_state + residual
Args:
config (`[ZoeDepthConfig]`):
Model configuration class defining the model architecture. | github-repos |
def send(url, data):
    """Send an incoming-hook message.

    Args:
        url(str): the incoming hook url
        data(dict): the payload to send

    Returns:
        requests.Response
    """
    # Validate the payload shape before making the HTTP call.
    validate(data)
    response = requests.post(url, json=data)
    return response
Args:
url(str): the incoming hook url
data(dict): the data to send
Returns:
requests.Response | juraj-google-style |
def _open_streaming_interface(self, connection_id, callback):
    """Enable the streaming interface for a connected IOTile device.

    Args:
        connection_id (int): The unique identifier for the connection.
        callback (callable): Called when this command finishes as
            ``callback(conn_id, adapter_id, success, failure_reason)``.
    """
    try:
        context = self.connections.get_context(connection_id)
    except ArgumentError:
        callback(connection_id, self.id, False, "Could not find connection information")
        return
    self._logger.info("Attempting to enable streaming")
    self.connections.begin_operation(connection_id, 'open_interface', callback, self.get_config('default_timeout'))
    # Locate the BLE characteristic that carries streaming report data.
    try:
        characteristic = context['services'][TileBusService][StreamingChar]
    except KeyError:
        self.connections.finish_operation(
            connection_id,
            False,
            "Can't find characteristic to open streaming interface"
        )
        return
    # Incoming notification chunks are fed into a report parser whose
    # callbacks fire when a complete report (or an error) is seen.
    context['parser'] = IOTileReportParser(report_callback=self._on_report, error_callback=self._on_report_error)
    context['parser'].context = connection_id
    def on_report_chunk_received(report_chunk):
        # Closure over this connection's parser: accumulate raw BLE chunks.
        context['parser'].add_data(report_chunk)
    self._register_notification_callback(
        context['connection_handle'],
        characteristic.value_handle,
        on_report_chunk_received
    )
    # Subscribe to notifications asynchronously; _on_interface_opened
    # completes the 'open_interface' operation started above.
    self.bable.set_notification(
        enabled=True,
        connection_handle=context['connection_handle'],
        characteristic=characteristic,
        on_notification_set=[self._on_interface_opened, context],
        on_notification_received=self._on_notification_received,
        timeout=1.0,
        sync=False
    )
Args:
connection_id (int): The unique identifier for the connection
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason) | juraj-google-style |
async def set_typing(self, typing=hangouts_pb2.TYPING_TYPE_STARTED):
    """Set your typing status in this conversation.

    Args:
        typing: (optional) ``TYPING_TYPE_STARTED``, ``TYPING_TYPE_PAUSED``,
            or ``TYPING_TYPE_STOPPED`` to start, pause, or stop typing,
            respectively. Defaults to ``TYPING_TYPE_STARTED``.

    Raises:
        .NetworkError: If typing status cannot be set.
    """
    try:
        await self._client.set_typing(
            hangouts_pb2.SetTypingRequest(
                request_header=self._client.get_request_header(),
                conversation_id=hangouts_pb2.ConversationId(id=self.id_),
                type=typing,
            )
        )
    except exceptions.NetworkError as e:
        # Log for visibility, then re-raise so callers can react.
        logger.warning('Failed to set typing status: {}'.format(e))
        raise
Args:
typing: (optional) ``TYPING_TYPE_STARTED``, ``TYPING_TYPE_PAUSED``,
or ``TYPING_TYPE_STOPPED`` to start, pause, or stop typing,
respectively. Defaults to ``TYPING_TYPE_STARTED``.
Raises:
.NetworkError: If typing status cannot be set. | juraj-google-style |
def universal_transformer_basic(layer_inputs, step, hparams, ffn_unit, attention_unit):
    """Basic Universal Transformer recurrence step.

    Weights are shared between layers; each call applies the step
    preprocessing followed by the configured number of in-recurrence
    attention + feed-forward layers.

    Args:
        layer_inputs: stacked (state, inputs, memory) tensors.
        step: indicates number of steps taken so far.
        hparams: model hyper-parameters.
        ffn_unit: feed-forward unit.
        attention_unit: multi-head attention unit.

    Returns:
        (new_state, inputs, memory) tuple.
    """
    state, inputs, memory = tf.unstack(layer_inputs, num=None, axis=0, name='unstack')
    new_state = step_preprocess(state, step, hparams)
    for layer_idx in range(hparams.num_inrecurrence_layers):
        with tf.variable_scope('rec_layer_%d' % layer_idx):
            new_state = ffn_unit(attention_unit(new_state))
    return new_state, inputs, memory
This model is pretty similar to the vanilla transformer in which weights are
shared between layers. For some tasks, this simple idea brings a
generalization that is not achievable by playing with the size of the model
or drop_out parameters in the vanilla transformer.
Args:
layer_inputs:
- state: state
step: indicates number of steps taken so far
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
layer_output:
new_state: new state | codesearchnet |
def __call__(self, inputs, state, scope=None):
    """Run this RNN cell on inputs, starting from the given state.

    Args:
        inputs: `2-D` tensor with shape `[batch_size, input_size]`.
        state: if `self.state_size` is an integer, a `2-D Tensor` of shape
            `[batch_size, self.state_size]`; otherwise a tuple of tensors
            with shapes `[batch_size, s] for s in self.state_size`.
        scope: VariableScope for the created subgraph; defaults to class name.

    Returns:
        A pair of (output, new state).
    """
    if scope is not None:
        with vs.variable_scope(scope, custom_getter=self._rnn_get_variable) as scope:
            return super(RNNCell, self).__call__(inputs, state, scope=scope)
    else:
        # Cache the implicitly-created variable scope on the instance so
        # repeated calls reuse the same variables.
        scope_attrname = 'rnncell_scope'
        scope = getattr(self, scope_attrname, None)
        if scope is None:
            scope = vs.variable_scope(vs.get_variable_scope(), custom_getter=self._rnn_get_variable)
            setattr(self, scope_attrname, scope)
        with scope:
            return super(RNNCell, self).__call__(inputs, state)
Args:
inputs: `2-D` tensor with shape `[batch_size, input_size]`.
state: if `self.state_size` is an integer, this should be a `2-D Tensor`
with shape `[batch_size, self.state_size]`. Otherwise, if
`self.state_size` is a tuple of integers, this should be a tuple with
shapes `[batch_size, s] for s in self.state_size`.
scope: VariableScope for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
- New state: Either a single `2-D` tensor, or a tuple of tensors matching
the arity and shapes of `state`. | github-repos |
def _parse_header(self, data):
    """Parse the ELF header in ``data`` and populate the properties.

    Args:
        data(bytes): The ELF header.
    """
    # e_ident: magic, class (word size), data (byte order), version, OS ABI.
    (magic, word_size, byte_order, version, osabi, abi_version, _), data = \
        unpack('4sBBBBB7s', data[:16]), data[16:]
    assert magic == self._ELF_MAGIC, 'Missing ELF magic'
    assert word_size in (1, 2), 'Invalid word size'
    assert byte_order in (1, 2), 'Invalid byte order'
    assert version == 1, 'Invalid version'
    self.osabi = self.OSABI(osabi)
    self.abi_version = abi_version
    endian = Target.Endian(byte_order - 1)
    (type_, machine, version), data = unpack('HHI', data[:8], endian=endian), data[8:]
    try:
        self.type = self.Type(type_)
    except ValueError:
        # Unrecognized e_type values degrade to 'unknown' instead of failing.
        self.type = self.Type.unknown
    try:
        self.machine = ELF.Machine(machine)
    except ValueError:
        self.machine = ELF.Machine.unknown
    assert version == 1, 'Invalid version'
    # Derive the target architecture and sanity-check word size and
    # endianness against the machine type.
    if self.machine is ELF.Machine.i386:
        arch = Target.Arch.x86
        assert word_size == 1, 'Unexpected ELF64 for machine type x86'
        assert endian is Target.Endian.little, 'Unexpected big-endian for machine type x86'
    elif self.machine is ELF.Machine.x86_64:
        arch = Target.Arch.x86
        assert word_size == 2, 'Unexpected ELF32 for machine type x64_64'
        assert endian is Target.Endian.little, 'Unexpected big-endian for machine type x86'
    elif self.machine is ELF.Machine.arm:
        arch = Target.Arch.arm
        assert word_size == 1, 'Unexpected ELF64 for machine type arm'
    elif self.machine is ELF.Machine.aarch64:
        arch = Target.Arch.arm
        assert word_size == 2, 'Unexpected ELF32 for machine type aarch64'
    else:
        arch = Target.Arch.unknown
    self.arch = arch
    self.bits = 32 * word_size
    self.endian = endian
    # The remaining header fields differ in width between ELF32 and ELF64.
    if self.bits == 32:
        fmt = 'IIIIHHHHHH'
    else:
        fmt = 'QQQIHHHHHH'
    fmt_size = pack_size(fmt)
    (self.entry, self.phoff, self.shoff, self.flags, self.hsize, self.phentsize,
     self.phnum, self.shentsize, self.shnum, self.shstrndx) = \
        unpack(fmt, data[:fmt_size], target=self)
Args:
data(bytes): The ELF header. | juraj-google-style |
def _prepare_init_params_from_job_description(cls, job_details):
    """Convert a transform-job description into constructor keyword args.

    Args:
        job_details (dict): the returned job details from a
            describe_transform_job API call.

    Returns:
        dict: The transformed init_params.
    """
    resources = job_details['TransformResources']
    output = job_details['TransformOutput']
    return {
        'model_name': job_details['ModelName'],
        'instance_count': resources['InstanceCount'],
        'instance_type': resources['InstanceType'],
        'volume_kms_key': resources.get('VolumeKmsKeyId'),
        'strategy': job_details.get('BatchStrategy'),
        'assemble_with': output.get('AssembleWith'),
        'output_path': output['S3OutputPath'],
        'output_kms_key': output.get('KmsKeyId'),
        'accept': output.get('Accept'),
        'max_concurrent_transforms': job_details.get('MaxConcurrentTransforms'),
        'max_payload': job_details.get('MaxPayloadInMB'),
        'base_transform_job_name': job_details['TransformJobName'],
    }
Args:
job_details (dict): the returned job details from a describe_transform_job API call.
Returns:
dict: The transformed init_params | codesearchnet |
def __init__(self, name=None, settings=None, instruments=None, scripts=None, log_function=None, data_path=None):
    """Executes scripts and stores script parameters and settings.

    Args:
        name (optional): name of script, if not provided take name of class.
        settings (optional): a Parameter object that contains all the
            information needed in the script.
        instruments (optional): instruments used in the script.
        scripts (optional): sub_scripts used in the script.
        log_function (optional): function reference that takes a string.
        data_path (optional): path where script data is stored.
    """
    QObject.__init__(self)
    self._script_class = self.__class__.__name__
    if name is None:
        name = self.__class__.__name__
    self.name = name
    self._instruments = {}
    if instruments is None:
        instruments = {}
    else:
        assert isinstance(instruments, dict)
        # Every instrument declared in _INSTRUMENTS must be supplied.
        assert set(self._INSTRUMENTS.keys()) <= set(instruments.keys())
    self.data_path = data_path
    # Keep only the instruments this script actually declares.
    self.instruments = {key: instruments[key] for key in list(self._INSTRUMENTS.keys())}
    self._scripts = {}
    if scripts is None:
        scripts = {}
    self.scripts = scripts
    # end_time < start_time marks the script as not-yet-run.
    self.start_time = datetime.datetime.now()
    self.end_time = self.start_time - datetime.timedelta(seconds=1)
    self._settings = deepcopy(Parameter(self._DEFAULT_SETTINGS + Script._DEFAULT_SETTINGS))
    self._settings.update({'tag':self.name.lower()})
    if settings is not None:
        self.update(settings)
    self._abort = False
    self.is_running = False
    self.data = {}
    self.log_data = deque()
    self.log_function = log_function
    self._plot_refresh = True
    self.progress = None
    # Bookkeeping for nested sub-script execution (counts and durations).
    self._current_subscript_stage = {
        'current_subscript': None,
        'subscript_exec_count':{},
        'subscript_exec_duration':{}
    }
Args:
name (optional): name of script, if not provided take name of function
settings (optional): a Parameter object that contains all the information needed in the script
instruments (optional): instruments used in the script
scripts (optional): sub_scripts used in the script
log_function(optional): function reference that takes a string | juraj-google-style |
def stop_standing_subprocess(proc):
    """Stop a subprocess started by start_standing_subprocess.

    Kills the process tree, closes any captured output streams, and reaps
    the process.

    Args:
        proc: Subprocess to terminate.
    """
    logging.debug('Stopping standing subprocess %d', proc.pid)
    _kill_process_tree(proc)
    # Close captured pipes (if any) so file descriptors are released.
    for stream in (proc.stdout, proc.stderr):
        if stream:
            stream.close()
    proc.wait()
    logging.debug('Stopped standing subprocess %d', proc.pid)
Before killing the process, we check whether it is still running; if it
has already terminated, Error is raised.
Catches and ignores the PermissionError which only happens on Macs.
Args:
proc: Subprocess to terminate.
Raises:
Error: if the subprocess could not be stopped. | github-repos |
def __init__(self, name='', declarations=None):
    """Creates an object that describes a C++ namespace declaration.

    Args:
        name (str): name of the namespace.
        declarations (list[declaration_t]): list of declarations.
    """
    scopedef.scopedef_t.__init__(self, name)
    # Never share a mutable default; fall back to a fresh list.
    self._declarations = declarations if declarations else []
Args:
name (str): name of the namespace
declarations (list[declaration_t]): list of declarations | juraj-google-style |
def type_decisioner(marc_xml, mono_callback, multimono_callback, periodical_callback):
    """Detect the publication type of `marc_xml` and call the proper callback.

    Args:
        marc_xml (str): Filename or XML string. Don't use ``\\n`` in case of
            filename.
        mono_callback (callable): Called for monographic publications.
        multimono_callback (callable): Called for multi-monographic
            publications.
        periodical_callback (callable): Called for periodical publications.

    Returns:
        obj: Content returned by the callback.

    Raises:
        ValueError: In case that type couldn't be detected.
    """
    record = MARCXMLRecord(_read_content_or_path(marc_xml))
    # Guard-clause dispatch: first matching category wins.
    if record.is_monographic or record.is_single_unit:
        return mono_callback()
    if record.is_multi_mono:
        return multimono_callback()
    if record.is_continuing:
        return periodical_callback()
    raise ValueError("Can't identify type of the `marc_xml`!")
Args:
marc_xml (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
mono_callback (fn reference): Callback in case of monographic
publications.
multimono_callback (fn reference): Callback used in case of
multi-monographic publications.
periodical_callback (fn reference): Callback used in case of periodical
publications.
Returns:
obj: Content returned by the callback.
Raises:
ValueError: In case that type couldn't be detected. | codesearchnet |
def _deserialize_audience(audience_map):
    """De-serialize conditions and attach them to each audience.

    Args:
        audience_map: Dict mapping audience ID to audience object.

    Returns:
        The same dict, with condition list and structure set on every
        audience object.
    """
    for audience in audience_map.values():
        structure, condition_list = condition_helper.loads(audience.conditions)
        audience.__dict__.update(
            conditionStructure=structure,
            conditionList=condition_list,
        )
    return audience_map
Args:
audience_map: Dict mapping audience ID to audience object.
Returns:
Dict additionally consisting of condition list and structure on every audience object. | juraj-google-style |
def call_servo(examples, serving_bundle):
    """Send an RPC request to the Servomatic prediction service.

    Args:
        examples: A list of examples that matches the model spec.
        serving_bundle: A `ServingBundle` object that contains the information
            to make the serving request.

    Returns:
        A ClassificationResponse or RegressionResponse proto.
    """
    # NOTE(review): the urlparse() argument below is a truncated string
    # literal (likely an 'http://<address>' URL built from the serving
    # bundle) -- restore the full expression from the upstream source.
    parsed_url = urlparse('http:
    channel = implementations.insecure_channel(parsed_url.hostname,
                                               parsed_url.port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    # Pick the request proto matching the serving mode.
    if serving_bundle.use_predict:
        request = predict_pb2.PredictRequest()
    elif serving_bundle.model_type == 'classification':
        request = classification_pb2.ClassificationRequest()
    else:
        request = regression_pb2.RegressionRequest()
    request.model_spec.name = serving_bundle.model_name
    if serving_bundle.model_version is not None:
        request.model_spec.version.value = serving_bundle.model_version
    if serving_bundle.signature is not None:
        request.model_spec.signature_name = serving_bundle.signature
    if serving_bundle.use_predict:
        # Predict API takes serialized tf.Examples as a string tensor.
        request.inputs[serving_bundle.predict_input_tensor].CopyFrom(
            tf.compat.v1.make_tensor_proto(
                values=[ex.SerializeToString() for ex in examples],
                dtype=types_pb2.DT_STRING))
    else:
        request.input.example_list.examples.extend(examples)
    # 30-second RPC deadline for all call types.
    if serving_bundle.use_predict:
        return common_utils.convert_predict_response(
            stub.Predict(request, 30.0), serving_bundle)
    elif serving_bundle.model_type == 'classification':
        return stub.Classify(request, 30.0)
    else:
        return stub.Regress(request, 30.0)
Args:
examples: A list of examples that matches the model spec.
serving_bundle: A `ServingBundle` object that contains the information to
make the serving request.
Returns:
A ClassificationResponse or RegressionResponse proto. | juraj-google-style |
def whatIfOrder(self, contract: Contract, order: Order) -> OrderState:
    """Retrieve commission and margin impact without actually placing
    the order. The given order will not be modified in any way.

    This method is blocking; it runs the async variant to completion.

    Args:
        contract: Contract to test.
        order: Order to test.
    """
    return self._run(self.whatIfOrderAsync(contract, order))
placing the order. The given order will not be modified in any way.
This method is blocking.
Args:
contract: Contract to test.
order: Order to test. | juraj-google-style |
def to_ast(self):
    """Returns a representation of this object as an AST node.

    The AST node encodes a constructor that would create an object with the
    same contents.

    Returns:
        ast.Node
    """
    # The shared standard options object has a dedicated shorthand.
    if self == STANDARD_OPTIONS:
        return parser.parse_expression('ag__.STD')
    template = '\n    ag__.ConversionOptions(\n        recursive=recursive_val,\n        user_requested=user_requested_val,\n        optional_features=optional_features_val,\n        internal_convert_user_code=internal_convert_user_code_val)\n    '
    def list_of_features(values):
        # Render the feature set as a parenthesized tuple of ag__ attributes.
        return parser.parse_expression('({})'.format(', '.join(('ag__.{}'.format(str(v)) for v in values))))
    expr_ast = templates.replace(template, recursive_val=parser.parse_expression(str(self.recursive)), user_requested_val=parser.parse_expression(str(self.user_requested)), internal_convert_user_code_val=parser.parse_expression(str(self.internal_convert_user_code)), optional_features_val=list_of_features(self.optional_features))
    return expr_ast[0].value
The AST node encodes a constructor that would create an object with the
same contents.
Returns:
ast.Node | github-repos |
def GetFeatureService(self, itemId, returnURLOnly=False):
    """Obtains a feature service by item ID.

    Args:
        itemId (str): The feature service's item ID.
        returnURLOnly (bool): When ``True``, return only the service URL.
            Defaults to ``False``.

    Returns:
        When ``returnURLOnly`` is ``True``, the URL of the feature service.
        Otherwise an ``arcrest.agol.FeatureService`` or
        ``arcrest.ags.FeatureService`` instance, or ``None`` when the item
        is not a feature service or the security handler is invalid.
    """
    admin = None
    item = None
    try:
        admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
        if (self._securityHandler.valid == False):
            self._valid = self._securityHandler.valid
            self._message = self._securityHandler.message
            return None
        item = admin.content.getItem(itemId=itemId)
        if (item.type == 'Feature Service'):
            if returnURLOnly:
                return item.url
            else:
                fs = arcrest.agol.FeatureService(url=item.url, securityHandler=self._securityHandler)
                # Fall back to an AGS (server) feature service when the AGOL
                # service exposes no layers.
                if ((fs.layers is None) or (len(fs.layers) == 0)):
                    fs = arcrest.ags.FeatureService(url=item.url)
                return fs
        return None
    except:
        (line, filename, synerror) = trace()
        raise common.ArcRestHelperError({'function': 'GetFeatureService', 'line': line, 'filename': filename, 'synerror': synerror})
    finally:
        # Drop references eagerly and force a GC pass (project convention).
        admin = None
        item = None
        del item
        del admin
        gc.collect()
Args:
itemId (str): The feature service's item ID.
returnURLOnly (bool): A boolean value to return the URL of the feature service. Defaults to ``False``.
Returns:
When ``returnURLOnly`` is ``True``, the URL of the feature service is returned.
When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`. | codesearchnet |
def limits(self, clip_negative=True):
    """Return intensity limits, i.e. (min, max) tuple, of the dtype.

    Args:
        clip_negative (bool, optional): If True, clip the negative range
            (i.e. return 0 for min intensity) even if the image dtype
            allows negative values.

    Returns:
        tuple: (min, max) lower and upper intensity limits.
    """
    lo, hi = dtype_range[self.as_numpy_dtype]
    return (0 if clip_negative else lo), hi
Args:
clip_negative : bool, optional
If True, clip the negative range (i.e. return 0 for min intensity)
even if the image dtype allows negative values.
Returns
min, max : tuple
Lower and upper intensity limits. | juraj-google-style |
def EnablePlugins(self, plugin_includes):
    """Enables parser plugins.

    Args:
        plugin_includes (list[str]): names of the plugins to enable, where
            None or an empty list represents all plugins. Note that the
            default plugin is handled separately.
    """
    super(SyslogParser, self).EnablePlugins(plugin_includes)
    # Index the enabled plugins by their reporter for fast dispatch.
    self._plugin_by_reporter = {
        plugin.REPORTER: plugin for plugin in self._plugins
    }
self._plugin_by_reporter[plugin.REPORTER] = plugin | Enables parser plugins.
Args:
plugin_includes (list[str]): names of the plugins to enable, where None
or an empty list represents all plugins. Note that the default plugin
is handled separately. | juraj-google-style |
def find_elements_by_name(self, name, update=False) -> Elements:
    """Finds multiple elements by name.

    Args:
        name: The name of the elements to be found.
        update: If the interface has changed, this option should be True.

    Returns:
        A list with elements if any were found; an empty list if not.

    Usage:
        elements = driver.find_elements_by_name('foo')
    """
    return self.find_elements(by=By.NAME, value=name, update=update)
Args:
name: The name of the elements to be found.
update: If the interface has changed, this option should be True.
Returns:
A list with elements if any was found. An empty list if not.
Raises:
NoSuchElementException - If the element wasn't found.
Usage:
elements = driver.find_elements_by_name('foo') | codesearchnet |
def _parse_description(self, config):
    """Scan a config block for its description value.

    Args:
        config (str): The interface config block to scan.

    Returns:
        dict: ``{'description': value}`` where value is the configured
        description, or None when no description is configured. Intended
        to be merged into the interface resource dict.
    """
    match = re.search('description (.+)$', config, re.M)
    return dict(description=match.group(1) if match else None)
Args:
config (str): The interface config block to scan
Returns:
dict: Returns a dict object with the description value retrieved
from the config block. If the description value is not
configured, None is returned as the value. The returned dict
is intended to be merged into the interface resource dict. | codesearchnet |
def __init__(self, num_buckets: int, lower: float, upper: float):
    """Simulated quantization configuration.

    Args:
        num_buckets: The number of quantization buckets, must be at least 2.
        lower: The lower bound for the quantization range.
        upper: The upper bound for the quantization range.

    Raises:
        ValueError: if `num_buckets` is less than 2.
    """
    if num_buckets < 2:
        raise ValueError(f'num_buckets is {num_buckets}, must be at least 2 for simulated quantization.')
    self.num_buckets, self.lower, self.upper = num_buckets, lower, upper
Args:
num_buckets: The number of quantization buckets, must be atleast 2.
lower: The lower bound for the quantization range.
upper: The upper bound for the quantization range.
Returns:
`QuantizationConfig`.
Raises:
ValueError: if `num_buckets` is less than 2. | github-repos |
def set_config(self, key, value):
    """Persist a config key/value pair in the registry's key-value store.

    Args:
        key (string): The key name (stored under the ``config:`` prefix).
        value (string): The key value.
    """
    self.kvstore.set("config:" + key, value)
Args:
key (string): The key name
value (string): The key value | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.