code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def check_config(config):
    """Check that all sections of the config contain the keys that they should.

    Args:
        config (defaultdict): A defaultdict.

    Raises:
        ConfigurationError: if a section is missing, empty, or fails the
            per-section health check.
    """
    for section, expected_section_keys in SECTION_KEYS.items():
        section_content = config.get(section)
        if not section_content:
            raise ConfigurationError("Config file badly formed! Section {} is missing."
                                     .format(section))
        elif not _section_is_healthy(section_content, expected_section_keys):
            raise ConfigurationError("The {} section of the configuration file is badly formed!"
                                     .format(section))
def get_builtin_code_from_operator_code(opcode):
    """Return the builtin code of the given operator code.

    Introduced to resolve the op builtin code shortage problem: new builtin
    operators may live in the extended builtin code field of the flatbuffer
    schema, and this helper hides that detail by returning the larger of the
    two fields.

    Args:
        opcode: Operator code.

    Returns:
        The builtin code of the given operator code.
    """
    # Object-API schema instances expose accessor methods; plain attribute
    # style instances expose lowercase fields.
    builtin_accessor = getattr(opcode, 'BuiltinCode', None)
    if callable(builtin_accessor):
        return max(builtin_accessor(), opcode.DeprecatedBuiltinCode())
    return max(opcode.builtinCode, opcode.deprecatedBuiltinCode)
def get(self, identifier, default=None):
    """Get a node of the AttrTree using its path string.

    Args:
        identifier: Path string (e.g. ``'a.b'``) or sequence of path
            components of the node to return.
        default: Value to return if no node is found.

    Returns:
        The indexed node of the AttrTree, or ``default`` when the path
        does not resolve.
    """
    split_label = (tuple(identifier.split('.'))
                   if isinstance(identifier, str) else tuple(identifier))
    if len(split_label) == 1:
        # Single component: a direct attribute lookup suffices.
        return self.__dict__.get(split_label[0], default)
    path_item = self
    # Walk the path one component at a time.  NOTE: the original code
    # reused ``identifier`` as the loop variable, shadowing the parameter;
    # a distinct name keeps the two meanings separate.
    for component in split_label:
        if (path_item == default) or (path_item is None):
            return default
        path_item = path_item.get(component, default)
    return path_item
def element_creator(namespace=None):
    """Create a simple namespace-aware objectify element creator.

    Args:
        namespace (str): Namespace to work in.

    Returns:
        function: Namespace-aware element creator.
    """
    ELEMENT_MAKER = _objectify.ElementMaker(namespace=namespace, annotate=False)

    def create_elem(tag, attr=None, text=None):
        """:class:`objectify.Element` wrapper with namespace defined.

        Args:
            tag (str): Tag name
            attr (dict): Default attributes for tag
            text (str): Text content for the tag

        Returns:
            _objectify.ObjectifiedElement: objectify element
        """
        if (not attr):
            attr = {}
        if text:
            element = getattr(ELEMENT_MAKER, tag)(text, **attr)
        else:
            element = getattr(ELEMENT_MAKER, tag)(**attr)
        return element
    return create_elem
def on_created(self, event):
    """Called when a new file or directory is created.

    Todo:
        This should be also used (extended from another class?) to watch
        for some special name file (like ".boussole-watcher-stop" create to
        raise a KeyboardInterrupt, so we may be able to unittest the
        watcher (click.CliRunner is not able to send signal like CTRL+C
        that is required to watchdog observer loop)

    Args:
        event: Watchdog event, either ``watchdog.events.DirCreatedEvent``
            or ``watchdog.events.FileCreatedEvent``.
    """
    # Skip recompilation while a previous event left us in an error state.
    if (not self._event_error):
        self.logger.info(u'Change detected from a create on: %s', event.src_path)
        self.compile_dependencies(event.src_path)
def compute_author_match_score(x_authors, y_authors):
    """Return the matching score of 2 given lists of authors.

    Args:
        x_authors (list(dict)): first schema-compliant list of authors.
        y_authors (list(dict)): second schema-compliant list of authors.

    Returns:
        float: matching score of authors.
    """
    # No overlap is possible when either list is empty or missing.
    if not x_authors or not y_authors:
        return 0.0
    matches = get_number_of_author_matches(x_authors, y_authors)
    longest = max(len(x_authors), len(y_authors))
    # Normalize by the longer list so the score lies in [0, 1].
    return matches / float(longest)
def __init__(self, xid=None, command=None, group_type=None, group_id=None,
             buckets=None):
    """Create a GroupMod with the optional parameters below.

    Args:
        xid (int): Header's transaction id. Defaults to random.
        command (GroupModCommand): One of OFPGC_*.
        group_type (GroupType): One of OFPGT_*.
        group_id (int): Group identifier.
        buckets (:class:`ListOfBuckets`): The length of the bucket
            array is inferred from the length field in the header.
    """
    super().__init__(xid)
    self.command = command
    self.group_type = group_type
    self.group_id = group_id
    self.buckets = buckets
def new(cls, script, commit, params, campaign_dir, overwrite=False):
    """Initialize a new class instance with a set configuration and filename.

    The created database has the same name of the campaign directory.

    Args:
        script (str): the ns-3 name of the script that will be used in this
            campaign;
        commit (str): the commit of the ns-3 installation that is used to
            run the simulations.
        params (list): a list of the parameters that can be used on the
            script.
        campaign_dir (str): The path of the file where to save the DB.
        overwrite (bool): Whether or not existing directories should be
            overwritten.

    Raises:
        ValueError: if ``campaign_dir`` is not absolute or contains user
            files that would be destroyed by overwriting.
        FileExistsError: if the directory exists and ``overwrite`` is False.
    """
    if (not Path(campaign_dir).is_absolute()):
        raise ValueError('Path is not absolute')
    if (Path(campaign_dir).exists() and (not overwrite)):
        raise FileExistsError('The specified directory already exists')
    elif (Path(campaign_dir).exists() and overwrite):
        # Only overwrite directories that contain nothing but campaign
        # artifacts: the data dir, the DB json, and hidden files.
        campaign_dir_name = os.path.basename(campaign_dir)
        folder_contents = set(os.listdir(campaign_dir))
        allowed_files = set((['data', ('%s.json' % campaign_dir_name)] + [os.path.basename(os.path.normpath(f)) for f in glob.glob(os.path.join(campaign_dir, '.*'))]))
        if (not folder_contents.issubset(allowed_files)):
            raise ValueError('The specified directory cannot be overwritten because it contains user files.')
        shutil.rmtree(campaign_dir)
    os.makedirs(campaign_dir)
    tinydb = TinyDB(os.path.join(campaign_dir, ('%s.json' % os.path.basename(campaign_dir))))
    config = {'script': script, 'commit': commit, 'params': sorted(params)}
    tinydb.table('config').insert(config)
    return cls(tinydb, campaign_dir)
def add_tools(self, *tools):
    """Adds tools to the plot.

    Args:
        *tools (Tool): the tools to add to the Plot.

    Returns:
        None

    Raises:
        ValueError: if any argument is not a ``Tool`` instance.
    """
    # Validate and append one at a time, matching the original behavior
    # of keeping earlier (valid) tools when a later argument is invalid.
    for candidate in tools:
        if not isinstance(candidate, Tool):
            raise ValueError('All arguments to add_tool must be Tool subclasses.')
        self.toolbar.tools.append(candidate)
def __init__(self, module_to_name, members, filename_to_library_map,
             path_prefix):
    """Creates a new Index.

    Args:
        module_to_name: Dictionary mapping modules to short names.
        members: Dictionary mapping member name to (fullname, member).
        filename_to_library_map: A list of (filename, Library) pairs. The order
            corresponds to the order in which the libraries appear in the index.
        path_prefix: Prefix to add to links in the index.
    """
    self._module_to_name = module_to_name
    self._members = members
    self._filename_to_library_map = filename_to_library_map
    self._path_prefix = path_prefix
def findAllSingle(self, selfValue):
    """Looks for all the single values and subclasses *recursively* and
    returns a list of them.

    Args:
        selfValue: A list of single, str, int. Normally just ``self.value``

    Returns:
        list: A list contains only singles and subclasses.
    """
    resultList = []
    for element in selfValue:
        if isinstance(element, Single):
            resultList.append(element)
            # NOTE(review): the recursive call passes no argument, so
            # ``Single.findAllSingle`` presumably supplies its own value —
            # confirm against the Single class definition.
            resultList += element.findAllSingle()
    return resultList
def forward(self, key_value_states, hidden_states, attn_mask=None):
    """Forward pass of the AriaCrossAttention module.

    Args:
        key_value_states (`torch.Tensor`):
            Input tensor for key and value.
        hidden_states (`torch.Tensor`):
            Input tensor for query.
        attn_mask (`torch.Tensor`, *optional*, defaults to None):
            Attention mask.

    Returns:
        torch.Tensor:
            Output tensor after cross-attention.
    """
    # Queries come from the (normalized) hidden states; keys/values come
    # from the separately-normalized key_value_states.
    query = self.q_proj(self.layer_norm(hidden_states))
    key_value_states = self.layer_norm_kv(key_value_states)
    key = self.k_proj(key_value_states)
    value = self.v_proj(key_value_states)
    attn_output, _ = self.multihead_attn(query, key, value, attn_mask=attn_mask)
    attn_output = self.dropout(self.linear(attn_output))
    return attn_output
def write_compartments(self, stream, compartments, adjacencies,
                       properties=None):
    """Write iterable of compartments as YAML object to stream.

    Args:
        stream: File-like object.
        compartments: Iterable of compartment entries.
        adjacencies: Dictionary mapping IDs to adjacent compartment IDs.
        properties: Set of compartment properties to output (or None to
            output all).
    """
    def convert(entry):
        # Attach the entry's adjacency list (if any) during conversion.
        return self.convert_compartment_entry(
            entry, adjacencies.get(entry.id))
    self._write_entries(stream, compartments, convert, properties)
def _reduce_pseudo_inverse(nodes):
    """Performs degree-reduction for a B\u00e9zier curve.

    Does so by using the pseudo-inverse of the degree elevation
    operator (which is overdetermined).

    .. note::
        There is also a Fortran implementation of this function, which
        will be used if it can be built.

    Args:
        nodes (numpy.ndarray): The nodes in the curve.

    Returns:
        numpy.ndarray: The reduced nodes.

    Raises:
        .UnsupportedDegree: If the degree is not 1, 2, 3 or 4.
    """
    _, num_nodes = np.shape(nodes)
    # Select the precomputed reduction matrix/denominator for this degree.
    if num_nodes == 2:
        reduction = _REDUCTION0
        denom = _REDUCTION_DENOM0
    elif num_nodes == 3:
        reduction = _REDUCTION1
        denom = _REDUCTION_DENOM1
    elif num_nodes == 4:
        reduction = _REDUCTION2
        denom = _REDUCTION_DENOM2
    elif num_nodes == 5:
        reduction = _REDUCTION3
        denom = _REDUCTION_DENOM3
    else:
        raise _helpers.UnsupportedDegree(num_nodes - 1, supported=(1, 2, 3, 4))
    result = _helpers.matrix_product(nodes, reduction)
    result /= denom
    return result
def get(self, key, mem_map=True):
    """Read and return the data stored for the given key.

    Args:
        key (str): The key to read the data from.
        mem_map (bool): If ``True`` returns the data as
            memory-mapped array, otherwise a copy is returned.

    Note:
        The container has to be opened in advance.

    Returns:
        numpy.ndarray: The stored data, or ``None`` if the key is absent.
    """
    self.raise_error_if_not_open()
    if (key in self._file):
        data = self._file[key]
        if (not mem_map):
            # Indexing with () materializes the dataset into memory.
            data = data[()]
        return data
    else:
        return None
def __init__(self, desc='Loading...', end='', timeout=0.1):
    """A loader-like context manager.

    Args:
        desc (str, optional): The loader's description. Defaults to "Loading...".
        end (str, optional): Final print. Defaults to "".
        timeout (float, optional): Sleep time between prints. Defaults to 0.1.
    """
    self.desc = desc
    self.end = end
    self.timeout = timeout
    # Daemon thread so the animation never blocks interpreter shutdown.
    self._thread = Thread(target=self._animate, daemon=True)
    self.steps = ['⢿', '⣻', '⣽', '⣾', '⣷', '⣯', '⣟', '⡿']
    self.done = False
def ikey(self, value):
    """The ikey property setter.

    Args:
        value (string): the property value.
    """
    # Storing the default explicitly is redundant: when the new value
    # matches the default and an override exists, drop the override so the
    # implicit default applies again.
    matches_default = (value == self._defaults['iKey'])
    if matches_default and 'iKey' in self._values:
        del self._values['iKey']
    else:
        self._values['iKey'] = value
def extend(self, base: 'ValueSpec') -> 'ValueSpec': | Extends a base spec with current spec's rules.
Args:
base: Base ValueSpec to extend.
Returns:
ValueSpec itself.
Raises:
TypeError: When this value spec cannot extend from base. | github-repos |
def __init__(self, *elements, **kwargs):
    """Init method.

    Args:
        *elements (): the rows or boxes.
        **kwargs: the width can be passed through the keyword args [1-12].

    Raises:
        TypeError: if any element is not a Row or Box instance.
        ValueError: if the width is outside [1, 12].
    """
    if not all([isinstance(e, Row) or issubclass(type(e), Box)
                for e in elements]):
        raise TypeError('All elements of Column must '
                        'be Row or Box instances')
    # Bootstrap-style grid width, 12 (full width) by default.
    width = kwargs.pop('width', 12)
    if width not in range(1, 13):
        raise ValueError('Column width must be between 1 and 12')
    self.type = 'column'
    self.elements = elements
    self.width = width
def swap(self, old_chunks, new_chunk):
    """Swaps old consecutive chunks with new chunk.

    Args:
        old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive
            Chunks to be removed.
        new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
    """
    positions = [self.index(chunk) for chunk in old_chunks]
    first, last = positions[0], positions[-1]
    # Remove the whole consecutive run, then place the replacement where
    # the run started.
    del self[first:last + 1]
    self.insert(first, new_chunk)
def plot(self, pts_per_edge, color=None, ax=None):
    """Plot the current curved polygon.

    Args:
        pts_per_edge (int): Number of points to plot per curved edge.
        color (Optional[Tuple[float, float, float]]): Color as RGB profile.
        ax (Optional[matplotlib.artist.Artist]): matplotlib axis object
            to add plot to.

    Returns:
        matplotlib.artist.Artist: The axis containing the plot. This
        may be a newly created axis.
    """
    if ax is None:
        ax = _plot_helpers.new_axis()
    _plot_helpers.add_patch(ax, color, pts_per_edge, *self._edges)
    return ax
def get_changed_files(self) -> List[str]:
    """Get the files changed on one git branch vs another.

    Returns:
        List[str]: File paths of changed files, relative to the git repo
        root.
    """
    diff_listing = shell_tools.output_of(
        'git', 'diff', '--name-only',
        self.compare_commit_id, self.actual_commit_id, '--',
        cwd=self.destination_directory)
    # Discard blank lines from the diff output.
    return [line for line in diff_listing.split('\n') if line.strip()]
def Format(pb):
    """Returns a dictionary or unchanged pb based on its type.

    Specifically, this function returns a dictionary that maps tag
    number (for messages) or element index (for repeated fields) to
    value, or just pb unchanged if it's neither.

    Args:
        pb: A proto2 message or a primitive.

    Returns:
        A dict or unchanged pb.
    """
    if isinstance(pb, message.Message):
        # Map field tag number -> field value for set fields only.
        return dict(((desc.number, value) for desc, value in pb.ListFields()))
    elif _IsMap(pb):
        return dict(pb.items())
    elif _IsRepeatedContainer(pb):
        # Map element index -> element value.
        return dict(enumerate(list(pb)))
    else:
        return pb
def stop(self, name: str) -> None:
    """Stop a named timer.

    Args:
        name: timer to stop

    Raises:
        AssertionError: if no timer is running, or ``name`` is not the
            most recently started timer.
    """
    # Timing disabled: nothing to account for.
    if not self._timing:
        return
    now = get_now_utc_pendulum()
    if not self._stack:
        raise AssertionError("MultiTimer.stop() when nothing running")
    if self._stack[-1] != name:
        raise AssertionError(
            "MultiTimer.stop({}) when {} is running".format(
                repr(name), repr(self._stack[-1])))
    self._totaldurations[name] += now - self._starttimes[name]
    self._stack.pop()
    # Resuming the enclosing timer: restart its clock from now.
    if self._stack:
        last = self._stack[-1]
        self._starttimes[last] = now
async def selfplay(state, flagfile='selfplay'):
    """Run selfplay and write a training chunk to the fsdb golden_chunk_dir.

    Args:
        state: the RL loop State instance.
        flagfile: the name of the flagfile to use for selfplay, either
            'selfplay' (the default) or 'boostrap'.
    """
    output_dir = os.path.join(fsdb.selfplay_dir(), state.output_model_name)
    holdout_dir = os.path.join(fsdb.holdout_dir(), state.output_model_name)
    lines = (await run('bazel-bin/cc/selfplay', '--flagfile={}.flags'.format(os.path.join(FLAGS.flags_dir, flagfile)), '--model={}'.format(state.best_model_path), '--output_dir={}'.format(output_dir), '--holdout_dir={}'.format(holdout_dir), '--seed={}'.format(state.seed)))
    # The win-rate summary table is in the last six lines of output.
    result = '\n'.join(lines[(- 6):])
    logging.info(result)
    stats = parse_win_stats_table(result, 1)[0]
    num_games = stats.total_wins
    logging.info('Black won %0.3f, white won %0.3f', (stats.black_wins.total / num_games), (stats.white_wins.total / num_games))
    pattern = os.path.join(output_dir, '*', '*.zz')
    # Seed every RNG from the loop state for reproducible chunk sampling.
    random.seed(state.seed)
    tf.set_random_seed(state.seed)
    np.random.seed(state.seed)
    buffer = example_buffer.ExampleBuffer(sampling_frac=1.0)
    logging.info('Writing golden chunk from "{}"'.format(pattern))
    buffer.parallel_fill(tf.gfile.Glob(pattern))
    buffer.flush(os.path.join(fsdb.golden_chunk_dir(), (state.output_model_name + '.tfrecord.zz')))
def first_dna(self) -> geno.DNA:
    """Returns the first DNA of current sub-space.

    Returns:
        A string-valued DNA.

    Raises:
        NotImplementedError: if the subclass has not overridden
            ``next_dna``.
    """
    # Compare code objects to detect whether the subclass actually
    # overrode the base-class ``next_dna`` implementation.
    if self.next_dna.__code__ is CustomHyper.next_dna.__code__:
        raise NotImplementedError(f'{self.__class__!r} must implement method `next_dna` to be used in dynamic evaluation mode.')
    return self.next_dna(None)
def validate_context(
    self, context: Mapping[str, Any]
) -> Tuple[bool, List[Tuple[str, str]]]:
    """Validate context.

    Args:
        context (Mapping[str, Any]): context dictionary of type, id and label

    Returns:
        Tuple[bool, List[Tuple[str, str]]]:
            bool: Is valid? Yes = True, No = False
            List[Tuple[str, str]]: Validation issues, empty if valid;
            tuple is ('ERROR|WARNING', msg),
            e.g. [('WARNING', "Context ID not found")]
    """
    url = f'{self.endpoint}/terms/{context["id"]}'
    res = requests.get(url)
    # A 200 means the term exists; anything else is reported as a warning.
    if res.status_code == 200:
        return (True, [])
    else:
        return (False, [("WARNING", f'Context {context["id"]} not found at {url}')])
def compute(self, x_arr, y_arr):
    """Compute distance.

    Args:
        x_arr: `np.ndarray` of vectors.
        y_arr: `np.ndarray` of vectors.

    Returns:
        `np.ndarray` of distances.
    """
    # Epsilon keeps the log ratio finite when y contains zeros.
    # NOTE: this mutates ``y_arr`` in place, matching prior behavior.
    y_arr += 1e-08
    ratio = x_arr / y_arr
    return np.sum(x_arr * np.log(ratio), axis=-1)
def check_version(version, range_=None):
    """Check that the found software version is within supplied range.

    Args:
        version: Version of the package as a Version object.
        range_: Allowable version range as a VersionRange object, or a
            falsy value to accept any version.

    Raises:
        RezBindError: if ``version`` falls outside ``range_``.
    """
    if not range_:
        # No (or empty) range supplied: accept any version.
        return
    if version not in range_:
        raise RezBindError('found version %s is not within range %s'
                           % (str(version), str(range_)))
def update_model_handler(self, key: str, model_path: str, previous_key: str):
    """Updates the model path of this model handler and removes it from
    memory so that it can be reloaded with the updated path. No-ops if no
    model update needs to be applied.

    Args:
        key: the key associated with the model we'd like to update.
        model_path: the new path to the model we'd like to load.
        previous_key: the key that is associated with the old version of this
            model. This will often be the same as the current key, but
            sometimes we will want to keep both the old and new models to
            serve different cohorts. In that case, the keys should be
            different.
    """
    # Already on this path for this key: nothing to do.
    if self._key_to_last_update[key] == model_path:
        return
    self._key_to_last_update[key] = model_path
    if key not in self._mh_map:
        # New key: start from a copy of the previous key's handler.
        self._mh_map[key] = deepcopy(self._mh_map[previous_key])
    self._mh_map[key].update_model_path(model_path)
    if key in self._tag_map:
        # Release the previously loaded model so it can be reloaded
        # from the new path on next use.
        tag_to_remove = self._tag_map[key]
        shared_handle, model_to_remove = self._proxy_map[tag_to_remove]
        shared_handle.release(model_to_remove)
        del self._tag_map[key]
        del self._proxy_map[tag_to_remove]
def seek(self, offset, whence=os.SEEK_SET):
    """Seeks to an offset within the file-like object.

    Args:
        offset (int): offset to seek.
        whence (Optional[int]): value that indicates whether offset is an
            absolute or relative position within the file.

    Raises:
        IOError: if the seek failed.
        OSError: if the seek failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')
    if self._current_offset < 0:
        raise IOError(
            'Invalid current offset: {0:d} value less than zero.'.format(
                self._current_offset))
    # Translate relative whence values into an absolute offset.
    if whence == os.SEEK_CUR:
        offset += self._current_offset
    elif whence == os.SEEK_END:
        # Seeking from the end requires knowing the decrypted size,
        # which is computed lazily.
        if self._decrypted_stream_size is None:
            self._decrypted_stream_size = self._GetDecryptedStreamSize()
        if self._decrypted_stream_size is None:
            raise IOError('Invalid decrypted stream size.')
        offset += self._decrypted_stream_size
    elif whence != os.SEEK_SET:
        raise IOError('Unsupported whence.')
    if offset < 0:
        raise IOError('Invalid offset value less than zero.')
    if offset != self._current_offset:
        self._current_offset = offset
        # Mark the decryption buffer as needing realignment on next read.
        self._realign_offset = True
def error(self, status=None):
    """Decorator to add a callback that generates an error page.

    The *status* parameter specifies the HTTP response status code for
    which the decorated callback should be invoked. If *status* is not
    specified, the decorated callable is considered to be a fallback
    callback, invoked for any error response whose code has no explicit
    handler.

    Arguments:
        status(int, optional): HTTP response status code.

    Returns:
        function: Decorator function to add error handler.
    """
    def register(callback):
        # Map the status code (None means fallback) to the callback.
        self._error_handlers[status] = callback
        return callback
    return register
def get_decomposition(self, comp):
    """Provides the decomposition at a particular composition.

    Args:
        comp: A composition

    Returns:
        Decomposition as a dict of {Entry: amount}
    """
    (facet, simplex) = self._get_facet_and_simplex(comp)
    # Barycentric coordinates of comp within the facet's simplex give
    # the fractional amounts of each vertex entry.
    decomp_amts = simplex.bary_coords(self.pd_coords(comp))
    # Drop entries whose amount is numerically zero.
    return {self.qhull_entries[f]: amt for (f, amt) in zip(facet, decomp_amts) if (abs(amt) > PhaseDiagram.numerical_tol)}
def tags():
    """Returns all tags in the repo.

    Returns:
        list[str]: List of all tags in the repo, sorted as versions
        (using git's ``v:refname`` sort order).
    """
    return shell.run('git tag --sort=v:refname', capture=True, never_pretend=True).stdout.strip().splitlines()
def constant(x: A) -> Callable[..., A]:
    """Produce a function that always returns a supplied value.

    Args:
        x: Any object.

    Returns:
        A function that accepts any number of positional and keyword
        arguments, discards them, and returns ``x``.
    """
    def always_return_x(*_args, **_kwargs):
        # Arguments are deliberately ignored.
        return x
    return always_return_x
def getctime(self, path=None, client_kwargs=None, header=None):
    """Return the creation time of path.

    Args:
        path (str): File path or URL.
        client_kwargs (dict): Client arguments.
        header (dict): Object header.

    Returns:
        float: The number of seconds since the epoch
        (see the time module).
    """
    # Fetch (or reuse) the object header, then extract the creation time.
    object_header = self.head(path, client_kwargs, header)
    return self._getctime_from_header(object_header)
def check_alive(self, worker_name):
    """Checks whether a remote worker is alive or not.

    Args:
        worker_name: a string representing the remote worker. It must be a
            fully specified name like "/job:worker/replica:0/task:0".

    Returns:
        a boolean indicating whether the remote worker is alive or not.

    Raises:
        ValueError: if context is not initialized.
    """
    if self._context_handle:
        return pywrap_tfe.TFE_ContextCheckAlive(self._context_handle, worker_name)
    else:
        raise ValueError('Context is not initialized.')
def document(self, document_id=None):
    """Create a sub-document underneath the current collection.

    Args:
        document_id (Optional[str]): The document identifier
            within the current collection. If not provided, will default
            to a random 20 character string composed of digits,
            uppercase and lowercase letters.

    Returns:
        ~.firestore_v1beta1.document.DocumentReference: The child
        document.
    """
    if document_id is None:
        # Generate a random identifier when none was supplied.
        document_id = _auto_id()
    return self._client.document(*(self._path + (document_id,)))
def load(self, raw):
    """Unserialize from raw representation. (Wrapper)

    Args:
        raw (dict): Raw.

    Raises:
        ParseException: If there was an error parsing data.
    """
    try:
        self._load(raw)
    except (KeyError, ValueError) as e:
        # Re-wrap low-level parsing failures, chaining the original cause.
        raise_from(exception.ParseException('Parse error in %s' % (type(self)), raw), e)
def _GetCompressedStreamTypes(self, mediator, path_spec):
    """Determines if a data stream contains a compressed stream such as: gzip.

    Args:
        mediator (ParserMediator): mediates the interactions between
            parsers and other components, such as storage and abort signals.
        path_spec (dfvfs.PathSpec): path specification of the data stream.

    Returns:
        list[str]: dfVFS compressed stream type indicators found in
        the data stream.
    """
    try:
        type_indicators = analyzer.Analyzer.GetCompressedStreamTypeIndicators(
            path_spec, resolver_context=mediator.resolver_context)
    except IOError as exception:
        # Analysis failure is non-fatal: record a warning and continue
        # with an empty indicator list.
        type_indicators = []
        warning_message = (
            'analyzer failed to determine compressed stream type indicators '
            'with error: {0!s}').format(exception)
        mediator.ProduceExtractionWarning(warning_message, path_spec=path_spec)
    return type_indicators
def __setitem__(self, key, layout):
    """Insert TensorLayout to the LayoutMap.

    Args:
        key: String key for the `TensorLayout`.
        layout: The `TensorLayout`. As a shortcut, tuple of string and None
            are also acceptable, and will be converted to `TensorLayout`.

    Raises:
        ValueError: if the key already exists, or ``layout`` is neither a
            `TensorLayout` nor a tuple.
    """
    if key in self._layout_map:
        raise ValueError(f'{key} already exist in the LayoutMap with value {self._layout_map[key]}. Please make sure to not use duplicated keys.')
    if isinstance(layout, tuple):
        # Tuple shortcut: treat it as axes with no explicit device mesh.
        layout = TensorLayout(axes=layout, device_mesh=None)
    if not isinstance(layout, TensorLayout):
        raise ValueError(f'{layout} should be a TensorLayout type, got {type(layout)}')
    self._maybe_populate_device_mesh(layout)
    self._layout_map[key] = layout
def number_of_decimals(num):
    r"""Return the number of digits after the decimal point of ``num``.

    Args:
        num (float):

    References:
        stackoverflow.com/questions/6189956/finding-decimal-places

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_alg import *  # NOQA
        >>> num = 15.05
        >>> result = number_of_decimals(num)
        >>> print(result)
        2
    """
    # Decimal's exponent is negative for fractional digits; clamp at zero
    # so integers report zero decimals.
    exp = decimal.Decimal(str(num)).as_tuple().exponent
    return max(0, -exp)
def sg_symbol_from_int_number(int_number, hexagonal=True):
    """Obtains a SpaceGroup name from its international number.

    Args:
        int_number (int): International number.
        hexagonal (bool): For rhombohedral groups, whether to return the
            hexagonal setting (default) or rhombohedral setting.

    Returns:
        (str) Spacegroup symbol

    Raises:
        ValueError: if ``int_number`` matches no known space group.
    """
    syms = []
    for (n, v) in get_symm_data('space_group_encoding').items():
        if (v['int_number'] == int_number):
            syms.append(n)
    if (len(syms) == 0):
        raise ValueError('Invalid international number!')
    # Two matches mean a rhombohedral group with both settings; pick the
    # setting the caller requested (symbols for the hexagonal setting
    # end with 'H').
    if (len(syms) == 2):
        if hexagonal:
            syms = list(filter((lambda s: s.endswith('H')), syms))
        else:
            syms = list(filter((lambda s: (not s.endswith('H'))), syms))
    return syms.pop()
def time2timestr(time, fmt='hhmmss'):
    """Turns a datetime.time object into a string. The string must have one
    of the formats from VALID_TIME_FORMATS_TEXT to make it compatible with
    timestr2time.

    Args:
        time (datetime.time): the time to be translated.
        fmt (str): a format string.

    Returns:
        (str) that represents a time.

    Raises:
        ValueError: if the format is not valid.
    """
    # Split the format into hour/minute/second parts, depending on whether
    # it uses colon separators or fixed-width fields.
    if fmt.count(':') == 2:
        if not fmt.index('h') < fmt.index('m') < fmt.index('s'):
            raise ValueError('Invalid format string. {}'.format(
                VALID_TIME_FORMATS_TEXT))
        h, m, s = fmt.split(':')
    elif fmt.count(':') == 1:
        if not fmt.index('h') < fmt.index('m'):
            raise ValueError('Invalid format string. {}'.format(
                VALID_TIME_FORMATS_TEXT))
        h, m = fmt.split(':')
        s = None
    elif any(c not in 'hms' for c in fmt) or len(fmt) != 6:
        raise ValueError('Invalid character in format string. {}'.format(
            VALID_TIME_FORMATS_TEXT))
    else:
        if not fmt.index('h') < fmt.index('m') < fmt.index('s'):
            raise ValueError('Invalid format string. {}'.format(
                VALID_TIME_FORMATS_TEXT))
        h, m, s = fmt[:-4], fmt[-4:-2], fmt[-2:]
    # Each part must consist of repetitions of its own letter only.
    for string, char in ((h, 'h'), (m, 'm'), (s, 's')):
        if string is not None and any(c != char for c in string):
            raise ValueError('Invalid date format: {} is not {}'.\
                format(char, string))
    # Translate the h/m/s fields into strftime directives.  A single 'h'
    # means "no leading zero": mark it with 'X' so the zero can be
    # stripped after formatting.
    if len(h) == 2:
        fmt = fmt.replace('hh', '%H', 1)
    elif len(h) == 1:
        fmt = fmt.replace('h', 'X%H', 1)
    else:
        raise ValueError('Invalid format string, hour must have 1 or 2 digits')
    if len(m) == 2:
        fmt = fmt.replace('mm', '%M', 1)
    else:
        raise ValueError('Invalid format string, minutes must have 2 digits')
    if s is not None and len(s) == 2:
        fmt = fmt. replace('ss', '%S', 1)
    elif s is not None:
        raise ValueError('Invalid format string, seconds must have 2 digits')
    # 'X0' -> 'X' drops the leading zero; remaining 'X' markers are removed.
    return time.strftime(fmt).replace('X0','X').replace('X','')
def wait_for(self, timeout=None, **kwargs):
    """Wait for a specific matching message or timeout.

    You specify the message by passing name=value keyword arguments to
    this method. The first message received after this function has been
    called that has all of the given keys with the given values will be
    returned when this function is awaited.

    If no matching message is received within the specified timeout (if
    given), then asyncio.TimeoutError will be raised.

    This function only matches a single message and removes itself once
    the message is seen or the timeout expires.

    Args:
        timeout (float): Optional timeout, defaults to None for no timeout.
        **kwargs: Keys to match in the message with their corresponding
            values. You must pass at least one keyword argument so there
            is something to look for.

    Returns:
        awaitable: The response
    """
    if (len(kwargs) == 0):
        raise ArgumentError('You must specify at least one message field to wait on')
    spec = MessageSpec(**kwargs)
    future = self._add_waiter(spec)
    # Ensure the waiter is removed whether it completes or times out.
    future.add_done_callback((lambda x: self._remove_waiter(spec, future)))
    return asyncio.wait_for(future, timeout=timeout)
def update_batch(self, loss_per_instance):
    """Computes priorities according to loss.

    Args:
        loss_per_instance: per-instance loss values for the batch returned
            by the preceding ``get_batch`` call.

    Raises:
        TensorForceError: if ``get_batch`` was not called first.
    """
    if (self.batch_indices is None):
        raise TensorForceError('Need to call get_batch before each update_batch call.')
    for (index, loss) in zip(self.batch_indices, loss_per_instance):
        # Proportional prioritization: priority grows with |loss|.
        new_priority = ((np.abs(loss) + self.prioritization_constant) ** self.prioritization_weight)
        self.observations._move(index, new_priority)
        self.none_priority_index += 1
def rename_keys(d: Dict[str, Any], mapping: Dict[str, str]) -> Dict[str, Any]:
    """Returns a copy of the dictionary ``d`` with its keys renamed
    according to ``mapping``.

    Keys that are not in ``mapping`` are left unchanged.  The input
    parameters are not modified.

    Args:
        d: the starting dictionary
        mapping: a dictionary of the format ``{old_key_name: new_key_name}``

    Returns:
        a new dictionary
    """
    return {mapping.get(key, key): value for key, value in d.items()}
def copy_submission_locally(self, cloud_path):
    """Copies submission from Google Cloud Storage to local directory.

    Args:
        cloud_path: path of the submission in Google Cloud Storage

    Returns:
        name of the local file where submission is copied to, or ``None``
        if the copy failed.
    """
    local_path = os.path.join(self.download_dir, os.path.basename(cloud_path))
    cmd = ['gsutil', 'cp', cloud_path, local_path]
    # gsutil returns non-zero on failure; treat that as a soft error.
    if (subprocess.call(cmd) != 0):
        logging.error("Can't copy submission locally")
        return None
    return local_path
def ExtractEventsFromSources(self):
    """Processes the sources and extracts events.

    Raises:
        BadConfigOption: if the storage file path is invalid or the storage
            format not supported or an invalid filter was specified.
        SourceScannerError: if the source scanner could not find a supported
            file system.
        UserAbort: if the user initiated an abort.
    """
    self._CheckStorageFile(self._storage_file_path, warn_about_existing=True)
    scan_context = self.ScanSource(self._source_path)
    self._source_type = scan_context.source_type
    self._status_view.SetMode(self._status_view_mode)
    self._status_view.SetSourceInformation(self._source_path, self._source_type, artifact_filters=self._artifact_filters, filter_file=self._filter_file)
    status_update_callback = self._status_view.GetExtractionStatusUpdateCallback()
    self._output_writer.Write('\n')
    self._status_view.PrintExtractionStatusHeader(None)
    self._output_writer.Write('Processing started.\n')
    session = engine.BaseEngine.CreateSession(artifact_filter_names=self._artifact_filters, command_line_arguments=self._command_line_arguments, debug_mode=self._debug_mode, filter_file_path=self._filter_file, preferred_encoding=self.preferred_encoding, preferred_time_zone=self._preferred_time_zone, preferred_year=self._preferred_year)
    storage_writer = storage_factory.StorageFactory.CreateStorageWriter(self._storage_format, session, self._storage_file_path)
    if (not storage_writer):
        raise errors.BadConfigOption('Unsupported storage format: {0:s}'.format(self._storage_format))
    # Single-file sources are always processed in single process mode.
    single_process_mode = self._single_process_mode
    if (self._source_type == dfvfs_definitions.SOURCE_TYPE_FILE):
        single_process_mode = True
    if single_process_mode:
        extraction_engine = single_process_engine.SingleProcessEngine()
    else:
        extraction_engine = multi_process_engine.TaskMultiProcessEngine(use_zeromq=self._use_zeromq)
    if (self._source_type in self._SOURCE_TYPES_TO_PREPROCESS):
        self._PreprocessSources(extraction_engine)
    configuration = self._CreateProcessingConfiguration(extraction_engine.knowledge_base)
    self._SetExtractionParsersAndPlugins(configuration, session)
    self._SetExtractionPreferredTimeZone(extraction_engine.knowledge_base)
    try:
        filter_find_specs = extraction_engine.BuildFilterFindSpecs(self._artifact_definitions_path, self._custom_artifacts_path, extraction_engine.knowledge_base, self._artifact_filters, self._filter_file)
    except errors.InvalidFilter as exception:
        raise errors.BadConfigOption('Unable to build filter specification: {0!s}'.format(exception))
    processing_status = None
    if single_process_mode:
        logger.debug('Starting extraction in single process mode.')
        processing_status = extraction_engine.ProcessSources(self._source_path_specs, storage_writer, self._resolver_context, configuration, filter_find_specs=filter_find_specs, status_update_callback=status_update_callback)
    else:
        logger.debug('Starting extraction in multi process mode.')
        processing_status = extraction_engine.ProcessSources(session.identifier, self._source_path_specs, storage_writer, configuration, enable_sigsegv_handler=self._enable_sigsegv_handler, filter_find_specs=filter_find_specs, number_of_worker_processes=self._number_of_extraction_workers, status_update_callback=status_update_callback, worker_memory_limit=self._worker_memory_limit)
    self._status_view.PrintExtractionSummary(processing_status)
def call(self, inputs, state):
    """Run one step of LSTM.

    Args:
        inputs: input Tensor, must be 2-D, `[batch, input_size]`.
        state: if `state_is_tuple` is False, a single 2-D state Tensor
            `[batch, state_size]`; otherwise an `LSTMStateTuple` of two 2-D
            Tensors with column sizes `c_state` and `m_state`.

    Returns:
        A tuple `(m, new_state)`: the output after reading `inputs`
        (`num_proj` wide if a projection is set, else `num_units`), and the
        new state in the same layout as `state`.

    Raises:
        ValueError: if the input size cannot be inferred from static shapes.
    """
    _check_rnn_cell_input_dtypes([inputs, state])
    # Output width: the projection size when a projection layer is in use.
    num_proj = self._num_units if self._num_proj is None else self._num_proj
    sigmoid = math_ops.sigmoid
    if self._state_is_tuple:
        c_prev, m_prev = state
    else:
        # Concatenated state layout: [c (num_units) | m (num_proj)].
        c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
        m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
    input_size = inputs.get_shape().with_rank(2).dims[1].value
    if input_size is None:
        raise ValueError('Could not infer input size from inputs.get_shape()[-1]')
    # One fused matmul computes all four gate pre-activations:
    # i = input gate, j = new candidate, f = forget gate, o = output gate.
    lstm_matrix = math_ops.matmul(array_ops.concat([inputs, m_prev], 1), self._kernel)
    lstm_matrix = nn_ops.bias_add(lstm_matrix, self._bias)
    i, j, f, o = array_ops.split(value=lstm_matrix, num_or_size_splits=4, axis=1)
    if self._use_peepholes:
        # Peephole connections let the input/forget gates see c directly.
        c = sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev + sigmoid(i + self._w_i_diag * c_prev) * self._activation(j)
    else:
        c = sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * self._activation(j)
    if self._cell_clip is not None:
        c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
    if self._use_peepholes:
        m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
    else:
        m = sigmoid(o) * self._activation(c)
    if self._num_proj is not None:
        # Project the output down to num_proj dimensions, optionally clipped.
        m = math_ops.matmul(m, self._proj_kernel)
        if self._proj_clip is not None:
            m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
    new_state = LSTMStateTuple(c, m) if self._state_is_tuple else array_ops.concat([c, m], 1)
    return (m, new_state)
Args:
inputs: input Tensor, must be 2-D, `[batch, input_size]`.
state: if `state_is_tuple` is False, this must be a state Tensor, `2-D,
[batch, state_size]`. If `state_is_tuple` is True, this must be a tuple
of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch, output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference. | github-repos |
def _bdtr(k, n, p):
    """The binomial cumulative distribution function.

    Args:
        k: floating point `Tensor`.
        n: floating point `Tensor`.
        p: floating point `Tensor`.

    Returns:
        `sum_{j=0}^k p^j (1 - p)^(n - j)`.
    """
    unit = tf.ones_like(n - k)
    complete = tf.equal(k, n)
    # betainc requires a > 0, so substitute a safe value where k == n and
    # patch the result back to exactly 1 afterwards.
    safe_a = tf.where(complete, unit, n - k)
    cdf = tf.math.betainc(a=safe_a, b=k + 1, x=1 - p)
    return tf.where(complete, unit, cdf)
Args:
k: floating point `Tensor`.
n: floating point `Tensor`.
p: floating point `Tensor`.
Returns:
`sum_{j=0}^k p^j (1 - p)^(n - j)`. | juraj-google-style |
def strip_name_scope(name: str, export_scope) -> str:
    """Removes a name scope prefix from a name.

    Args:
        name: A `string` name.
        export_scope: Optional `string` name scope to remove; falsy values
            leave `name` untouched.

    Returns:
        Name with the scope removed, or the original name when the scope is
        empty or the inputs have an unexpected type.
    """
    if not export_scope:
        return name
    # Normalise away a trailing slash so the pattern below matches cleanly.
    scope = export_scope[:-1] if export_scope[-1] == '/' else export_scope
    try:
        # Keep an optional control-dep/colocation prefix ("^" or "loc:@"),
        # drop the scope plus one or more slashes, keep the remainder.
        pattern = '([\\^]|loc:@|^)' + scope + '[\\/]+(.*)'
        return re.sub(pattern, '\\1\\2', compat.as_str(name), count=1)
    except TypeError as e:
        logging.warning(e)
        return name
Args:
name: A `string` name.
export_scope: Optional `string`. Name scope to remove.
Returns:
Name with name scope removed, or the original name if export_scope
is None. | github-repos |
def get_issue_description(test_config_container: TestConfigContainer, metric_container: MetricContainer, change_point_index: int, max_results_to_display: int=5) -> str:
    """Build the GitHub issue description for a detected change point.

    Args:
        test_config_container: TestConfigContainer containing test metadata.
        metric_container: MetricContainer containing metric data.
        change_point_index: Index of the change point in the metric data.
        max_results_to_display: Max number of runs shown on each side of the
            change point index.

    Returns:
        str: Description used to fill the GitHub issue body.
    """
    sections = [_ISSUE_DESCRIPTION_TEMPLATE.format(test_config_container.test_id, test_config_container.metric_name)]
    if test_config_container.test_name:
        sections.append('`test_name:` ' + f'{test_config_container.test_name}')
    if test_config_container.test_description:
        sections.append('`Test description:` ' + f'{test_config_container.test_description}')
    sections.append('```')
    # Window of runs around the change point, rendered newest first.
    last_index = min(change_point_index + max_results_to_display, len(metric_container.values) - 1)
    first_index = max(0, change_point_index - max_results_to_display)
    rows = []
    for idx in reversed(range(first_index, last_index + 1)):
        row = _METRIC_INFO_TEMPLATE.format(metric_container.timestamps[idx].ctime(), format(metric_container.values[idx], '.2f'))
        if idx == change_point_index:
            # Mark the run where the anomaly was detected.
            row += constants._ANOMALY_MARKER
        rows.append(row)
    sections.append(os.linesep.join(rows))
    sections.append('```')
    return (2 * os.linesep).join(sections)
test_config_container: TestConfigContainer containing test metadata.
metric_container: MetricContainer containing metric data.
change_point_index: Index of the change point in the metric data.
max_results_to_display: Max number of results to display from the change
point index, in both directions of the change point index.
Returns:
str: Description used to fill the GitHub issues description. | github-repos |
def _update_listing_client_kwargs(client_kwargs, max_request_entries):
client_kwargs = client_kwargs.copy()
if max_request_entries:
client_kwargs['num_results'] = max_request_entries
return client_kwargs | Updates client kwargs for listing functions.
Args:
client_kwargs (dict): Client arguments.
max_request_entries (int): If specified, maximum entries returned
by request.
Returns:
dict: Updated client_kwargs | codesearchnet |
def from_backbone_configs(cls, backbone_config: PretrainedConfig, **kwargs):
    """Instantiate this configuration class from a backbone configuration.

    Args:
        backbone_config (`PretrainedConfig`): The backbone configuration.
        **kwargs: Additional keyword arguments forwarded to the constructor.

    Returns:
        An instance of this configuration class wrapping `backbone_config`.
    """
    return cls(backbone_config=backbone_config, **kwargs)
configuration.
Args:
backbone_config ([`PretrainedConfig`]):
The backbone configuration.
Returns:
[`DFineConfig`]: An instance of a configuration object | github-repos |
def save(self):
    """Save this prefix to persistent storage.

    Ensures the virtual-environment directory exists, then persists the
    prefix metadata and the virtual environment itself.

    Returns:
        None
    """
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists + os.makedirs pair.
    os.makedirs(self.paths.virt(), exist_ok=True)
    self._save_metadata()
    self.virt_env.save()
Returns:
None | codesearchnet |
def predict_task_proba(self, X, t=0, **kwargs):
    """Predict probabilistic labels for input X on task t.

    Args:
        X: The input passed through to predict_proba.
        t: Index of the task to return probabilities for.

    Returns:
        An [n, K_t] tensor of probabilistic predictions for task t.

    NOTE: By default this delegates to predict_proba and selects element t;
    subclasses able to predict a single task in isolation may override this
    for efficiency's sake.
    """
    all_task_probas = self.predict_proba(X, **kwargs)
    return all_task_probas[t]
Args:
X: The input for the predict_proba method
t: The task index to predict for which to predict probabilities
Returns:
An [n, K_t] tensor of predictions for task t
NOTE: By default, this method calls predict_proba and extracts element
t. If it is possible to predict individual tasks in isolation, however,
this method may be overriden for efficiency's sake. | juraj-google-style |
def can_api_key_access_build(param_name):
    """Determine whether the current API key may access the requested build.

    Args:
        param_name: Name of the request parameter carrying the build ID;
            looked up in the query string, then form data, then the JSON
            body.

    Returns:
        (api_key, build): The API key and the build it has access to.
    """
    build_id = (
        request.args.get(param_name, type=int) or
        request.form.get(param_name, type=int) or
        request.json[param_name])
    utils.jsonify_assert(build_id, 'build_id required')
    if not app.config.get('IGNORE_AUTH'):
        ops = _get_api_key_ops()
        api_key, build = ops.can_access_build(build_id)
    else:
        # Auth disabled: act as an anonymous superuser key.
        api_key = models.ApiKey(
            id='anonymous_superuser',
            secret='',
            superuser=True)
        build = models.Build.query.get(build_id)
        utils.jsonify_assert(build is not None, 'build must exist', 404)
    return api_key, build
Args:
param_name: Parameter name to use for getting the build ID from the
request. Will fetch from GET or POST requests.
Returns:
(api_key, build) The API Key and the Build it has access to. | juraj-google-style |
def output(self, _filename):
    """Print a human-readable summary of the analyzed contracts.

    Args:
        _filename (string): Unused; kept for interface compatibility.
    """
    txt = "Analyze of {}\n".format(self.slither.filename)
    txt += self.get_detectors_result()
    for contract in self.slither.contracts_derived:
        txt += "\nContract {}\n".format(contract.name)
        txt += self.is_complex_code(contract)
        # Query once and reuse: the original called is_erc20() twice and
        # ignored the stored result when formatting.
        is_erc20 = contract.is_erc20()
        # Bug fix: the original line lacked a trailing newline, gluing the
        # function count onto the ERC20 line below.
        txt += '\tNumber of functions:{}\n'.format(self._number_functions(contract))
        txt += "\tIs ERC20 token: {}\n".format(is_erc20)
        if is_erc20:
            txt += self.get_summary_erc20(contract)
    self.info(txt)
Args:
_filename(string) | juraj-google-style |
def drift_fn(self):
    """Python callable calculating instantaneous drift.

    The callable should accept two real `Tensor` arguments of the same
    dtype: the scalar time t and the process value X of shape
    `batch_shape + [dim]`. It returns the drift a(t, X) with the same dtype
    and shape as X.

    Returns:
        The instantaneous drift rate callable.
    """
    # Abstract hook: concrete process subclasses must override this.
    pass
The callable should accept two real `Tensor` arguments of the same dtype.
The first argument is the scalar time t, the second argument is the value of
Ito process X - `Tensor` of shape `batch_shape + [dim]`. Here `batch_shape`
is an arbitrary shape. The result is the value of drift a(t, X). The return
value of the callable is a real `Tensor` of the same dtype as the input
arguments and of shape `batch_shape + [dim]`.
Returns:
The instantaneous drift rate callable. | github-repos |
def _validate_bn_layer(self, layer):
    """Check that *layer* is a plain BatchNormalization layer.

    Args:
        layer: Instance of `tf.layers.BatchNormalization`.

    Raises:
        ValueError: If `layer` is not a BatchNormalization instance, or it
            uses renormalization or virtual batch sizes (both unsupported
            by the BatchNorm bijector).
    """
    supported_types = (tf.keras.layers.BatchNormalization, tf.compat.v1.layers.BatchNormalization)
    if not isinstance(layer, supported_types):
        raise ValueError('batchnorm_layer must be an instance of BatchNormalization layer.')
    if layer.renorm:
        raise ValueError('BatchNorm Bijector does not support renormalization.')
    if layer.virtual_batch_size:
        raise ValueError('BatchNorm Bijector does not support virtual batch sizes.')
Args:
layer: Instance of `tf.layers.BatchNormalization`.
Raises:
ValueError: If batchnorm_layer argument is not an instance of
`tf.layers.BatchNormalization`, or if `batchnorm_layer.renorm=True` or
if `batchnorm_layer.virtual_batch_size` is specified. | codesearchnet |
def expand_var(v, env):
    """Expand a variable reference (e.g. '$myvar') using *env*.

    A leading '$$' escapes the dollar sign, producing a literal '$...'
    value without any lookup.

    Args:
        v: The string to expand if it is a variable reference.
        env: Mapping of variable names to replacement values.

    Raises:
        Exception: If v references a variable that is not found in env.
    """
    if not v or v[0] != '$':
        return v
    body = v[1:]
    if body and body[0] == '$':
        # '$$name' is an escaped dollar: return '$name' unchanged.
        return body
    if not body:
        # A lone '$' expands to the empty string, as before.
        return body
    if body not in env:
        raise Exception('Cannot expand variable $%s' % body)
    return env[body]
env dictionary.
Args:
v: the variable to replace if needed.
env: user supplied dictionary.
Raises:
Exception if v is a variable reference but it is not found in env. | juraj-google-style |
def create_version(self, version_label):
    """Create a new version of this resource on the server.

    Note: the version is based on the current live instance of the
    resource, not the local one, which might require self.update() first.

    Args:
        version_label (str): label to use for the new version.
    """
    endpoint = '%s/fcr:versions' % self.uri
    response = self.repo.api.http_request('POST', endpoint, data=None, headers={'Slug': version_label})
    if response.status_code == 201:
        logger.debug('version created: %s' % response.headers['Location'])
        # Record the new version locally as well.
        self._affix_version(response.headers['Location'], version_label)
- Note: this will create a version based on the current live instance of the resource,
not the local version, which might require self.update() to update.
Args:
version_label (str): label to be used for version
Returns:
(ResourceVersion): instance of ResourceVersion, also appended to self.versions | juraj-google-style |
def broadcast_tensor(self, tensor):
    """Broadcast from a dense tensor.

    The first axis of `tensor` is assumed to be indexed by the source
    shape; the result's first axis is indexed by the destination shape.

    Args:
        tensor: a dense tensor.

    Returns:
        A dense tensor gathered along axis 0 by `self.gather_index`.
    """
    return array_ops.gather(tensor, self.gather_index)
It is assumed that the first axis of the dense tensor is indexed by the
source shape, and at the end, the first axis of the dense tensor is
indexed by the destination shape.
Args:
tensor: a dense tensor.
Returns:
A dense tensor. | github-repos |
def write_tabular(obj, filepath):
    """Write a tabular object in HDF5 or pickle format.

    Args:
        obj (array or DataFrame): tabular object to write.
        filepath (path-like): destination path; must end in '.h5' or '.pkl'.

    Raises:
        NotImplementedError: if the extension is neither '.h5' nor '.pkl'.
    """
    _, _, ext = splitext2(filepath)
    if ext == '.h5':
        _write_tabular_h5(obj, filepath)
    elif ext == '.pkl':
        _write_tabular_pickle(obj, filepath)
    else:
        # Bug fix: name the offending extension instead of raising a bare
        # NotImplementedError with no context.
        raise NotImplementedError('Unsupported file extension: {!r} (expected .h5 or .pkl)'.format(ext))
Args:
obj (array or DataFrame): tabular object to write
filepath (path-like): path to write to; must end in '.h5' or '.pkl' | codesearchnet |
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Write the data encoding the GetAttributeList request payload to a
    stream.

    Args:
        output_buffer (stream): A data stream in which to encode object
            data, supporting a write method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): KMIP version to encode with. Optional,
            defaults to KMIP 1.0.
    """
    # Encode the payload fields into a scratch buffer first so the total
    # payload length is known before the header is written.
    payload_buffer = utils.BytearrayStream()
    if self._unique_identifier:
        self._unique_identifier.write(payload_buffer, kmip_version=kmip_version)
    self.length = payload_buffer.length()
    super(GetAttributeListRequestPayload, self).write(output_buffer, kmip_version=kmip_version)
    output_buffer.write(payload_buffer.buffer)
stream.
Args:
output_buffer (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0. | juraj-google-style |
def register(self, table):
    """Add a shared table to the catalog.

    Args:
        table (SymbolTable): A non-system, shared, non-substitute symbol
            table.

    Raises:
        ValueError: If the table is a system table, a local (non-shared)
            table, or a substitute table.
    """
    if table.table_type.is_system:
        raise ValueError('Cannot add system table to catalog')
    if not table.table_type.is_shared:
        raise ValueError('Cannot add local table to catalog')
    if table.is_substitute:
        raise ValueError('Cannot add substitute table to catalog')
    # setdefault replaces the original get/None-check/store dance with a
    # single lookup.
    versions = self.__tables.setdefault(table.name, {})
    versions[table.version] = table
Args:
table (SymbolTable): A non-system, shared symbol table. | codesearchnet |
def insert_values_in_args(args, kwargs, values):
    """For internal use only; no backwards-compatibility guarantees.

    Replaces all ArgumentPlaceholder instances in args/kwargs with actual
    values, consumed in order: positional arguments first, then keyword
    arguments in sorted key order.

    Args:
        args: A list of positional arguments.
        kwargs: A dictionary of keyword arguments.
        values: Values used to replace the placeholders, in order.

    Returns:
        A 2-tuple containing a modified list of positional arguments, and a
        modified dictionary of keyword arguments.
    """
    value_iterator = iter(values)

    def _fill(candidate):
        # Swap in the next value only where a placeholder sits.
        return next(value_iterator) if isinstance(candidate, ArgumentPlaceholder) else candidate

    new_args = [_fill(arg) for arg in args]
    new_kwargs = {key: _fill(val) for key, val in sorted(kwargs.items())}
    return (new_args, new_kwargs)
Replaces all placeholders in args/kwargs with actual values.
Args:
args: A list of positional arguments.
kwargs: A dictionary of keyword arguments.
values: A list of values that will be used to replace placeholder values.
Returns:
A 2-tuple containing a modified list of positional arguments, and a
modified dictionary of keyword arguments. | github-repos |
def _TextJustify(self, text, col_size):
    """Formats text within column with white space padding.

    A single space is prefixed, and a number of spaces are added as a
    suffix such that the length of the resultant string equals col_size.
    If the text exceeds the available width it is split into words and
    returned as a list of strings, each one or more words padded to the
    column size.

    Args:
        text: String of text to format.
        col_size: integer size of column to pad out the text to.

    Returns:
        List of strings col_size in length (plus any invisible ANSI escape
        characters present in the input line).

    Raises:
        TableError: If col_size is too small to fit the words in the text.
    """
    result = []
    if ('\n' in text):
        # Treat each embedded line as its own paragraph and wrap each.
        for paragraph in text.split('\n'):
            result.extend(self._TextJustify(paragraph, col_size))
        return result
    wrapper = textwrap.TextWrapper(width=(col_size - 2), break_long_words=False, expand_tabs=False)
    try:
        text_list = wrapper.wrap(text)
    except ValueError:
        # TextWrapper rejects widths < 1, i.e. columns narrower than 3.
        raise TableError('Field too small (minimum width: 3)')
    if (not text_list):
        # Empty text still occupies a full blank cell.
        return [(' ' * col_size)]
    for current_line in text_list:
        # ANSI escape sequences take no display width, so widen the pad by
        # the number of invisible characters on this line.
        stripped_len = len(terminal.StripAnsiText(current_line))
        ansi_color_adds = (len(current_line) - stripped_len)
        if ((stripped_len + 2) > col_size):
            raise TableError('String contains words that do not fit in column.')
        result.append((' %-*s' % (((col_size - 1) + ansi_color_adds), current_line)))
    return result
A single space is prefixed, and a number of spaces are added as a
suffix such that the length of the resultant string equals the col_size.
If the length of the text exceeds the column width available then it
is split into words and returned as a list of string, each string
contains one or more words padded to the column size.
Args:
text: String of text to format.
col_size: integer size of column to pad out the text to.
Returns:
List of strings col_size in length.
Raises:
TableError: If col_size is too small to fit the words in the text. | codesearchnet |
def set_all_file_column_labels(self, xlabel=None, ylabel=None):
    """Set general x/y column labels written into data files for all plots.

    These defaults can be overridden for specific plots.

    Args:
        xlabel/ylabel (str, optional): Column label for x/y values in the
            data files. Default is None.

    Raises:
        UserWarning: Issued (not raised) if neither xlabel nor ylabel is
            given; the code continues.
    """
    if xlabel is not None:
        self.general.x_column_label = xlabel
    if ylabel is not None:
        self.general.y_column_label = ylabel
    if xlabel is None and ylabel is None:
        # Bug fix: the original message concatenated to
        # "...lables eventhough..." (typo plus missing space).
        warnings.warn('Neither xlabel nor ylabel was specified even though the column labels function was called.', UserWarning)
    return
This sets the general x and y column labels into data files for all plots.
It can be overridden for specific plots.
Args:
xlabel/ylabel (str, optional): String indicating column label for x,y values
into the data files. Default is None.
Raises:
UserWarning: If xlabel and ylabel are both left unspecified,
the user will be alerted, but the code will not stop.
def VerifyGitkitToken(self, jwt):
    """Verifies a Gitkit token string.

    Args:
        jwt: string, the token to be checked.

    Returns:
        GitkitUser, if the token is valid. None otherwise.
    """
    certs = self.rpc_helper.GetPublicCert()
    # NOTE(review): this mutates a module-level setting, affecting every
    # other verification in the process.
    crypt.MAX_TOKEN_LIFETIME_SECS = 30 * 86400
    parsed = None
    # Accept tokens addressed to either the project id or the client id.
    for aud in filter(lambda x: x is not None, [self.project_id, self.client_id]):
        try:
            parsed = crypt.verify_signed_jwt_with_certs(jwt, certs, aud)
        except crypt.AppIdentityError as e:
            # Bug fix: exceptions have no .message attribute on Python 3;
            # e.message raised AttributeError here. Use str(e) instead.
            if "Wrong recipient" not in str(e):
                return None
    if parsed:
        return GitkitUser.FromToken(parsed)
    return None
Args:
jwt: string, the token to be checked
Returns:
GitkitUser, if the token is valid. None otherwise. | juraj-google-style |
def _CheckLine(self, line):
    """Pass *line* through the current state's rules until one matches.

    Args:
        line: A string, the current input line.
    """
    for rule in self._cur_state:
        match = self._CheckRule(rule, line)
        if not match:
            continue
        # Record every named capture group as a variable assignment.
        for group_name in match.groupdict():
            self._AssignVar(match, group_name)
        if self._Operations(rule):
            # 'End'/'EOF' are terminal pseudo-states; anything else
            # switches the parser to the named state.
            if rule.new_state and rule.new_state not in ('End', 'EOF'):
                self._cur_state = self.states[rule.new_state]
                self._cur_state_name = rule.new_state
            break
Args:
line: A string, the current input line. | juraj-google-style |
def GetValueLength(rd, pos):
    """Get value length for a key in rd.

    For a key at position pos in the Report Descriptor rd, return the
    length of the associated value. Supports both the short and long item
    formats.

    Args:
        rd: Report Descriptor.
        pos: The position of the key in rd.

    Returns:
        (key_size, data_len): the number of bytes occupied by the key and
        the length of the value it introduces.

    Raises:
        errors.HidError: If the descriptor is malformed.
    """
    rd = bytearray(rd)
    key = rd[pos]
    if key == LONG_ITEM_ENCODING:
        # Long item: the next byte holds the data length; key takes 3 bytes.
        if pos + 1 >= len(rd):
            raise errors.HidError('Malformed report descriptor')
        return (3, rd[pos + 1])
    # Short item: the low two bits encode the size; 0b11 means 4 bytes.
    # (All four codes are handled, so no "cannot happen" branch remains.)
    size_code = key & 0x03
    return (1, 4 if size_code == 0x03 else size_code)
For a key at position pos in the Report Descriptor rd, return the length
of the associated value. This supports both short and long format
values.
Args:
rd: Report Descriptor
pos: The position of the key in rd.
Returns:
(key_size, data_len) where key_size is the number of bytes occupied by
the key and data_len is the length of the value associated by the key. | juraj-google-style |
def get_associated_profiles(self):
    """Get the URIs of profiles which are using this Ethernet network.

    The network is identified by the URI stored in ``self.data['uri']``.

    Returns:
        list: URIs of the associated profiles.
    """
    uri = '{}/associatedProfiles'.format(self.data['uri'])
    return self._helper.do_get(uri)
Args:
id_or_uri: Can be either the logical interconnect group id or the logical interconnect group uri
Returns:
list: URIs of the associated profiles. | codesearchnet |
def add_multiple(self, flags):
    """Add multiple command line flags.

    Arguments:
        flags (:obj:`list` of :obj:`Flag` or :obj:`tuple`): Flags, either
            as Flag instances or as tuples of
            (name, flag_type, description, (optional) default).

    Raises:
        TypeError: Provided wrong arguments or arguments of wrong types.
    """
    if not isinstance(flags, list):
        # Bug fix: the original format string lacked a space before the
        # type ("...of type<class ...>").
        raise TypeError("Expected list of flags, got object of type {}".format(type(flags)))
    for flag in flags:
        if isinstance(flag, Flag):
            self.add_item(flag)
        elif isinstance(flag, tuple):
            try:
                item = Flag(*flag)
                self.add_item(item)
            except TypeError:
                # Bug fix: the original referenced positional index {3}
                # with only three format arguments, which raised
                # IndexError while building this message.
                raise TypeError("Invalid arguments to initialize a flag definition, expect ({0} [, {1}]) but got {2}"
                                .format(", ".join(Flag.REQUIRED_FIELDS),
                                        ", ".join(Flag.OPTIONAL_FIELDS), flag))
Arguments:
flags (:obj:`list` of :obj:`tuple`): List of flags
in tuples (name, flag_type, description, (optional) default)
Raises:
TypeError: Provided wrong arguments or arguments of wrong types, method will raise TypeError | juraj-google-style |
def __init__(self, target_pixels=None, **kwargs):
    """BuilderConfig for OpenImagesV4.

    Args:
        target_pixels: If given, rescale the images so that the number of
            pixels is roughly this value.
        **kwargs: keyword arguments forwarded to super.
    """
    super(OpenImagesV4Config, self).__init__(**kwargs)
    # Stored privately; used when preprocessing images for this config.
    self._target_pixels = target_pixels
Args:
target_pixels: If given, rescale the images so that the number of pixels
is roughly this value.
**kwargs: keyword arguments forward to super. | juraj-google-style |
def get_structures(self, primitive=True):
    """Return the list of structures parsed from the CIF file.

    Args:
        primitive (bool): Set to False to return conventional unit cells.
            Defaults to True. With magnetic CIF files, the primitive
            magnetic cell is returned, which may be larger than the
            nuclear primitive cell.

    Returns:
        List of Structures.

    Raises:
        ValueError: If the CIF file contains no parseable structures.
    """
    structures = []
    for data_block in self._cif.data.values():
        try:
            struct = self._get_structure(data_block, primitive)
        except (KeyError, ValueError) as exc:
            # Record the problem and keep parsing the remaining blocks.
            self.errors.append(str(exc))
            warnings.warn(str(exc))
        else:
            if struct:
                structures.append(struct)
    if self.errors:
        warnings.warn('Issues encountered while parsing CIF:')
        for error in self.errors:
            warnings.warn(error)
    if not structures:
        raise ValueError('Invalid cif file with no structures!')
    return structures
conventional cell structure or primitive cell structure is returned.
Args:
primitive (bool): Set to False to return conventional unit cells.
Defaults to True. With magnetic CIF files, will return primitive
magnetic cell which may be larger than nuclear primitive cell.
Returns:
List of Structures. | codesearchnet |
def _load_field(mapping, key, error_template, *args):
    """Return mapping[key]; log *error_template* and re-raise on KeyError."""
    try:
        return mapping[key]
    except KeyError:
        _log.error(error_template, *args)
        raise


def loads(serialized_messages):
    """Deserialize messages from a JSON formatted str.

    Args:
        serialized_messages (str): JSON string of serialized messages.

    Returns:
        list: Deserialized message objects.

    Raises:
        ValidationError: If deserialized message validation failed.
        KeyError: If serialized_messages aren't properly serialized.
        ValueError: If serialized_messages is not valid JSON.
    """
    try:
        messages_dicts = json.loads(serialized_messages)
    except ValueError:
        _log.error("Loading serialized messages failed.")
        raise
    messages = []
    for message_dict in messages_dicts:
        # Required fields: each lookup logs and re-raises on absence,
        # preserving the original per-field error messages.
        headers = _load_field(message_dict, "headers", "Message saved without headers.")
        schema = _load_field(headers, "fedora_messaging_schema", "Message (headers=%r) saved without a schema header.", headers)
        MessageClass = get_class(schema)
        body = _load_field(message_dict, "body", "Message saved without body.")
        id = _load_field(message_dict, "id", "Message saved without id.")
        try:
            queue = message_dict["queue"]
        except KeyError:
            # Queue is optional: older serialized messages may predate it.
            _log.warning("Message saved without queue.")
            queue = None
        topic = _load_field(message_dict, "topic", "Message saved without topic.")
        severity = _load_field(headers, "fedora_messaging_severity", "Message saved without a severity.")
        message = MessageClass(
            body=body, topic=topic, headers=headers, severity=severity
        )
        try:
            message.validate()
            _log.debug("Successfully validated message %r", message)
        except jsonschema.exceptions.ValidationError as e:
            _log.error("Message validation of %r failed: %r", message, e)
            raise ValidationError(e)
        message.queue = queue
        message.id = id
        messages.append(message)
    return messages
Args:
serialized_messages (JSON str):
Returns:
list: Deserialized message objects.
Raises:
ValidationError: If deserialized message validation failed.
KeyError: If serialized_messages aren't properly serialized.
ValueError: If serialized_messages is not valid JSON | juraj-google-style |
def context(name=None):
    """Declare that a class defines a context.

    Contexts are for use with HierarchicalShell for discovering and using
    functionality from the command line.

    Args:
        name (str): Optional name for this context if you don't want to
            just use the class name.
    """
    def decorator(cls):
        annotated(cls, name)
        cls.context = True
        return cls
    return decorator
Contexts are for use with HierarchicalShell for discovering
and using functionality from the command line.
Args:
name (str): Optional name for this context if you don't want
to just use the class name. | codesearchnet |
def lighten(self, amount):
    """Lighten (increase the luminance of) this color.

    Args:
        amount (float): Amount to increase the luminance by (the result is
            clamped to the valid range).

    Returns:
        Color: A new color; this color is not modified.
    """
    hsl = self.to_hsl()
    # Clamp keeps the lightness channel within its valid upper bound.
    hsl.l = self.clamp(hsl.l + amount, 1)
    return self.from_hsl(hsl)
Args:
amount (float) :
Amount to increase the luminance by (clamped above zero)
Returns:
Color | juraj-google-style |
def __init__(self, timestamp, timestamp_description, data_type=None):
    """Initializes an event.

    Args:
        timestamp (int): timestamp, which contains the number of
            microseconds since January 1, 1970, 00:00:00 UTC.
        timestamp_description (str): description of the meaning of the
            timestamp value.
        data_type (Optional[str]): event data type. If the data type is not
            set it is derived from the DATA_TYPE class attribute.
    """
    super(TimestampEvent, self).__init__()
    self.timestamp = timestamp
    self.timestamp_desc = timestamp_description
    if data_type:
        self.data_type = data_type
Args:
timestamp (int): timestamp, which contains the number of microseconds
since January 1, 1970, 00:00:00 UTC.
timestamp_description (str): description of the meaning of the timestamp
value.
data_type (Optional[str]): event data type. If the data type is not set
it is derived from the DATA_TYPE class attribute. | juraj-google-style |
def bullet_base_pose_to_world_pose(self, pose_in_base):
    """Convert a pose in the robot base frame to a pose in the world frame.

    Args:
        pose_in_base: a (pos, orn) tuple in the base frame.

    Returns:
        pose_in_world: a (pos, orn) tuple in the world frame.
    """
    pose_in_base = T.pose2mat(pose_in_base)
    # Query the simulated robot's current base pose from pybullet.
    base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[0])
    base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[1])
    base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))
    # Compose the transforms: world_T_pose = world_T_base * base_T_pose.
    pose_in_world = T.pose_in_A_to_pose_in_B(pose_A=pose_in_base, pose_A_in_B=base_pose_in_world)
    return T.mat2pose(pose_in_world)
Args:
pose_in_base: a (pos, orn) tuple.
Returns:
pose_in world: a (pos, orn) tuple. | codesearchnet |
def _TensorArrayReadGrad(op: ops.Operation, grad):
    """Gradient for TensorArrayRead.

    Args:
        op: Forward TensorArrayRead op.
        grad: Gradient `Tensor` to TensorArrayRead.

    Returns:
        A flow `Tensor`, which can be used in control dependencies to
        force the write of `grad` to the gradient `TensorArray`.
    """
    handle = op.inputs[0]
    index = op.inputs[1]
    flow = op.inputs[2]
    dtype = op.get_attr('dtype')
    # Identify the gradient source so distinct sources get distinct
    # gradient TensorArrays.
    grad_source = _GetGradSource(grad)
    g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)
    # Reading at `index` back-propagates as a write of `grad` at `index`.
    w_g = g.write(index, grad)
    return [None, None, w_g.flow]
Args:
op: Forward TensorArrayRead op.
grad: Gradient `Tensor` to TensorArrayRead.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`. | github-repos |
def time_range_to_frame_range(self, start, end, sr):
    """Compute the frames covering the given time range in seconds.

    Args:
        start (float): Start time in seconds.
        end (float): End time in seconds.
        sr (int): The sampling rate used for time-to-sample conversion.

    Returns:
        tuple: (start_frame, end_frame) with end_frame exclusive.
    """
    first_sample = seconds_to_sample(start, sr)
    # The end time is exclusive, so look at the last included sample.
    last_sample = seconds_to_sample(end, sr) - 1
    start_frame = self.sample_to_frame_range(first_sample)[0]
    end_frame = self.sample_to_frame_range(last_sample)[1]
    return start_frame, end_frame
Args:
start (float): Start time in seconds.
end (float): End time in seconds.
sr (int): The sampling rate to use for time-to-sample conversion.
Returns:
tuple: A tuple containing the start and end (exclusive) frame indices. | juraj-google-style |
def get_last_release_time(name, paths=None):
    """Return the most recent time this package was released.

    Note that releasing a variant into an already-released package is also
    considered a package release.

    Returns:
        int: Epoch time of last package release, or zero if this cannot be
        determined.
    """
    latest = 0
    for repo, family_resource in _get_families(name, paths):
        release_time = repo.get_last_release_time(family_resource)
        if release_time == 0:
            # Any repository that cannot report a time makes the overall
            # answer unknown.
            return 0
        latest = max(latest, release_time)
    return latest
Note that releasing a variant into an already-released package is also
considered a package release.
Returns:
int: Epoch time of last package release, or zero if this cannot be
determined. | codesearchnet |
def _add_individual(self, ind_obj):
    """Add an individual to the adapter.

    Args:
        ind_obj (puzzle.models.Individual): individual to register.
    """
    logger.debug("Adding individual {0} to plugin".format(ind_obj.ind_id))
    self.individual_objs.append(ind_obj)
Args:
ind_obj (puzzle.models.Individual) | juraj-google-style |
async def verify_docker_worker_task(chain, link):
    """Docker-worker specific checks.

    Args:
        chain (ChainOfTrust): the chain we're operating on.
        link (ChainOfTrust or LinkOfTrust): the trust object for the
            signing task.

    Raises:
        CoTError: on failure.
    """
    if (chain != link):
        # The chain's own task is exempt from the interactive check.
        check_interactive_docker_worker(link)
    verify_docker_image_sha(chain, link)
Args:
chain (ChainOfTrust): the chain we're operating on
link (ChainOfTrust or LinkOfTrust): the trust object for the signing task.
Raises:
CoTError: on failure. | codesearchnet |
def _read(**kwargs):
    """Read a csv file from local disk into a DataFrame.

    Args:
        kwargs: Keyword arguments forwarded to pandas.read_csv; only local
            files are supported for now.

    Returns:
        A DataFrame, or (for chunked reads) the TextFileReader patched so
        its read() yields our DataFrame instead of a pandas one.
    """
    pd_obj = BaseFactory.read_csv(**kwargs)
    if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
        # Chunked read: capture the original bound read before replacing
        # it, so the wrapper delegates to pandas and re-wraps each chunk.
        reader = pd_obj.read
        pd_obj.read = lambda *args, **kwargs: DataFrame(
            query_compiler=reader(*args, **kwargs)
        )
        return pd_obj
    return DataFrame(query_compiler=pd_obj)
Args:
filepath_or_buffer:
The filepath of the csv file.
We only support local files for now.
kwargs: Keyword arguments in pandas.read_csv | juraj-google-style |
def _set_value_test(self, filler_pipeline_key, value):
    """Sets the value of this slot for use in testing.

    Args:
        filler_pipeline_key: The db.Key of the _PipelineRecord that filled
            this slot.
        value: The serializable value set for this slot.
    """
    self.filled = True
    self._filler_pipeline_key = filler_pipeline_key
    self._fill_datetime = datetime.datetime.utcnow()
    # Round-trip through JSON to mimic how the value would be persisted.
    self._value = json.loads(json.dumps(
        value, cls=mr_util.JsonEncoder), cls=mr_util.JsonDecoder)
Args:
filler_pipeline_key: The db.Key of the _PipelineRecord that filled
this slot.
value: The serializable value set for this slot. | juraj-google-style |
def get_label_set(self, type_str=None):
    """Get the set of label_str values for the tree rooted at this node.

    Args:
        type_str: SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only
            include information from nodes of that type.

    Returns:
        set: The labels of the matching nodes.
    """
    labels = set()
    for node in self.node_gen:
        if type_str in (None, node.type_str):
            labels.add(node.label_str)
    return labels
Args:
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
set: The labels of the nodes leading up to this node from the root. | codesearchnet |
def set_data(self, data):
    """Fill the form fields from *data*.

    Args:
        data (dict): Values keyed by field name; fields missing from the
            dict are set to None.

    Returns:
        Self, the form object, to allow chaining.
    """
    for field_name in self._fields:
        setattr(self, field_name, data.get(field_name))
    return self
Args:
data (dict): Data to assign form fields.
Returns:
Self. Form object. | codesearchnet |
def _convert_json(obj):
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val) for (key, val) in six.iteritems(obj)}
elif (isinstance(obj, list) and (len(obj) == 2)):
first = obj[0]
second = obj[1]
if ((first == 'set') and isinstance(second, list)):
return [_convert_json(elem) for elem in second]
elif ((first == 'map') and isinstance(second, list)):
for elem in second:
if ((not isinstance(elem, list)) or (len(elem) != 2)):
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj | Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object. | codesearchnet |
def filter_keys_by_dataset_id(did, key_container):
    """Filter the provided key iterable by the given `DatasetID`.

    Note: The `modifiers` attribute of `did` should be `None` to allow for
    **any** modifier in the results.

    Args:
        did (DatasetID): Query parameters to match in the `key_container`.
        key_container (iterable): Set, list, tuple, or dict of `DatasetID`
            keys.

    Returns (list): Keys matching the provided parameters in no specific
        order.
    """
    keys = iter(key_container)
    for attr in DATASET_KEYS:
        wanted = getattr(did, attr)
        if wanted is None:
            # Unset query attributes match everything.
            continue
        if attr == "wavelength":
            # Wavelengths match within a range rather than by equality.
            keys = [k for k in keys
                    if getattr(k, attr) is not None and
                    DatasetID.wavelength_match(getattr(k, attr), wanted)]
        else:
            keys = [k for k in keys
                    if getattr(k, attr) is not None and getattr(k, attr) == wanted]
    return keys
Note: The `modifiers` attribute of `did` should be `None` to allow for
**any** modifier in the results.
Args:
did (DatasetID): Query parameters to match in the `key_container`.
key_container (iterable): Set, list, tuple, or dict of `DatasetID`
keys.
Returns (list): List of keys matching the provided parameters in no
specific order. | juraj-google-style |
def _ParseRecordLogline(self, parser_mediator, structure):
    """Parses a logline record structure and produces events.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        structure (pyparsing.ParseResults): structure of tokens derived
            from a line of a text file.
    """
    date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()
    try:
        datetime_iso8601 = self._GetISO8601String(structure.date_time)
        date_time.CopyFromStringISO8601(datetime_iso8601)
    except ValueError:
        # Malformed timestamp: emit a warning instead of an event.
        parser_mediator.ProduceExtractionWarning('invalid date time value: {0!s}'.format(structure.date_time))
        return
    event_data = GoogleDriveSyncLogEventData()
    event_data.log_level = structure.log_level
    event_data.pid = structure.pid
    event_data.thread = structure.thread
    event_data.source_code = structure.source_code
    # Collapse multi-line messages into a single line.
    event_data.message = structure.message.replace('\n', ' ')
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file. | codesearchnet |
def device_type_from_string(cl_device_type_str):
    """Convert a value like ``gpu`` to a pyopencl device type.

    Supported values are: ``accelerator``, ``cpu``, ``custom``, ``gpu``.
    If ``all`` (or any other unknown value) is given, None is returned.

    Args:
        cl_device_type_str (str): The string we want to convert to a
            device type.

    Returns:
        cl.device_type: the pyopencl device type, or None when unknown.
    """
    attribute_name = cl_device_type_str.upper()
    # getattr with a default folds the hasattr check into one lookup.
    return getattr(cl.device_type, attribute_name, None)
return None | Converts values like ``gpu`` to a pyopencl device type string.
Supported values are: ``accelerator``, ``cpu``, ``custom``, ``gpu``. If ``all`` is given, None is returned.
Args:
cl_device_type_str (str): The string we want to convert to a device type.
Returns:
cl.device_type: the pyopencl device type. | codesearchnet |
def _get_node(self, loc_descriptor, create_non_existing_nodes=False):
node = self._root_node
for location in loc_descriptor.generate_all_sub_locations():
child = node.get_child_node_or_default(location, None)
if child is None:
if not create_non_existing_nodes:
raise RuntimeError("Node at location '%s' in '%s' does not exist!" % (location, loc_descriptor.to_string()))
else:
child = TreeMapNode(None)
node.set_child_node(location, child)
self._nbr_of_nodes += 1
node = child
return node | Get node corresponding to last location in a :class:`LocationDescriptor` object.
Args:
loc_descriptor: A :class:`LocationDescriptor` object
create_non_existing_nodes (bool): Do we create non existing nodes along the way (including last node)?
Raises:
RuntimeError if a node along the path given in by the :class:`LocationDescriptor` object does not exist
**if** ``create_non_existing_nodes`` is set to ``False``. | juraj-google-style |
def enable_nested_function_shape_inference(fn: _F) -> _F:
    """Decorator for enabling nested_function_shape_inference on a test.

    Intended for test methods in a `tf.test.TestCase` class: it turns the
    flag on for the duration of the test and restores the prior (disabled)
    state afterwards.

    Example:

    class MyTest(test.TestCase):

        @enable_nested_function_shape_inference
        def testFoo(self):
            ...

    Args:
        fn: the function to be wrapped.

    Returns:
        The wrapped function.
    """
    def wrapper(*args, **kwargs):
        # Already enabled: run the test unchanged.
        if flags.config().enable_nested_function_shape_inference.value():
            return fn(*args, **kwargs)
        flags.config().enable_nested_function_shape_inference.reset(True)
        try:
            return fn(*args, **kwargs)
        finally:
            # Restore the disabled state even if the test fails.
            flags.config().enable_nested_function_shape_inference.reset(False)
    return wrapper
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will set nested_function_shape_inference,
reset the context, execute the test, then reset the context to the state
it was in prior to this test.
Example:
class MyTest(test.TestCase):
@enable_nested_function_shape_inference
def testFoo(self):
...
Args:
fn: the function to be wrapped.
Returns:
The wrapped function. | github-repos |
def stack_template_key_name(blueprint):
    """Produce the storage key name for a blueprint's rendered template.

    Args:
        blueprint (:class:`stacker.blueprints.base.Blueprint`): The
            blueprint object to create the key from.

    Returns:
        string: Key name resulting from blueprint.
    """
    name = blueprint.name
    fqn = blueprint.context.get_fqn(name)
    return 'stack_templates/{}/{}-{}.json'.format(fqn, name, blueprint.version)
Args:
blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint
object to create the key from.
Returns:
string: Key name resulting from blueprint. | codesearchnet |
def expression(value):
    """Create an SPL expression.

    Args:
        value: Expression as a string or another `Expression`. If value is
            an instance of `Expression` then a new instance is returned
            containing the same type and value.

    Returns:
        Expression: SPL expression from `value`.
    """
    if isinstance(value, Expression):
        # Copy-construct so the caller gets an independent instance.
        return Expression(value._type, value._value)
    if hasattr(value, 'spl_json'):
        spl_dict = value.spl_json()
        return Expression(spl_dict['type'], spl_dict['value'])
    return Expression('splexpr', value)
Args:
value: Expression as a string or another `Expression`. If value is an instance of `Expression` then a new instance is returned containing the same type and value.
Returns:
Expression: SPL expression from `value`. | codesearchnet |
def weights_prepend_inputs_to_targets(labels):
    """Assign weight 1.0 to only the "targets" portion of the labels.

    Weight 1.0 is assigned to all nonzero labels past the first zero.
    See prepend_mode in common_hparams.py.

    Args:
        labels: A Tensor of int32s.

    Returns:
        A Tensor of floats.
    """
    # Counts how many zeros have occurred so far along axis 1; positions
    # past the first zero (the inputs/targets separator) have a count > 0.
    past_first_zero = tf.cumsum(to_float(tf.equal(labels, 0)), axis=1)
    nonzero = to_float(labels)
    # 1.0 where the label is nonzero AND lies past the first zero.
    return to_float(tf.not_equal(past_first_zero * nonzero, 0))
Weight 1.0 is assigned to all nonzero labels past the first zero.
See prepend_mode in common_hparams.py
Args:
labels: A Tensor of int32s.
Returns:
A Tensor of floats. | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.