code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def build_backward_pass_step(get_transition_matrix_for_timestep):
    """Build a callable that performs one step of backward smoothing.

    Args:
        get_transition_matrix_for_timestep: callable taking an integer
            timestep and returning the transition matrix for that step.

    Returns:
        backward_pass_step: a callable that updates a BackwardPassState
            from timestep `t` to `t-1`.
    """
    def backward_pass_step(state,
                           filtered_parameters):
        # Unpack the filtered/predicted moments produced by the forward pass.
        (filtered_mean, filtered_cov,
         predicted_mean, predicted_cov) = filtered_parameters
        transition_matrix = get_transition_matrix_for_timestep(state.timestep)
        # The smoothed posterior at t+1, carried in the running state.
        next_posterior_mean = state.backward_mean
        next_posterior_cov = state.backward_cov
        posterior_mean, posterior_cov = backward_smoothing_update(
            filtered_mean,
            filtered_cov,
            predicted_mean,
            predicted_cov,
            next_posterior_mean,
            next_posterior_cov,
            transition_matrix)
        # Step the timestep counter backwards for the next iteration.
        return BackwardPassState(backward_mean=posterior_mean,
                                 backward_cov=posterior_cov,
                                 timestep=state.timestep-1)
    return backward_pass_step | Build a callable that perform one step for backward smoothing.
Args:
get_transition_matrix_for_timestep: callable taking a timestep
as an integer `Tensor` argument, and returning a `LinearOperator`
of shape `[latent_size, latent_size]`.
Returns:
backward_pass_step: a callable that updates a BackwardPassState
from timestep `t` to `t-1`. | juraj-google-style |
def _validate(self):
    """Check the configuration for validity.

    Raises:
        ConfigurationException: if an unknown configuration key is present.
    """
    # Reject any key that is not part of the known DEFAULTS.
    for key in self:
        if (key not in DEFAULTS):
            raise exceptions.ConfigurationException('Unknown configuration key "{}"! Valid configuration keys are {}'.format(key, list(DEFAULTS.keys())))
    # Delegate per-section validation to dedicated helpers.
    validate_queues(self['queues'])
    validate_bindings(self['bindings'])
    validate_client_properties(self['client_properties']) | Perform checks on the configuration to assert its validity
Raises:
ConfigurationException: If the configuration is invalid. | codesearchnet |
def remove_empty_keys(values, remove=({}, None, [], 'null')):
    """Recursively drop dict entries / list items whose value is in ``remove``.

    Args:
        values: dict or list to filter; any other type is returned unchanged.
        remove: collection of values treated as "empty".

    Returns:
        A filtered copy for dicts/lists, or ``values`` itself otherwise.
    """
    if isinstance(values, dict):
        # deepcopy so the caller's structure is never mutated.
        return {key: remove_empty_keys(value, remove=remove)
                for key, value in deepcopy(values).items() if value not in remove}
    if isinstance(values, list):
        return [remove_empty_keys(value, remove=remove)
                for value in deepcopy(values) if value not in remove]
    return values | Recursively remove key/value pairs where the value is in ``remove``.
This is targeted at comparing json-e rebuilt task definitions, since
json-e drops key/value pairs with empty values.
Args:
values (dict/list): the dict or list to remove empty keys from.
Returns:
values (dict/list): a dict or list copy, with empty keys removed. | juraj-google-style |
def aggregate(all_stats):
    """Combine per-optimizer stats into aggregate means and standard deviations.

    Args:
        all_stats: dict keyed by optimizer name, each value holding 'mean'
            and 'standard_deviation' stats dicts.

    Returns:
        dict with 'means' and 'standard_deviations' lists, each entry tagged
        with its optimizer's name.
    """
    aggregate_stats = {'means': [], 'standard_deviations': []}
    for optimizer_key in all_stats:
        # Deep-copy so tagging with 'name' does not mutate the inputs.
        mean_stats = copy.deepcopy(all_stats[optimizer_key]['mean'])
        mean_stats['name'] = optimizer_key
        aggregate_stats['means'].append(mean_stats)
        sd_stats = copy.deepcopy(all_stats[optimizer_key]['standard_deviation'])
        sd_stats['name'] = optimizer_key
        aggregate_stats['standard_deviations'].append(sd_stats)
    _add_mean_sd_to_stats(aggregate_stats, 'means')
    return aggregate_stats | Combine stats for multiple optimizers to obtain one mean and sd.
Useful for combining stats for the same optimizer class and multiple problems.
Args:
all_stats: dict; output from compare. | codesearchnet |
def isfunc(x):
    """Return True if ``x`` is a plain (non-coroutine) function or method."""
    # Coroutine functions are deliberately excluded from both checks.
    return any([
        inspect.isfunction(x) and not asyncio.iscoroutinefunction(x),
        inspect.ismethod(x) and not asyncio.iscoroutinefunction(x)
    ]) | Returns `True` if the given value is a function or method object.
Arguments:
x (mixed): value to check.
Returns:
bool | juraj-google-style |
def JoinPath(self, path_segments):
    """Join path segments into a single path prefixed with the separator.

    Args:
        path_segments (list[str]): path segments; segments containing the
            separator are split further, empty segments are dropped.

    Returns:
        str: joined path segments prefixed with the path separator.
    """
    # Split any segment that itself contains the separator.
    path_segments = [
        segment.split(self.PATH_SEPARATOR) for segment in path_segments]
    # Flatten the resulting list of lists.
    path_segments = [
        element for sublist in path_segments for element in sublist]
    # Drop empty segments.
    path_segments = list(filter(None, path_segments))
    return '{0:s}{1:s}'.format(
        self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments)) | Joins the path segments into a path.
Args:
path_segments (list[str]): path segments.
Returns:
str: joined path segments prefixed with the path separator. | juraj-google-style |
def __init__(self, string_table):
    """Initialize the instance.

    Args:
        string_table: a `StringTable` object.
    """
    self._string_table = string_table
    # Mapping from function key to function object; starts empty.
    self._function_key_to_function = {} | Constructor.
Args:
string_table: A `StringTable` object. | github-repos |
def on_run_start(self, request): | Callback invoked on run() calls to the debug-wrapper session.
This is a blocking callback.
The invocation happens after the wrapper's run() call is entered,
after an increment of run call counter.
Args:
request: (`OnRunStartRequest`) callback request object carrying
information about the run call such as the fetches, feed dict, run
options, run metadata, and how many `run()` calls to this wrapper
session have occurred.
Returns:
An instance of `OnRunStartResponse`, carrying information to
debug URLs used to watch the tensors. | github-repos |
def _create_topk_unique(inputs, k):
    """Create the top k values per row, in sorted order, with indices.

    NaNs are replaced by -inf so they never appear in the result.

    Args:
        inputs: rank-2 tensor [batch_size, original_size].
        k: number of top elements to select.

    Returns:
        topk_r2: [batch_size, k] tensor of the k largest values.
        topk_indices_r2: [batch_size, k] tensor of their indices.
    """
    height = inputs.shape[0]
    width = inputs.shape[1]
    neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)
    ones = tf.ones([height, width], dtype=tf.float32)
    neg_inf_r2 = ones * neg_inf_r0
    # Replace NaNs with -inf so they sort last.
    inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)
    tmp = inputs
    topk_r2 = tf.zeros([height, k], dtype=tf.float32)
    # Iteratively take the row-wise maximum k times, masking each selected
    # value with -inf before the next pass.
    for i in range(k):
        kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)
        k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0),
                         [height, 1])
        topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)
        ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))
        tmp = tf.where(ge_r2, neg_inf_r2, inputs)
    # Extract indices from the low bits of the values' bit pattern.
    # NOTE(review): this assumes the indices were packed into the values'
    # low bits by an upstream step -- confirm with the caller.
    log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
    next_power_of_two = 1 << log2_ceiling
    count_mask = next_power_of_two - 1
    mask_r0 = tf.constant(count_mask)
    mask_r2 = tf.fill([height, k], mask_r0)
    topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)
    topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)
    return topk_r2, topk_indices_r2 | Creates the top k values in sorted order with indices.
Args:
inputs: A tensor with rank of 2. [batch_size, original_size].
k: An integer, number of top elements to select.
Returns:
topk_r2: A tensor, the k largest elements. [batch_size, k].
topk_indices_r2: A tensor, indices of the top k values. [batch_size, k]. | juraj-google-style |
def GetArtifactDependencies(rdf_artifact, recursive=False, depth=1):
    """Return the set of artifact names the given artifact depends on.

    Args:
        rdf_artifact: RDF artifact object.
        recursive: if True, recurse into dependencies to find theirs too.
        depth: current recursion depth, used to bound recursion.

    Returns:
        A set of strings containing the dependent artifact names.

    Raises:
        RuntimeError: if the maximum recursion depth is reached.
    """
    deps = set()
    for source in rdf_artifact.sources:
        # Only ARTIFACT / ARTIFACT_GROUP sources declare dependencies.
        if source.type in (rdf_artifacts.ArtifactSource.SourceType.ARTIFACT,
                           rdf_artifacts.ArtifactSource.SourceType.ARTIFACT_GROUP):
            if source.attributes.GetItem("names"):
                deps.update(source.attributes.GetItem("names"))
    if depth > 10:
        raise RuntimeError("Max artifact recursion depth reached.")
    deps_set = set(deps)
    if recursive:
        for dep in deps:
            artifact_obj = REGISTRY.GetArtifact(dep)
            new_dep = GetArtifactDependencies(artifact_obj, True, depth=depth + 1)
            if new_dep:
                deps_set.update(new_dep)
    return deps_set | Return a set of artifact dependencies.
Args:
rdf_artifact: RDF object artifact.
recursive: If True recurse into dependencies to find their dependencies.
depth: Used for limiting recursion depth.
Returns:
A set of strings containing the dependent artifact names.
Raises:
RuntimeError: If maximum recursion depth reached. | juraj-google-style |
def print_live_output(self):
    """Stream the running command's stdout/stderr through the logger.

    Raises:
        TypeError: if the command was started in blocking mode.
    """
    if self.block:
        raise TypeError(NON_BLOCKING_ERROR_MESSAGE)
    else:
        # Keep draining until the worker thread exits AND all buffered
        # output/error lines have been logged.
        while (self.thread.is_alive() or (self.old_output_size < len(self.output)) or (self.old_error_size < len(self.error))):
            if ((self._stdout is not None) and (len(self.output) > self.old_output_size)):
                # Log any new stdout lines since the last pass.
                while (self.old_output_size < len(self.output)):
                    self.logger.info(self.output[self.old_output_size])
                    self.old_output_size += 1
            if ((self._stderr is not None) and (len(self.error) > self.old_error_size)):
                # Log any new stderr lines since the last pass.
                while (self.old_error_size < len(self.error)):
                    self.logger.error(self.error[self.old_error_size])
                    self.old_error_size += 1 | Block and print the output of the command
Raises:
TypeError: If command is blocking | codesearchnet |
def execute_phase(self, phase):
    """Execute a phase, repeating it until it stops requesting repeats.

    Args:
        phase: phase to execute.

    Returns:
        The final PhaseExecutionOutcome; intermediate REPEAT outcomes are
        handled internally. Returns PhaseExecutionOutcome(None) when the
        executor is stopping.
    """
    repeat_count = 1
    # A falsy repeat_limit means effectively unlimited repeats.
    repeat_limit = (phase.options.repeat_limit or sys.maxsize)
    while (not self._stopping.is_set()):
        is_last_repeat = (repeat_count >= repeat_limit)
        phase_execution_outcome = self._execute_phase_once(phase, is_last_repeat)
        # Re-run the phase when it asks for a repeat and the limit allows.
        if (phase_execution_outcome.is_repeat and (not is_last_repeat)):
            repeat_count += 1
            continue
        return phase_execution_outcome
    return PhaseExecutionOutcome(None) | Executes a phase or skips it, yielding PhaseExecutionOutcome instances.
Args:
phase: Phase to execute.
Returns:
The final PhaseExecutionOutcome that wraps the phase return value
(or exception) of the final phase run. All intermediary results, if any,
are REPEAT and handled internally. Returning REPEAT here means the phase
hit its limit for repetitions. | codesearchnet |
def render_text(text, preformatted=False):
    """Render text as an IPython HTML display object.

    Args:
        text: the text to render.
        preformatted: whether the text should be rendered as preformatted.
    """
    return IPython.core.display.HTML(_html.HtmlBuilder.render_text(text, preformatted)) | Return text formatted as a HTML
Args:
text: the text to render
preformatted: whether the text should be rendered as preformatted | codesearchnet |
def save_pretrained(self, save_directory: Union[str, os.PathLike], safe_serialization: bool=True, **kwargs):
    """Save the pipeline's model, tokenizer and processors to a directory.

    Args:
        save_directory (`str` or `os.PathLike`): target directory; created
            if it does not exist.
        safe_serialization (`bool`): whether to save with `safetensors`.
        kwargs: forwarded to the components' save_pretrained methods.
    """
    # Migrate the deprecated `use_auth_token` argument to `token`.
    use_auth_token = kwargs.pop('use_auth_token', None)
    if use_auth_token is not None:
        warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
        if kwargs.get('token', None) is not None:
            raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
        kwargs['token'] = use_auth_token
    if os.path.isfile(save_directory):
        logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
        return
    os.makedirs(save_directory, exist_ok=True)
    # Record any dynamically-registered custom pipeline implementation so
    # it can be reloaded from the saved directory.
    if hasattr(self, '_registered_impl'):
        pipeline_info = self._registered_impl.copy()
        custom_pipelines = {}
        for task, info in pipeline_info.items():
            # Only persist registrations for this pipeline class.
            if info['impl'] != self.__class__:
                continue
            info = info.copy()
            module_name = info['impl'].__module__
            last_module = module_name.split('.')[-1]
            # Store class references by name so the config is serializable.
            info['impl'] = f'{last_module}.{info['impl'].__name__}'
            info['pt'] = tuple((c.__name__ for c in info['pt']))
            info['tf'] = tuple((c.__name__ for c in info['tf']))
            custom_pipelines[task] = info
        self.model.config.custom_pipelines = custom_pipelines
        custom_object_save(self, save_directory)
    kwargs['safe_serialization'] = safe_serialization
    self.model.save_pretrained(save_directory, **kwargs)
    # Save each optional component only if the pipeline has it.
    if self.tokenizer is not None:
        self.tokenizer.save_pretrained(save_directory, **kwargs)
    if self.feature_extractor is not None:
        self.feature_extractor.save_pretrained(save_directory, **kwargs)
    if self.image_processor is not None:
        self.image_processor.save_pretrained(save_directory, **kwargs)
    if self.modelcard is not None:
        self.modelcard.save_pretrained(save_directory) | Save the pipeline's model and tokenizer.
Args:
save_directory (`str` or `os.PathLike`):
A path to the directory where to saved. It will be created if it doesn't exist.
safe_serialization (`str`):
Whether to save the model using `safetensors` or the traditional way for PyTorch or Tensorflow.
kwargs (`Dict[str, Any]`, *optional*):
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. | github-repos |
def set_position_p(self, pvalue):
    """Set the P gain of the servo's position PID controller.

    Args:
        pvalue (int): P value; split into MSB/LSB bytes for the wire format.
    """
    pvalue_msb = (int(pvalue) >> 8)
    pvalue_lsb = (int(pvalue) & 255)
    # Build the RAM-write request packet byte by byte.
    data = []
    data.append(11)
    data.append(self.servoid)
    data.append(RAM_WRITE_REQ)
    data.append(POSITION_KP_RAM)
    data.append(BYTE2)
    data.append(pvalue_lsb)
    data.append(pvalue_msb)
    send_data(data) | Set the P gain of the position PID
Args:
pvalue (int): P value | codesearchnet |
def _forward(self):
    """Advance to the next token, updating current_token and current_pos.

    Raises:
        MissingTokensError: when advancing beyond the end of the token flow.
    """
    try:
        self.current_token = next(self.tokens)
    except StopIteration:
        raise MissingTokensError(('Unexpected end of token stream at %d.' % self.current_pos))
    self.current_pos += 1 | Advance to the next token.
Internal methods, updates:
- self.current_token
- self.current_pos
Raises:
MissingTokensError: when trying to advance beyond the end of the
token flow. | codesearchnet |
def build_image(registry, image):
    """Build a docker image as described by ``image``.

    Args:
        registry (str): registry this image belongs to; when None the image
            name carries no registry prefix.
        image (dict[str, Any]): image description (name, optional file/path).
    """
    # Separate an explicit tag from the image name, if present.
    if (':' in image['name']):
        (_, tag) = image['name'].split(':', 1)
    else:
        (_, tag) = (image['name'], None)
    values = {'registry': ('' if (registry is None) else (registry + '/')), 'image': image['name'], 'tag': tag}
    if (tag is None):
        # No explicit tag: tag with both the bare name and the current version.
        args = ['-t {registry}{image}'.format(**values), '-t {registry}{image}:{version}'.format(version=versioning.current(), **values)]
    else:
        args = ['-t {registry}{image}'.format(**values)]
    if ('file' in image):
        # Custom Dockerfile location.
        args.append('-f {}'.format(conf.proj_path(image['file'])))
    with conf.within_proj_dir(image.get('path', '.')):
        log.info('Building <33>{registry}<35>/{image}', **values)
        shell.run('docker build {args} .'.format(args=' '.join(args))) | Build docker image.
Args:
registry (str):
The name of the registry this image belongs to. If not given, the
resulting image will have a name without the registry.
image (dict[str, Any]):
The dict containing the information about the built image. This is
the same dictionary as defined in DOCKER_IMAGES variable. | codesearchnet |
def start(self, container, *args, **kwargs):
    """Start a container (similar to ``docker start``; no attach options).

    Args:
        container (str): the container to start.

    Raises:
        docker.errors.DeprecatedMethod: if any argument besides
            ``container`` is provided; start-time configuration is no
            longer supported -- use host_config in create_container.
        docker.errors.APIError: if the server returns an error.
    """
    if (args or kwargs):
        raise errors.DeprecatedMethod('Providing configuration in the start() method is no longer supported. Use the host_config param in create_container instead.')
    url = self._url('/containers/{0}/start', container)
    res = self._post(url)
    self._raise_for_status(res) | Start a container. Similar to the ``docker start`` command, but
doesn't support attach options.
**Deprecation warning:** Passing configuration options in ``start`` is
no longer supported. Users are expected to provide host config options
in the ``host_config`` parameter of
:py:meth:`~ContainerApiMixin.create_container`.
Args:
container (str): The container to start
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
:py:class:`docker.errors.DeprecatedMethod`
If any argument besides ``container`` are provided.
Example:
>>> container = cli.create_container(
... image='busybox:latest',
... command='/bin/sleep 30')
>>> cli.start(container=container.get('Id')) | codesearchnet |
def get(self, name):
    """Get an image by name.

    Args:
        name (str): the name of the image.

    Returns:
        Image: the image model.

    Raises:
        docker.errors.ImageNotFound: if the image does not exist.
        docker.errors.APIError: if the server returns an error.
    """
    return self.prepare_model(self.client.api.inspect_image(name)) | Gets an image.
Args:
name (str): The name of the image.
Returns:
(:py:class:`Image`): The image.
Raises:
:py:class:`docker.errors.ImageNotFound`
If the image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error. | juraj-google-style |
def __new__(mcs, cls, bases, dct):
    """Create the class, collecting Setting attributes into class metadata.

    Args:
        cls (str): class name.
        bases (tuple): base classes to inherit from.
        dct (dict): class attributes.

    Returns:
        class: the newly created class.
    """
    super_new = super(_Metaclass, mcs).__new__
    parents = [b for b in bases if isinstance(b, _Metaclass)]
    if not parents:
        # Creating a base class itself: pass through unchanged.
        return super_new(mcs, cls, bases, dct)
    new_attr = {}
    # Pop the inner Meta class, defaulting to an empty setting_prefix.
    _meta = dct.pop("Meta", type("Meta", (), {"setting_prefix": ""}))()
    _meta.settings = {}
    for name, setting in dct.items():
        if isinstance(setting, Setting):
            # Register the setting and backfill its name/prefix if unset.
            _meta.settings[name] = setting
            if setting.name == "":
                setting.name = name
            if setting.prefix == "":
                setting.prefix = _meta.setting_prefix
        else:
            new_attr[name] = setting
    new_attr["_meta"] = _meta
    new_attr["settings"] = _meta.settings
    return super_new(mcs, cls, bases, new_attr) | New method.
Args:
cls (str): class name.
bases (tuple): base classes to inherit from.
dct (dict): class attributes.
Returns:
class: the new created class. | juraj-google-style |
def print_logs(redis_client, threads_stopped):
    """Print log messages published by workers on all of the nodes.

    Args:
        redis_client: a client to the primary Redis shard.
        threads_stopped (threading.Event): signals this thread to exit.
    """
    pubsub_client = redis_client.pubsub(ignore_subscribe_messages=True)
    pubsub_client.subscribe(ray.gcs_utils.LOG_FILE_CHANNEL)
    localhost = services.get_node_ip_address()
    try:
        # Tracks back-to-back messages to detect the driver falling behind.
        num_consecutive_messages_received = 0
        while True:
            if threads_stopped.is_set():
                return
            msg = pubsub_client.get_message()
            if (msg is None):
                # Nothing pending: reset the streak and poll again shortly.
                num_consecutive_messages_received = 0
                threads_stopped.wait(timeout=0.01)
                continue
            num_consecutive_messages_received += 1
            data = json.loads(ray.utils.decode(msg['data']))
            # Only include the originating IP for logs from remote nodes.
            if (data['ip'] == localhost):
                for line in data['lines']:
                    print('{}{}(pid={}){} {}'.format(colorama.Style.DIM, colorama.Fore.CYAN, data['pid'], colorama.Style.RESET_ALL, line))
            else:
                for line in data['lines']:
                    print('{}{}(pid={}, ip={}){} {}'.format(colorama.Style.DIM, colorama.Fore.CYAN, data['pid'], data['ip'], colorama.Style.RESET_ALL, line))
            # Warn every 100 uninterrupted messages.
            if (((num_consecutive_messages_received % 100) == 0) and (num_consecutive_messages_received > 0)):
                logger.warning("The driver may not be able to keep up with the stdout/stderr of the workers. To avoid forwarding logs to the driver, use 'ray.init(log_to_driver=False)'.")
    finally:
        pubsub_client.close() | Prints log messages from workers on all of the nodes.
Args:
redis_client: A client to the primary Redis shard.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit. | codesearchnet |
def filterfalse_items(item_list, flag_list):
    """Return the items of ``item_list`` selected by ``ifilterfalse_items``.

    NOTE(review): despite the "filterfalse" name, the adjacent description
    says items are kept where the flag is truthy -- confirm against
    util_iter.ifilterfalse_items, which by analogy with
    itertools.filterfalse would keep items with falsy flags.

    Args:
        item_list (list): items to filter.
        flag_list (list): flags, one per item; lengths must match.
    """
    assert (len(item_list) == len(flag_list))
    filtered_items = list(util_iter.ifilterfalse_items(item_list, flag_list))
    return filtered_items | Returns items in item list where the corresponding item in flag list is true
Args:
item_list (list): list of items
flag_list (list): list of truthy values
Returns:
filtered_items : items where the corresponding flag was truthy
SeeAlso:
util_iter.ifilterfalse_items | codesearchnet |
def __get_labels(self):
    """Read the document's label file and parse it into Label objects.

    Returns:
        list of labels.Label; empty if the label file cannot be read.
    """
    labels = []
    try:
        with self.fs.open(self.fs.join(self.path, self.LABEL_FILE), 'r') as file_desc:
            for line in file_desc.readlines():
                line = line.strip()
                # Each line is "<name>,<color>"; split on the first comma only.
                (label_name, label_color) = line.split(',', 1)
                labels.append(Label(name=label_name, color=label_color))
    except IOError:
        # A missing or unreadable label file simply yields no labels.
        pass
    return labels | Read the label file of the documents and extract all the labels
Returns:
An array of labels.Label objects | codesearchnet |
def apply_actions(self, actions):
    """Apply modification actions to the FEFF input set and rewrite files.

    Args:
        actions: list of dicts, each either {'dict': key, 'action': mod}
            (modify an in-memory feff input section) or
            {'file': filename, 'action': mod} (modify a file directly).

    Raises:
        ValueError: if an action has neither 'dict' nor 'file'.
    """
    modified = []
    for a in actions:
        if "dict" in a:
            k = a["dict"]
            modified.append(k)
            self.feffinp[k] = self.modify_object(a["action"], self.feffinp[k])
        elif "file" in a:
            self.modify(a["action"], a["file"])
        else:
            raise ValueError("Unrecognized format: {}".format(a))
    # Rewrite the per-section files and the combined feff.inp whenever any
    # in-memory section changed.
    if modified:
        feff = self.feffinp
        feff_input = "\n\n".join(str(feff[k]) for k in
                                 ["HEADER", "PARAMETERS", "POTENTIALS", "ATOMS"]
                                 if k in feff)
        for k, v in six.iteritems(feff):
            with open(os.path.join('.', k), "w") as f:
                f.write(str(v))
        with open(os.path.join('.', "feff.inp"), "w") as f:
            f.write(feff_input) | Applies a list of actions to the FEFF Input Set and rewrites modified
files.
Args:
actions [dict]: A list of actions of the form {'file': filename,
'action': moddermodification} or {'dict': feffinput_key,
'action': moddermodification} | juraj-google-style |
def _ExportEvent(self, output_module, event, deduplicate_events=True):
    """Export an event, flushing the buffer when the timestamp changes.

    Args:
        output_module (OutputModule): output module.
        event (EventObject): event.
        deduplicate_events (Optional[bool]): True if events should be
            deduplicated.
    """
    # A new timestamp means all events with the previous timestamp are
    # buffered; flush them before buffering this event.
    if (event.timestamp != self._export_event_timestamp):
        self._FlushExportBuffer(output_module, deduplicate_events=deduplicate_events)
        self._export_event_timestamp = event.timestamp
    self._export_event_heap.PushEvent(event) | Exports an event using an output module.
Args:
output_module (OutputModule): output module.
event (EventObject): event.
deduplicate_events (Optional[bool]): True if events should be
deduplicated. | codesearchnet |
def AssertDictType(dct, expected_key_type, expected_value_type):
    """Ensure ``dct`` is a dict whose keys and values have the given types.

    Args:
        dct: a dictionary to assert the type for.
        expected_key_type: expected type for dictionary keys.
        expected_value_type: expected type for dictionary values.

    Raises:
        TypeError: if ``dct`` is not a dict or any key/value has an
            unexpected type.
    """
    AssertType(dct, dict)
    for key, value in iteritems(dct):
        AssertType(key, expected_key_type)
        AssertType(value, expected_value_type) | Ensures that given dictionary is actually a dictionary of specified type.
Args:
dct: A dictionary to assert the type for.
expected_key_type: An expected type for dictionary keys.
expected_value_type: An expected type for dictionary values.
Raises:
TypeError: If given dictionary is not really a dictionary or not all its
keys and values have the expected type. | juraj-google-style |
def SetCredential(self, path_spec, identifier, data):
    """Set a specific credential for the path specification.

    Args:
        path_spec (PathSpec): path specification.
        identifier (str): credential identifier.
        data (object): credential data.

    Raises:
        KeyError: if the credential is not supported by the path
            specification type.
    """
    supported_credentials = manager.CredentialsManager.GetCredentials(path_spec)
    if (identifier not in supported_credentials.CREDENTIALS):
        # NOTE(review): "Unsuppored" typo in the error message below --
        # left untouched here since the text is runtime behavior.
        raise KeyError('Unsuppored credential: {0:s} for path specification type: {1:s}'.format(identifier, path_spec.type_indicator))
    # Credentials are keyed by the path spec's comparable string form.
    credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})
    credentials[identifier] = data
    self._credentials_per_path_spec[path_spec.comparable] = credentials | Sets a specific credential for the path specification.
Args:
path_spec (PathSpec): path specification.
identifier (str): credential identifier.
data (object): credential data.
Raises:
KeyError: if the credential is not supported by the path specification
type. | codesearchnet |
def MakeTokenRegex(meta_left, meta_right):
    """Return a compiled, memoized regular expression for tokenization.

    Args:
        meta_left: left delimiter, e.g. '{'.
        meta_right: right delimiter, e.g. '}'.

    Returns:
        Compiled pattern capturing delimiter-to-delimiter spans whose first
        inner character is non-whitespace; results are cached per
        (meta_left, meta_right) pair.
    """
    key = meta_left, meta_right
    if key not in _token_re_cache:
        # Escape the delimiters so regex metacharacters are taken literally.
        _token_re_cache[key] = re.compile(
            r'(' +
            re.escape(meta_left) +
            r'\S.*?' +
            re.escape(meta_right) +
            r')')
    return _token_re_cache[key] | Return a (compiled) regular expression for tokenization.
Args:
meta_left, meta_right: e.g. '{' and '}'
- The regular expressions are memoized.
- This function is public so the syntax highlighter can use it. | juraj-google-style |
def get_resize_output_image_size(video, resolution_max_side: int) -> tuple[int, int]:
    """Compute the output (height, width) for resizing a video.

    The longest edge is resized to ``resolution_max_side`` (capped at
    MAX_IMAGE_SIZE) and the other edge is scaled to keep the aspect ratio;
    both dimensions are rounded up to even values and kept >= 1.

    Args:
        video: tensor whose last two dimensions are (height, width).
        resolution_max_side (int): target size of the longest edge.

    Returns:
        (height, width) of the video after resizing.
    """
    height, width = video.size()[-2:]
    resolution_max_side = min(MAX_IMAGE_SIZE, resolution_max_side)
    # NOTE(review): if resolution_max_side is None, the min() above raises
    # before this None check runs -- the ordering looks inverted; confirm.
    resolution_max_side = max(height, width) if resolution_max_side is None else resolution_max_side
    aspect_ratio = width / height
    if width >= height:
        # Landscape (or square): the width becomes the longest edge.
        width = resolution_max_side
        height = int(width / aspect_ratio)
        # Round up to an even dimension.
        if height % 2 != 0:
            height += 1
    elif height > width:
        # Portrait: the height becomes the longest edge.
        height = resolution_max_side
        width = int(height * aspect_ratio)
        if width % 2 != 0:
            width += 1
    height = max(height, 1)
    width = max(width, 1)
    return (height, width) | Get the output size of the video after resizing given a dictionary specifying the max and min sizes.
Args:
video (`np.ndarray`):
Video to resize.
resolution_max_side (`int`):
The longest edge of the video will be resized to this value. The shortest edge will be resized to keep the
input aspect ratio.
Returns:
The output size of the video after resizing. | github-repos |
def _convert_scipy_sparse_tensor(value, expected_input):
    """Convert a SciPy sparse matrix to what the model input expects.

    If the expected input is a TF SparseTensor the value is converted; if
    dense, it is densified (graph mode only -- eager raises). Values that
    are not scipy sparse matrices pass through unchanged.

    Args:
        value: an object that may be a scipy sparse matrix.
        expected_input: the expected input placeholder.

    Returns:
        The possibly-converted value.
    """
    if issparse is not None and issparse(value):
        if backend.is_sparse(expected_input):
            # Build a tf.SparseTensor from the matrix's COO coordinates.
            sparse_coo = value.tocoo()
            row, col = (sparse_coo.row, sparse_coo.col)
            data, shape = (sparse_coo.data, sparse_coo.shape)
            indices = np.concatenate((np.expand_dims(row, 1), np.expand_dims(col, 1)), 1)
            return sparse_tensor.SparseTensor(indices, data, shape)
        else:
            if ops.executing_eagerly_outside_functions():
                raise ValueError('A SciPy sparse matrix was passed to a model that expects dense inputs. Please densify your inputs first, such as by calling `x.toarray().')
            return value.toarray()
    else:
        return value | Handle scipy sparse tensor conversions.
This method takes a value 'value' and returns the proper conversion. If
value is a scipy sparse tensor and the expected input is a dense tensor,
we densify 'value'. If value is a scipy sparse tensor and the expected input
is a TF SparseTensor, we convert 'value' to a SparseTensor. If 'value' is
not a scipy sparse tensor, or scipy is not imported, we pass it through
unchanged.
Args:
value: An object that may be a scipy sparse tensor
expected_input: The expected input placeholder.
Returns:
The possibly-converted 'value'. | github-repos |
def GetPrototype(self, descriptor):
    """Build (and cache) a proto2 message class from a descriptor.

    Passing a descriptor with the same full name as a previous call returns
    the same cached class.

    Args:
        descriptor: the descriptor to build from.

    Returns:
        A class describing the passed in descriptor.
    """
    if descriptor.full_name not in self._classes:
        descriptor_name = descriptor.name
        # Python 2: class names must be byte strings.
        if str is bytes:
            descriptor_name = descriptor.name.encode('ascii', 'ignore')
        result_class = reflection.GeneratedProtocolMessageType(
            descriptor_name,
            (message.Message,),
            {'DESCRIPTOR': descriptor, '__module__': None})
        self._classes[descriptor.full_name] = result_class
        # Recursively build classes for message-typed fields.
        for field in descriptor.fields:
            if field.message_type:
                self.GetPrototype(field.message_type)
        # Register extensions against their containing types.
        for extension in result_class.DESCRIPTOR.extensions:
            if extension.containing_type.full_name not in self._classes:
                self.GetPrototype(extension.containing_type)
            extended_class = self._classes[extension.containing_type.full_name]
            extended_class.RegisterExtension(extension)
    return self._classes[descriptor.full_name] | Builds a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor. | juraj-google-style |
def _GetMessage(self, message_file_key, lcid, message_identifier):
    """Retrieve a specific message from a specific message table.

    Args:
        message_file_key (int): message file key.
        lcid (int): language code identifier (LCID).
        message_identifier (int): message identifier.

    Returns:
        str: message string, or None if the table or message is missing.

    Raises:
        RuntimeError: if more than one value is found in the database.
    """
    # Table names encode the file key and LCID, e.g. message_table_1_0x00000409.
    table_name = 'message_table_{0:d}_0x{1:08x}'.format(message_file_key, lcid)
    has_table = self._database_file.HasTable(table_name)
    if (not has_table):
        return None
    column_names = ['message_string']
    condition = 'message_identifier == "0x{0:08x}"'.format(message_identifier)
    values = list(self._database_file.GetValues([table_name], column_names, condition))
    number_of_values = len(values)
    if (number_of_values == 0):
        return None
    if (number_of_values == 1):
        return values[0]['message_string']
    raise RuntimeError('More than one value found in database.') | Retrieves a specific message from a specific message table.
Args:
message_file_key (int): message file key.
lcid (int): language code identifier (LCID).
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
Raises:
RuntimeError: if more than one value is found in the database. | codesearchnet |
def normalize_width(layer):
    """Set every instruction's layer_width to the longest element's length.

    Args:
        layer (list): elements; None entries are ignored.
    """
    instructions = [instruction for instruction in filter(lambda x: x is not None, layer)]
    longest = max([instruction.length for instruction in instructions])
    for instruction in instructions:
        instruction.layer_width = longest | When the elements of the layer have different widths, sets the width to the max elements.
Args:
layer (list): A list of elements. | juraj-google-style |
def image_summary(predictions, targets, hparams):
    """Create TensorBoard image summaries of predictions and ground truth.

    Args:
        predictions: the predicted image (logits).
        targets: the ground truth.
        hparams: model hparams (unused).

    Returns:
        (merged summary proto, zero weights shaped like predictions).
    """
    del hparams
    # argmax over the final (logit) axis yields the predicted pixel values.
    results = tf.cast(tf.argmax(predictions, axis=(- 1)), tf.uint8)
    gold = tf.cast(targets, tf.uint8)
    summary1 = tf.summary.image('prediction', results, max_outputs=2)
    summary2 = tf.summary.image('data', gold, max_outputs=2)
    summary = tf.summary.merge([summary1, summary2])
    return (summary, tf.zeros_like(predictions)) | Reshapes predictions and passes it to tensorboard.
Args:
predictions : The predicted image (logits).
targets : The ground truth.
hparams: model hparams.
Returns:
summary_proto: containing the summary images.
weights: A Tensor of zeros of the same shape as predictions. | codesearchnet |
def get_multi(cls, blob_keys, **ctx_options):
    """Multi-key version of get().

    Args:
        blob_keys: a list of blob keys.
        **ctx_options: context options for Model().get_by_id().

    Returns:
        A list whose items are each either a BlobInfo entity or None.
    """
    # Fire all lookups asynchronously, then gather the results in order.
    futs = cls.get_multi_async(blob_keys, **ctx_options)
    return [fut.get_result() for fut in futs] | Multi-key version of get().
Args:
blob_keys: A list of blob keys.
**ctx_options: Context options for Model().get_by_id().
Returns:
A list whose items are each either a BlobInfo entity or None. | juraj-google-style |
def Read(self, file_object):
    """Read and decompress the next data from the gzip stream.

    Args:
        file_object (FileIO): file object that contains the compressed
            stream.

    Returns:
        bytes: next uncompressed data from the compressed stream.
    """
    # Resume reading where the previous call stopped.
    file_object.seek(self.last_read, os.SEEK_SET)
    read_data = file_object.read(self._MAXIMUM_READ_SIZE)
    self.last_read = file_object.get_offset()
    # Prepend compressed bytes left over from the previous call.
    compressed_data = b''.join([self._compressed_data, read_data])
    (decompressed, extra_compressed) = self._decompressor.Decompress(compressed_data)
    self._compressed_data = extra_compressed
    self.uncompressed_offset += len(decompressed)
    return decompressed | Reads the next uncompressed data from the gzip stream.
Args:
file_object (FileIO): file object that contains the compressed stream.
Returns:
bytes: next uncompressed data from the compressed stream. | codesearchnet |
def _StubMethod(self, stub, method_descriptor, rpc_controller, request, callback):
    """Shared body of all service methods in the generated stub class.

    Args:
        stub: stub instance whose rpc_channel performs the call.
        method_descriptor: descriptor of the invoked method.
        rpc_controller: RPC controller to execute the method.
        request: request protocol message.
        callback: a callback to execute when the method finishes.

    Returns:
        Response message (in case of blocking call).
    """
    return stub.rpc_channel.CallMethod(method_descriptor, rpc_controller, request, method_descriptor.output_type._concrete_class, callback) | The body of all service methods in the generated stub class.
Args:
stub: Stub instance.
method_descriptor: Descriptor of the invoked method.
rpc_controller: Rpc controller to execute the method.
request: Request protocol message.
callback: A callback to execute when the method finishes.
Returns:
Response message (in case of blocking call). | codesearchnet |
def rCopy(d, f=identityConversion, discardNoneKeys=True, deepCopy=True):
    """Recursively copy a dict, transforming leaf values with ``f``.

    Python 2 style code (uses dict.iteritems).

    Args:
        d: the dict to copy.
        f: function (value, keyPath) -> newValue applied to non-dict values;
            keyPath is the tuple of keys from the root to the value.
        discardNoneKeys: if True, drop entries for which f returns None.
        deepCopy: if True, deep-copy d first so returned values are true
            copies.

    Returns:
        A new dict with values replaced by the result of f.
    """
    if deepCopy:
        d = copy.deepcopy(d)
    newDict = {}
    # Work list of (key, value, destination dict, key path so far); note
    # the local name `d` is rebound to the destination inside the loop.
    toCopy = [(k, v, newDict, ()) for (k, v) in d.iteritems()]
    while (len(toCopy) > 0):
        (k, v, d, prevKeys) = toCopy.pop()
        prevKeys = (prevKeys + (k,))
        if isinstance(v, dict):
            # Descend: create the nested destination dict and queue its items.
            d[k] = dict()
            toCopy[0:0] = [(innerK, innerV, d[k], prevKeys) for (innerK, innerV) in v.iteritems()]
        else:
            newV = f(v, prevKeys)
            if ((not discardNoneKeys) or (newV is not None)):
                d[k] = newV
    return newDict | Recursively copies a dict and returns the result.
Args:
d: The dict to copy.
f: A function to apply to values when copying that takes the value and the
list of keys from the root of the dict to the value and returns a value
for the new dict.
discardNoneKeys: If True, discard key-value pairs when f returns None for
the value.
deepCopy: If True, all values in returned dict are true copies (not the
same object).
Returns:
A new dict with keys and values from d replaced with the result of f. | codesearchnet |
def load_case(adapter, case_obj, update=False):
    """Load a case into the database, optionally updating an existing one.

    Args:
        adapter (MongoAdapter): connection to the database.
        case_obj (dict): case object to persist to the database.
        update (bool): if True, an existing case is updated instead of
            raising.

    Returns:
        dict: the case object.

    Raises:
        IntegrityError: if the case already exists and ``update`` is False.
    """
    logger.info('Loading case {} into database'.format(case_obj['display_name']))
    existing_case = adapter.case(case_obj['_id'])
    if existing_case:
        if update:
            adapter.update_case(case_obj)
        else:
            raise IntegrityError("Case {0} already exists in database".format(case_obj['_id']))
    else:
        adapter.add_case(case_obj)
    return case_obj | Load a case into the database
return case_obj | Load a case into the database
If the case already exists the function will exit.
If the user wants to load a case that is already in the database
'update' has to be 'True'
Args:
adapter (MongoAdapter): connection to the database
case_obj (dict): case object to persist to the database
update(bool): If existing case should be updated
Returns:
case_obj(dict): A dictionary with the built case
def assert_parse_equals_golden(self, json_path: str, proto_path: str, proto_cls: Type[message.Message], *, parse_f: Callable[..., message.Message], json_delimiter: Optional[str]=None, proto_delimiter: Optional[str]=None, **parse_kwargs: Any) -> None:
    """Compare parser output against a 'golden' prototxt file.

    Each JSON representation is parsed with ``parse_f`` and compared, as a
    protobuf, to the corresponding golden proto. Delimiters allow multiple
    representations per file; cardinalities must match.

    Args:
        json_path: filepath to the .json file (loaded as a 'test case').
        proto_path: filepath to the .prototxt file (loaded as a 'golden').
        proto_cls: the type of protobuf message to parse into.
        parse_f: the function parsing FHIR JSON to examine.
        json_delimiter: optional delimiter splitting multiple JSON reprs.
        proto_delimiter: optional delimiter splitting multiple protos.
        **parse_kwargs: optional key/value arguments to supply to parse_f.
    """
    testdata = self._read_json_and_protos(json_path, proto_path, proto_cls, json_delimiter=json_delimiter, proto_delimiter=proto_delimiter)
    # Pairwise compare each parsed JSON repr to its golden proto.
    for json_str, proto in zip(testdata.json_strs, testdata.protos):
        from_json = parse_f(json_str, proto_cls, **parse_kwargs)
        self.assertEqual(from_json, proto) | Compare parser output against 'golden' file.
Note that we perform a comparison between protobuf representations.
If json_delimiter and proto_delimiter are supplied, the cardinality of the
resulting sequences must match exactly or an error will be thrown.
Args:
json_path: The filepath to the .json file (loaded as a 'test case').
proto_path: The filepath to the .prototxt file (loaded as a 'golden').
proto_cls: The type of protobuf message to parse into.
parse_f: The function responsible for parsing FHIR JSON to exmaine.
json_delimiter: An optional delimiter for the .json file to load multiple
representations. Defaults to None.
proto_delimiter: An optional delimiter for the .prototxt file to load
multiple representations. Defaults to None.
**parse_kwargs: Optional key/value arguments to supply to parse_f. | github-repos |
def skip_on_exceptions(self, exceptions: Sequence[Union[Type[Exception], Tuple[Exception, str]]]):
    """Return a context manager that skips the trial on given exceptions.

    Args:
        exceptions: sequence of exception types, or (exception type,
            message-regex) pairs.

    Returns:
        A context manager for skipping trials on user-specified exceptions.
    """
    def skip_on_exception(unused_error):
        # Record the full traceback as the skip reason.
        error_stack = traceback.format_exc()
        logging.warning('Skipping trial on unhandled exception: %s', error_stack)
        self.skip(error_stack)
    return utils.catch_errors(exceptions, skip_on_exception) | Returns a context manager to skip trial on user-specified exceptions.
Usages::
with feedback.skip_on_exceptions((ValueError, KeyError)):
...
with feedback.skip_on_exceptions(((ValueError, 'bad value for .*'),
(ValueError, '.* invalid range'),
TypeError)):
...
Args:
exceptions: A sequence of (exception type, or exception type plus regular
expression for error message).
Returns:
A context manager for skipping trials on user-specified exceptions. | github-repos |
def add_loss(self, loss, name=None, regularization=False, add_summaries=True):
    """Append a loss to the network's total loss.

    Args:
        loss: loss operation to append.
        name: unused; kept for interface compatibility.
        regularization: set to True if this is a regularization loss.
        add_summaries: set to True to emit scalar and average summaries.
    """
    _ = name
    if regularization:
        self._g.add_to_collection(GraphKeys.REGULARIZATION_LOSSES, loss)
    tf.add_to_collection(GraphKeys.LOSSES, loss)
    if add_summaries:
        self.add_scalar_summary(loss, 'loss')
        self.add_average_summary(loss, 'loss_average') | Append a loss to the total loss for the network.
Args:
loss: append this loss operation
name: The name for this loss, defaults to loss.op.name
regularization: Set to True if this is a regularization loss.
add_summaries: Set to True if you want to see scalar and average summary. | juraj-google-style |
def _pick_inserted_ops_moment_indices(operations: Sequence[ops.Operation], start: int=0, frontier: Dict[(ops.Qid, int)]=None) -> Tuple[(Sequence[int], Dict[(ops.Qid, int)])]:
    """Greedily assign each operation to the earliest available moment.

    Args:
        operations: The operations to assign to moments, in order.
        start: The first moment index that may be used.
        frontier: Maps each qubit to the first moment it is free in; mutated
            in place. A fresh all-zero frontier is created when omitted.

    Returns:
        A (moment_indices, frontier) pair: the chosen moment per operation
        and the updated frontier. If a frontier was passed in, the same
        object is returned.
    """
    if frontier is None:
        frontier = defaultdict(lambda: 0)
    chosen_moments = []
    for operation in operations:
        earliest = max(start, max(frontier[q] for q in operation.qubits))
        chosen_moments.append(earliest)
        # The touched qubits only become free again after this moment.
        for q in operation.qubits:
            frontier[q] = max(frontier[q], earliest + 1)
    return (chosen_moments, frontier)
Args:
operations: The operations to assign to moments.
start: The first moment to consider assignment to.
frontier: The first moment to which an operation acting on a qubit
can be assigned. Updated in place as operations are assigned.
Returns:
The frontier giving the index of the moment after the last one to
which an operation that acts on each qubit is assigned. If a
frontier was specified as an argument, this is the same object. | codesearchnet |
def first(seq, key=(lambda x: bool(x)), default=None, apply=(lambda x: x)):
    """Return `apply(x)` for the first `x` in `seq` that satisfies `key`.

    Args:
        seq (iterable): Elements to scan.
        key (callable): Predicate applied to each element (truthiness test
            by default).
        default: Returned when no element passes; if callable, its result
            is returned instead. `apply` is never applied to the default.
        apply (callable): Applied to the winning element before returning.

    Returns:
        The first matching element (after `apply`), otherwise the default.
    """
    for candidate in seq:
        if key(candidate):
            return apply(candidate)
    return default() if callable(default) else default
Args:
seq (iterable):
key (callable): test for each element of iterable
default: returned when all elements fail test
apply (callable): applied to element before return, but not to default value
Returns: first element in seq that passes key, mutated with optional apply
Examples:
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional `key` argument specifies a one-argument predicate function
like that used for `filter()`. The `key` argument, if supplied, must be
in keyword form. For example:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4 | codesearchnet |
def Deserialize(self, reader):
    """Deserialize the full header object from `reader`.

    Args:
        reader (neo.IO.BinaryReader): source stream.

    Raises:
        Exception: if the trailing format byte is non-zero.
    """
    super(Header, self).Deserialize(reader)
    trailer = reader.ReadByte()
    if trailer != 0:
        raise Exception('Incorrect Header Format')
Args:
reader (neo.IO.BinaryReader): | juraj-google-style |
def double_sphere(cdata, sym):
    """Ensure that the data within `cdata` has double-sphere symmetry.

    Each output element is the average of the original value and its
    antipodal partner (row ``(nrows - n) % nrows``, column shifted by half a
    period), signed by `sym`; when either partner is zero, their plain sum
    is kept instead, matching the original element-wise rule.

    The original implementation used Python 2-only ``xrange`` and an
    O(nrows * ncols) Python loop; this version is vectorized with numpy.

    Args:
        cdata: 2-D array of (complex) spherical coefficients.
        sym (int): 1 for scalar data, -1 for vector data.

    Returns:
        numpy.ndarray of dtype complex128 with double-sphere symmetry.
    """
    cdata = np.asarray(cdata)
    ncols = cdata.shape[1]
    # Antipodal partner of element (n, m): row (nrows - n) % nrows is the
    # row-reversed array rolled forward by one; the column shift is a roll
    # by -(ncols // 2).
    partner = sym * np.roll(np.roll(cdata[::-1], 1, axis=0), -(ncols // 2), axis=1)
    total = partner + cdata
    ddata = np.where(partner * cdata == 0, total, total / 2)
    return ddata.astype(np.complex128)
Example::
>>> spherepy.doublesphere(cdata, 1)
Args:
sym (int): is 1 for scalar data and -1 for vector data
Returns:
numpy.array([*,*], dtype=np.complex128) containing array with
doublesphere symmetry. | juraj-google-style |
def inverse_stft_window_fn_inner(frame_length, dtype):
    """Compute a synthesis window that can be used in `inverse_stft`.

    Args:
        frame_length: An integer scalar `Tensor`, the window length in samples.
        dtype: Data type of the waveform passed to `stft`.

    Returns:
        A window suitable for reconstructing the original waveform in
        `inverse_stft`.

    Raises:
        ValueError: If `frame_length` is not scalar, `forward_window_fn` is
            not a callable that takes a window length and a `dtype` keyword
            argument and returns a `[window_length]` `Tensor` of samples in
            the provided datatype, or `frame_step` is not scalar.
    """
    with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
        frame_step_ = ops.convert_to_tensor(frame_step, name='frame_step')
        frame_step_.shape.assert_has_rank(0)
        frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
        frame_length.shape.assert_has_rank(0)
        forward_window = forward_window_fn(frame_length, dtype=dtype)
        denom = math_ops.square(forward_window)
        # BUG FIX: this statement was truncated in the original source.
        # `overlaps` is ceil(frame_length / frame_step): the number of
        # frames overlapping any given sample, via the -(-a // b) idiom.
        overlaps = -(-frame_length // frame_step_)
        denom = array_ops.pad(denom, [(0, overlaps * frame_step_ - frame_length)])
        denom = array_ops.reshape(denom, [overlaps, frame_step_])
        denom = math_ops.reduce_sum(denom, 0, keepdims=True)
        denom = array_ops.tile(denom, [overlaps, 1])
        denom = array_ops.reshape(denom, [overlaps * frame_step_])
        return forward_window / denom[:frame_length]
Args:
frame_length: An integer scalar `Tensor`. The window length in samples.
dtype: Data type of waveform passed to `stft`.
Returns:
A window suitable for reconstructing original waveform in `inverse_stft`.
Raises:
ValueError: If `frame_length` is not scalar, `forward_window_fn` is not a
callable that takes a window length and a `dtype` keyword argument and
returns a `[window_length]` `Tensor` of samples in the provided datatype
`frame_step` is not scalar, or `frame_step` is not scalar. | github-repos |
def __init__(self, instance, pretty=False, expand=StringFormatType.error):
    """Create a formatter.

    Args:
        instance: The object to format with.
        pretty: If True, references to non-string attributes such as lists
            are converted to basic form, with characters such as brackets
            and parentheses removed.
        expand: `StringFormatType` controlling reference expansion.
    """
    self.expand = expand
    self.pretty = pretty
    self.instance = instance
Args:
instance: The object to format with.
pretty: If True, references to non-string attributes such as lists
are converted to basic form, with characters such as brackets
and parentheses removed.
expand: `StringFormatType`. | juraj-google-style |
def add_string_pairs_from_attributed_ui_element(results, ui_element, comment_prefix):
    """Extract (string, comment) pairs from a UI element's attributed text.

    Args:
        results (list): Accumulator that extracted pairs are appended to.
        ui_element (element): XIB element whose first ``attributedString``
            child is scanned for ``fragment`` entries.
        comment_prefix (str): Prefix of the generated comments; a
            "Part N" suffix is appended per fragment.

    Returns:
        bool: True when at least one attributed-string fragment was found.
    """
    attributed_strings = ui_element.getElementsByTagName('attributedString')
    if attributed_strings.length == 0:
        return False
    fragments = attributed_strings[0].getElementsByTagName('fragment')
    for index, fragment in enumerate(fragments, start=1):
        try:
            label_entry_key = fragment.attributes['content'].value
        except KeyError:
            # Some encodings store the text in a nested <string> node
            # instead of a `content` attribute.
            label_entry_key = fragment.getElementsByTagName('string')[0].firstChild.nodeValue
        results.append((label_entry_key, "%s Part %d" % (comment_prefix, index)))
    return len(fragments) > 0
Args:
results (list): The list to add the results to.
attributed_element (element): The element from the xib that contains the attributed string from which to extract the fragments.
comment_prefix (str): The prefix of the comment to use for extracted string
(will be appended "Part X" suffices)
Returns:
bool: Whether or not an attributed string was found. | juraj-google-style |
def cos_distance(t1, t2, epsilon=1e-12, name=None):
    """Cosine distance between `t1` and `t2` with a capped sqrt gradient.

    Args:
        t1: A tensor.
        t2: A tensor that can be multiplied by `t1`.
        epsilon: Lower bound on the product of squared norms before the
            rsqrt; keeps the square root's gradient finite.
        name: Optional name for this op.

    Returns:
        Tensor holding ``1 - cos_similarity(t1, t2)``.
    """
    with tf.name_scope(name, 'cos_distance', [t1, t2]) as scope:
        t1 = tf.convert_to_tensor(t1, name='t1')
        t2 = tf.convert_to_tensor(t2, name='t2')
        norm_product = tf.maximum(length_squared(t1) * length_squared(t2), epsilon)
        inv_norm = tf.rsqrt(norm_product)
        return tf.subtract(1.0, dot_product(t1, t2) * inv_norm, name=scope)
Args:
t1: A tensor
t2: A tensor that can be multiplied by t1.
epsilon: A lower bound value for the distance. The square root is used as
the normalizer.
name: Optional name for this op.
Returns:
The cos distance between t1 and t2. | juraj-google-style |
def rating(self, **kwargs):
    """Rate a movie on behalf of the user.

    A valid ``session_id`` or ``guest_session_id`` keyword is required
    (see Authentication).

    Args:
        value: Rating value, sent in the request payload.

    Returns:
        dict: Representation of the JSON returned from the API.
    """
    path = self._get_id_path('rating')
    payload = {'value': kwargs.pop('value', None)}
    response = self._POST(path, kwargs, payload)
    self._set_attrs_to_values(response)
    return response
session id is required.
Args:
session_id: see Authentication.
guest_session_id: see Authentication.
value: Rating value.
Returns:
A dict representation of the JSON returned from the API. | juraj-google-style |
def profile_match(adapter, profiles, hard_threshold=0.95, soft_threshold=0.9):
    """Search all individuals in the database for profile matches.

    Loading is rejected (ProfileError) when any stored individual is at
    least `hard_threshold` similar to a sample; individuals above
    `soft_threshold` are recorded as soft matches instead.

    Args:
        adapter (MongoAdapter): Adapter to mongodb.
        profiles (dict(str)): Profile string for each sample in the VCF.
        hard_threshold (float): Similarity at which loading is rejected.
        soft_threshold (float): Similarity at which a match is recorded.

    Returns:
        matches (dict(list)): Similar samples found per VCF sample.

    Raises:
        ProfileError: When a similarity above `hard_threshold` is found.
    """
    matches = {sample: [] for sample in profiles.keys()}
    for case in adapter.cases():
        for individual in case['individuals']:
            # Individuals without a stored profile cannot be compared.
            if not individual.get('profile'):
                continue
            for sample in profiles.keys():
                similarity = compare_profiles(profiles[sample], individual['profile'])
                if similarity >= hard_threshold:
                    msg = f"individual {sample} has a {similarity} similarity with individual {individual['ind_id']} in case {case['case_id']}"
                    LOG.critical(msg)
                    raise ProfileError
                if similarity >= soft_threshold:
                    matches[sample].append(f"{case['case_id']}.{individual['ind_id']}")
    return matches
for a match. If a matching sample is found an exception is raised,
and the variants will not be loaded into the database.
Args:
adapter (MongoAdapter): Adapter to mongodb
profiles (dict(str)): The profiles (given as strings) for each sample in vcf.
hard_threshold(float): Rejects load if hamming distance above this is found
soft_threshold(float): Stores similar samples if hamming distance above this is found
Returns:
matches(dict(list)): list of similar samples for each sample in vcf. | codesearchnet |
def get(self, url, params=None, **kwargs):
    """Send a GET request and return the parsed JSON body.

    Args:
        url(basestring): The URL of the API endpoint.
        params(dict): Query parameters for the HTTP GET request.
        **kwargs:
            erc(int): The expected (success) response code for the request.
            others: Passed on to the requests package.

    Raises:
        ApiError: If anything other than the expected response code is
            returned by the Webex Teams API endpoint.
    """
    check_type(url, basestring, may_be_none=False)
    check_type(params, dict)
    expected_code = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['GET'])
    response = self.request('GET', url, expected_code, params=params, **kwargs)
    return extract_and_parse_json(response)
return extract_and_parse_json(response) | Sends a GET request.
Args:
url(basestring): The URL of the API endpoint.
params(dict): The parameters for the HTTP GET request.
**kwargs:
erc(int): The expected (success) response code for the request.
others: Passed on to the requests package.
Raises:
ApiError: If anything other than the expected response code is
returned by the Webex Teams API endpoint. | juraj-google-style |
def train(self, X_feat, X_seq, y, id_vec=None, n_folds=10, use_stored_folds=None, n_cores=1, train_global_model=False):
    """Train the Concise model in cross-validation.

    Args:
        X_feat: See :py:func:`concise.Concise.train`.
        X_seq: See :py:func:`concise.Concise.train`.
        y: See :py:func:`concise.Concise.train`.
        id_vec: Sample ids used to differentiate the training samples;
            defaults to 1..n_rows.
        n_folds (int): Number of CV folds to use.
        use_stored_folds (str or None): Path to a .json file with fold
            information; folds are generated when None.
        n_cores (int): Number of CPU cores used for training.
        train_global_model (bool): Additionally fit a global model on all
            samples, using the mean best-validation epoch from the folds.
    """
    self._use_stored_folds = use_stored_folds
    self._n_folds = n_folds
    self._n_rows = X_feat.shape[0]
    self._kf = self._get_folds(self._n_rows, self._n_folds, self._use_stored_folds)
    cv_obj = {}
    if id_vec is None:
        id_vec = np.arange(1, self._n_rows + 1)
    best_val_acc_epoch_l = []
    for fold, train, test in self._kf:
        X_feat_train = X_feat[train]
        X_seq_train = X_seq[train]
        y_train = y[train]
        X_feat_test = X_feat[test]
        X_seq_test = X_seq[test]
        y_test = y[test]
        id_vec_test = id_vec[test]
        print(fold, '/', n_folds)
        # Train a fresh copy of the template model on this fold.
        dc = copy.deepcopy(self._concise_model)
        dc.train(X_feat_train, X_seq_train, y_train, X_feat_test, X_seq_test, y_test, n_cores=n_cores)
        dc._test(X_feat_test, X_seq_test, y_test, id_vec_test)
        cv_obj[fold] = dc
        best_val_acc_epoch_l.append(dc.get_accuracy()['best_val_acc_epoch'])
    self._cv_model = cv_obj
    if train_global_model:
        dc = copy.deepcopy(self._concise_model)
        # Use the mean best-validation epoch across folds for the final fit.
        dc._param['n_epochs'] = int(np.array(best_val_acc_epoch_l).mean())
        # BUG FIX: corrected the 'tranining' typo in the progress message.
        print('training global model with n_epochs = ' + str(dc._param['n_epochs']))
        dc.train(X_feat, X_seq, y, n_cores=n_cores)
        dc._test(X_feat, X_seq, y, id_vec)
        self._concise_global_model = dc
Args:
X_feat: See :py:func:`concise.Concise.train`
X_seq: See :py:func:`concise.Concise.train`
y: See :py:func:`concise.Concise.train`
id_vec: List of character id's used to differentiate the trainig samples. Returned by :py:func:`concise.prepare_data`.
n_folds (int): Number of CV-folds to use.
use_stored_folds (chr or None): File path to a .json file containing the fold information (as returned by :py:func:`concise.ConciseCV.get_folds`). If None, the folds are generated.
n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored.
train_global_model (bool): In addition to training the model in cross-validation, should the global model be fitted (using all the samples from :code:`(X_feat, X_seq, y)`). | codesearchnet |
def _DeepCopy(self, obj):
    """Deep-copy an RDFValue via a serialize/deserialize round trip.

    RDFStruct.Copy() doesn't deep-copy repeated fields, which may lead to
    hard-to-catch bugs, so a full round trip is used instead.

    Args:
        obj: RDFValue to be copied.

    Returns:
        A deep copy of the passed RDFValue.
    """
    precondition.AssertType(obj, rdfvalue.RDFValue)
    serialized = obj.SerializeToString()
    return obj.__class__.FromSerializedString(serialized)
RDFStruct.Copy() doesn't deep-copy repeated fields which may lead to
hard to catch bugs.
Args:
obj: RDFValue to be copied.
Returns:
A deep copy of the passed RDFValue. | juraj-google-style |
def recipe_smartsheet_report_to_bigquery(config, auth_read, auth_write, token, report, dataset, table, schema):
    """Move SmartSheet report data into a BigQuery table.

    Args:
        auth_read (authentication): Credentials used for reading data.
        auth_write (authentication): Credentials used for writing data.
        token (string): Retrieved from SmartSheet account settings.
        report (string): Retrieved from report properties.
        dataset (string): Existing BigQuery dataset.
        table (string): Table to create from this report.
        schema (json): Schema in JSON list format, or empty to auto detect.
    """
    destination = {'bigquery': {'auth': auth_write, 'dataset': dataset, 'table': table, 'schema': schema}}
    task = {'auth': auth_read, 'token': token, 'report': report, 'out': destination}
    smartsheet(config, task)
Args:
auth_read (authentication) - Credentials used for reading data.
auth_write (authentication) - Credentials used for writing data.
token (string) - Retrieve from SmartSheet account settings.
report (string) - Retrieve from report properties.
dataset (string) - Existing BigQuery dataset.
table (string) - Table to create from this report.
schema (json) - Schema provided in JSON list format or leave empty to auto detect. | github-repos |
def create_doc_id_from_json(doc) -> str:
    """Derive a stable document id: identical contents yield the same id.

    Args:
        doc: JSON-serializable document.

    Returns:
        Hex SHA-256 digest of the canonical (key-sorted) JSON encoding.
    """
    canonical = json.dumps(doc, sort_keys=True)
    return hashlib.sha256(canonical.encode('utf-8')).hexdigest()
Args:
doc:
Returns: a string with the hash of the given document. | juraj-google-style |
def exp(vector):
    """Compute a per-element exponent of the passed-in vector.

    Args:
        vector: A LazyOpResult or a numpy array.

    Returns:
        NumpyArrayWeld wrapping the element-wise exp expression.
    """
    elem_type = None
    if isinstance(vector, LazyOpResult):
        elem_type = vector.weld_type
        vector = vector.expr
    elif isinstance(vector, np.ndarray):
        dtype_name = str(vector.dtype)
        elem_type = numpy_weld_impl.numpy_to_weld_type_mapping[dtype_name]
    return NumpyArrayWeld(numpy_weld_impl.exp(vector, elem_type), WeldDouble())
Args:
vector (TYPE): Description | juraj-google-style |
def savedata(self, output, location=None):
    """Persist output data from any task in this workflow to S3.

    Args:
        output: Reference task output (e.g. ``task.outputs.output1``);
            its ``persist`` flag is set in place.
        location (optional): Subfolder under the account directory in the
            gbd-customer-data bucket:
            ``s3://gbd-customer-data/{account_id}/{location}``.
            Leave blank to save to
            ``workflow_output/{workflow_id}/{task_name}/{port_name}``.

    Returns:
        None
    """
    output.persist = True
    if not location:
        return
    output.persist_location = location
Args:
output: Reference task output (e.g. task.outputs.output1).
location (optional): Subfolder under which the output will be saved.
It will be placed under the account directory in gbd-customer-data bucket:
s3://gbd-customer-data/{account_id}/{location}
Leave blank to save to: workflow_output/{workflow_id}/{task_name}/{port_name}
Returns:
None | codesearchnet |
def _TerminateProcessByPid(self, pid):
    """Terminate a process that is monitored by the engine.

    Args:
        pid (int): process identifier (PID).

    Raises:
        KeyError: if the process is not registered with and monitored by
            the engine.
    """
    self._RaiseIfNotRegistered(pid)
    monitored_process = self._processes_per_pid[pid]
    self._TerminateProcess(monitored_process)
    self._StopMonitoringProcess(monitored_process)
Args:
pid (int): process identifier (PID).
Raises:
KeyError: if the process is not registered with and monitored by the
engine. | juraj-google-style |
def should_trigger_for_step(self, step):
    """Return True if the timer should trigger for the specified step.

    Args:
        step: Training step to trigger on.

    Returns:
        True if the elapsed time since the last trigger exceeds
        `every_secs`, or the step delta exceeds `every_steps`; False
        otherwise (including when `step` equals the last triggered step).
    """
    last_step = self._last_triggered_step
    if last_step is None:
        # Never triggered before: always fire.
        return True
    if step == last_step:
        return False
    if self._every_secs is not None and time.time() >= self._last_triggered_time + self._every_secs:
        return True
    if self._every_steps is not None and step >= last_step + self._every_steps:
        return True
    return False
Args:
step: Training step to trigger on.
Returns:
True if the difference between the current time and the time of the last
trigger exceeds `every_secs`, or if the difference between the current
step and the last triggered step exceeds `every_steps`. False otherwise. | github-repos |
def _RegisterFlagByModule(self, module_name, flag):
    """Record that `module_name` defines `flag`.

    Tracking which flag is defined by which module allows the flags to be
    sorted by module later.

    Args:
        module_name: A string, the name of a Python module.
        flag: A Flag object, a flag that is key to the module.
    """
    module_flags = self.FlagsByModuleDict().setdefault(module_name, [])
    module_flags.append(flag)
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module. | juraj-google-style |
def attribute(*args, **kw):
    """Register a new attribute-only operator function in the test engine.

    Thin wrapper over ``operator`` that fixes the operator kind to
    ``Operator.Type.ATTRIBUTE``.

    Arguments:
        *args: variadic arguments forwarded to ``operator``.
        **kw: variadic keyword arguments forwarded to ``operator``.

    Returns:
        function
    """
    return operator(*args, kind=Operator.Type.ATTRIBUTE, **kw)
Arguments:
*args: variadic arguments.
**kw: variadic keyword arguments.
Returns:
function | codesearchnet |
def registration_info_request(self, registration_id):
    """Request registration info and return the response object.

    Args:
        registration_id: id to be checked.

    Returns:
        Response of the registration info request.
    """
    url = self.INFO_END_POINT + registration_id
    return self.requests_session.get(url, params={'details': 'true'})
Args:
registration_id: id to be checked
Returns:
response of registration info request | codesearchnet |
def read_frame(self):
    """Read one frame, converting its color space when configured.

    When no frame is available the event source of the TimedAnimation is
    stopped and, if possible, the capture source is released.

    Returns:
        None if stopped, otherwise the (possibly color-converted) image.
    """
    success, frame = self.capture.read()
    if not success:
        self.event_source.stop()
        try:
            self.capture.release()
        except AttributeError:
            # Some capture sources do not expose release().
            pass
        return None
    if self.convert_color != -1 and is_color_image(frame):
        return cv2.cvtColor(frame, self.convert_color)
    return frame
In case no frame is available, i.e. self.capture.read() returns False
as the first return value, the event_source of the TimedAnimation is
stopped, and if possible the capture source released.
Returns:
None if stopped, otherwise the color converted source image. | codesearchnet |
def plot_scatter_matrix(self, freq=None, title=None,
                        figsize=(10, 10), **kwargs):
    """Wrapper around pandas' scatter_matrix.

    Args:
        freq (str): Data frequency used for display purposes; refer to
            pandas docs for valid freq strings.
        title (str): Title if the default is not appropriate.
        figsize ((x, y)): Figure size.
        kwargs: Passed to pandas' scatter_matrix method.
    """
    if title is None:
        title = self._get_default_plot_title(freq, 'Return Scatter Matrix')
    plt.figure()
    returns = self._get_series(freq).to_returns().dropna()
    # NOTE(review): pd.scatter_matrix was removed in newer pandas releases
    # (moved to pandas.plotting) — confirm the pinned pandas version.
    pd.scatter_matrix(returns, figsize=figsize, **kwargs)
    return plt.suptitle(title)
Args:
* freq (str): Data frequency used for display purposes.
Refer to pandas docs for valid freq strings.
* figsize ((x,y)): figure size
* title (str): Title if default not appropriate
* kwargs: passed to pandas' scatter_matrix method | juraj-google-style |
def CopyToStatTimeTuple(self):
    """Copy the date-time value to a stat timestamp tuple.

    Returns:
        tuple[int, int]: a POSIX timestamp in seconds and the remainder in
            100 nano seconds, or (None, None) on error. The remainder is
            only provided for sufficiently precise values.
    """
    normalized_timestamp = self._GetNormalizedTimestamp()
    if normalized_timestamp is None:
        return None, None
    precise_enough = self._precision in (
        definitions.PRECISION_1_NANOSECOND,
        definitions.PRECISION_100_NANOSECONDS,
        definitions.PRECISION_1_MICROSECOND,
        definitions.PRECISION_1_MILLISECOND,
        definitions.PRECISION_100_MILLISECONDS)
    seconds = int(normalized_timestamp)
    if precise_enough:
        remainder = int((normalized_timestamp % 1) * self._100NS_PER_SECOND)
        return seconds, remainder
    return seconds, None
Returns:
tuple[int, int]: a POSIX timestamp in seconds and the remainder in
100 nano seconds or (None, None) on error. | codesearchnet |
def get_logging_metric_hook(benchmark_log_dir=None,
                            tensors_to_log=None,
                            every_n_secs=600,
                            **kwargs):
    """Build a LoggingMetricHook.

    Args:
        benchmark_log_dir: `string`, directory path to save the metric log.
        tensors_to_log: List of tensor names or dict mapping labels to
            tensor names. Defaults to _TENSORS_TO_LOG when not set.
        every_n_secs: `int`, logging frequency for the metric; defaults to
            every 10 minutes.

    Returns:
        A LoggingMetricHook that writes the named tensors to
        `benchmark_log_dir`.
    """
    if benchmark_log_dir is None:
        raise ValueError("metric_log_dir should be provided to use metric logger")
    return metric_hook.LoggingMetricHook(
        tensors=tensors_to_log if tensors_to_log is not None else _TENSORS_TO_LOG,
        log_dir=benchmark_log_dir,
        every_n_secs=every_n_secs)
Args:
benchmark_log_dir: `string`, directory path to save the metric log.
tensors_to_log: List of tensor names or dictionary mapping labels to tensor
names. If not set, log _TENSORS_TO_LOG by default.
every_n_secs: `int`, the frequency for logging the metric. Default to every
10 mins.
Returns:
Returns a ProfilerHook that writes out timelines that can be loaded into
profiling tools like chrome://tracing. | juraj-google-style |
def compose_args(self, action_name, in_argdict):
    """Compose the argument list from an argument dictionary, with
    respect for default values.

    Args:
        action_name (str): The name of the action to be performed.
        in_argdict (dict): Arguments as a dict, e.g.
            ``{'InstanceID': 0, 'Speed': 1}``. The values can be a string
            or something with a string representation.

    Returns:
        list: a list of ``(name, value)`` tuples.

    Raises:
        AttributeError: If this service does not support the action.
        ValueError: If the argument lists do not match the action
            signature.
    """
    for action in self.actions:
        if action.name == action_name:
            break
    else:
        raise AttributeError('Unknown Action: {0}'.format(action_name))
    # Reject arguments the action signature does not declare.
    unexpected = set(in_argdict) - set(argument.name for argument in action.in_args)
    if unexpected:
        raise ValueError("Unexpected argument '{0}'. Method signature: {1}".format(
            next(iter(unexpected)), str(action)))
    composed = []
    for argument in action.in_args:
        name = argument.name
        if name in in_argdict:
            composed.append((name, in_argdict[name]))
            continue
        if name in self.DEFAULT_ARGS:
            composed.append((name, self.DEFAULT_ARGS[name]))
            continue
        if argument.vartype.default is not None:
            composed.append((name, argument.vartype.default))
            # BUG FIX: the original fell through and raised ValueError even
            # after the declared default had been applied, making the
            # append above dead code. A declared default satisfies the
            # argument, so move on to the next one.
            continue
        raise ValueError("Missing argument '{0}'. Method signature: {1}".format(
            argument.name, str(action)))
    return composed
respect for default values.
Args:
action_name (str): The name of the action to be performed.
in_argdict (dict): Arguments as a dict, eg
``{'InstanceID': 0, 'Speed': 1}``. The values
can be a string or something with a string representation.
Returns:
list: a list of ``(name, value)`` tuples.
Raises:
`AttributeError`: If this service does not support the action.
`ValueError`: If the argument lists do not match the action
signature. | codesearchnet |
def geosearch(self, latitude=None, longitude=None, radius=1000, title=None, auto_suggest=True, results=10):
    """Search for pages near the given geocoords, or near `title`'s page.

    Args:
        latitude (Decimal or None): Latitude; must be coercible to Decimal.
        longitude (Decimal or None): Longitude; must be coercible to
            Decimal.
        radius (int): Radius around the page or geocoords, in meters.
        title (str): Page title to use as a geocoordinate; takes
            precedence over lat/long.
        auto_suggest (bool): Auto-suggest the page title.
        results (int): Number of pages within the radius to return.

    Returns:
        list: A listing of page titles.

    Raises:
        ValueError: If latitude or longitude cannot be coerced to Decimal.
    """
    def as_decimal(val):
        """Coerce `val` to Decimal, raising ValueError when impossible."""
        if isinstance(val, Decimal):
            return val
        try:
            return Decimal(val)
        except (DecimalException, TypeError):
            raise ValueError('Latitude and Longitude must be specified either as a Decimal or in formats that can be coerced into a Decimal.')
    params = {'list': 'geosearch', 'gsradius': radius, 'gslimit': results}
    if title is not None:
        if auto_suggest:
            # Rebind so the (possibly corrected) title is also used below
            # when checking the error response.
            title = self.suggest(title)
        params['gspage'] = title
    else:
        params['gscoord'] = '{0}|{1}'.format(as_decimal(latitude), as_decimal(longitude))
    raw_results = self.wiki_request(params)
    self._check_error_response(raw_results, title)
    return [entry['title'] for entry in raw_results['query']['geosearch']]
return [d['title'] for d in raw_results['query']['geosearch']] | Search for pages that relate to the provided geocoords or near
the page
Args:
latitude (Decimal or None): Latitude geocoord; must be \
coercible to decimal
longitude (Decimal or None): Longitude geocoord; must be \
coercible to decimal
radius (int): Radius around page or geocoords to pull back; \
in meters
title (str): Page title to use as a geocoordinate; this has \
precedence over lat/long
auto_suggest (bool): Auto-suggest the page title
results (int): Number of pages within the radius to return
Returns:
list: A listing of page titles
Raises:
ValueError: If either the passed latitude or longitude are \
not coercable to a Decimal | codesearchnet |
def download_as_obj(
    base_url=d1_common.const.URL_DATAONE_ROOT,
    timeout_sec=d1_common.const.DEFAULT_HTTP_TIMEOUT,
):
    """Download a TLS/SSL server's public certificate as a Certificate object.

    Also see download_as_der().

    Args:
        base_url : str
            A full URL to a DataONE service endpoint or a server hostname.
        timeout_sec : int or float
            Timeout for the SSL socket operations.

    Returns:
        cryptography.Certificate
    """
    der_bytes = download_as_der(base_url, timeout_sec)
    return decode_der(der_bytes)
Also see download_as_der().
Args:
base_url : str
A full URL to a DataONE service endpoint or a server hostname
timeout_sec : int or float
Timeout for the SSL socket operations
Returns:
cryptography.Certificate | juraj-google-style |
def _reload_config(self, reload_original_config):
    """Update the running config from the live device.

    Args:
        reload_original_config: If True, the current running config is
            first captured as the new original config; if False the
            original config remains untouched.
    """
    if reload_original_config:
        self.original_config = self.running_config
        self.original_config.set_name('original')
    previous_paths = self.running_config.get_paths()
    self.running_config = FortiConfig('running', vdom=self.vdom)
    for path in previous_paths:
        self.load_config(path, empty_candidate=True)
self.load_config(path, empty_candidate=True) | This command will update the running config from the live device.
Args:
* reload_original_config:
* If ``True`` the original config will be loaded with the running config before reloading the\
original config.
* If ``False`` the original config will remain untouched. | juraj-google-style |
def get_list(self, key, pipeline=False):
    """Return all values in the list stored at `key`.

    Args:
        key (str): Key where the list is stored.
        pipeline (bool): If True, queue the read on the transaction
            pipeline instead of executing it directly. Default False.

    Returns:
        list: values in the list, ordered by list index.
    """
    client = self._pipeline if pipeline else self._db
    return client.lrange(key, 0, -1)
Args:
key (str): Key where the list is stored.
pipeline (bool): True, start a transaction block. Default false.
Returns:
list: values in the list ordered by list index | codesearchnet |
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulate the metric's running statistics from a batch.

    Args:
        y_true: The ground truth values.
        y_pred: The predicted values.
        sample_weight: Optional weighting of each example; a `Tensor` of
            rank 0, or the same rank as `y_true`, broadcastable to
            `y_true`. Defaults to 1.
    """
    y_true = ops.convert_to_tensor(y_true, dtype=self._dtype)
    y_pred = ops.convert_to_tensor(y_pred, dtype=self._dtype)
    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
    if not self._built:
        self._build(y_true.shape, y_pred.shape)
    # Normalize the weights into a tensor broadcast to y_true's shape.
    sample_weight = 1 if sample_weight is None else sample_weight
    sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype)
    if len(sample_weight.shape) == 1:
        # Promote per-sample weights to a column so they broadcast per output.
        sample_weight = ops.expand_dims(sample_weight, axis=1)
    sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true))
    weight = ops.cast(sample_weight, y_true.dtype)
    weighted_y_true = y_true * weight
    self.sum.assign(self.sum + ops.sum(weighted_y_true, axis=0))
    self.squared_sum.assign(self.squared_sum + ops.sum(y_true * weighted_y_true, axis=0))
    self.total_mse.assign(self.total_mse + ops.sum((y_true - y_pred) ** 2 * weight, axis=0))
    self.count.assign(self.count + ops.sum(sample_weight, axis=0))
    self.num_samples.assign(self.num_samples + ops.size(y_true))
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Can
be a `Tensor` whose rank is either 0, or the same rank as
`y_true`, and must be broadcastable to `y_true`.
Defaults to `1`.
Returns:
Update op. | github-repos |
def _check_if_fenced(self, name):
    """Raise TranspilerAccessError if `name` is a protected attribute.

    Args:
        name (string): the attribute name to check.

    Raises:
        TranspilerAccessError: when name is in the list of attributes to
            protect.
    """
    # object.__getattribute__ bypasses this wrapper's own attribute hooks,
    # avoiding recursive access checks while reading the bookkeeping.
    fenced_names = object.__getattribute__(self, '_attributes_to_fence')
    if name in fenced_names:
        wrapped = object.__getattribute__(self, '_wrapped')
        raise TranspilerAccessError("The fenced %s has the property %s protected" %
                                    (type(wrapped), name))
TranspilerAccessError.
Args:
name (string): the attribute name to check
Raises:
TranspilerAccessError: when name is the list of attributes to protect. | juraj-google-style |
def __init__(self, jids, _id = None):
    """Build an iq requesting user statuses; should be sent once after login.

    Args:
        jids: A list of jids representing the users whose statuses you
            are trying to get.
        _id: Optional stanza id.
    """
    super(GetStatusesIqProtocolEntity, self).__init__(self.__class__.XMLNS, _id, _type = "get", to = YowConstants.WHATSAPP_SERVER)
    self.setGetStatusesProps(jids)
Args:
- jids: A list of jids representing the users whose statuses you are
trying to get. | juraj-google-style |
def generate_version(max_major: int=1, max_minor: int=7, max_patch: int=15) -> str:
    """Select a random version string.

    Args:
        max_major (int, optional): maximum major version.
        max_minor (int, optional): maximum minor version.
        max_patch (int, optional): maximum patch version.

    Returns:
        str: a 'major.minor.patch' version string.
    """
    # The generator is consumed in order, so the randint calls happen in
    # the same major/minor/patch sequence as before.
    major, minor, patch = (randint(0, limit) for limit in (max_major, max_minor, max_patch))
    return '{:d}.{:d}.{:d}'.format(major, minor, patch)
Args:
max_major (int, optional) maximum major version
max_minor (int, optional) maximum minor version
max_patch (int, optional) maximum patch version
Returns:
str, Version String | codesearchnet |
def _bind_length_scalar_handlers(tids, scalar_factory, lns=_NON_ZERO_LENGTH_LNS):
    """Bind scalar handlers for an inclusive range of low-nibble values.

    Args:
        tids (Sequence[int]): The Type IDs to bind to.
        scalar_factory (Callable): Factory for the scalar parsing function.
            It may itself return a thunk deferring the scalar parse, or a
            direct value.
        lns (Sequence[int]): The low-nibble lengths to bind to.
    """
    scalar_handler = partial(_length_scalar_handler, scalar_factory)
    return _bind_length_handlers(tids, scalar_handler, lns)
Args:
tids (Sequence[int]): The Type IDs to bind to.
scalar_factory (Callable): The factory for the scalar parsing function.
This function can itself return a function representing a thunk to defer the
scalar parsing or a direct value.
lns (Sequence[int]): The low-nibble lengths to bind to. | juraj-google-style |
def CopyAttributesFromSessionCompletion(self, session_completion):
    """Copy attributes from a session completion into this session.

    Counters are only copied when the completion actually carries them.

    Args:
        session_completion (SessionCompletion): session completion
            attribute container.

    Raises:
        ValueError: if the identifier of the session completion does not
            match that of the session.
    """
    if self.identifier != session_completion.identifier:
        raise ValueError('Session identifier mismatch.')
    self.aborted = session_completion.aborted
    self.completion_time = session_completion.timestamp
    if session_completion.analysis_reports_counter:
        self.analysis_reports_counter = session_completion.analysis_reports_counter
    if session_completion.event_labels_counter:
        self.event_labels_counter = session_completion.event_labels_counter
    if session_completion.parsers_counter:
        self.parsers_counter = session_completion.parsers_counter
Args:
session_completion (SessionCompletion): session completion attribute
container.
Raises:
ValueError: if the identifier of the session completion does not match
that of the session. | codesearchnet |
def fuzzy_index_match(possiblities, label, **kwargs):
    """Find the closest matching column label, key, or integer-indexed value.

    Returns:
        If `label` is an int, the element of the possibilities at that
        index; if a string, the closest string match; if a list, the best
        match for each of its elements. Other label types yield None.
    """
    choices = list(possiblities)
    # NOTE: `basestring` makes this Python 2-only; on Python 3 this would
    # need `str`.
    if isinstance(label, basestring):
        return fuzzy_get(choices, label, **kwargs)
    if isinstance(label, int):
        return choices[label]
    if isinstance(label, list):
        return [fuzzy_get(choices, item) for item in label]
Returns:
type(label): sequence of immutable objects corresponding to best matches to each object in label
if label is an int returns the object (value) in the list of possibilities at that index
if label is a str returns the closest str match in possibilities
>>> from collections import OrderedDict as odict
>>> fuzzy_index_match(pd.DataFrame(pd.np.random.randn(9,4), columns=list('ABCD'), index=range(9)), 'b')
'B'
>>> fuzzy_index_match(odict(zip('12345','ABCDE')), 'r2d2')
'2'
>>> fuzzy_index_match(odict(zip('12345','ABCDE')), 1)
'2'
>>> fuzzy_index_match(odict(zip('12345','ABCDE')), -1)
'5'
>>> fuzzy_index_match(odict(zip(range(4),'FOUR')), -4)
0 | codesearchnet |
def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
    """Download ALL mapped experimental structures for every gene's protein.

    Args:
        outdir (str): Output directory, when GEM-PRO directories were not
            set or another location is desired.
        pdb_file_type (str): PDB file format to download, when different
            from the configured default.
        force_rerun (bool): Re-download files even if they already exist.
    """
    file_type = pdb_file_type or self.pdb_file_type
    total_structures = 0
    for gene in tqdm(self.genes):
        downloaded = gene.protein.pdb_downloader_and_metadata(
            outdir=outdir, pdb_file_type=file_type, force_rerun=force_rerun)
        if downloaded:
            total_structures += len(downloaded)
    log.info('Updated PDB metadata dataframe. See the "df_pdb_metadata" attribute for a summary dataframe.')
    log.info('Saved {} structures total'.format(total_structures))
Args:
outdir (str): Path to output directory, if GEM-PRO directories were not set or other output directory is
desired
pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
force_rerun (bool): If files should be re-downloaded if they already exist | codesearchnet |
def _scrub_method_name(self, method_name):
    """Scrub a method name, memoizing the result.

    Wraps fitparse.utils.scrub_method_name and caches results, since
    scrubbing a method name is expensive.

    Args:
        method_name: Method name to scrub.

    Returns:
        Scrubbed method name.
    """
    cache = self._scrubbed_method_names
    try:
        return cache[method_name]
    except KeyError:
        scrubbed = scrub_method_name(method_name)
        cache[method_name] = scrubbed
        return scrubbed
This method wraps fitparse.utils.scrub_method_name and memoizes results,
as scrubbing a method name is expensive.
Args:
method_name: Method name to scrub.
Returns:
Scrubbed method name. | codesearchnet |
def __init__(self, scope, parent, name, result, paren=False):
    """Constructor for expressions.

    Args:
        scope (CodeEntity): The program scope where this object belongs.
        parent (CodeEntity): This object's parent in the program tree.
        name (str): The name of the expression in the program.
        result (str): The return type of the expression in the program.

    Kwargs:
        paren (bool): Whether the expression is enclosed in parentheses.
    """
    CodeEntity.__init__(self, scope, parent)
    self.parenthesis = paren
    self.result = result
    self.name = name
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the expression in the program.
result (str): The return type of the expression in the program.
Kwargs:
paren (bool): Whether the expression is enclosed in parentheses. | juraj-google-style |
def wrap_embedded_keyvalue(self, data):
    """Wrap each keyvalue-embedded variable in double quotes.

    Args:
        data (string): The data with embedded variables, or None.

    Returns:
        (string): `data` with every embedded variable wrapped, or None
        when no data was given.
    """
    if data is None:
        # BUG FIX: the original only guarded the unicode coercion and then
        # passed None to re.finditer(), raising TypeError.
        return data
    try:
        data = u'{}'.format(data)
    except UnicodeEncodeError:
        # Python 2 artifact: keep the original object when coercion fails.
        pass
    variables = [match.group(0) for match in re.finditer(self._vars_keyvalue_embedded, data)]
    for var in set(variables):
        variable_string = re.search(self._variable_parse, var).group(0)
        data = data.replace(var, '": "{}"'.format(variable_string))
    return data
Args:
data (string): The data with embedded variables.
Returns:
(string): Results retrieved from DB | juraj-google-style |
def _remove(self, removeList, selfValue):
    """Remove from `selfValue` every element matching one in `removeList`.

    Only looks inside the current instance's value (non-recursive);
    matching is done with the == operation.

    Args:
        removeList (list): The list of matching elements.
        selfValue (list): The list values are removed from, usually
            ``self.value``.
    """
    # BUG FIX: a leftover debug print of (removeValue, removeList) was
    # removed.
    for removeValue in removeList:
        removeEverything(removeValue, selfValue)
removeEverything(removeValue, selfValue) | Remove elements from a list by matching the elements in the other list.
This method only looks inside current instance's value, not recursive.
There is no need for a recursive one anyway.
Match by == operation.
Args:
removeList (list): The list of matching elements.
selfValue (list): The list you remove value from. Usually ``self.value`` | juraj-google-style |
def valid(self, value, include_name=True):
    """Check whether a value is valid based on this Tree's structure.

    Arguments:
        value {mixed} -- The value to validate.
        include_name {bool} -- If true, the Tree's name will be prepended
            to all error keys.

    Returns:
        bool
    """
    # The legacy `cond and [x] or []` short-circuit hack is replaced by a
    # conditional expression; behavior is identical because [self._name]
    # is always truthy.
    parents = [self._name] if include_name else []
    return super(Tree, self).valid(value, parents)
Checks if a value is valid based on the instance's values
Arguments:
value {mixed} -- The value to validate
include_name {bool} -- If true, Tree's name will be prepended to
all error keys
Returns:
bool | juraj-google-style |
def position(self, partition):
    """Get the offset of the next record that will be fetched.

    Arguments:
        partition (TopicPartition): Partition to check.

    Returns:
        int: Offset of the next record to fetch.
    """
    if not isinstance(partition, TopicPartition):
        raise TypeError('partition must be a TopicPartition namedtuple')
    assert self._subscription.is_assigned(partition), 'Partition is not assigned'
    offset = self._subscription.assignment[partition].position
    if offset is None:
        # No cached position yet; refresh it and read again.
        self._update_fetch_positions([partition])
        offset = self._subscription.assignment[partition].position
    return offset
Arguments:
partition (TopicPartition): Partition to check
Returns:
int: Offset | codesearchnet |
def merge(self, other):
    """Copy properties from `other` into self, skipping None values, and
    merge the raw data.

    Args:
        other (SkypeObj): second object to copy fields from.
    """
    for attr in self.attrs:
        value = getattr(other, attr, None)
        if value is not None:
            setattr(self, attr, value)
    if other.raw:
        if not self.raw:
            self.raw = {}
        self.raw.update(other.raw)
Args:
other (SkypeObj): second object to copy fields from | codesearchnet |
def GetOutputClass(cls, name):
if not isinstance(name, py2to3.STRING_TYPES):
raise ValueError('Name attribute is not a string.')
name = name.lower()
if name not in cls._output_classes:
raise KeyError(
'Name: [{0:s}] not registered as an output module.'.format(name))
return cls._output_classes[name] | Retrieves the output class for a specific name.
Args:
name (str): name of the output module.
Returns:
type: output module class.
Raises:
KeyError: if there is no output class found with the supplied name.
ValueError: if name is not a string. | juraj-google-style |
def getexcfo(e):
tb = sys.exc_info()[2]
tbinfo = traceback.extract_tb(tb)
path, line, name, src = '', '', '', None
if tbinfo:
path, line, name, sorc = tbinfo[-1]
retd = {
'msg': str(e),
'file': path,
'line': line,
'name': name,
'src': src
}
if isinstance(e, s_exc.SynErr):
retd['syn:err'] = e.errinfo
return (e.__class__.__name__, retd) | Get an err tufo from an exception.
Args:
e (Exception): An Exception (or Exception subclass).
Notes:
This can be called outside of the context of an exception handler,
however details such as file, line, function name and source may be
missing.
Returns:
((str, dict)): | juraj-google-style |
def functional_from_config(cls, config, custom_objects=None):
created_layers = {}
unprocessed_nodes = {}
def add_unprocessed_node(layer, node_data):
if layer not in unprocessed_nodes:
unprocessed_nodes[layer] = [node_data]
else:
unprocessed_nodes[layer].append(node_data)
def process_node(layer, node_data):
args, kwargs = deserialize_node(node_data, created_layers)
layer(*args, **kwargs)
def process_layer(layer_data):
layer_name = layer_data['name']
if 'module' not in layer_data:
layer = saving_utils.model_from_config(layer_data, custom_objects=custom_objects)
else:
layer = serialization_lib.deserialize_keras_object(layer_data, custom_objects=custom_objects)
if not isinstance(layer, Operation):
raise ValueError(f'Unexpected object from deserialization, expected a layer or operation, got a {type(layer)}')
created_layers[layer_name] = layer
inbound_nodes_data = layer_data['inbound_nodes']
for node_data in inbound_nodes_data:
add_unprocessed_node(layer, node_data)
functional_config = {}
for key in ['layers', 'input_layers', 'output_layers']:
functional_config[key] = config.pop(key)
for key in ['name', 'trainable']:
if key in config:
functional_config[key] = config.pop(key)
else:
functional_config[key] = None
for layer_data in functional_config['layers']:
process_layer(layer_data)
while unprocessed_nodes:
for layer_data in functional_config['layers']:
layer = created_layers[layer_data['name']]
if layer in unprocessed_nodes:
node_data_list = unprocessed_nodes[layer]
node_index = 0
while node_index < len(node_data_list):
node_data = node_data_list[node_index]
try:
process_node(layer, node_data)
except IndexError:
break
node_index += 1
if node_index < len(node_data_list):
unprocessed_nodes[layer] = node_data_list[node_index:]
else:
del unprocessed_nodes[layer]
name = functional_config['name']
trainable = functional_config['trainable']
def get_tensor(layer_name, node_index, tensor_index):
assert layer_name in created_layers
layer = created_layers[layer_name]
if isinstance(layer, Functional):
node_index -= 1
layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
return layer_output_tensors[tensor_index]
def map_tensors(tensors):
if isinstance(tensors, list) and len(tensors) == 3 and isinstance(tensors[0], str):
return get_tensor(*tensors)
if isinstance(tensors, dict):
return {k: map_tensors(v) for k, v in tensors.items()}
if isinstance(tensors, tuple):
return tuple([map_tensors(v) for v in tensors])
return [map_tensors(v) for v in tensors]
input_tensors = map_tensors(functional_config['input_layers'])
output_tensors = map_tensors(functional_config['output_layers'])
return cls(inputs=input_tensors, outputs=output_tensors, name=name, trainable=trainable, **config) | Instantiates a Functional model from its config (from `get_config()`).
Args:
cls: Class of the model, e.g. a custom subclass of `Model`.
config: Output of `get_config()` for the original model instance.
custom_objects: Optional dict of custom objects.
Returns:
An instance of `cls`. | github-repos |
def get_security_group_id(name='', env='', region=''):
vpc_id = get_vpc_id(env, region)
LOG.info('Find %s sg in %s [%s] in %s', name, env, region, vpc_id)
url = '{0}/securityGroups/{1}/{2}/{3}?vpcId={4}'.format(API_URL, env, region, name, vpc_id)
response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
assert response.ok
result = response.json()
try:
security_group_id = result['id']
except KeyError:
msg = 'Security group ({0}) not found'.format(name)
raise SpinnakerSecurityGroupError(msg)
LOG.info('Found: %s', security_group_id)
return security_group_id | Get a security group ID.
Args:
name (str): Security Group name to find.
env (str): Deployment environment to search.
region (str): AWS Region to search.
Returns:
str: ID of Security Group, e.g. sg-xxxx.
Raises:
AssertionError: Call to Gate API was not successful.
SpinnakerSecurityGroupError: Security Group _name_ was not found for
_env_ in _region_. | juraj-google-style |
def stage_redis(self, variable, data):
if isinstance(data, int):
data = str(data)
if variable.endswith('Binary'):
try:
data = base64.b64decode(data)
except binascii.Error:
msg = 'The Binary staging data for variable {} is not properly base64 encoded.'
msg = msg.format(variable)
sys.exit(msg)
elif variable.endswith('BinaryArray'):
if isinstance(data, string_types):
data = json.loads(data)
try:
decoded_data = []
for d in data:
d_decoded = base64.b64decode(d)
decoded_data.append(d_decoded)
data = decoded_data
except binascii.Error:
msg = 'The BinaryArray staging data for variable {} is not properly base64 encoded.'
msg = msg.format(variable)
sys.exit(msg)
self.log.info(u'[stage] Creating variable {}'.format(variable))
self.tcex.playbook.create(variable, data) | Stage data in Redis.
Args:
variable (str): The Redis variable name.
data (dict|list|str): The data to store in Redis. | juraj-google-style |
def id_by_index(index, resources):
if index < 0 or index >= len(resources):
return ''
try:
return resources[index].header_signature
except AttributeError:
return resources[index].address | Helper method to fetch the id or address of a resource by its index
Args:
resources (list of objects): The resources to be paginated
index (integer): The index of the target resource
Returns:
str: The address or header_signature of the resource,
returns an empty string if not found | juraj-google-style |
def __init__(self, base_fd, handlers, pathspec=None, progress_callback=None):
del pathspec
self.base_fd = base_fd
self.progress_callback = progress_callback
self._handlers = handlers
if base_fd is None:
self.pathspec = rdf_paths.PathSpec()
else:
self.pathspec = base_fd.pathspec.Copy()
self.metadata = {} | Constructor.
Args:
base_fd: A handler to the predecessor handler.
handlers: A mapping from rdf_paths.PathSpec.PathType to classes
implementing VFSHandler.
pathspec: The pathspec to open.
progress_callback: A callback to indicate that the open call is still
working but needs more time.
Raises:
IOError: if this handler can not be instantiated over the
requested path. | juraj-google-style |
def _merge_input_ids_with_input_values(self, input_ids: Optional[torch.Tensor]=None, input_values: Optional[torch.Tensor]=None, input_values_cutoffs: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None) -> Optional[torch.Tensor]:
inputs_embeds = self.embed_text_tokens(input_ids)
if input_values is not None:
input_values_cutoffs = nn.functional.pad(input_values_cutoffs, (1, 0))
audio_lengths = input_values_cutoffs[input_values_cutoffs >= 0].diff()
audio_lengths = audio_lengths[audio_lengths > 0]
input_values_mask = torch.arange(input_values_cutoffs.max(), device=input_values.device).expand(len(audio_lengths), -1)
input_values_mask = input_values_mask < audio_lengths.unsqueeze(1)
with torch.no_grad():
audio_tokens_list = []
for batch_input_values, batch_input_values_cutoffs in zip(input_values, input_values_cutoffs):
batch_input_values_cutoffs = batch_input_values_cutoffs[batch_input_values_cutoffs >= 0]
for i in range(batch_input_values_cutoffs.shape[0] - 1):
start_idx = batch_input_values_cutoffs[i]
end_idx = batch_input_values_cutoffs[i + 1]
audio_batch = batch_input_values[..., start_idx:end_idx]
codec_outputs = self.codec_model.encode(audio_batch.unsqueeze(0))
codebook_ids = codec_outputs.audio_codes.transpose(1, -1)
audio_tokens_list.append(codebook_ids[0])
max_audio_frames = max((el.shape[0] for el in audio_tokens_list))
batched_audio_token_ids = torch.stack([nn.functional.pad(el, (0, 0, 0, max_audio_frames - el.shape[0])) for el in audio_tokens_list])
audio_codes_mask = self.codec_model.get_audio_codes_mask(input_values_mask)
audio_token_id = self.config.audio_token_id
audio_token_mask = input_ids == audio_token_id
audio_embeds = self.backbone_model.embed_tokens(batched_audio_token_ids)
inputs_embeds[audio_token_mask] = audio_embeds[audio_codes_mask]
audio_eos_frame_ids = torch.ones((1, 1, self.config.num_codebooks), device=input_ids.device, dtype=torch.long) * self.config.codebook_eos_token_id
audio_eos_embeds = self.backbone_model.embed_tokens(audio_eos_frame_ids).squeeze(1)
audio_eos_token_mask = input_ids == self.config.audio_eos_token_id
inputs_embeds[audio_eos_token_mask] = audio_eos_embeds.repeat(audio_eos_token_mask.sum(), 1)
if labels is not None:
labels_expanded = labels.unsqueeze(-1).repeat(1, 1, self.config.num_codebooks)
labels_expanded[audio_token_mask] = batched_audio_token_ids[audio_codes_mask]
labels_expanded[audio_eos_token_mask] = audio_eos_frame_ids
depth_decoder_ignore_frames_idxs = (labels == -101).nonzero(as_tuple=True)
labels_expanded[depth_decoder_ignore_frames_idxs[0], depth_decoder_ignore_frames_idxs[1], 1:] = -100
labels = labels_expanded
return {'inputs_embeds': inputs_embeds, 'labels': labels} | Merges the input_ids and input_values to produce a single inputs_embeds tensor:
1 - Infers the codec model on the input_values to retreive codebook token.
2 - Embeds codebook tokens and places them at the correct positions in the inputs_embeds tensor.
3 - If labels are provided, expands them to match codebook dimensions and position the target codebook tokens in the inputs_embeds tensor.
Args:
input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):
The input ids to embed.
input_values (`torch.Tensor` of shape `(batch_size, channels, audio_sequence_length)`):
The audio input values to embed.
input_values_cutoffs (`torch.Tensor` of shape `(batch_size, max_num_audio)`):
The cutoffs of the audio input values relative to its batch index, padded with -1 when no audio. | github-repos |
def GetDataStream(self, name, case_sensitive=True):
if (not isinstance(name, py2to3.STRING_TYPES)):
raise ValueError('Name is not a string.')
name_lower = name.lower()
matching_data_stream = None
for data_stream in self._GetDataStreams():
if (data_stream.name == name):
return data_stream
if ((not case_sensitive) and (data_stream.name.lower() == name_lower)):
if (not matching_data_stream):
matching_data_stream = data_stream
return matching_data_stream | Retrieves a data stream by name.
Args:
name (str): name of the data stream.
case_sensitive (Optional[bool]): True if the name is case sensitive.
Returns:
DataStream: a data stream or None if not available.
Raises:
ValueError: if the name is not string. | codesearchnet |
def get(self):
return dict(interfaces=self.interfaces.getall(), instances=self.instances.getall()) | Returns the spanning-tree configuration as a dict object
The dictionary object represents the entire spanning-tree
configuration derived from the nodes running config. This
includes both globally configuration attributes as well as
interfaces and instances. See the StpInterfaces and StpInstances
classes for the key/value pair definitions.
Note:
See the individual classes for detailed message structures
Returns:
A Python dictionary object of key/value pairs the represent
the entire supported spanning-tree configuration::
{
"mode": [mstp, none],
"interfaces": {...},
"instances": {...}
} | codesearchnet |
def AddProcessingOptions(self, argument_group):
argument_group.add_argument(
'--single_process', '--single-process', dest='single_process',
action='store_true', default=False, help=(
'Indicate that the tool should run in a single process.'))
argument_helper_names = ['temporary_directory', 'workers', 'zeromq']
if self._CanEnforceProcessMemoryLimit():
argument_helper_names.append('process_resources')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
argument_group, names=argument_helper_names) | Adds the processing options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group. | juraj-google-style |
def indent_xml(elem, level=0, more_sibs=False):
i = '\n'
pad = ' '
if level:
i += ((level - 1) * pad)
num_kids = len(elem)
if num_kids:
if ((not elem.text) or (not elem.text.strip())):
elem.text = (i + pad)
if level:
elem.text += pad
count = 0
for kid in elem:
if (kid.tag == 'data'):
kid.text = '*DATA*'
indent_xml(kid, (level + 1), (count < (num_kids - 1)))
count += 1
if ((not elem.tail) or (not elem.tail.strip())):
elem.tail = i
if more_sibs:
elem.tail += pad
elif (level and ((not elem.tail) or (not elem.tail.strip()))):
elem.tail = i
if more_sibs:
elem.tail += pad | Indent an xml element object to prepare for pretty printing.
To avoid changing the contents of the original Element, it is
recommended that a copy is made to send to this function.
Args:
elem: Element to indent.
level: Int indent level (default is 0)
more_sibs: Bool, whether to anticipate further siblings. | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.