code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def write(self, b):
    """Write bytes from b.

    Returns number of bytes written (<= len(b)).

    Args:
        b: (memoryview) Buffer with data to write.
    """
    # Raise if the stream has already been closed before touching state.
    self._checkClosed()
    # Hand the buffer to the background uploader; it owns the actual I/O.
    self._uploader.put(b)
    num_bytes = len(b)
    self._position += num_bytes
    return num_bytes
def instantiate_references_json(references_json):
    """Given a JSON representation of all the models in a graph, return a
    dict of new model objects.

    Args:
        references_json (``JSON``):
            JSON specifying new Bokeh models to create

    Returns:
        dict[str, Model]

    Raises:
        RuntimeError: if a model instance could not be created.
    """
    instances = {}
    for spec in references_json:
        model_id = spec['id']
        # A 'subtype' entry, when present, overrides the base 'type'.
        model_type = spec.get('subtype', spec['type'])
        cls = get_class(model_type)
        # Allocate without running __init__; the class's __new__ is expected
        # to honor the explicit id keyword.
        instance = cls.__new__(cls, id=model_id)
        if instance is None:
            raise RuntimeError('Error loading model from JSON (type: %s, id: %s)' % (model_type, model_id))
        instances[instance.id] = instance
    return instances
def run(self):
    """Runs the command.

    Removes every directory listed in ``self.build_dirs``, then walks
    ``self.cwd`` deleting any file whose name ends with one of the
    extensions in ``self.build_artifacts``.

    Args:
        self (CleanCommand): the ``CleanCommand`` instance

    Returns:
        ``None``
    """
    def _announce(path):
        # One progress line per removed path, matching platform line endings.
        sys.stdout.write('Removing %s%s' % (path, os.linesep))

    for build_dir in self.build_dirs:
        if not os.path.isdir(build_dir):
            continue
        _announce(build_dir)
        shutil.rmtree(build_dir)
    for root, _, files in os.walk(self.cwd):
        for filename in files:
            candidate = os.path.join(root, filename)
            if candidate.endswith(tuple(self.build_artifacts)):
                _announce(candidate)
                os.remove(candidate)
def _CalculateComprehensionState(self, newline):
    """Makes required changes to comprehension state.

    Args:
        newline: Whether the current token is to be added on a newline.

    Returns:
        The penalty for the token-newline combination given the current
        comprehension state.
    """
    current = self.next_token
    previous = current.previous_token
    # Innermost open comprehension, if any.
    top_of_stack = self.comp_stack[-1] if self.comp_stack else None
    penalty = 0
    if top_of_stack is not None:
        if current == top_of_stack.closing_bracket:
            # Leaving the comprehension: pop it and penalize if it was split.
            last = self.comp_stack.pop()
            if last.has_interior_split:
                penalty += style.Get('SPLIT_PENALTY_COMPREHENSION')
            return penalty
        if newline:
            top_of_stack.has_interior_split = True
    # First token of a comprehension expression opens a new state entry.
    if subtypes.COMP_EXPR in current.subtypes and subtypes.COMP_EXPR not in previous.subtypes:
        self.comp_stack.append(object_state.ComprehensionState(current))
        return penalty
    if current.value == 'for' and subtypes.COMP_FOR in current.subtypes:
        if top_of_stack.for_token is not None:
            # Treat nested 'for' clauses like the 'if' clause below: forbid a
            # split placement inconsistent with the first 'for'.
            if style.Get('SPLIT_COMPLEX_COMPREHENSION') and top_of_stack.has_split_at_for != newline and (top_of_stack.has_split_at_for or not top_of_stack.HasTrivialExpr()):
                penalty += split_penalty.UNBREAKABLE
        else:
            # Record the first 'for' and whether a split happened at it.
            top_of_stack.for_token = current
            top_of_stack.has_split_at_for = newline
            # Discourage splitting at 'for' when the expression is trivial.
            if style.Get('SPLIT_COMPLEX_COMPREHENSION') and newline and top_of_stack.HasTrivialExpr():
                penalty += split_penalty.CONNECTED
    if subtypes.COMP_IF in current.subtypes and subtypes.COMP_IF not in previous.subtypes:
        # An 'if' clause must split (or not) consistently with the 'for'.
        if style.Get('SPLIT_COMPLEX_COMPREHENSION') and top_of_stack.has_split_at_for != newline and (top_of_stack.has_split_at_for or not top_of_stack.HasTrivialExpr()):
            penalty += split_penalty.UNBREAKABLE
    return penalty
def get_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
    """Get mapping of items to their reachable leaves. Leaves having
    inactive relations to other items are omitted.

    Args:
        item_ids (list): items which are taken as roots for the reachability
        language (str): if specified, filter out items which are not
            available in the given language
        forbidden_item_ids (set): items that may not be returned as leaves

    Returns:
        dict: item id -> list of items (reachable leaves)
    """
    forbidden_item_ids = set() if forbidden_item_ids is None else set(forbidden_item_ids)
    children = self.get_children_graph(item_ids, language=language, forbidden_item_ids=forbidden_item_ids)
    counts = self.get_children_counts(active=None)
    if item_ids is None:
        # No explicit roots: use every item present in the children graph.
        item_ids = set(children.keys())

    def _get_leaves(item_id):
        # Collect all leaves reachable from a single root item.
        leaves = set()

        def __search(item_ids):
            # Expand one BFS frontier; anything with no entry in the
            # children graph is a leaf and is accumulated separately.
            result = set(flatten([children.get(item_id, []) for item_id in item_ids]))
            new_leaves = {item_id for item_id in result if item_id not in children.keys()}
            leaves.update(new_leaves)
            return result - new_leaves

        # Iterate __search until no new (non-leaf) items are discovered.
        fixed_point(
            is_zero=lambda to_visit: len(to_visit) == 0,
            minus=lambda to_visit, visited: to_visit - visited,
            plus=lambda visited_x, visited_y: visited_x | visited_y,
            f=__search,
            x={item_id}
        )
        # Keep only true leaves, i.e. items with zero children overall
        # (this drops leaves that only look leaf-like due to inactive
        # relations filtered out of the children graph).
        leaves = {leaf for leaf in leaves if counts[leaf] == 0}
        if len(leaves) > 0:
            return leaves
        # The root itself may be a leaf when nothing below it qualifies.
        if counts[item_id] == 0 and item_id not in forbidden_item_ids:
            return {item_id}
        return set()

    return {item_id: _get_leaves(item_id) for item_id in item_ids}
def _validate(self):
probably_good_to_go = True
sheet = self.table
identity = self.db_sheet_cols.id
id_col = sheet.loc[(:, identity)]
if any(id_col.duplicated()):
warnings.warn('your database is corrupt: duplicates encountered in the srno-column')
logger.debug(('srno duplicates:\n' + str(id_col.duplicated())))
probably_good_to_go = False
return probably_good_to_go | Checks that the db-file is ok
Returns:
True if OK, False if not. | codesearchnet |
def _any_overlap_or_contiguous(self, test_overlap: bool) -> bool:
for i in range(len(self.intervals)):
for j in range(i + 1, len(self.intervals)):
first = self.intervals[i]
second = self.intervals[j]
if test_overlap:
test = first.overlaps(second)
else:
test = first.contiguous(second)
if test:
return True
return False | Do any of the intervals overlap?
Args:
test_overlap: if ``True``, test for overlapping intervals; if
``False``, test for contiguous intervals. | juraj-google-style |
def get_gdns_publisher(config, metrics, **kwargs):
    """Get a GDNSPublisher client.

    A factory function that validates configuration and returns a
    publisher client (:interface:`gordon.interfaces.IMessageHandler`)
    provider.

    Args:
        config (dict): Google Cloud DNS API related configuration.
        metrics (obj): :interface:`IMetricRelay` implementation.
        kwargs (dict): Additional keyword arguments to pass to the
            publisher.

    Returns:
        A :class:`GDNSPublisher` instance.
    """
    # The builder performs the config validation before constructing
    # the publisher.
    return gdns_publisher.GDNSPublisherBuilder(config, metrics, **kwargs).build_publisher()
def operator_and_matrix(self, shapes_info, dtype, use_placeholder, ensure_self_adjoint_and_pd=False):
    """Build a batch matrix and an Operator that should have similar behavior.

    Every operator acts like a (batch) matrix. This method returns both
    together, and is used by tests.

    Args:
        shapes_info: `OperatorShapesInfo`, encoding shape information about
            the operator.
        dtype: Numpy dtype. Data type of returned array/operator.
        use_placeholder: Python bool. If True, initialize the operator with a
            placeholder of undefined shape and correct dtype.
        ensure_self_adjoint_and_pd: If `True`, construct this operator to be
            Hermitian Positive Definite, as well as ensuring the hints
            `is_positive_definite` and `is_self_adjoint` are set.
            This is useful for testing methods such as `cholesky`.

    Returns:
        operator: `LinearOperator` subclass instance.
        mat: `Tensor` representing operator.

    Raises:
        NotImplementedError: always; subclasses must override this stub.
    """
    raise NotImplementedError('Not implemented yet.')
def profile_view(request, user_id=None):
    """Displays a view of a user's profile.

    Args:
        request: The current HTTP request.
        user_id: The ID of the user whose profile is being viewed. If not
            specified, show the user's own profile.
    """
    # Eighth-office accounts get the compact eighth profile unless they
    # explicitly ask for the full view via ?full.
    if (request.user.is_eighthoffice and ('full' not in request.GET) and (user_id is not None)):
        return redirect('eighth_profile', user_id=user_id)
    if (user_id is not None):
        try:
            profile_user = User.objects.get(id=user_id)
            if (profile_user is None):
                raise Http404
        except User.DoesNotExist:
            raise Http404
    else:
        profile_user = request.user
    # Show the next six eighth-period blocks in the schedule panel.
    num_blocks = 6
    eighth_schedule = []
    start_block = EighthBlock.objects.get_first_upcoming_block()
    blocks = []
    if start_block:
        blocks = ([start_block] + list(start_block.next_blocks((num_blocks - 1))))
    for block in blocks:
        sch = {'block': block}
        try:
            sch['signup'] = EighthSignup.objects.get(scheduled_activity__block=block, user=profile_user)
        except EighthSignup.DoesNotExist:
            sch['signup'] = None
        except MultipleObjectsReturned:
            # Duplicate signups indicate inconsistent data; report the
            # exception and degrade gracefully instead of erroring out.
            client.captureException()
            sch['signup'] = None
        eighth_schedule.append(sch)
    if profile_user.is_eighth_sponsor:
        sponsor = EighthSponsor.objects.get(user=profile_user)
        start_date = get_start_date(request)
        eighth_sponsor_schedule = EighthScheduledActivity.objects.for_sponsor(sponsor).filter(block__date__gte=start_date).order_by('block__date', 'block__block_letter')
        # Cap at the next ten sponsored activities.
        eighth_sponsor_schedule = eighth_sponsor_schedule[:10]
    else:
        eighth_sponsor_schedule = None
    admin_or_teacher = (request.user.is_eighth_admin or request.user.is_teacher)
    can_view_eighth = (profile_user.can_view_eighth or (request.user == profile_user))
    # Privileged viewers see a notice instead of the hidden schedule.
    eighth_restricted_msg = ((not can_view_eighth) and admin_or_teacher)
    # Non-privileged viewers of a restricted profile see no schedule at all.
    if ((not can_view_eighth) and (not request.user.is_eighth_admin) and (not request.user.is_teacher)):
        eighth_schedule = []
    has_been_nominated = (profile_user.username in [u.nominee.username for u in request.user.nomination_votes.filter(position__position_name=settings.NOMINATION_POSITION)])
    context = {'profile_user': profile_user, 'eighth_schedule': eighth_schedule, 'can_view_eighth': can_view_eighth, 'eighth_restricted_msg': eighth_restricted_msg, 'eighth_sponsor_schedule': eighth_sponsor_schedule, 'nominations_active': settings.NOMINATIONS_ACTIVE, 'nomination_position': settings.NOMINATION_POSITION, 'has_been_nominated': has_been_nominated}
    return render(request, 'users/profile.html', context)
def _import_templates(force=False):
    """Import templates from disk into database.

    Reads all templates from disk and adds them to the database. By default,
    any template that has been modified by the user will not be updated. This
    can however be changed by setting `force` to `True`, which causes all
    templates to be imported regardless of status.

    Args:
        force (`bool`): Force overwrite any templates with local changes
            made. Default: `False`

    Returns:
        `None`
    """
    tmplpath = os.path.join(resource_filename('cloud_inquisitor', 'data'), 'templates')
    # Map template basename -> absolute path for every file under tmplpath.
    disk_templates = {f: os.path.join(root, f) for root, directory, files in os.walk(tmplpath) for f in files}
    db_templates = {tmpl.template_name: tmpl for tmpl in db.Template.find()}
    for name, template_file in disk_templates.items():
        with open(template_file, 'r') as f:
            body = f.read()
            disk_hash = get_hash(body)
            if name not in db_templates:
                # Brand new template: insert it and audit-log the import.
                template = Template()
                template.template_name = name
                template.template = body
                db.session.add(template)
                auditlog(
                    event='template.import',
                    actor='init',
                    data={
                        'template_name': name,
                        'template': body
                    }
                )
                logger.info('Imported template {}'.format(name))
            else:
                template = db_templates[name]
                db_hash = get_hash(template.template)
                # Compare content hashes to detect an on-disk update.
                if db_hash != disk_hash:
                    # Only overwrite user-modified templates when forced.
                    if force or not db_templates[name].is_modified:
                        template.template = body
                        db.session.add(template)
                        # NOTE(review): template.template was already
                        # overwritten with body above, so this diff compares
                        # body to itself; presumably the old content was
                        # intended — confirm against upstream.
                        auditlog(
                            event='template.update',
                            actor='init',
                            data={
                                'template_name': name,
                                'template_diff': diff(template.template, body)
                            }
                        )
                        logger.info('Updated template {}'.format(name))
                    else:
                        logger.warning(
                            'Updated template available for {}. Will not import as it would'
                            ' overwrite user edited content and force is not enabled'.format(name)
                        )
def _IsotonicRegressionGrad(op: ops.Operation, grad_output, grad_segments):
    """Gradient for the isotonic regression function.

    Args:
        op: The IsotonicRegression tensorflow op.
        grad_output: Tensor of incoming gradients with respect to the output.
        grad_segments: Tensor of incoming gradients with respect to the
            segments. Unused: the segment ids output receives no gradient.

    Returns:
        A tensor, same size as `grad_output` with the gradient with respect
        to the input.
    """
    del grad_segments  # Integer segment ids carry no gradient.
    # outputs[1] holds the pooling segments computed in the forward pass.
    return _MeanAggregator(grad_output, op.outputs[1])
def set_parameters(self, parameters_dict):
    """Set the subarray parameters.

    Args:
        parameters_dict (dict): Dictionary of Subarray parameters
    """
    # Persist the parameters under this subarray's DB hash, then notify
    # subscribers that they changed.
    DB.set_hash_value(self._key, 'parameters', parameters_dict)
    self.publish("parameters_updated")
def box(self, x0, y0, width, height):
    """Create a box on ASCII canvas.

    Args:
        x0 (int): x coordinate of the box corner.
        y0 (int): y coordinate of the box corner.
        width (int): box width.
        height (int): box height.
    """
    assert (width > 1)
    assert (height > 1)
    # Far edge coordinates (inclusive).
    x1 = x0 + width - 1
    y1 = y0 + height - 1
    # Horizontal edges.
    for x in range(x0, x1):
        self.point(x, y0, '-')
        self.point(x, y1, '-')
    # Vertical edges.
    for y in range(y0, y1):
        self.point(x0, y, '|')
        self.point(x1, y, '|')
    # Corners drawn last so they overwrite the edge characters.
    for corner_x, corner_y in ((x0, y0), (x1, y0), (x0, y1), (x1, y1)):
        self.point(corner_x, corner_y, '+')
def append(self, text, afterline=None):
    """Append text to the current buffer.

    Args:
        text (str or Sequence[str]): One or many lines of text to append.
        afterline (Optional[int]):
            Line number to append after. If 0, text is prepended before the
            first line; if ``None``, at end of the buffer.
    """
    # Bug fix: compare against None explicitly. The documented behavior is
    # that afterline=0 prepends before the first line, but the old bare
    # truthiness test (`if afterline:`) treated 0 like None and appended
    # at the end of the buffer instead.
    if afterline is not None:
        self._vim.current.buffer.append(text, afterline)
    else:
        self._vim.current.buffer.append(text)
def _add_saveable(saveables, seen_ops, saveable):
if saveable.op is not None and saveable.op in seen_ops:
raise ValueError(f'The same saveable will be restored with two names: {saveable.name}')
saveables.append(saveable)
seen_ops.add(saveable.op) | Adds the saveable to the saveables list.
Args:
saveables: List to append the SaveableObject to.
seen_ops: Set of the ops of the saveables already processed. Used to
check that each saveable is only saved once.
saveable: The saveable.
Raises:
ValueError: If the saveable has already been processed. | github-repos |
def emit(self, record):
    """Emit a record.

    If a formatter is specified, it is used to format the record. If
    exception information is present, an Exception telemetry object is sent
    instead of a Trace telemetry object.

    Args:
        record (:class:`logging.LogRecord`): the record to format and send.
    """
    # Common telemetry properties attached to both traces and exceptions.
    properties = {key: getattr(record, attr) for key, attr in (
        ('process', 'processName'),
        ('module', 'module'),
        ('fileName', 'filename'),
        ('lineNumber', 'lineno'),
        ('level', 'levelname'))}
    if record.exc_info:
        # Exception info present: report exception telemetry and stop.
        self.client.track_exception(*record.exc_info, properties=properties)
        return
    message = self.format(record)
    self.client.track_trace(message, properties=properties, severity=record.levelname)
def collect_function_arg_names(function_names, return_all_args_function_names, function_renames):
    """Determines argument names for reordered function signatures.

    Args:
        function_names: Functions to collect arguments for.
        return_all_args_function_names: Functions to collect all argument
            names for.
        function_renames: Function renames between v1 and v2.

    Returns:
        Dictionary mapping function names to a list of argument names. Each
        argument name list can have leading `None` elements to indicate that
        some of the function arguments did not change between v1 and v2.
    """
    # API name -> callable/class for the v1 and v2 surfaces respectively.
    function_name_v1_to_attr = {}
    function_name_v2_to_attr = {}

    def visit(unused_path, unused_parent, children):
        # Visitor run over the public API; records attrs under every
        # exported name.
        for child in children:
            _, attr = tf_decorator.unwrap(child[1])
            api_names_v1 = ['tf.' + name for name in tf_export.get_v1_names(attr)]
            # Only track the v1 attrs we were actually asked about.
            if any((name in function_names for name in api_names_v1)):
                for name in api_names_v1:
                    function_name_v1_to_attr[name] = attr
            # v2 names are collected unconditionally so renamed targets
            # can be resolved later.
            api_names_v2 = ['tf.' + name for name in tf_export.get_v2_names(attr)]
            for name in api_names_v2:
                function_name_v2_to_attr[name] = attr

    visitor = public_api.PublicAPIVisitor(visit)
    visitor.do_not_descend_map['tf'].append('contrib')
    visitor.private_map['tf.compat'] = ['v1', 'v2']
    traverse.traverse(tf.compat.v1, visitor)
    traverse.traverse(tf.compat.v2, visitor)

    def get_arguments_list(attr):
        # For classes use __init__'s argspec, dropping the leading `self`.
        if tf_inspect.isclass(attr):
            arg_list = tf_inspect.getargspec(getattr(attr, '__init__'))[0]
            return arg_list[1:]
        else:
            return tf_inspect.getargspec(attr)[0]

    function_to_args = {}
    if any((name not in function_name_v1_to_attr for name in function_names)):
        raise ValueError(f'Symbols not found in `tf.compat.v1`: `{'`, `'.join(function_names - function_name_v1_to_attr.keys())}`')
    for name_v1, attr_v1 in function_name_v1_to_attr.items():
        args_v1 = get_arguments_list(attr_v1)
        if name_v1 in return_all_args_function_names:
            # Caller wants the full v1 argument list, no diffing needed.
            function_to_args[name_v1] = args_v1
            continue
        name_v2 = name_v1
        if name_v1 in function_renames:
            name_v2 = function_renames[name_v1]
        if name_v2.startswith('tf.compat.v1.'):
            raise ValueError(f'Symbol `{name_v1}` is renamed to `{name_v2}`, no need to add keyword argument names, remove from `reordered_function_names`')
        if name_v2 not in function_name_v2_to_attr:
            raise ValueError(f'Symbol `{name_v2}` not found in `tf.compat.v2`')
        args_v2 = get_arguments_list(function_name_v2_to_attr[name_v2])
        if args_v1 == args_v2:
            raise ValueError(f'Symbol `{name_v1}` has no changes in arguments, no need to add keyword argument names, remove from `reordered_function_names`')
        # Walk the v1 argument list; while it matches v2 positionally the
        # entry is None, from the first divergence onward the v1 names are
        # recorded so the upgrade script can keywordify them.
        needed_arg_names = []
        same_so_far = True
        for index, arg in enumerate(args_v1):
            if same_so_far and index < len(args_v2) and (arg == args_v2[index]):
                needed_arg_names.append(None)
            else:
                same_so_far = False
                needed_arg_names.append(arg)
        function_to_args[name_v1] = needed_arg_names
    return function_to_args
def bam2es(
        bam_fn,
        es_fo,
        allowed_delta,
):
    """Convert BAM file to ES file.

    Args:
        bam_fn (str): File name of the BAM file.
        es_fo (file): File object of the ES file.
        allowed_delta (int): Maximal allowed coordinates difference for
            correct reads.
    """
    # NOTE(review): the header-writing calls below are truncated in this
    # copy of the source (unterminated string literals); they presumably
    # wrote the ES-format header lines — confirm against upstream rnftools.
    es_fo.write("
    es_fo.write("
    es_fo.write("
    es_fo.write("
    es_fo.write("
    es_fo.write("
    es_fo.write("
    es_fo.write("
    es_fo.write("
    es_fo.write("
    es_fo.write("
    es_fo.write("
    es_fo.write("
    es_fo.write("
    es_fo.write("
    with pysam.AlignmentFile(bam_fn, "rb") as sam:
        # Map reference name -> 1-based reference id used in the ES output.
        references_dict = {}
        for i in range(len(sam.references)):
            references_dict[sam.references[i]] = i + 1
        for read in sam:
            # Recover the simulated read coordinates encoded in its name.
            rnf_read_tuple = rnftools.rnfformat.ReadTuple()
            rnf_read_tuple.destringize(read.query_name)
            left = read.reference_start + 1
            right = read.reference_end
            chrom_id = references_dict[sam.references[read.reference_id]]
            nb_of_segments = len(rnf_read_tuple.segments)
            # genome_id 1 marks reads simulated from the reference genome,
            # i.e. reads that a correct mapper should place.
            if rnf_read_tuple.segments[0].genome_id == 1:
                should_be_mapped = True
            else:
                should_be_mapped = False
            if read.is_unmapped:
                # u = should have been mapped, U = correctly unmapped.
                if should_be_mapped:
                    category = "u"
                else:
                    category = "U"
            else:
                if should_be_mapped:
                    # Look for a simulated segment matching the alignment
                    # within allowed_delta; 0 coordinates act as wildcards.
                    exists_corresponding_segment = False
                    for j in range(len(rnf_read_tuple.segments)):
                        segment = rnf_read_tuple.segments[j]
                        if (
                            (segment.left == 0 or abs(segment.left - left) <= allowed_delta)
                            and (segment.right == 0 or abs(segment.right - right) <= allowed_delta)
                            and (segment.left != 0 or segment.right == 0)
                            and (chrom_id == 0 or chrom_id == segment.chr_id)
                        ):
                            exists_corresponding_segment = True
                            segment = str(j + 1)
                            break
                    # M_<i> = mapped to segment i, w = mapped to wrong place.
                    if exists_corresponding_segment:
                        category = "M_" + segment
                    else:
                        category = "w"
                else:
                    # m = mapped although it should have stayed unmapped.
                    category = "m"
            # One tab-separated ES record per alignment.
            es_fo.write(
                "\t".join(
                    map(
                        str,
                        [
                            read.query_name,
                            "unmapped" if read.is_unmapped else "mapped_" + str(read.mapping_quality),
                            chrom_id,
                            "R" if read.is_reverse else "F",
                            left,
                            right,
                            category,
                            nb_of_segments
                        ]
                    )
                ) + os.linesep
            )
def _SetSELinuxContext(path):
    """Set the appropriate SELinux context, if SELinux tools are installed.

    Calls /sbin/restorecon on the provided path to set the SELinux context
    as specified by policy. This call does not operate recursively.

    Only some OS configurations use SELinux. It is therefore acceptable for
    restorecon to be missing, in which case we do nothing.

    Args:
        path: string, the path on which to fix the SELinux context.
    """
    restorecon_path = '/sbin/restorecon'
    # Silently skip when the tool is absent or not executable.
    can_run = os.path.isfile(restorecon_path) and os.access(restorecon_path, os.X_OK)
    if can_run:
        subprocess.call([restorecon_path, path])
def write_config(config, config_path=CONFIG_PATH):
    """Write the config to the output path.

    Creates the necessary directories if they aren't there.

    Args:
        config (configparser.ConfigParser): A ConfigParser.
        config_path (str): Destination file. Defaults to ``CONFIG_PATH``.
    """
    # Bug fix: the old code only called makedirs() when the target *file*
    # did not exist, which raised FileExistsError whenever the parent
    # directory already existed but the file did not. Create the directory
    # tree idempotently instead, and skip makedirs for a bare filename
    # (dirname == '').
    directory = os.path.dirname(config_path)
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(config_path, 'w', encoding='utf-8') as f:
        config.write(f)
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates root mean squared error statistics.

    Args:
        y_true: The ground truth values.
        y_pred: The predicted values.
        sample_weight: Optional weighting of each example. Can
            be a `Tensor` whose rank is either 0, or the same rank as
            `y_true`, and must be broadcastable to `y_true`.
            Defaults to `1`.

    Returns:
        Update op.
    """
    y_true = ops.convert_to_tensor(y_true, self._dtype)
    y_pred = ops.convert_to_tensor(y_pred, self._dtype)
    # Align ranks so the elementwise subtraction below broadcasts cleanly.
    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
    # Accumulate squared errors; the parent class maintains the running
    # (weighted) mean, and the square root is taken when the result is read.
    error_sq = ops.square(y_pred - y_true)
    return super().update_state(error_sq, sample_weight=sample_weight)
def run(self, fn, args=(), kwargs=None, options=None):
    """Run `fn` on each replica, with the given arguments.

    In `OneDeviceStrategy`, `fn` is simply called within a device scope for
    the given device, with the provided arguments.

    Args:
        fn: The function to run. The output must be a `tf.nest` of
            `Tensor`s.
        args: (Optional) Positional arguments to `fn`.
        kwargs: (Optional) Keyword arguments to `fn`.
        options: (Optional) An instance of `tf.distribute.RunOptions`
            specifying the options to run `fn`.

    Returns:
        Return value from running `fn`.
    """
    # Delegates entirely to the base strategy implementation.
    return super(OneDeviceStrategy, self).run(fn, args, kwargs, options)
def _ragged_embedding_lookup_with_reduce(table: tf_variables.Variable, ragged: ragged_tensor.RaggedTensor, weights: ragged_tensor.RaggedTensor, combiner: str) -> core.Tensor:
    """Compute a ragged lookup followed by a reduce on axis 1.

    Args:
        table: The embedding table.
        ragged: A RaggedTensor of ids to look up.
        weights: A RaggedTensor of weights (or None).
        combiner: One of "mean", "sum", "sqrtn".

    Returns:
        A Tensor.
    """
    if weights is None:
        weights = array_ops.ones_like(ragged, dtype=table.dtype)
    # Add a trailing axis so weights broadcast over the embedding dimension.
    expanded = array_ops.expand_dims(weights, axis=2)
    embedded = embedding_ops.embedding_lookup(table, ragged)
    # Weighted sum over the ragged (bag) dimension.
    reduced = math_ops.reduce_sum(embedded * expanded, axis=1)
    if combiner == 'mean':
        # Normalize by the total weight; empty bags yield zeros.
        return math_ops.div_no_nan(reduced, math_ops.reduce_sum(expanded, axis=1))
    if combiner == 'sqrtn':
        # Normalize by the L2 norm of the weights.
        return math_ops.div_no_nan(reduced, math_ops.sqrt(math_ops.reduce_sum(expanded * expanded, axis=1)))
    # "sum" (and any other value) returns the plain weighted sum.
    return reduced
def repository_blob(self, sha, **kwargs):
    """Return a file by blob SHA.

    Args:
        sha(str): ID of the blob
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabGetError: If the server failed to perform the request

    Returns:
        dict: The blob content and metadata
    """
    endpoint = '/projects/%s/repository/blobs/%s' % (self.get_id(), sha)
    return self.manager.gitlab.http_get(endpoint, **kwargs)
def get_supervisor(func: types.AnyFunction) -> types.Supervisor:
    """Get the appropriate supervisor to use and pre-apply the function.

    Args:
        func: A function.

    Returns:
        The sync or async supervisor, partially applied over ``func``.

    Raises:
        TypeError: If ``func`` is not callable.
    """
    if not callable(func):
        raise TypeError("func is not callable")
    # Coroutine functions need the async supervisor; everything else is
    # driven synchronously.
    chosen = _async_supervisor if asyncio.iscoroutinefunction(func) else _sync_supervisor
    return functools.partial(chosen, func)
def _ResizeBilinearGrad(op: ops.Operation, grad):
    """The derivatives for bilinear resizing.

    Args:
        op: The ResizeBilinear op.
        grad: The tensor representing the gradient w.r.t. the output.

    Returns:
        The gradients w.r.t. the input.
    """
    # Propagate the incoming gradient back through the resize, matching the
    # forward op's corner-alignment settings; the size argument gets None.
    input_grad = gen_image_ops.resize_bilinear_grad(
        grad,
        op.inputs[0],
        align_corners=op.get_attr('align_corners'),
        half_pixel_centers=op.get_attr('half_pixel_centers'))
    return [input_grad, None]
def sunset(self, date=None, zenith=None):
    """Calculate sunset times for locations.

    Args:
        date (datetime.date): Calculate rise or set for given date
        zenith (str): Calculate sunset events, or start of twilight times

    Returns:
        list of list of datetime.datetime: The time for the sunset for each
            point in each segment
    """
    # Lazily delegate to each contained segment.
    return (location_segment.sunset(date, zenith) for location_segment in self)
def ws004c(self, value=None):
    """Corresponds to IDD Field `ws004c`

    Args:
        value (float): value for IDD Field `ws004c`
            Unit: m/s
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # Missing value: store as-is without validation.
        self._ws004c = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `ws004c`'.format(value))
    self._ws004c = converted
def add_positional_embedding(x, max_length, name=None, positions=None):
    """Adds positional embedding.

    Args:
        x: Tensor with shape [batch, length, depth].
        max_length: int representing static maximum size of any dimension.
        name: str representing name of the embedding tf.Variable.
        positions: Tensor with shape [batch, length].

    Returns:
        Tensor of same shape as x.
    """
    with tf.name_scope('add_positional_embedding'):
        (_, length, depth) = common_layers.shape_list(x)
        # One learned embedding row per position, cast to x's dtype.
        var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
        if (positions is None):
            # No explicit positions: take the first `length` rows, padding
            # the table with zeros if the sequence is longer than max_length.
            pad_length = tf.maximum(0, (length - max_length))
            sliced = tf.cond(tf.less(length, max_length), (lambda : tf.slice(var, [0, 0], [length, (- 1)])), (lambda : tf.pad(var, [[0, pad_length], [0, 0]])))
            return (x + tf.expand_dims(sliced, 0))
        else:
            # Explicit positions: gather the matching embedding rows.
            return (x + tf.gather(var, tf.to_int32(positions)))
def xzhdr(self, header, msgid_range=None):
    """XZHDR command.

    Args:
        header: Name of the header to retrieve.
        msgid_range: A message-id as a string, or an article number as an
            integer, or a tuple of specifying a range of article numbers in
            the form (first, [last]) - if last is omitted then all articles
            after first are included. A msgid_range of None (the default)
            uses the current article.

    Raises:
        NNTPReplyError: If the server does not answer with code 221.
    """
    if msgid_range is None:
        args = header
    else:
        args = header + " " + utils.unparse_msgid_range(msgid_range)
    code, message = self.command("XZHDR", args)
    if code == 221:
        # Header data follows, compressed on the wire.
        return self.info(code, message, compressed=True)
    raise NNTPReplyError(code, message)
def _calculate_minimum_silent_period(baudrate):
    """Calculate the silent period length to comply with the 3.5 character
    silence between messages.

    Args:
        baudrate (numerical): The baudrate for the serial port

    Returns:
        The number of seconds (float) that should pass between each message
        on the bus.

    Raises:
        ValueError, TypeError.
    """
    _checkNumerical(baudrate, minvalue=1, description='baudrate')
    # A Modbus RTU character is 11 bit times, and the standard requires a
    # minimum inter-message silence of 3.5 character times.
    BITTIMES_PER_CHARACTERTIME = 11
    MINIMUM_SILENT_CHARACTERTIMES = 3.5
    seconds_per_bit = 1 / float(baudrate)
    return seconds_per_bit * BITTIMES_PER_CHARACTERTIME * MINIMUM_SILENT_CHARACTERTIMES
def get_entities(seq, suffix=False):
    """Gets entities from sequence.

    Args:
        seq (list): sequence of labels.
        suffix (bool): whether the chunk tag is a suffix (``PER-B``) rather
            than a prefix (``B-PER``).

    Returns:
        list: list of (chunk_type, chunk_start, chunk_end).

    Example:
        >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']
        >>> get_entities(seq)
        [('PER', 0, 1), ('LOC', 3, 3)]
    """
    # A nested sequence (list of sentences) is flattened with an 'O'
    # separator after each sentence so chunks never span sentences.
    if any(isinstance(s, list) for s in seq):
        flat = []
        for sentence in seq:
            flat.extend(sentence)
            flat.append('O')
        seq = flat
    chunks = []
    prev_tag, prev_type = 'O', ''
    begin_offset = 0
    # The trailing sentinel 'O' forces the final chunk to close.
    for i, chunk in enumerate(seq + ['O']):
        if suffix:
            tag = chunk[-1]
            type_ = chunk.split('-')[0]
        else:
            tag = chunk[0]
            type_ = chunk.split('-')[-1]
        if end_of_chunk(prev_tag, tag, prev_type, type_):
            chunks.append((prev_type, begin_offset, i - 1))
        if start_of_chunk(prev_tag, tag, prev_type, type_):
            begin_offset = i
        prev_tag, prev_type = tag, type_
    return chunks
def search(self, term: str, case_sensitive: bool = False) -> 'PrettyDir':
    """Searches for names that match some pattern.

    Args:
        term: String used to match names. A name is returned if it matches
            the whole search term.
        case_sensitive: Boolean to match case or not, default is False
            (case insensitive).

    Return:
        A PrettyDir object with matched names.
    """
    if case_sensitive:
        matched = [pattr for pattr in self.pattrs if term in pattr.name]
    else:
        # Lowercase both sides for a case-insensitive substring match.
        needle = term.lower()
        matched = [pattr for pattr in self.pattrs if needle in pattr.name.lower()]
    return PrettyDir(self.obj, matched)
def get_current(cls):
    """Get the context for the current env, if there is one.

    Returns:
        `ResolvedContext`: Current context, or None if not in a resolved
            env.
    """
    # A resolved environment advertises its context file via this env var.
    filepath = os.getenv('REZ_RXT_FILE')
    if not filepath or not os.path.exists(filepath):
        return None
    return cls.load(filepath)
def GetMessages(self, formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
        formatter_mediator (FormatterMediator): mediates the interactions
            between formatters and other components, such as storage and
            Windows EventLog resources.
        event (EventObject): event.

    Returns:
        tuple(str, str): formatted message string and short message string.

    Raises:
        WrongFormatter: if the event object cannot be formatted by the
            formatter.
    """
    if event.data_type != self.DATA_TYPE:
        raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
            event.data_type))
    event_values = event.CopyToDict()
    cookie_flags = event_values.get('flags', None)
    if cookie_flags == 0:
        # A zero flags value carries no information; drop it entirely.
        del event_values['flags']
    elif cookie_flags:
        # Expand the bitmask into a pipe-separated list of flag names.
        descriptions = [
            description for flag_value, description in self._COOKIE_FLAGS.items()
            if cookie_flags & flag_value]
        event_values['flags'] = '|'.join(descriptions)
    return self._ConditionalFormatMessages(event_values)
def to(self, new_unit):
    """Conversion to a new_unit.

    Args:
        new_unit: New unit type.

    Returns:
        A ArrayWithFloatWithUnit object in the new units.

    Example usage:
        >>> e = EnergyArray([1, 1.1], "Ha")
        >>> e.to("eV")
        array([ 27.21138386, 29.93252225]) eV
    """
    # Scale the underlying values and rebuild an instance of the same
    # concrete class, tagged with the target unit.
    factor = self.unit.get_conversion_factor(new_unit)
    return self.__class__(np.array(self) * factor,
                          unit_type=self.unit_type, unit=new_unit)
def set_tpu_core_ids(self, mesh_name, tpu_core_ids):
    """Sets the singleton global device ID-to-physical core ID map.

    Args:
        mesh_name: The name of a mesh. If empty, set the default mapping.
        tpu_core_ids: TPU core IDs sorted by TF task/device ordinal.
    """
    # Delegates to the native DTensor binding, which owns the mapping.
    _pywrap_dtensor_device.SetTPUCoreIDs(self._device_info, mesh_name, tpu_core_ids)
def write_xls(data, file_name, worksheet_names=None):
    """Writes out to old excel format.

    Args:
        data: 2D list of tables/worksheets.
        file_name: Name of the output file.
        worksheet_names: A list of worksheet names (optional).
    """
    workbook = xlwt.Workbook()
    for sheet_index, sheet_data in enumerate(data):
        # Use the supplied name when available and non-empty, otherwise
        # fall back to a numbered default.
        custom_name = None
        if worksheet_names and sheet_index < len(worksheet_names):
            custom_name = worksheet_names[sheet_index]
        sheet = workbook.add_sheet(custom_name or 'Worksheet {}'.format(sheet_index))
        for row_index, row in enumerate(sheet_data):
            for col_index, cell_value in enumerate(row):
                sheet.write(row_index, col_index, cell_value)
    workbook.save(file_name)
def param_type(self, name):
    """Get the parameter type information by name.

    Args:
        name (str): The full name of a parameter.

    Returns:
        str: The type name or None if no type information is given.
    """
    # Make sure the annotation data has been parsed before lookup.
    self._ensure_loaded()
    params = self.annotated_params
    if name in params:
        return params[name].type_name
    return None
def write_dot_file(G, filename):
    """Writes the graph G in dot file format for graphviz visualization.

    Connected nodes are emitted as edges; nodes with no edges are listed
    on their own so they still appear in the diagram.

    Args:
        G: a Networkx graph
        filename: A filename to name the dot files
    """
    with io.open(filename, 'w') as fh:
        fh.write('strict digraph DependencyDiagram {\n')
        edge_list = G.edges()
        node_list = set(G.nodes())
        for source, targ in sorted(edge_list):
            # Bug fix: the old code did `node_list - set(source)`, which
            # subtracts the *characters* of the node name rather than the
            # node itself, so connected multi-character nodes were wrongly
            # re-listed as isolated nodes. Discard the node directly.
            node_list.discard(source)
            node_list.discard(targ)
            fh.write('"{}" -> "{}";\n'.format(source, targ))
        # Whatever remains never appeared in an edge: print it standalone.
        for node in sorted(node_list):
            fh.write('"{}"\n'.format(node))
        fh.write('}')
def get_student_certificate(self, username, course_id):
    """Returns an Certificate object with the user certificates.

    Args:
        username (str): an edx user's username
        course_id (str): an edX course id.

    Returns:
        Certificate: object representing the student certificate for a
            course
    """
    endpoint = '/api/certificates/v0/certificates/{username}/courses/{course_key}/'.format(
        username=username, course_key=course_id)
    resp = self.requester.get(urljoin(self.base_url, endpoint))
    # Surface HTTP errors (e.g. 404 when no certificate exists) to callers.
    resp.raise_for_status()
    return Certificate(resp.json())
def update(self, reference, field_updates, option=None):
    """Add a "change" to update a document.

    See
    :meth:`~.firestore_v1beta1.document.DocumentReference.update` for
    more information on ``field_updates`` and ``option``.

    Args:
        reference (~.firestore_v1beta1.document.DocumentReference): The
            document to update in this batch.
        field_updates (dict): Field names or paths mapped to the values
            to write.
        option (Optional[~.firestore_v1beta1.client.WriteOption]): A
            write option to make assertions / preconditions on the
            server state of the document before applying changes.

    Raises:
        ValueError: If ``option`` is an ``ExistsOption`` -- update
            already implies the document exists, so an explicit
            existence precondition is rejected.
    """
    # Class-name comparison avoids importing ExistsOption here; for
    # option=None this safely compares against 'NoneType'.
    if (option.__class__.__name__ == 'ExistsOption'):
        raise ValueError('you must not pass an explicit write option to update.')
    write_pbs = _helpers.pbs_for_update(reference._document_path, field_updates, option)
    self._add_write_pbs(write_pbs)
See
:meth:`~.firestore_v1beta1.document.DocumentReference.update` for
more information on ``field_updates`` and ``option``.
Args:
reference (~.firestore_v1beta1.document.DocumentReference): A
document reference that will be deleted in this batch.
field_updates (dict): Field names or paths to update and values
to update with.
option (Optional[~.firestore_v1beta1.client.WriteOption]): A
write option to make assertions / preconditions on the server
state of the document before applying changes. | codesearchnet |
def _is_autocomplete_valid(cur_commands, alias_command):
    """Determine whether autocomplete can be performed at the current state.

    Args:
        cur_commands: The current commands typed in the console.
        alias_command: The alias command.

    Returns:
        True if ``alias_command`` appears in the tab-completion table and
        the currently typed parent command is registered for it.
    """
    parent_command = ' '.join(cur_commands[1:])
    with open(GLOBAL_ALIAS_TAB_COMP_TABLE_PATH, 'r') as table_file:
        try:
            completion_table = json.loads(table_file.read())
            registered = (alias_command in completion_table
                          and parent_command in completion_table[alias_command])
        except Exception:
            # A missing or malformed table simply disables completion.
            return False
    return registered
Args:
    cur_commands: The current commands typed in the console.
    alias_command: The alias command.
Returns:
    True if autocomplete can be performed.
def _print_list(self, values: List[Any], print_func: Callable[[Any], None]) -> None:
self.generator.open_json_list()
field_size = len(values)
for i in range(field_size):
print_func(values[i])
if i < field_size - 1:
self.generator.push(',')
self.generator.add_newline()
self.generator.close_json_list() | Adds the printed JSON list representation of values to _output.
Args:
values: The values to print as a JSON list.
print_func: A function responsible for printing a single value. | github-repos |
def _package_path(package):
    """Return the full path to the default package configuration file.

    Args:
        package (str): name of the python package to return a path for.

    Returns:
        str: ``<config_dir>/<package>.cfg``.
    """
    import os.path
    return os.path.join(config_dir(), "{}.cfg".format(package))
Args:
package (str): name of the python package to return a path for. | juraj-google-style |
def offsets_in_rows(self):
  """Return, for every value, its offset within its own row.

  RowPartition takes an array x and converts it into sublists;
  ``offsets[i]`` is the index of ``x[i]`` in its sublist.

  Given a shape such as::

      [*,*,*],[*,*],[],[*,*]

  this returns::

      0,1,2,0,1,0,1

  Returns:
      an offset for every value (integer tensor of dtype ``self.dtype``).
  """
  # ragged_range builds [0, 1, ..., row_length-1] for each row; flattening
  # the resulting ragged tensor yields one offset per value.
  return gen_ragged_math_ops.ragged_range(starts=constant_op.constant(0, self.dtype), limits=self.row_lengths(), deltas=constant_op.constant(1, self.dtype)).rt_dense_values
RowPartition takes an array x and converts it into sublists.
offsets[i] is the index of x[i] in its sublist.
Given a shape, such as:
[*,*,*],[*,*],[],[*,*]
This returns:
0,1,2,0,1,0,1
Returns:
an offset for every value. | github-repos |
def slice_list(in_list, lens):
    """Slice a list into several sub-lists of the given lengths.

    Args:
        in_list (list): The list to be sliced.
        lens (list[int]): The expected length of each output sub-list.

    Returns:
        list: A list of sub-lists, where the i-th sub-list contains the
        next ``lens[i]`` elements of ``in_list``, in order.

    Raises:
        TypeError: If ``lens`` is not a list.
        ValueError: If ``sum(lens)`` does not equal ``len(in_list)``.
    """
    if not isinstance(lens, list):
        # BUG FIX: the original message referred to a non-existent
        # "indices" argument; report the actual parameter name.
        raise TypeError('"lens" must be a list of integers')
    if sum(lens) != len(in_list):
        raise ValueError('sum of lens and list length does not match: {} != {}'.format(sum(lens), len(in_list)))
    out_list = []
    idx = 0
    for length in lens:
        out_list.append(in_list[idx:idx + length])
        idx += length
    return out_list
Args:
in_list (list): The list to be sliced.
lens(int or list): The expected length of each out list.
Returns:
list: A list of sliced list. | codesearchnet |
def ChunkedCausalMultiHeadedAttention(
    feature_depth, num_heads=8, dropout=0.0, chunk_selector=None, mode='train'):
  """Transformer-style causal multi-headed attention operating on chunks.

  Accepts inputs that are a list of chunks and applies causal attention.

  Args:
    feature_depth: int: depth of embedding
    num_heads: int: number of attention heads
    dropout: float: dropout rate
    chunk_selector: a function from chunk number to list of chunks to attend.
    mode: str: 'train' or 'eval'

  Returns:
    Multi-headed self-attention layer.
  """
  # Per chunk: fan the input out into (Q, K, V) dense projections plus a
  # causal mask over the chunk's time axis.
  prepare_attention_input = combinators.Serial(
      combinators.Branch(),
      combinators.Parallel(
          combinators.Branch(num_branches=3),
          CausalMask(axis=-2),
      ),
      combinators.Parallel(
          combinators.Parallel(
              core.Dense(feature_depth),
              core.Dense(feature_depth),
              core.Dense(feature_depth),
          ),
          combinators.Identity()
      )
  )
  return combinators.Serial(
      combinators.Map(prepare_attention_input),
      # Route attention across chunks according to chunk_selector.
      ChunkedAttentionSelector(selector=chunk_selector),
      # check_shapes=False: per-chunk shapes may legitimately differ here.
      combinators.Map(PureMultiHeadedAttention(
          feature_depth=feature_depth, num_heads=num_heads,
          dropout=dropout, mode=mode), check_shapes=False),
      combinators.Map(core.Dense(feature_depth))
  )
Accepts inputs that are a list of chunks and applies causal attention.
Args:
feature_depth: int: depth of embedding
num_heads: int: number of attention heads
dropout: float: dropout rate
chunk_selector: a function from chunk number to list of chunks to attend.
mode: str: 'train' or 'eval'
Returns:
Multi-headed self-attention layer. | juraj-google-style |
def transition_retry(self, pipeline_key, retry_message):
  """Marks the given pipeline as requiring another retry.

  Does nothing if all attempts have been exceeded.

  Args:
    pipeline_key: db.Key of the _PipelineRecord that needs to be retried.
    retry_message: User-supplied message indicating the reason for the retry.
  """
  def txn():
    # Runs inside a datastore transaction; db.Rollback aborts cleanly.
    pipeline_record = db.get(pipeline_key)
    if pipeline_record is None:
      logging.warning(
          'Tried to retry pipeline ID "%s" but it does not exist.',
          pipeline_key.name())
      raise db.Rollback()
    if pipeline_record.status not in (
        _PipelineRecord.WAITING, _PipelineRecord.RUN):
      logging.warning(
          'Tried to retry pipeline ID "%s", found bad state: %s',
          pipeline_key.name(), pipeline_record.status)
      raise db.Rollback()
    params = pipeline_record.params
    # Exponential backoff: backoff_seconds * backoff_factor ** attempt.
    offset_seconds = (
        params['backoff_seconds'] *
        (params['backoff_factor'] ** pipeline_record.current_attempt))
    pipeline_record.next_retry_time = (
        self._gettime() + datetime.timedelta(seconds=offset_seconds))
    pipeline_record.current_attempt += 1
    pipeline_record.retry_message = retry_message
    pipeline_record.status = _PipelineRecord.WAITING
    if pipeline_record.current_attempt >= pipeline_record.max_attempts:
      # Out of attempts: abort the whole pipeline tree from the root.
      root_pipeline_key = (
          _PipelineRecord.root_pipeline.get_value_for_datastore(
              pipeline_record))
      logging.warning(
          'Giving up on pipeline ID "%s" after %d attempt(s); causing abort '
          'all the way to the root pipeline ID "%s"', pipeline_key.name(),
          pipeline_record.current_attempt, root_pipeline_key.name())
      pipeline_record.abort_message = (
          'Aborting after %d attempts' % pipeline_record.current_attempt)
      task = taskqueue.Task(
          url=self.fanout_abort_handler_path,
          params=dict(root_pipeline_key=root_pipeline_key))
      task.add(queue_name=self.queue_name, transactional=True)
    else:
      # Schedule the next attempt at the computed backoff time.
      task = taskqueue.Task(
          url=self.pipeline_handler_path,
          eta=pipeline_record.next_retry_time,
          params=dict(pipeline_key=pipeline_key,
                      purpose=_BarrierRecord.START,
                      attempt=pipeline_record.current_attempt),
          headers={'X-Ae-Pipeline-Key': pipeline_key},
          target=pipeline_record.params['target'])
      task.add(queue_name=self.queue_name, transactional=True)
    pipeline_record.put()
  db.run_in_transaction(txn)
Does nothing if all attempts have been exceeded.
Args:
pipeline_key: db.Key of the _PipelineRecord that needs to be retried.
retry_message: User-supplied message indicating the reason for the retry. | juraj-google-style |
def with_organisation(self, organisation):
    """Add an organisation segment.

    Args:
        organisation (str): Official name of an administrative body
            holding an election; ``None`` is treated as empty.

    Returns:
        IdBuilder: self, to allow call chaining.

    Raises:
        ValueError: If the slugified value fails validation.
    """
    slug = slugify('' if organisation is None else organisation)
    self._validate_organisation(slug)
    self.organisation = slug
    return self
Args:
organisation (str): Official name of an administrative body
holding an election.
Returns:
IdBuilder
Raises:
ValueError | juraj-google-style |
def learn(self, initial_state_key, limit=1000, game_n=1):
    """Multi-Agent Learning.

    Override.

    Drives every agent in ``self.q_learning_list`` through up to
    ``limit`` steps per game, for ``game_n`` games.  Each agent observes
    the joint state/action of all agents through a composite key of the
    form ``(agent_index, tuple_of_per_agent_keys)``.

    Args:
        initial_state_key: Initial state.
        limit: Limit of the number of learning steps per game.
        game_n: The number of games.
    """
    def composite_key(index, key_list):
        # BUG FIX: the original called ``tuple(index, key_list)``, which
        # raises TypeError (tuple() accepts at most one argument).  Build
        # a hashable (agent index, joint keys) pair instead.
        return (index, tuple(key_list))

    end_flag = False
    state_key_list = [None] * len(self.q_learning_list)
    action_key_list = [None] * len(self.q_learning_list)
    next_action_key_list = [None] * len(self.q_learning_list)
    for game in range(game_n):
        state_key = initial_state_key
        self.t = 1
        while self.t <= limit:
            for i in range(len(self.q_learning_list)):
                agent = self.q_learning_list[i]
                state_key_list[i] = state_key
                if (game + 1) == game_n:
                    # Record the joint-state trajectory of the final game only.
                    self.state_key_list.append(composite_key(i, state_key_list))
                agent.t = self.t
                next_action_list = agent.extract_possible_actions(composite_key(i, state_key_list))
                if len(next_action_list):
                    action_key = agent.select_action(state_key=composite_key(i, state_key_list), next_action_list=next_action_list)
                    action_key_list[i] = action_key
                    reward_value = agent.observe_reward_value(composite_key(i, state_key_list), composite_key(i, action_key_list))
                    if agent.check_the_end_flag(composite_key(i, state_key_list)) is True:
                        end_flag = True
                    next_next_action_list = agent.extract_possible_actions(composite_key(i, action_key_list))
                    if len(next_next_action_list):
                        next_action_key = agent.predict_next_action(composite_key(i, action_key_list), next_next_action_list)
                        next_action_key_list[i] = next_action_key
                        next_max_q = agent.extract_q_df(composite_key(i, action_key_list), next_action_key)
                        agent.update_q(state_key=composite_key(i, state_key_list), action_key=composite_key(i, action_key_list), reward_value=reward_value, next_max_q=next_max_q)
                    state_key = agent.update_state(state_key=composite_key(i, state_key_list), action_key=composite_key(i, action_key_list))
                    state_key_list[i] = state_key
                self.t += 1
                agent.t = self.t
            if end_flag is True:
                break
Override.
Args:
initial_state_key: Initial state.
limit: Limit of the number of learning.
game_n: The number of games. | codesearchnet |
def rename(self, source_file_names, destination_file_names):
    """Rename the files at the source list to the destination list.

    Source and destination lists should be of the same size.

    Args:
        source_file_names: List of file paths that need to be moved.
        destination_file_names: List of destination paths, parallel to
            ``source_file_names``.

    Raises:
        ``BeamIOError``: if any of the rename operations fail.
    """
    err_msg = 'source_file_names and destination_file_names should be equal in length'
    assert len(source_file_names) == len(destination_file_names), err_msg
    failures = {}
    for src, dst in zip(source_file_names, destination_file_names):
        try:
            os.rename(src, dst)
        except OSError as os_err:
            # Normalize OS-level failures to IOError, matching the
            # historical error type surfaced to callers.
            failures[src, dst] = IOError(os_err)
        except Exception as exc:
            failures[src, dst] = exc
    if failures:
        raise BeamIOError('Rename operation failed', failures)
Source and destination lists should be of the same size.
Args:
source_file_names: List of file paths that need to be moved
destination_file_names: List of destination_file_names for the files
Raises:
``BeamIOError``: if any of the rename operations fail | github-repos |
def update_pipeline(self, pipeline):
    """Update a pipeline with the provided attributes.

    Args:
        pipeline (StreakPipeline): pipeline object whose attributes must
            include ``pipelineKey`` identifying the pipeline to update.

    Returns:
        tuple: (HTTP status code, updated pipeline dict), or
        ``(requests.codes.bad_request, None)`` when the argument is not
        a StreakPipeline or lacks a ``pipelineKey``.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # StreakPipeline subclasses; the dead ``payload = None`` assignment
    # from the original is removed.
    if not isinstance(pipeline, StreakPipeline):
        return requests.codes.bad_request, None
    payload = pipeline.to_dict(rw=True)
    try:
        uri = '/'.join([
            self.api_uri,
            self.pipelines_suffix,
            pipeline.attributes['pipelineKey']
        ])
    except KeyError:
        return requests.codes.bad_request, None
    code, r_data = self._req('post', uri, json.dumps(payload))
    return code, r_data
Args:
key required identifier for the pipeline
pipeline StreakPipeline object
return (status code, pipeline_dict) | juraj-google-style |
def download_file(url, destination, **kwargs):
    """Download ``url`` to ``destination`` unless the local copy is current.

    Args:
        url: URL (or URLs -- see ``open_remote_url``) to take the file from.
        destination: place to store the downloaded file.
        **kwargs: forwarded to ``open_remote_url``.

    Returns:
        int: number of bytes copied (0 when the file was already up to
        date), or ``None`` when the remote file could not be opened.
    """
    web_file = open_remote_url(url, **kwargs)
    file_size = 0
    if not web_file:
        # Bare ``return`` (None) signals the remote file was unreachable.
        logger.error(
            "Remote file not found. Attempted URLs: {}".format(url))
        return
    # Only transfer the body when the remote copy is newer than the local one.
    modified = is_remote_file_modified(web_file, destination)
    if modified:
        logger.info("Downloading: " + web_file.url)
        file_size = copy_remote_file(web_file, destination)
    else:
        logger.info("File up-to-date: " + destination)
    web_file.close()
    return file_size
- Open the url.
- Skip the download when the remote file has not been modified.
- Otherwise download it to the destination folder.
Args:
    :url: url to take the file from.
    :destination: place to store the downloaded file.
def table(self, ref):
    """Finds table by ref and returns it.

    Args:
        ref (str): id, vid (versioned id) or name of the table.

    Returns:
        orm.Table

    Raises:
        NotFoundError: if table with given ref not found.
    """
    # EAFP: first treat ref as an object number; if that fails, fall back
    # to a name lookup, preferring the highest (latest) versioned id.
    try:
        obj_number = ObjectNumber.parse(ref)
        ds_obj_number = obj_number.as_dataset
        dataset = self._db.dataset(ds_obj_number)
        table = dataset.table(ref)
    except NotObjectNumberError:
        q = self.database.session.query(Table)\
            .filter(Table.name == str(ref))\
            .order_by(Table.vid.desc())
        table = q.first()
    if not table:
        raise NotFoundError("No table for ref: '{}'".format(ref))
    return table
Args:
ref (str): id, vid (versioned id) or name of the table
Raises:
NotFoundError: if table with given ref not found.
Returns:
orm.Table | juraj-google-style |
def update_remote_archive(self, save_uri, timeout=-1):
    """Save a backup of the appliance to a previously configured remote location.

    Args:
        save_uri (dict): The URI for saving the backup to a previously
            configured location.
        timeout: Timeout in seconds.  Waits for task completion by
            default; on timeout only the wait is aborted, not the
            operation itself.

    Returns:
        dict: Backup details.
    """
    return self._client.update_with_zero_body(uri=save_uri, timeout=timeout)
Args:
save_uri (dict): The URI for saving the backup to a previously configured location.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Backup details. | juraj-google-style |
def _add_batched_ragged_partition(rt, partition, tensor_dict, feature_key, validate, outer_splits=None):
  """Adds a batched ragged partition tensor to a batched ragged tensor.

  Args:
    rt: A RaggedTensor with shape [batch_size, ...].
    partition: The partition configuration object. Specifies the key that
      should be used to look up the partition tensor (unless partition is a
      RaggedFeature.UniformRowLength, in which case there is no partition
      tensor). The specified tensor must have shape [batch_size, ...].
    tensor_dict: The dictionary mapping keys to tensors.
    feature_key: The name of the feature being parsed (for error messages).
    validate: Whether to validate that the values form a valid RaggedTensor.
    outer_splits: If not None, then we have two batch dimensions, and this
      is the row-splits for the collapsed batch dimension. Every partition
      tensor must have an outer row_splits that matches this value.

  Returns:
    A new RaggedTensor where each batch item `rt[i]` has been partitioned
    using the `partition_t[i]`.
  """
  # NOTE(review): the two ``return ... from_row_splits(...`` statements in
  # the UniformRowLength branch appear truncated in this copy (unbalanced
  # parentheses) -- recover the trailing arguments from the upstream
  # TensorFlow source before running.
  if isinstance(partition, RaggedFeature.UniformRowLength):
    if rt.ragged_rank > 1:
      length = ops.convert_to_tensor(partition.length, rt.row_splits.dtype)
      return ragged_tensor.RaggedTensor.from_row_splits(ragged_tensor.RaggedTensor.from_uniform_row_length(rt.values, length, validate=validate), rt.row_splits
    else:
      reshaped_vals = array_ops.reshape(rt.values, array_ops.concat([[-1, partition.length], array_ops.shape(rt.values)[1:]], axis=0))
      return ragged_tensor.RaggedTensor.from_row_splits(reshaped_vals, rt.row_splits
  partition_t = tensor_dict[partition.key]
  if partition_t.values.dtype != rt.row_splits.dtype:
    partition_t = math_ops.cast(partition_t, rt.row_splits.dtype)
  checks = []
  if outer_splits is not None:
    if validate:
      checks.append(check_ops.assert_equal(outer_splits, partition_t.row_splits, message='Feature %s: values and partitions are not aligned' % feature_key))
    partition_t = partition_t.values
  with ops.control_dependencies(checks):
    if isinstance(partition, (RaggedFeature.RowSplits, RaggedFeature.RowLimits)):
      if isinstance(partition, RaggedFeature.RowSplits):
        # Drop the leading zero so row-splits become row-limits.
        partition_t = partition_t[:, 1:]
      # Shift per-batch limits by each batch item's starting offset.
      adjusted_limits = partition_t.values + array_ops.repeat(rt.row_starts(), partition_t.row_lengths())
      return partition_t.with_values(ragged_tensor.RaggedTensor.from_row_limits(rt.values, adjusted_limits, validate=validate))
    elif isinstance(partition, RaggedFeature.RowStarts):
      adjusted_starts = partition_t.values + array_ops.repeat(rt.row_starts(), partition_t.row_lengths())
      return partition_t.with_values(ragged_tensor.RaggedTensor.from_row_starts(rt.values, adjusted_starts, validate=validate))
    elif isinstance(partition, RaggedFeature.RowLengths):
      return partition_t.with_values(ragged_tensor.RaggedTensor.from_row_lengths(rt.values, partition_t.values, validate=validate))
    elif isinstance(partition, RaggedFeature.ValueRowIds):
      # Row count per batch item = max row id + 1 (clamped at zero).
      nrows = math_ops.maximum(ragged_math_ops.reduce_max(partition_t + 1, axis=1), 0)
      adjusted_rowids = partition_t.values + array_ops.repeat(math_ops.cumsum(nrows, exclusive=True), partition_t.row_lengths())
      return ragged_tensor.RaggedTensor.from_row_lengths(ragged_tensor.RaggedTensor.from_value_rowids(rt.values, adjusted_rowids, validate=validate), nrows, validate=validate)
  raise ValueError(f'Unhandled partition type {partition!r}')
Args:
rt: A RaggedTensor with shape [batch_size, ...].
partition: The partition configuration object. Specifies the key that
should be used to look up the partition tensor (unless partition is a
RaggedFeature.UniformRowLength, in which case there is no partition
tensor). The specified tensor must have shape [batch_size, ...].
tensor_dict: The dictionary mapping keys to tensors.
feature_key: The name of the feature being parsed (for error messages).
validate: Whether to validate that the values form a valid RaggedTensor.
outer_splits: If not None, then we have two batch dimensions, and this
is the row-splits for the collapsed batch dimension. Every partition
tensor must have an outer row_splits that matches this value.
Returns:
A new RaggedTensor where each batch item `rt[i]` has been partitioned
using the `partition_t[i]`. | github-repos |
def _calc_dir_size(path):
dir_size = 0
for (root, dirs, files) in os.walk(path):
for fn in files:
full_fn = os.path.join(root, fn)
dir_size += os.path.getsize(full_fn)
return dir_size | Calculate size of all files in `path`.
Args:
path (str): Path to the directory.
Returns:
int: Size of the directory in bytes. | juraj-google-style |
def __init__(self, *args, **kwargs):
    """Create instance.

    Args:
        *args: forwarded to the Transaction base class.
        **kwargs: forwarded to the Transaction base class.
    """
    super(PublishTransaction, self).__init__(*args, **kwargs)
    # Tag the transaction so (de)serialization picks the right wire type.
    self.Type = TransactionType.PublishTransaction
Args:
*args:
**kwargs: | juraj-google-style |
def aggregate(all_stats):
    """Combine stats for multiple optimizers to obtain one mean and sd.

    Useful for combining stats for the same optimizer class and multiple
    problems.

    Args:
        all_stats: dict mapping optimizer name to a dict with 'mean' and
            'standard_deviation' entries; output from compare.
    """
    combined = {'means': [], 'standard_deviations': []}
    # Tag each deep-copied entry with its optimizer name, keeping the
    # mean/sd lists parallel per optimizer.
    for optimizer_name, stats in all_stats.items():
        for src_key, dst_key in (
                ('mean', 'means'),
                ('standard_deviation', 'standard_deviations')):
            entry = copy.deepcopy(stats[src_key])
            entry['name'] = optimizer_name
            combined[dst_key].append(entry)
    _add_mean_sd_to_stats(combined, 'means')
    return combined
Useful for combining stats for the same optimizer class and multiple problems.
Args:
all_stats: dict; output from compare. | juraj-google-style |
def get_intermediate_dirs(fs, dir_path):
    """Get a list of non-existing intermediate directories.

    Arguments:
        fs (FS): A filesystem instance.
        dir_path (str): A path to a new directory on the filesystem.

    Returns:
        list: A list of non-existing paths, ordered shallow to deep and
        excluding ``dir_path`` itself.

    Raises:
        ~fs.errors.DirectoryExpected: If a path component
            references a file and not a directory.
    """
    intermediates = []
    with fs.lock():
        # Walk from the deepest path upwards, collecting components that
        # do not exist yet and stopping at the first existing ancestor.
        for path in recursepath(abspath(dir_path), reverse=True):
            try:
                resource = fs.getinfo(path)
            except ResourceNotFound:
                intermediates.append(abspath(path))
            else:
                if resource.is_dir:
                    break
                # An existing non-directory ancestor makes dir_path invalid.
                raise errors.DirectoryExpected(dir_path)
    # Reverse to shallow-first order and drop dir_path itself (the last
    # element after reversal).
    return intermediates[::-1][:-1]
Arguments:
fs (FS): A filesystem instance.
dir_path (str): A path to a new directory on the filesystem.
Returns:
list: A list of non-existing paths.
Raises:
~fs.errors.DirectoryExpected: If a path component
references a file and not a directory. | juraj-google-style |
def traverse_pagination(response, endpoint):
    """Traverse a paginated API response.

    Extracts and concatenates "results" (list of dict) returned by
    DRF-powered APIs, following "next" links until exhausted.

    Args:
        response (Dict): Current response dict from service API.
        endpoint (slumber Resource object): slumber Resource object from
            edx-rest-api-client, used to fetch subsequent pages.

    Returns:
        list of dict: all results across every page.
    """
    results = response.get('results', [])
    next_page = response.get('next')
    while next_page:
        # Re-issue the request with the query string of the "next" link.
        query = parse_qs(urlparse(next_page).query, keep_blank_values=True)
        page = endpoint.get(**query)
        results += page.get('results', [])
        next_page = page.get('next')
    return results
Extracts and concatenates "results" (list of dict) returned by DRF-powered
APIs.
Arguments:
response (Dict): Current response dict from service API
endpoint (slumber Resource object): slumber Resource object from edx-rest-api-client
Returns:
list of dict. | juraj-google-style |
def _GetEventIdentifiers(self, event):
  """Retrieves different identifiers of the event.

  Every event contains event data, which consists of attributes and values.
  These attributes and values can be represented as a string and used for
  sorting and uniquely identifying events. This function determines multiple
  identifiers:

  * an identifier of the attributes and values without the timestamp
    description (or usage). This is referred to as the MACB group
    identifier.
  * an identifier of the attributes and values including the timestamp
    description (or usage). This is referred to as the event content
    identifier.

  The identifier without the timestamp description can be used to group
  events that have the same MACB (modification, access, change, birth)
  timestamps.

  Args:
    event (EventObject): event.

  Returns:
    tuple: containing:

      str: identifier of the event MACB group or None if the event cannot
          be grouped.
      str: identifier of the event content.
  """
  attributes = []
  attribute_string = 'data_type: {0:s}'.format(event.data_type)
  attributes.append(attribute_string)
  for (attribute_name, attribute_value) in sorted(event.GetAttributes()):
    if (attribute_name in self._IDENTIFIER_EXCLUDED_ATTRIBUTES):
      continue
    if (not attribute_value):
      continue
    # Normalize values so the identifier string is deterministic.
    if (attribute_name == 'pathspec'):
      attribute_value = attribute_value.comparable
    elif isinstance(attribute_value, dict):
      attribute_value = sorted(attribute_value.items())
    elif isinstance(attribute_value, set):
      attribute_value = sorted(list(attribute_value))
    elif isinstance(attribute_value, py2to3.BYTES_TYPE):
      attribute_value = repr(attribute_value)
    try:
      attribute_string = '{0:s}: {1!s}'.format(attribute_name, attribute_value)
    except UnicodeDecodeError:
      logger.error('Failed to decode attribute {0:s}'.format(attribute_name))
    attributes.append(attribute_string)
  # Only MACB-style timestamp descriptions participate in MACB grouping.
  if (event.timestamp_desc in ('atime', 'ctime', 'crtime', 'mtime', definitions.TIME_DESCRIPTION_LAST_ACCESS, definitions.TIME_DESCRIPTION_CHANGE, definitions.TIME_DESCRIPTION_CREATION, definitions.TIME_DESCRIPTION_MODIFICATION)):
    macb_group_identifier = ', '.join(attributes)
  else:
    macb_group_identifier = None
  attributes.insert(0, event.timestamp_desc)
  content_identifier = ', '.join(attributes)
  return (macb_group_identifier, content_identifier)
Every event contains event data, which consists of attributes and values.
These attributes and values can be represented as a string and used for
sorting and uniquely identifying events. This function determines multiple
identifiers:
* an identifier of the attributes and values without the timestamp
description (or usage). This is referred to as the MACB group
identifier.
* an identifier of the attributes and values including the timestamp
description (or usage). This is referred to as the event content
identifier.
The identifier without the timestamp description can be used to group
events that have the same MACB (modification, access, change, birth)
timestamps. The PsortEventHeap will store these events individually and
relies on PsortMultiProcessEngine to do the actual grouping of events.
Args:
event (EventObject): event.
Returns:
tuple: containing:
str: identifier of the event MACB group or None if the event cannot
be grouped.
str: identifier of the event content. | codesearchnet |
def __init__(self, url, username, password, auth_header=DEFAULT_AUTH_HEADER, cafile=None):
    """Constructor.

    Args:
        url: API url endpoint.
        username: API username or real username.
        password: API token or user password.
        auth_header: API HTTP header used to pass the credentials.
        cafile: optional path to a CA bundle used for TLS verification.
    """
    self._url = url
    self._username = username
    self._password = password
    self._auth_header = auth_header
    self._cafile = cafile
Args:
url: API url endpoint
username: API username or real username
password: API token or user password
auth_header: API HTTP header | juraj-google-style |
def number_check(check, return_number=True):
    """Verify that the supplied item parses as an integer.

    Args:
        check: The thing to check for a number.
        return_number: When True, keep prompting the user until a numeric
            value is entered and return it; when False, return whether
            ``check`` parsed as an integer.

    Returns:
        The (possibly re-entered) numeric value when ``return_number`` is
        True, otherwise a bool indicating whether ``check`` was numeric.
    """
    def _is_int(value):
        # Single home for the parse-and-log logic the original duplicated.
        try:
            int(value)
            return True
        except ValueError:
            LOGGER.critical('Function number_check ValueError {item}'.format(item=value))
            return False

    good = _is_int(check)
    if not return_number:
        return good
    while not good:
        print("That is not a number.")
        print("Please try again.")
        check = input("Please enter a number?: ")
        good = _is_int(check)
    return check
Args:
    check: Thing to check for a number.
    return_number: When True, return the numeric value (prompting until one
        is entered); when False, return True or False.
Returns: The numeric value or a bool, depending on ``return_number``.
def iter_replace_strings(replacements):
    """Create a function that uses replacement pairs to process a string.

    The returned function takes an iterator and yields on each processed
    line.

    Args:
        replacements: Dict containing 'find_string': 'replace_string' pairs.

    Returns:
        function with signature: iterator of strings = function(iterable)
    """
    def function_iter_replace_strings(iterable_strings):
        for text in iterable_strings:
            # Apply every replacement pair in dict order.
            for find, replacement in replacements.items():
                text = text.replace(find, replacement)
            yield text
    return function_iter_replace_strings
The returned function takes an iterator and yields on each processed
line.
Args:
replacements: Dict containing 'find_string': 'replace_string' pairs
Returns:
function with signature: iterator of strings = function(iterable) | juraj-google-style |
def connect(self, uid=UNKNOWN_UID, cmd=JsonRpcCommand.INIT):
    """Opens a connection to a JSON RPC server.

    Opens a connection to a remote client. The connection attempt will time
    out if it takes longer than _SOCKET_CONNECTION_TIMEOUT seconds. Each
    subsequent operation over this socket will time out after
    _SOCKET_READ_TIMEOUT seconds as well.

    Args:
        uid: int, The uid of the session to join, or UNKNOWN_UID to start a
            new session.
        cmd: JsonRpcCommand, The command to use for creating the connection.

    Raises:
        IOError: Raised when the socket times out from io error.
        socket.timeout: Raised when the socket waits too long for connection.
        ProtocolError: Raised when there is an error in the protocol.
    """
    self._counter = self._id_counter()
    self._conn = socket.create_connection(('localhost', self.host_port), _SOCKET_CONNECTION_TIMEOUT)
    self._conn.settimeout(_SOCKET_READ_TIMEOUT)
    # Buffered binary read/write file object over the socket.
    self._client = self._conn.makefile(mode='brw')
    resp = self._cmd(cmd, uid)
    if (not resp):
        raise ProtocolError(self._ad, ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)
    result = json.loads(str(resp, encoding='utf8'))
    # A failed handshake leaves the session id unknown.
    if result['status']:
        self.uid = result['uid']
    else:
        self.uid = UNKNOWN_UID
Opens a connection to a remote client. The connection attempt will time
out if it takes longer than _SOCKET_CONNECTION_TIMEOUT seconds. Each
subsequent operation over this socket will time out after
_SOCKET_READ_TIMEOUT seconds as well.
Args:
uid: int, The uid of the session to join, or UNKNOWN_UID to start a
new session.
cmd: JsonRpcCommand, The command to use for creating the connection.
Raises:
IOError: Raised when the socket times out from io error
socket.timeout: Raised when the socket waits to long for connection.
ProtocolError: Raised when there is an error in the protocol. | codesearchnet |
async def find_deleted(self, seq_set: SequenceSet,
                       selected: SelectedMailbox) -> Sequence[int]:
    """Return all the active message UIDs that have the ``\\Deleted`` flag.

    Args:
        seq_set: The sequence set of the possible messages.
        selected: The selected mailbox session.
    """
    # Flags are resolved per-session: a message counts as deleted only if
    # this session sees \Deleted among its flags.
    session_flags = selected.session_flags
    return [msg.uid async for _, msg in self.find(seq_set, selected)
            if Deleted in msg.get_flags(session_flags)]
Args:
seq_set: The sequence set of the possible messages.
selected: The selected mailbox session. | juraj-google-style |
def trace(name, *trace_args):
    """Record args and return value for a function call.

    The trace is of the form

        function name: {
        function name: arg = value
        function name: arg = value
        ...
        function name: -> return
        function name: }

    This will let us write tools to pretty print the traces with
    indentation etc.

    Args:
        name: module name, usually `__name__`
        *trace_args: function arguments to log; ints are positional
            indexes into the call, strings are keyword-argument names.

    Returns:
        a decorator
    """
    def decorator(f):
        def wrapper(*args, **kwargs):
            t = tracer(name)
            # BUG FIX: the original skipped tracing when the effective level
            # was *below* DEBUG (i.e. when the logger was more verbose).
            # Tracing must be skipped when the logger is LESS verbose than
            # DEBUG, i.e. its numeric level is above logging.DEBUG.
            if t.getEffectiveLevel() > logging.DEBUG:
                return f(*args, **kwargs)
            argspec = inspect.getfullargspec(f)
            t.debug('%s: {', f.__name__)
            for arg in trace_args:
                if isinstance(arg, int):
                    argname = argspec.args[arg]
                    val = args[arg]
                else:
                    argname = arg
                    val = kwargs[arg]
                t.debug('%s: %s = %s', f.__name__, argname, show(val))
            ret = f(*args, **kwargs)
            t.debug('%s: -> %s', f.__name__, show(ret))
            t.debug('%s: }', f.__name__)
            return ret
        return wrapper
    return decorator
The trace is of the form
function name: {
function name: arg = value
function name: arg = value
...
function name: -> return
function name: }
This will let us write tools to pretty print the traces with indentation etc.
Args:
name: module name, usually `__name__`
*trace_args: function arguments to log
Returns:
a decorator | github-repos |
class SquadResult:
    """Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.

    Args:
        unique_id: The unique identifier corresponding to that example.
        start_logits: The logits corresponding to the start of the answer.
        end_logits: The logits corresponding to the end of the answer.
        start_top_index: optional top-k start indexes (beam-search style heads).
        end_top_index: optional top-k end indexes.
        cls_logits: optional logits of the answerability classifier.
    """

    def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
        self.start_logits = start_logits
        self.end_logits = end_logits
        self.unique_id = unique_id
        # NOTE(review): this truthiness check also skips the assignments
        # for a start_top_index of 0 or an empty list -- confirm that
        # ``is not None`` was not intended.
        if start_top_index:
            self.start_top_index = start_top_index
            self.end_top_index = end_top_index
            self.cls_logits = cls_logits
Args:
unique_id: The unique identifier corresponding to that example.
start_logits: The logits corresponding to the start of the answer
end_logits: The logits corresponding to the end of the answer | github-repos |
def dump(self, output, close_after_write=True):
    """Write data to the output with tabular format.

    Args:
        output (file descriptor or str):
            writable stream, or path of the output file.
        close_after_write (bool, optional):
            Close the output after write.
            Defaults to |True|.
    """
    # Duck-type the argument: anything exposing a ``write`` attribute is
    # used as-is, otherwise it is treated as a file path.
    if hasattr(output, 'write'):
        self.stream = output
    else:
        self.stream = io.open(output, 'w', encoding='utf-8')
    try:
        self.write_table()
    finally:
        if close_after_write:
            self.stream.close()
            self.stream = sys.stdout
Args:
output (file descriptor or str):
file descriptor or path to the output file.
close_after_write (bool, optional):
Close the output after write.
Defaults to |True|. | codesearchnet |
def search(self, patterns, start=30, limit=1000, include_category=False):
    """Perform pattern searches against the Investigate database.

    Args:
        patterns: An enumerable of RegEx domain patterns to search for.
        start: How far back results extend from in days (max is 30).
        limit: Number of results to show (max is 1000).
        include_category: Include OpenDNS security categories.

    Returns:
        An enumerable of matching domain strings.
    """
    # The API expects a relative window ('-Ndays') and lowercase booleans.
    query_params = {
        'start': '-{0}days'.format(start),
        'limit': limit,
        'includecategory': str(include_category).lower(),
    }
    return self._multi_get('opendns-patterns', u'search/{0}', patterns, query_params)
Args:
patterns: An enumerable of RegEx domain patterns to search for
start: How far back results extend from in days (max is 30)
limit: Number of results to show (max is 1000)
include_category: Include OpenDNS security categories
Returns:
An enumerable of matching domain strings | codesearchnet |
def IsSocket(self):
    """Determines if the file entry is a socket.

    Returns:
        bool: True if the file entry is a socket.
    """
    # Stat lazily and cache the result; entry_type mirrors the stat type
    # whenever stat information is available.
    if (self._stat_object is None):
        self._stat_object = self._GetStat()
    if (self._stat_object is not None):
        self.entry_type = self._stat_object.type
    return (self.entry_type == definitions.FILE_ENTRY_TYPE_SOCKET)
Returns:
bool: True if the file entry is a socket. | codesearchnet |
def Trim(lst, limit):
    """Trims a given list so that it is not longer than given limit.

    Args:
        lst: A list to trim (mutated in place).
        limit: A maximum number of elements in the list after trimming;
            negative values are treated as zero.

    Returns:
        A suffix of the input list that was trimmed off.
    """
    cutoff = max(0, limit)
    removed = lst[cutoff:]
    # Slice assignment truncates the caller's list in place.
    lst[cutoff:] = []
    return removed
Args:
lst: A list to trim.
limit: A maximum number of elements in the list after trimming.
Returns:
A suffix of the input list that was trimmed. | juraj-google-style |
def process_command(self, command):
    """Processes a user command using aliases.

    Arguments:
        command: A user command list (e.g. argv).

    Returns:
        A ScubaContext object with the following attributes:
            script: a list of command line strings
            image: the docker image name to use
        plus ``entrypoint`` and ``environment`` reflecting any alias
        overrides.

    Raises:
        ConfigError: if extra arguments are passed to a multi-line alias.
    """
    result = ScubaContext()
    result.script = None
    result.image = self.image
    result.entrypoint = self.entrypoint
    result.environment = self.environment.copy()
    if command:
        alias = self.aliases.get(command[0])
        if not alias:
            # Not an alias: run the command verbatim (shell-quoted).
            result.script = [shell_quote_cmd(command)]
        else:
            # An alias may override the image, entrypoint and environment.
            if alias.image:
                result.image = alias.image
            if alias.entrypoint is not None:
                result.entrypoint = alias.entrypoint
            if alias.environment:
                result.environment.update(alias.environment)
            if len(alias.script) > 1:
                # Multi-line alias scripts cannot take extra arguments.
                if len(command) > 1:
                    raise ConfigError('Additional arguments not allowed with multi-line aliases')
                result.script = alias.script
            else:
                # Single-line alias: append the remaining args to its script.
                command.pop(0)
                result.script = [alias.script[0] + ' ' + shell_quote_cmd(command)]
        result.script = flatten_list(result.script)
    return result
Arguments:
command A user command list (e.g. argv)
Returns: A ScubaContext object with the following attributes:
script: a list of command line strings
image: the docker image name to use | juraj-google-style |
def _from_c_op(cls: type[OperationType], c_op, g) -> OperationType:
    """Create an Operation from a TF_Operation.

    For internal use only: This is useful for creating Operation for ops
    indirectly created by C API methods, e.g. the ops created by
    TF_ImportGraphDef.

    Args:
        c_op: a TF_Operation.
        g: A Graph.

    Returns:
        an Operation object.
    """
    # Construct without going through the normal creation path; _init
    # attaches the wrapped C op to graph `g`.
    self = Operation(c_op, SymbolicTensor)
    self._init(g)
    return self
For internal use only: This is useful for creating Operation for ops
indirectly created by C API methods, e.g. the ops created by
TF_ImportGraphDef.
Args:
c_op: a TF_Operation.
g: A Graph.
Returns:
an Operation object. | github-repos |
def execute(cmd, shell=False, poll_period=1.0, catch_out=False):
    """Execute UNIX command and wait for its completion.

    Args:
        cmd (str or list): command to execute
        shell (bool): invoke inside shell environment
        poll_period (float): kept for interface compatibility; unused
        catch_out (bool): collect process' output

    Returns:
        tuple: (returncode, stdout, stderr); stdout/stderr hold the
        collected output only when ``catch_out`` is true (otherwise
        ``communicate`` yields None for both).
    """
    log = logging.getLogger(__name__)
    log.debug("Starting: %s", cmd)
    # Non-shell invocations need an argv list, so split string commands.
    if not shell and isinstance(cmd, string_types):
        cmd = shlex.split(cmd)
    pipe = subprocess.PIPE if catch_out else None
    process = subprocess.Popen(
        cmd,
        shell=shell,
        stderr=pipe,
        stdout=pipe,
        close_fds=True)
    stdout, stderr = process.communicate()
    if stderr:
        log.error("There were errors:\n%s", stderr)
    if stdout:
        log.debug("Process output:\n%s", stdout)
    returncode = process.returncode
    log.debug("Process exit code: %s", returncode)
    return returncode, stdout, stderr
Args:
cmd (str or list): command to execute
shell (bool): invoke inside shell environment
catch_out (bool): collect process' output
Returns:
returncode (int): process return code
stdout (str): collected process stdout (only if catch_out set to true)
stderr (str): collected process stderr (only if catch_out set to true) | juraj-google-style |
def freeze_parameter(self, name):
    """Freeze a parameter by name.

    Args:
        name: The name of the parameter to freeze.

    Raises:
        ValueError: If ``name`` is not a known parameter.
    """
    # Position in the full (frozen-inclusive) name list maps directly to
    # the unfrozen mask.
    names = self.get_parameter_names(include_frozen=True)
    self.unfrozen_mask[names.index(name)] = False
Args:
name: The name of the parameter | juraj-google-style |
def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):
    """Applies embedding based on inputs tensor.

    Exactly one of ``input_ids`` or ``inputs_embeds`` must be provided.

    Args:
        input_ids: int tensor of token ids to embed, or None.
        position_ids: optional int tensor of positions; defaults to
            ``0..seq_len-1`` broadcast over the batch.
        inputs_embeds: precomputed token embeddings, or None.
        training: whether dropout should be active.

    Returns:
        final_embeddings (`tf.Tensor`): output embedding tensor.
    """
    assert not (input_ids is None and inputs_embeds is None)
    if input_ids is not None:
        check_embeddings_within_bounds(input_ids, self.config.vocab_size)
        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
    input_shape = shape_list(inputs_embeds)[:-1]
    if position_ids is None:
        position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
    position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
    # Token + position embeddings, then layer-norm and dropout.
    final_embeddings = inputs_embeds + position_embeds
    final_embeddings = self.LayerNorm(inputs=final_embeddings)
    final_embeddings = self.dropout(inputs=final_embeddings, training=training)
    return final_embeddings
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor. | github-repos |
def get_dict_with_chain(self, chain, only_keys=None, chain_keys=None, exclude_attributes=None, df_format=False):
    """get_dict method which incorporates attributes found in a specific chain.

    Does not overwrite any attributes in the original StructProp.

    Args:
        chain: chain ID whose attributes should be merged in.
        only_keys: optionally restrict which StructProp attributes are exported.
        chain_keys: optionally restrict which chain attributes are exported;
            by default, only chain keys absent from the StructProp dict are used.
        exclude_attributes: attributes to drop from the export.
        df_format: passed through to ``get_dict`` for DataFrame-friendly output.

    Returns:
        dict: attributes of StructProp + the chain specified.
    """
    if not only_keys:
        keys = list(self.__dict__.keys())
    else:
        keys = ssbio.utils.force_list(only_keys)
    if exclude_attributes:
        exclude_attributes = ssbio.utils.force_list(exclude_attributes)
        for x in exclude_attributes:
            if x in keys:
                keys.remove(x)
    else:
        exclude_attributes = []
    # Chain containers are always excluded to avoid recursive/container output.
    exclude_attributes.extend(['mapped_chains', 'chains'])
    final_dict = {k: v for k, v in Object.get_dict(self, only_attributes=keys, exclude_attributes=exclude_attributes,
                                                   df_format=df_format).items()}
    chain_prop = self.chains.get_by_id(chain)
    # By default only take chain keys that the StructProp dict lacks, so
    # nothing in the original output gets overwritten.
    if not chain_keys:
        chain_keys = [x for x in chain_prop.get_dict().keys() if x not in final_dict]
    chain_dict = chain_prop.get_dict(only_attributes=chain_keys, df_format=df_format)
    final_dict.update(chain_dict)
    return final_dict
in the original StructProp.
Args:
chain:
only_keys:
chain_keys:
exclude_attributes:
df_format:
Returns:
dict: attributes of StructProp + the chain specified | juraj-google-style |
def walk(self, action, user_data=None):
    """Walk the hierarchy, applying `action` to each filename.

    Args:
        action: callable, the callable to invoke for each filename,
            will be invoked with the filename, the subfiles, the level
            in the sitemap, and ``user_data``.
        user_data: opaque value forwarded to every ``action`` call.
    """
    root = self.__root
    # The index file is the top of the sitemap (level 0); children start at 1.
    action(self.index_file, root, 0, user_data)
    self.__do_walk(root, 1, action, user_data)
def record_queue_metrics(self, active_requests: int, waiting_requests: int) -> None:
    """Record gauge metrics about active and waiting requests.

    Args:
        active_requests: Number of active requests.
        waiting_requests: Number of waiting requests.
    """
    # Metrics are optional: silently skip when OpenTelemetry is not installed.
    if not _has_opentelemetry:
        return
    try:
        self.active_requests_gauge.set(active_requests)
        self.waiting_requests_gauge.set(waiting_requests)
        logger.debug(f'Queue metrics: {active_requests} active requests, {waiting_requests} waiting requests')
    except Exception as e:
        # Best effort: a failing metrics exporter must never break the caller.
        logger.warning(f'Failed to record queue metrics: {e}') | Record metrics about active and waiting requests.
Args:
active_requests: Number of active requests
waiting_requests: Number of waiting requests | github-repos |
def update_case(case_obj, existing_case):
    """Update an existing case.

    This will add paths to VCF files, individuals etc.

    Args:
        case_obj(models.Case)
        existing_case(models.Case)

    Returns:
        updated_case(models.Case): Updated existing case

    Raises:
        CaseError: if the existing case already has a VCF of the same type.
    """
    # (vcf key, variant-count key, individuals key, inds key, type label)
    vcf_specs = [
        ('vcf_path', 'nr_variants', 'individuals', '_inds', 'snv'),
        ('vcf_sv_path', 'nr_sv_variants', 'sv_individuals', '_sv_inds', 'sv'),
    ]
    updated_case = deepcopy(existing_case)
    for file_name, nr_key, inds_key, inds_attr, variant_type in vcf_specs:
        if not case_obj.get(file_name):
            continue
        if updated_case.get(file_name):
            LOG.warning("VCF of type %s already exists in case", variant_type)
            raise CaseError("Can not replace VCF in existing case")
        updated_case[file_name] = case_obj[file_name]
        updated_case[nr_key] = case_obj[nr_key]
        updated_case[inds_key] = case_obj[inds_key]
        updated_case[inds_attr] = case_obj[inds_attr]
    return updated_case
def helper(*commands):
    """Decorate a function to be the helper function of commands.

    Arguments:
        commands: Names of commands that should trigger this function object.

    Interface of helper methods::

        @helper('some-command')
        def help_foo(self, args):
            '''
            Arguments:
                args: A list of arguments.
            Returns:
                A string that is the help message.
            '''
    """
    def attach(f):
        # Each decorated function gets its own fresh list of targets.
        f.__help_targets__ = [*commands]
        return f
    return attach
def add_error(self, error):
    """Record an error from expect APIs.

    A position stamp — wall-clock time plus the running error count — is
    generated so multiple expect failures can be ordered later.

    Args:
        error: Exception or signals.ExceptionRecord, the error to add.
    """
    self._count += 1
    position = 'expect@%s+%s' % (time.time(), self._count)
    self._record.add_error(position, error)
def _parse_flowcontrol_receive(self, config):
value = 'off'
match = re.search('flowcontrol receive (\\w+)$', config, re.M)
if match:
value = match.group(1)
return dict(flowcontrol_receive=value) | Scans the config block and returns the flowcontrol receive value
Args:
config (str): The interface config block to scan
Returns:
dict: Returns a dict object with the flowcontrol receive value
retrieved from the config block. The returned dict object
is intended to be merged into the interface resource dict | codesearchnet |
def install_json_params(self, ij=None):
    """Return install.json params in a dict with the param name as key.

    The result is cached on the instance; passing ``ij`` explicitly forces
    a rebuild from the provided contents.

    Args:
        ij (dict, optional): Defaults to None. The install.json contents.

    Returns:
        dict: The install.json input params with name as key.
    """
    if self._install_json_params is None or ij is not None:
        if ij is None:
            ij = self.install_json
        params = {}
        for param in ij.get('params') or []:
            # setdefault keeps the first occurrence when names repeat.
            params.setdefault(param.get('name'), param)
        self._install_json_params = params
    return self._install_json_params
def trk50(msg):
    """True track angle, BDS 5,0 message.

    Args:
        msg (String): 28 bytes hexadecimal message (BDS50) string

    Returns:
        float: angle in degrees to true north (from 0 to 360)
    """
    bits = hex2bin(data(msg))
    # Status bit cleared means the field is unavailable.
    if bits[11] == '0':
        return None
    sign = int(bits[12])
    value = bin2int(bits[13:23])
    # 10-bit two's-complement magnitude, LSB = 90/512 degrees.
    if sign:
        value -= 1024
    trk = value * 90.0 / 512.0
    # Wrap negative angles into [0, 360).
    if trk < 0:
        trk += 360
    return round(trk, 3)
def add_item(name, command, system_wide=False):
    """Adds a program to startup.

    Adds a program to user startup (or system-wide startup when requested).

    Args:
        name (str): The name of the startup entry.
        command (str): The command to run, or a path to an executable file.
        system_wide (bool): Add to system-wide startup.

    Note:
        ``system_wide`` requires superuser/admin privileges.
    """
    desktop_env = system.get_name()
    # Bug fix: this flag was previously only assigned inside the `if`,
    # leaving it undefined (NameError) whenever `command` was not a file.
    command_is_file = os.path.isfile(command)
    if command_is_file and desktop_env != 'windows':
        # Use an argument list (no shell) so paths with spaces survive.
        sp.Popen(['chmod', '+x', command])
    if desktop_env == 'windows':
        import winreg
        if system_wide:
            startup_dir = os.path.join(
                winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'),
                'Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        else:
            startup_dir = os.path.join(
                get_config_dir()[0],
                'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        if not command_is_file:
            # Wrap a bare command line in a .bat file inside the startup dir.
            with open(os.path.join(startup_dir, name + '.bat'), 'w') as f:
                f.write(command)
        else:
            shutil.copy(command, startup_dir)
    elif desktop_env == 'mac':
        # Bug fix: the original applied `%` to the *list* instead of the
        # string, which raised TypeError. Use an argument list, no shell.
        sp.Popen(['launchctl', 'submit', '-l', name, '--', command])
    elif desktop_env == 'unknown':
        if system_wide:
            login_file = '/etc/profile'
        else:
            login_file = os.path.expanduser('~/.profile')
        with open(login_file, 'a') as f:
            # Ensure the command sits on its own line in the login script.
            f.write('\n' + command + '\n')
    else:
        try:
            desktop_file_name = name + '.desktop'
            startup_file = os.path.join(
                get_config_dir('autostart', system_wide=system_wide)[0],
                desktop_file_name)
            desktop_str = desktopfile.construct(
                name=name, exec_=command,
                additional_opts={'X-GNOME-Autostart-enabled': 'true'})
            with open(startup_file, 'w') as f:
                f.write(desktop_str)
        except Exception:
            # Best effort: autostart dir may be missing or unwritable.
            pass
def __init__(self,
             moments: Iterable[ops.Moment] = (),
             device: devices.Device = devices.UnconstrainedDevice) -> None:
    """Initializes a circuit.

    Args:
        moments: The initial list of moments defining the circuit.
        device: Hardware that the circuit should be able to run on.
    """
    self._moments = [*moments]
    self._device = device
    # Fail fast: ask the device to validate the circuit at construction time.
    self._device.validate_circuit(self)
def _normalize_edge(self, edge: EDGE) -> EDGE:
def lower(n: GridQubit, m: GridQubit) -> bool:
return ((n.row < m.row) or ((n.row == m.row) and (n.col < m.col)))
(n1, n2) = edge
return ((n1, n2) if lower(n1, n2) else (n2, n1)) | Gives unique representative of the edge.
Two edges are equivalent if they form an edge between the same nodes.
This method returns representative of this edge which can be compared
using equality operator later.
Args:
edge: Edge to normalize.
Returns:
Normalized edge with lexicographically lower node on the first
position. | codesearchnet |
def _verify_parsed_token(parsed_token, issuers, audiences, allowed_client_ids, is_legacy_google_auth=True):
if (parsed_token.get('iss') not in issuers):
_logger.warning('Issuer was not valid: %s', parsed_token.get('iss'))
return False
aud = parsed_token.get('aud')
if (not aud):
_logger.warning('No aud field in token')
return False
cid = parsed_token.get('azp')
audience_allowed = ((aud in audiences) or (is_legacy_google_auth and (aud == cid)))
if (not audience_allowed):
_logger.warning('Audience not allowed: %s', aud)
return False
if is_legacy_google_auth:
if (list(allowed_client_ids) == SKIP_CLIENT_ID_CHECK):
_logger.warning("Client ID check can't be skipped for ID tokens. Id_token cannot be verified.")
return False
elif ((not cid) or (cid not in allowed_client_ids)):
_logger.warning('Client ID is not allowed: %s', cid)
return False
if ('email' not in parsed_token):
return False
return True | Verify a parsed user ID token.
Args:
parsed_token: The parsed token information.
issuers: A list of allowed issuers
audiences: The allowed audiences.
allowed_client_ids: The allowed client IDs.
Returns:
True if the token is verified, False otherwise. | codesearchnet |
def get_path(self, url):
    """Returns the path of a cached resource.

    Args:
        url: The url of the resource.

    Returns:
        The path to the cached resource, or None if it is not in the cache.
    """
    candidate = self._url_to_path(url)
    # Only report paths that actually exist on disk.
    return candidate if os.path.exists(candidate) else None
def check(self, locator=None, allow_label_click=None, **kwargs):
    """Find a check box and mark it as checked.

    Args:
        locator (str, optional): Which check box to check.
        allow_label_click (bool, optional): Attempt to click the label to
            toggle state if the element is non-visible.
        **kwargs: Arbitrary keyword arguments for the underlying query.
    """
    # Delegate to the shared label-aware helper with checked=True.
    self._check_with_label(
        "checkbox", True, locator=locator, allow_label_click=allow_label_click, **kwargs) | Find a check box and mark it as checked. The check box can be found via name, id, or label
text. ::
page.check("German")
Args:
locator (str, optional): Which check box to check.
allow_label_click (bool, optional): Attempt to click the label to toggle state if
element is non-visible. Defaults to :data:`capybara.automatic_label_click`.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`. | juraj-google-style |
def load(self, filename, offset):
    """Loads NTFS volume information.

    Args:
        filename (str): Path to file/device to read the volume
            information from.
        offset (uint): Valid NTFS partition offset from the beginning
            of the file/device.

    Raises:
        IOError: If source file/device does not exist or is not readable.
    """
    self.offset = offset
    self.filename = filename
    # Parse the boot sector first; it supplies the MFT record size.
    self.bootsector = BootSector(filename=filename,
                                 length=NTFS_BOOTSECTOR_SIZE,
                                 offset=self.offset)
    self.mft_table = MftTable(mft_entry_size=self.bootsector.mft_record_size,
                              filename=self.filename,
                              offset=self.mft_table_offset)
    # Cache the fixed number of reserved system entries up front.
    self.mft_table.preload_entries(NUM_SYSTEM_ENTRIES)
    self._load_volume_information()
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Write the encoding of the Boolean object to the output stream.

    Args:
        ostream (Stream): A buffer to contain the encoded bytes of a
            Boolean object. Usually a BytearrayStream object. Required.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.0.
    """
    # Write the shared primitive fields via the parent, then this
    # object's boolean value.
    super(Boolean, self).write(ostream, kmip_version=kmip_version)
    self.write_value(ostream, kmip_version=kmip_version) | Write the encoding of the Boolean object to the output stream.
Args:
ostream (Stream): A buffer to contain the encoded bytes of a
Boolean object. Usually a BytearrayStream object. Required.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0. | codesearchnet |
def update(self, friendly_name=None, description=None, query=None):
    """Selectively updates View information.

    Any parameter that is None (the default) is not applied in the update.

    Args:
        friendly_name: if not None, the new friendly name.
        description: if not None, the new description.
        query: if not None, a new query (string or Query object) for the View.
    """
    self._table._load_info()
    if query is not None:
        # Accept either a Query object or raw SQL text.
        sql = query.sql if isinstance(query, _query.Query) else query
        self._table._info['view'] = {'query': sql}
    self._table.update(friendly_name=friendly_name, description=description)
def remove_node_by_value(self, value):
    """Delete all nodes in ``self.node_list`` with the value ``value``.

    Links pointing at the removed nodes are pruned from every surviving
    node's ``link_list`` as well.

    Args:
        value (Any): The value to find and delete owners of.

    Returns: None

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> graph = Graph([node_1])
        >>> graph.remove_node_by_value('One')
        >>> len(graph.node_list)
        0
    """
    survivors = [n for n in self.node_list if n.value != value]
    self.node_list = survivors
    for node in survivors:
        node.link_list = [
            link for link in node.link_list if link.target.value != value
        ]
def exponential_moving_average(self, var, avg_var=None, decay=0.999, ignore_nan=False):
    """Calculates the exponential moving average.

    Adds a variable to keep track of the exponential moving average of
    ``var`` and registers the update operation with the bookkeeper's
    UPDATE_OPS collection.

    Args:
        var: The variable for which a moving average should be computed.
        avg_var: The variable to set the average into; if None a zero
            initialized one is created (named '<var>_average').
        decay: How much history to use in the moving average.
            Higher means more history values; must be in [0, 1).
        ignore_nan: If the value is NaN or Inf, skip it.

    Returns:
        The averaged variable.

    Raises:
        ValueError: if decay is not in [0, 1).
    """
    with self._g.as_default():
        if ((decay < 0) or (decay >= 1.0)):
            raise ValueError(('Decay is %5.2f, but has to be in [0, 1).' % decay))
        if (avg_var is None):
            avg_name = ('%s_average' % _bare_var_name(var))
            # Build the initializer outside any inherited control dependencies.
            with tf.control_dependencies(None):
                with tf.name_scope((avg_name + '/Initializer/')):
                    if isinstance(var, tf.Variable):
                        init_val = var.initialized_value()
                    elif var.get_shape().is_fully_defined():
                        init_val = tf.constant(0, shape=var.get_shape(), dtype=var.dtype.base_dtype)
                    else:
                        # Unknown shape: fall back to a scalar zero constant.
                        init_val = tf.constant(0, dtype=var.dtype.base_dtype)
            avg_var = tf.Variable(init_val, name=avg_name, trainable=False)
        # Warm-up schedule: early in training the effective decay is smaller
        # (at least 0.9), so the average tracks the first values quickly and
        # approaches the requested `decay` as global_step grows.
        num_updates = tf.cast(self.global_step, tf.float32)
        decay = tf.minimum(decay, tf.maximum(0.9, ((1.0 + num_updates) / (10.0 + num_updates))))
        with tf.device(avg_var.device):
            if ignore_nan:
                # Replace non-finite inputs with the current average,
                # effectively skipping them.
                var = tf.where(tf.is_finite(var), var, avg_var)
            if var.get_shape().is_fully_defined():
                avg_update = tf.assign_sub(avg_var, ((1 - decay) * (avg_var - var)))
            else:
                # Dynamic shape: assign without shape validation instead of
                # assign_sub, which requires a fully defined shape.
                avg_update = tf.assign(avg_var, (avg_var - ((1 - decay) * (avg_var - var))), validate_shape=False)
        self._g.add_to_collection(GraphKeys.UPDATE_OPS, avg_update)
        return avg_update | Calculates the exponential moving average.
TODO(): check if this implementation of moving average can now
be replaced by tensorflows implementation.
Adds a variable to keep track of the exponential moving average and adds an
update operation to the bookkeeper. The name of the variable is
'%s_average' % name prefixed with the current variable scope.
Args:
var: The variable for which a moving average should be computed.
avg_var: The variable to set the average into, if None create a zero
initialized one.
decay: How much history to use in the moving average.
Higher, means more history values [0, 1) accepted.
ignore_nan: If the value is NaN or Inf, skip it.
Returns:
The averaged variable.
Raises:
ValueError: if decay is not in [0, 1). | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.