code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def plot(self, ax=None, legend=None, return_fig=False, **kwargs):
    """Plot a curve.

    Args:
        ax (ax): A matplotlib axis. Optional; a new figure is created if
            omitted.
        legend (striplog.legend): A legend. Optional.
        return_fig (bool): Whether to return the matplotlib figure.
            Default False. Only relevant when no ax was passed in.
        **kwargs: Arguments for ``ax.plot()`` / ``ax.set()``.

    Returns:
        ax if you passed in an ax, the figure if ``return_fig`` is True,
        otherwise None.
    """
    if ax is None:
        fig = plt.figure(figsize=(2, 10))
        ax = fig.add_subplot(111)
        return_ax = False
    else:
        return_ax = True

    d = None
    if legend is not None:
        try:
            d = legend.get_decor(self)
        except Exception:
            # Not every legend can decorate this curve; fall back to
            # matplotlib defaults instead of failing the plot.
            d = None

    if d is not None:
        kwargs['color'] = d.colour
        kwargs['lw'] = getattr(d, 'lineweight', None) or getattr(d, 'lw', 1)
        kwargs['ls'] = getattr(d, 'linestyle', None) or getattr(d, 'ls', '-')

        # Collect any axis-level settings the decor carries.
        axkwargs = {}
        xlim = getattr(d, 'xlim', None)
        if xlim is not None:
            axkwargs['xlim'] = list(map(float, xlim.split(',')))
        xticks = getattr(d, 'xticks', None)
        if xticks is not None:
            axkwargs['xticks'] = list(map(float, xticks.split(',')))
        xscale = getattr(d, 'xscale', None)
        if xscale is not None:
            axkwargs['xscale'] = xscale
        ax.set(**axkwargs)

    ax.plot(self, self.basis, **kwargs)
    ax.set_title(self.mnemonic)
    ax.set_xlabel(self.units)

    # Rotate tick labels so long values do not collide.
    for label in ax.get_xticklabels():
        label.set_rotation(90)

    # Depth increases downwards, so invert the y axis.
    ax.set_ylim([self.stop, self.start])
    ax.grid('on', color='k', alpha=0.33, lw=0.33, linestyle='-')

    if return_ax:
        return ax
    elif return_fig:
        return fig
    return None
Args:
ax (ax): A matplotlib axis.
legend (striplog.legend): A legend. Optional.
return_fig (bool): whether to return the matplotlib figure.
Default False.
kwargs: Arguments for ``ax.set()``
Returns:
ax. If you passed in an ax, otherwise None. | codesearchnet |
def close(self, reason=None):
    """Stop consuming messages and shutdown all helper threads.

    This method is idempotent. Additional calls will have no effect.

    Args:
        reason (Any): The reason to close this. If None, this is considered
            an "intentional" shutdown. This is passed to the callbacks
            specified via :meth:`add_close_callback`.
    """
    with self._closing:
        if self._closed:
            return
        if self.is_active:
            # Stop the helpers in dependency order: consumer first so no
            # new messages arrive while the rest are torn down.
            _LOGGER.debug("Stopping consumer.")
            self._consumer.stop()
            self._consumer = None
            _LOGGER.debug("Stopping scheduler.")
            self._scheduler.shutdown()
            self._scheduler = None
            _LOGGER.debug("Stopping leaser.")
            self._leaser.stop()
            self._leaser = None
            _LOGGER.debug("Stopping dispatcher.")
            self._dispatcher.stop()
            self._dispatcher = None
            _LOGGER.debug("Stopping heartbeater.")
            self._heartbeater.stop()
            self._heartbeater = None
        self._rpc = None
        self._closed = True
        _LOGGER.debug("Finished stopping manager.")
        # Notify interested parties that the manager is now closed.
        for callback in self._close_callbacks:
            callback(self, reason) | Stop consuming messages and shutdown all helper threads.
This method is idempotent. Additional calls will have no effect.
Args:
reason (Any): The reason to close this. If None, this is considered
an "intentional" shutdown. This is passed to the callbacks
specified via :meth:`add_close_callback`. | juraj-google-style |
def set_custom_predict_fn(self, predict_fn):
    """Sets a custom python function to use for model inference.

    Instead of using TF Serving to host a model for WIT to query, WIT can
    directly use a custom function as the model to query. The provided
    function should accept example protos and return classification
    probabilities or regression scores.

    Args:
        predict_fn: The custom python function which will be used for
            model inference.

    Returns:
        self, in order to enable method chaining.
    """
    # Any previously-configured estimator is superseded by the function.
    self.delete('estimator_and_spec')
    self.store('custom_predict_fn', predict_fn)
    self.set_inference_address('custom_predict_fn')
    # A model name is required downstream; provide a default if unset.
    self.has_model_name() or self.set_model_name('1')
    return self
Instead of using TF Serving to host a model for WIT to query, WIT can
directly use a custom function as the model to query. In this case, the
provided function should accept example protos and return:
- For classification: A 2D list of numbers. The first dimension is for
each example being predicted. The second dimension are the probabilities
for each class ID in the prediction.
- For regression: A 1D list of numbers, with a regression score for each
example being predicted.
Args:
predict_fn: The custom python function which will be used for model
inference.
Returns:
self, in order to enabled method chaining. | codesearchnet |
def get_flat_size(self):
    """Returns the total length of all of the flattened variables.

    Returns:
        The length of all flattened variables concatenated.
    """
    total = 0
    for variable in self.variables.values():
        total += np.prod(variable.get_shape().as_list())
    return total
Returns:
The length of all flattened variables concatenated. | codesearchnet |
def delay(self, n, start_time):
    """Calculate delay before the next retry.

    Args:
        n: the number of the current attempt. The first attempt should be 1.
        start_time: the time when retrying started, in unix time.

    Returns:
        Number of seconds to wait before the next retry; -1 if retrying
        should give up.
    """
    elapsed = time.time() - start_time
    give_up = n > self.max_retries or (
        n > self.min_retries and elapsed > self.max_retry_period)
    if give_up:
        return -1
    # Exponential backoff, capped at max_delay.
    backoff = self.initial_delay * math.pow(self.backoff_factor, n - 1)
    return min(backoff, self.max_delay)
Args:
n: the number of current attempt. The first attempt should be 1.
start_time: the time when retry started in unix time.
Returns:
Number of seconds to wait before next retry. -1 if retry should give up. | codesearchnet |
def build_input_fns(data_dir, batch_size):
    """Builds iterators for train and evaluation data.

    Each object is represented as a bag-of-words vector.

    Args:
        data_dir: Folder in which to store the data.
        batch_size: Batch size for both train and evaluation.

    Returns:
        train_input_fn: A function that returns an iterator over the
            training data.
        eval_input_fn: A function that returns an iterator over the
            evaluation data.
        vocabulary: A mapping of a word's integer index to its string.
    """
    # Pickle files are binary: text mode ('r') breaks pickle.load on
    # Python 3, so open in 'rb'.
    with open(download(data_dir, 'vocab.pkl'), 'rb') as f:
        words_to_idx = pickle.load(f)
    num_words = len(words_to_idx)

    # Invert the word->index mapping into an index->word list.
    vocabulary = [None] * num_words
    for (word, idx) in words_to_idx.items():
        vocabulary[idx] = word

    def train_input_fn():
        """Returns the next shuffled, repeated training batch."""
        dataset = newsgroups_dataset(
            data_dir, 'train', num_words, shuffle_and_repeat=True)
        dataset = dataset.batch(batch_size).prefetch(32)
        return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()

    def eval_input_fn():
        """Returns the next evaluation batch (deterministic order)."""
        dataset = newsgroups_dataset(
            data_dir, 'test', num_words, shuffle_and_repeat=False)
        dataset = dataset.batch(batch_size)
        return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()

    return (train_input_fn, eval_input_fn, vocabulary)
Each object is represented as a bag-of-words vector.
Arguments:
data_dir: Folder in which to store the data.
batch_size: Batch size for both train and evaluation.
Returns:
train_input_fn: A function that returns an iterator over the training data.
eval_input_fn: A function that returns an iterator over the evaluation data.
vocabulary: A mapping of word's integer index to the corresponding string. | codesearchnet |
def _GetByteStreamOperation(self):
    """Retrieves the byte stream operation.

    Returns:
        ByteStreamOperation: byte stream operation, or None if the struct
            format string cannot be determined.
    """
    byte_order_string = self.GetStructByteOrderString()
    format_string = self.GetStructFormatString()
    if not format_string:
        return None
    # Prefix the format with the byte-order marker expected by struct.
    combined_format = byte_order_string + format_string
    return byte_operations.StructOperation(combined_format)
Returns:
ByteStreamOperation: byte stream operation or None if unable to determine. | codesearchnet |
def cosmic_link(variant_obj):
    """Compose a link to the COSMIC database.

    Args:
        variant_obj (scout.models.Variant): variant with an optional
            ``cosmic_ids`` list.

    Returns:
        str: Link to the COSMIC database if a cosmic id is present,
            otherwise None.
    """
    cosmic_ids = variant_obj.get('cosmic_ids')
    if not cosmic_ids:
        return None
    # Link on the first id only; the COSMIC search page accepts both
    # COSM-prefixed and bare numeric ids.
    cosmic_id = cosmic_ids[0]
    url_template = "https://cancer.sanger.ac.uk/cosmic/search?q={}"
    return url_template.format(cosmic_id)
Args:
variant_obj(scout.models.Variant)
Returns:
url_template(str): Link to COSMIC database if cosmic id is present | juraj-google-style |
def rgb_to_yuv(images):
    """Converts one or more images from RGB to YUV.

    Outputs a tensor of the same shape as the `images` tensor, containing
    the YUV value of the pixels. The output is only well defined if the
    values in `images` are in [0, 1].

    Args:
        images: 2-D or higher rank. Image data to convert. Last dimension
            must be size 3.

    Returns:
        images: tensor with the same shape as `images`.
    """
    images = ops.convert_to_tensor(images, name='images')
    kernel = ops.convert_to_tensor(
        _rgb_to_yuv_kernel, dtype=images.dtype, name='kernel')
    # Contract the channel (last) axis of the image against the kernel.
    channel_axis = images.get_shape().ndims - 1
    return math_ops.tensordot(images, kernel, axes=[[channel_axis], [0]])
Outputs a tensor of the same shape as the `images` tensor, containing the YUV
value of the pixels.
The output is only well defined if the value in images are in [0, 1].
There are two ways of representing an image: [0, 255] pixel values range or
[0, 1] (as float) pixel values range. Users need to convert the input image
into a float [0, 1] range.
Args:
images: 2-D or higher rank. Image data to convert. Last dimension must be
size 3.
Returns:
images: tensor with the same shape as `images`. | github-repos |
def predict_raw(self, X):
    """Predict raw (pre-threshold) targets for a feature matrix.

    Args:
        X (np.array or scipy.sparse matrix of float): feature matrix for
            prediction, one row per example.

    Returns:
        np.array of float: raw network outputs, one per row of X.
    """
    b = np.ones((X.shape[0], 1))
    # Split the flat weight vector into hidden->output (w2) and
    # input->hidden (w1) matrices; both include a bias row.
    w2 = self.w[-(self.h + 1):].reshape(self.h + 1, 1)
    w1 = self.w[:-(self.h + 1)].reshape(self.i + 1, self.h)
    if X.shape[1] > self.i:
        # More features than the model was trained on: drop the extras.
        X = X[:, :self.i]
    elif X.shape[1] < self.i:
        # Fewer features: keep only the matching input weights plus the
        # bias row. range() has no append() in Python 3, so build a list.
        idx = list(range(X.shape[1]))
        idx.append(self.i)
        w1 = w1[idx, :]
    if sparse.issparse(X):
        return np.hstack((sigm(sparse.hstack((X, b)).dot(w1)), b)).dot(w2)
    else:
        return np.hstack((sigm(np.hstack((X, b)).dot(w1)), b)).dot(w2)
Args:
X (np.array of float): feature matrix for prediction | juraj-google-style |
def _parse_options(self, options):
    """Parse the command's options into ``self.fields``.

    Args:
        options (dict): Options with which the command was called.

    Raises:
        CommandError: if a user matching the provided username does not
            exist, or if the provided logout_uri is invalid.
    """
    # Copy only recognised options that were actually provided.
    for key in ('username', 'client_name', 'client_id', 'client_secret', 'trusted', 'logout_uri'):
        value = options.get(key)
        if (value is not None):
            self.fields[key] = value
    # Resolve the username option into an actual user instance.
    username = self.fields.pop('username', None)
    if (username is not None):
        try:
            user_model = get_user_model()
            self.fields['user'] = user_model.objects.get(username=username)
        except user_model.DoesNotExist:
            raise CommandError('User matching the provided username does not exist.')
    # The model stores the client name under the 'name' key.
    client_name = self.fields.pop('client_name', None)
    if (client_name is not None):
        self.fields['name'] = client_name
    logout_uri = self.fields.get('logout_uri')
    if logout_uri:
        try:
            URLValidator()(logout_uri)
        except ValidationError:
            raise CommandError('The logout_uri is invalid.') | Parse the command's options.
Arguments:
options (dict): Options with which the command was called.
Raises:
CommandError, if a user matching the provided username does not exist. | codesearchnet |
def parse_non_selinux(parts):
    """Parse the non-selinux portion of an ``ls`` output line.

    Args:
        parts (list): A four element list of strings representing the
            initial parts of an ls line after the permission bits: link
            count, owner, group, and everything else.

    Returns:
        dict: Contains links, owner, group, date, and name. If the line
        represented a device, major and minor numbers are included;
        otherwise size is included. If the raw name was a symbolic link,
        link is included.
    """
    links, owner, group, last = parts
    result = {'links': int(links), 'owner': owner, 'group': group}

    if ',' in last[:4]:
        # A comma in the size field means a device entry: "major, minor".
        major, minor, rest = last.split(None, 2)
        result['major'] = int(major.rstrip(','))
        result['minor'] = int(minor)
    else:
        size, rest = last.split(None, 1)
        result['size'] = int(size)

    # The date occupies a fixed-width 12-character field.
    result['date'] = rest[:12]
    name, target = parse_path(rest[13:])
    result['name'] = name
    if target:
        result['link'] = target
    return result
Args:
parts (list): A four element list of strings representing the initial
parts of an ls line after the permission bits. The parts are link
count, owner, group, and everything else.
Returns:
A dict containing links, owner, group, date, and name. If the line
represented a device, major and minor numbers are included. Otherwise,
size is included. If the raw name was a symbolic link, link is
included. | codesearchnet |
def expand_dims(self, image):
    """Expands 2-dimensional `image` to 3 dimensions by prepending an axis.

    Args:
        image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
            The image to expand. PIL images are returned unchanged.
    """
    self._ensure_format_supported(image)
    if isinstance(image, PIL.Image.Image):
        # PIL images carry their own layout; nothing to expand.
        return image
    if is_torch_tensor(image):
        return image.unsqueeze(0)
    return np.expand_dims(image, axis=0)
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to expand. | github-repos |
def dbmax_stddev(self, value=None):
    """Corresponds to IDD Field `dbmax_stddev`.

    Standard deviation of extreme annual maximum dry-bulb temperature.

    Args:
        value (float): value for IDD Field `dbmax_stddev` (unit: C).
            If `value` is None it will not be checked against the
            specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is None:
        self._dbmax_stddev = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `dbmax_stddev`'.format(value))
    self._dbmax_stddev = converted
Standard deviation of extreme annual maximum dry-bulb temperature
Args:
value (float): value for IDD Field `dbmax_stddev`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | juraj-google-style |
def rename_next_state_fluent(name: str) -> str:
    """Returns the next state fluent canonical name.

    Strips the prime marker (the character immediately before '/') from a
    current state fluent name such as ``"fluent'/1"``.

    Args:
        name (str): The current state fluent name.

    Returns:
        str: The next state fluent name.
    """
    sep = name.index('/')
    functor = name[:sep - 1]
    arity = name[sep + 1:]
    return "{}/{}".format(functor, arity)
Args:
name (str): The current state fluent name.
Returns:
str: The next state fluent name. | juraj-google-style |
def __init__(self, context):
    """Instantiates CorePlugin.

    Args:
        context: A base_plugin.TBContext instance.
    """
    # Cache the pieces of the TBContext this plugin serves from.
    self._logdir = context.logdir
    self._db_uri = context.db_uri
    self._window_title = context.window_title
    self._multiplexer = context.multiplexer
    self._db_connection_provider = context.db_connection_provider
    self._assets_zip_provider = context.assets_zip_provider | Instantiates CorePlugin.
Args:
context: A base_plugin.TBContext instance. | juraj-google-style |
def __init__(self, xid=None, flags=ConfigFlag.OFPC_FRAG_NORMAL,
             miss_send_len=ControllerMaxLen.OFPCML_NO_BUFFER):
    """Create a SetConfig with the optional parameters below.

    Args:
        xid (int): xid to be used on the message header.
        flags (:class:`~pyof.v0x01.controller2switch.common.ConfigFlag`):
            OFPC_* flags.
        miss_send_len (int): UBInt16 max bytes of new flow that the
            datapath should send to the controller.
    """
    super().__init__(xid, flags, miss_send_len)
    # This subclass only fixes the OpenFlow message type.
    self.header.message_type = Type.OFPT_SET_CONFIG | Create a SetConfig with the optional parameters below.
Args:
xid (int): xid to be used on the message header.
flags (:class:`~pyof.v0x01.controller2switch.common.ConfigFlag`):
OFPC_* flags.
miss_send_len (int): UBInt16 max bytes of new flow that the
datapath should send to the controller. | juraj-google-style |
def query(self, terms=None, negated_terms=None):
    """Basic boolean query, using inference.

    Args:
        terms (list): class ids. A subject matches if it has at least one
            inferred annotation to each of the specified classes. The
            special id 'owl:Thing' matches every subject. An empty or
            missing list also matches every subject.
        negated_terms (list): class ids. Subjects with an inferred
            annotation to any of these classes are excluded.

    Returns:
        list: matching subjects, in ``self.subjects`` order.
    """
    wanted = set(terms) if terms is not None else set()
    unwanted = set(negated_terms) if negated_terms is not None else set()
    match_everything = 'owl:Thing' in wanted

    results = []
    for subject in self.subjects:
        inferred = self.inferred_types(subject)
        # Positive filter: all wanted classes must be inferred.
        if not (match_everything or len(wanted.intersection(inferred)) == len(wanted)):
            continue
        # Negative filter: no unwanted class may be inferred.
        if unwanted.intersection(inferred):
            continue
        results.append(subject)
    return results
Arguments:
- terms: list
list of class ids. Returns the set of subjects that have at least one inferred annotation to each of the specified classes.
- negated_terms: list
list of class ids. Filters the set of subjects so that there are no inferred annotations to any of the specified classes | juraj-google-style |
def check_schema_equal(left: Union['bigquery.TableSchema', 'bigquery.TableFieldSchema'], right: Union['bigquery.TableSchema', 'bigquery.TableFieldSchema'], *, ignore_descriptions: bool=False, ignore_field_order: bool=False) -> bool:
    """Check whether schemas are equivalent.

    This comparison function differs from using == to compare TableSchema
    because it ignores categories, policy tags, descriptions (optionally),
    and field ordering (optionally). Type aliases (e.g. INTEGER vs INT64)
    are treated as equal.

    Args:
        left: One schema to compare.
        right: The other schema to compare.
        ignore_descriptions (bool): (optional) Whether or not to ignore
            field descriptions when comparing. Defaults to False.
        ignore_field_order (bool): (optional) Whether or not to ignore
            struct field order when comparing. Defaults to False.

    Returns:
        bool: True if the schemas are equivalent, False otherwise.
    """
    if type(left) != type(right) or not isinstance(left, (bigquery.TableSchema, bigquery.TableFieldSchema)):
        return False
    if isinstance(left, bigquery.TableFieldSchema):
        if left.name != right.name:
            return False
        if left.type != right.type:
            # Accept the legacy/standard SQL spellings of the same type.
            if sorted((left.type, right.type)) not in (['BOOL', 'BOOLEAN'], ['FLOAT', 'FLOAT64'], ['INT64', 'INTEGER'], ['RECORD', 'STRUCT']):
                return False
        if left.mode != right.mode:
            return False
        if not ignore_descriptions and left.description != right.description:
            return False
    # Recurse into sub-fields for top-level schemas and struct fields.
    if isinstance(left, bigquery.TableSchema) or left.type in ('RECORD', 'STRUCT'):
        if len(left.fields) != len(right.fields):
            return False
        if ignore_field_order:
            left_fields = sorted(left.fields, key=lambda field: field.name)
            right_fields = sorted(right.fields, key=lambda field: field.name)
        else:
            left_fields = left.fields
            right_fields = right.fields
        for left_field, right_field in zip(left_fields, right_fields):
            if not check_schema_equal(left_field, right_field, ignore_descriptions=ignore_descriptions, ignore_field_order=ignore_field_order):
                return False
    return True | Check whether schemas are equivalent.
This comparison function differs from using == to compare TableSchema
because it ignores categories, policy tags, descriptions (optionally), and
field ordering (optionally).
Args:
left (~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema, ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableFieldSchema):
One schema to compare.
right (~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableSchema, ~apache_beam.io.gcp.internal.clients.bigquery.bigquery_v2_messages.TableFieldSchema):
The other schema to compare.
ignore_descriptions (bool): (optional) Whether or not to ignore field
descriptions when comparing. Defaults to False.
ignore_field_order (bool): (optional) Whether or not to ignore struct field
order when comparing. Defaults to False.
Returns:
bool: True if the schemas are equivalent, False otherwise. | github-repos |
def extrapolate_points(points, n_points):
    """Extrapolate a number of points, based on the first ones.

    Args:
        points (:obj:`list` of :obj:`Point`): points to base the
            extrapolation on; only the first ``n_points`` are used.
        n_points (int): number of points to extrapolate.

    Returns:
        :obj:`list` of :obj:`Point`: the extrapolated points.
    """
    sample = points[:n_points]

    # Average the deltas (previous minus current) between consecutive points.
    lat_deltas = []
    lon_deltas = []
    previous = None
    for point in sample:
        if previous is not None:
            lat_deltas.append(previous.lat - point.lat)
            lon_deltas.append(previous.lon - point.lon)
        previous = point

    mean_dt = np.mean([p.dt for p in sample])
    mean_lon = np.mean(lon_deltas)
    mean_lat = np.mean(lat_deltas)

    # Walk forward from the first point, repeatedly applying the mean deltas.
    generated = []
    anchor = points[0]
    for _ in range(n_points):
        new_point = Point(anchor.lat + mean_lat, anchor.lon + mean_lon, None)
        new_point.dt = mean_dt
        generated.append(new_point)
        anchor = new_point
    return generated
Args:
points (:obj:`list` of :obj:`Point`)
n_points (int): number of points to extrapolate
Returns:
:obj:`list` of :obj:`Point` | juraj-google-style |
def __lt__(self, other):
    """Test if self is less than an object of the same class.

    Args:
        other: The object to compare against.

    Returns:
        True if self is less than other; else False.

    Raises:
        TypeError: Raised (by Python, via NotImplemented) if the objects
            are not of the same class.
    """
    if self.__class__ is not other.__class__:
        # Let Python fall back to the reflected operation / TypeError.
        return NotImplemented
    return self._tp__get_typed_properties() < other._tp__get_typed_properties()
Args:
other: The object to compare against.
Returns:
True if self is less than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class. | juraj-google-style |
def get_feature_variable_double(self, feature_key, variable_key, user_id, attributes=None):
    """Returns value for a certain double variable attached to a feature flag.

    Args:
        feature_key: Key of the feature whose variable's value is being
            accessed.
        variable_key: Key of the variable whose value is to be accessed.
        user_id: ID for user.
        attributes: Dict representing user attributes.

    Returns:
        Double value of the variable. None if the feature key or variable
        key is invalid, or if there is a mismatch with the variable type.
    """
    return self._get_feature_variable_for_type(
        feature_key, variable_key, entities.Variable.Type.DOUBLE,
        user_id, attributes)
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
Double value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable. | codesearchnet |
def symbol(name: str=None, symbol_type: Type[Symbol]=Symbol) -> 'SymbolWildcard':
    """Create a `SymbolWildcard` that matches a single `Symbol` argument.

    Args:
        name: Optional variable name for the wildcard. May also be a
            `Symbol` subclass, which is then used as the symbol type.
        symbol_type: An optional subclass of `Symbol` to further limit
            which kind of symbols are matched by the wildcard.

    Returns:
        A `SymbolWildcard` that matches the *symbol_type*.
    """
    # Shorthand: symbol(SomeSymbolSubclass) treats the first argument as
    # the type, provided no explicit symbol_type was given.
    name_is_type = (isinstance(name, type) and issubclass(name, Symbol)
                    and symbol_type is Symbol)
    if name_is_type:
        return SymbolWildcard(name)
    return SymbolWildcard(symbol_type, variable_name=name)
Args:
name:
Optional variable name for the wildcard.
symbol_type:
An optional subclass of `Symbol` to further limit which kind of symbols are
matched by the wildcard.
Returns:
A `SymbolWildcard` that matches the *symbol_type*. | juraj-google-style |
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
    """Extracts events from a Windows Registry key.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    """
    self._ParseLogonApplications(parser_mediator, registry_key)
    self._ParseRegisteredDLLs(parser_mediator, registry_key) | Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key. | juraj-google-style |
def HandleMessage(self, message):
    """Entry point for processing jobs.

    Args:
        message: The GrrMessage that was delivered from the server.

    Raises:
        RuntimeError: The client action requested was not found.
    """
    self._is_active = True
    try:
        action_cls = actions.ActionPlugin.classes.get(message.name)
        if action_cls is None:
            raise RuntimeError("Client action %r not known" % message.name)
        action = action_cls(grr_worker=self)
        # Write the message to the transaction log before executing so a
        # crash mid-action can be detected; cleared only on success.
        self.transaction_log.Write(message)
        action.Progress()
        action.Execute(message)
        self.transaction_log.Clear()
    finally:
        self._is_active = False
        # Signal the stats collector that it may send an update now.
        self.stats_collector.RequestSend() | Entry point for processing jobs.
Args:
message: The GrrMessage that was delivered from the server.
Raises:
RuntimeError: The client action requested was not found. | juraj-google-style |
def licenses(self):
    """Returns a string of the built-in licenses the J-Link has.

    Args:
        self (JLink): the ``JLink`` instance

    Returns:
        String of the contents of the built-in licenses the J-Link has.

    Raises:
        JLinkException: if the DLL call reports an error.
    """
    buf = (ctypes.c_char * self.MAX_BUF_SIZE)()
    result = self._dll.JLINK_GetAvailableLicense(buf, self.MAX_BUF_SIZE)
    if result < 0:
        raise errors.JLinkException(result)
    # The DLL NUL-terminates the buffer; decode up to the terminator.
    return ctypes.string_at(buf).decode()
Args:
self (JLink): the ``JLink`` instance
Returns:
String of the contents of the built-in licenses the J-Link has. | codesearchnet |
def dump_next(self):
    """Dump the next reading from the stream.

    Returns:
        IOTileReading: The next reading, None if the stream is exhausted,
        or a packed error if the stream walker was never initialized.
    """
    walker = self.dump_walker
    if walker is None:
        return pack_error(ControllerSubsystem.SENSOR_LOG,
                          SensorLogError.STREAM_WALKER_NOT_INITIALIZED)
    try:
        return walker.pop()
    except StreamEmptyError:
        # An empty stream is not an error condition for dumping.
        return None
Returns:
IOTileReading: The next reading or None if there isn't one | codesearchnet |
def encrypt_block(self, plainText):
    """Encrypt a 16-byte block of data.

    NOTE: This function was formerly called `encrypt`, but was changed
    when support for encrypting arbitrary-length strings was added.

    Args:
        plainText (str): 16-byte data.

    Returns:
        16-byte str.

    Raises:
        TypeError: if the CamCrypt object has not been initialized.
        ValueError: if `plainText` is not BLOCK_SIZE (i.e. 16) bytes.
    """
    if not self.initialized:
        raise TypeError("CamCrypt object has not been initialized")
    if len(plainText) != BLOCK_SIZE:
        raise ValueError("plainText must be %d bytes long (received %d bytes)" %
                         (BLOCK_SIZE, len(plainText)))
    # Output buffer the native routine writes the ciphertext into.
    out = ctypes.create_string_buffer(BLOCK_SIZE)
    self.encblock(self.bitlen, plainText, self.keytable, out)
    return out.raw
NOTE: This function was formerly called `encrypt`, but was changed when
support for encrypting arbitrary-length strings was added.
Args:
plainText (str): 16-byte data.
Returns:
16-byte str.
Raises:
TypeError if CamCrypt object has not been initialized.
ValueError if `plainText` is not BLOCK_SIZE (i.e. 16) bytes. | juraj-google-style |
def _new(self, name, **kwargs):
    """Create a new JSSObject with name and "keys".

    Generate a default XML template for this object, based on the class
    attribute "data_keys".

    Args:
        name: String name of the object to use as the object's name
            property.
        kwargs: Accepted keyword args can be viewed by checking the
            "data_keys" class attribute. Values will be cast to string.
            Kwargs that aren't in the object's keys are ignored.
    """
    if self._name_path:
        # The name element lives at a nested path; create each level of
        # the path, then set the text on the innermost element.
        parent = self
        for path_element in self._name_path.split('/'):
            self._set_xml_from_keys(parent, (path_element, None))
            parent = parent.find(path_element)
        parent.text = name
    else:
        ElementTree.SubElement(self, 'name').text = name
    # Populate the remaining template elements from data_keys.
    for item in self.data_keys.items():
        self._set_xml_from_keys(self, item, **kwargs) | Create a new JSSObject with name and "keys".
Generate a default XML template for this object, based on
the class attribute "keys".
Args:
name: String name of the object to use as the
object's name property.
kwargs:
Accepted keyword args can be viewed by checking the
"data_keys" class attribute. Typically, they include all
top-level keys, and non-duplicated keys used elsewhere.
Values will be cast to string. (Int 10, bool False
become string values "10" and "false").
Ignores kwargs that aren't in object's keys attribute. | codesearchnet |
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # One unary-unary stub per SessionEntityTypes RPC method.
    self.ListSessionEntityTypes = channel.unary_unary(
        '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/ListSessionEntityTypes',
        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.ListSessionEntityTypesRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.ListSessionEntityTypesResponse.FromString,
    )
    self.GetSessionEntityType = channel.unary_unary(
        '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/GetSessionEntityType',
        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.GetSessionEntityTypeRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.SessionEntityType.FromString,
    )
    self.CreateSessionEntityType = channel.unary_unary(
        '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/CreateSessionEntityType',
        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.CreateSessionEntityTypeRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.SessionEntityType.FromString,
    )
    self.UpdateSessionEntityType = channel.unary_unary(
        '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/UpdateSessionEntityType',
        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.UpdateSessionEntityTypeRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.SessionEntityType.FromString,
    )
    # Delete returns the well-known Empty message.
    self.DeleteSessionEntityType = channel.unary_unary(
        '/google.cloud.dialogflow.v2beta1.SessionEntityTypes/DeleteSessionEntityType',
        request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_session__entity__type__pb2.DeleteSessionEntityTypeRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
    ) | Constructor.
Args:
channel: A grpc.Channel. | juraj-google-style |
def upgrade(**kwargs):
    """Upgrade all software. Currently not implemented.

    Kwargs:
        saltenv (str): The salt environment to use. Default ``base``.
        refresh (bool): Refresh package metadata. Default ``True``.

    .. note::
        This feature is not yet implemented for Windows.

    Returns:
        dict: Empty dict, until implemented

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.upgrade
    """
    refresh = salt.utils.data.is_true(kwargs.get('refresh', True))
    saltenv = kwargs.get('saltenv', 'base')
    # Warn once, including the options that were requested but ignored
    # (the original logged the same not-implemented warning twice).
    log.warning('pkg.upgrade not implemented on Windows yet '
                'refresh:%s saltenv:%s', refresh, saltenv)
    return {}
Kwargs:
saltenv (str): The salt environment to use. Default ``base``.
refresh (bool): Refresh package metadata. Default ``True``.
.. note::
This feature is not yet implemented for Windows.
Returns:
dict: Empty dict, until implemented
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade | codesearchnet |
def __init__(self, parser, codegen, writer):
    """Creates an instance of *MibCompiler* class.

    Args:
        parser: ASN.1 MIB parser object
        codegen: MIB transformation object
        writer: transformed MIB storing object
    """
    self._parser = parser
    self._codegen = codegen
    # Symbol-table generator used for the first compilation pass.
    self._symbolgen = SymtableCodeGen()
    self._writer = writer
    # Registered MIB sources, searchers and borrowers; populated later
    # via the compiler's add* methods.
    self._sources = []
    self._searchers = []
    self._borrowers = [] | Creates an instance of *MibCompiler* class.
Args:
parser: ASN.1 MIB parser object
codegen: MIB transformation object
writer: transformed MIB storing object | juraj-google-style |
def from_signature(message, signature):
    """Attempts to create a PublicKey object by deriving it
    from the message and signature.

    Args:
        message (bytes): The message to be verified.
        signature (Signature): The signature for message.
            The recovery_id must not be None!

    Returns:
        PublicKey: A PublicKey object derived from the signature, if it
            exists. None otherwise.
    """
    if signature.recovery_id is None:
        raise ValueError('The signature must have a recovery_id.')
    message_bytes = get_bytes(message)
    candidates = bitcoin_curve.recover_public_key(
        message_bytes, signature, signature.recovery_id)
    # Keep only the candidate whose recovery id matches the signature's.
    for candidate, recovery_id in candidates:
        if recovery_id == signature.recovery_id:
            return PublicKey(candidate.x, candidate.y)
    return None
return None | Attempts to create PublicKey object by deriving it
from the message and signature.
Args:
message (bytes): The message to be verified.
signature (Signature): The signature for message.
The recovery_id must not be None!
Returns:
PublicKey:
A PublicKey object derived from the
signature, it it exists. None otherwise. | codesearchnet |
def replace_dimensions(tensor_or_shape, old_dim_or_dims, new_dim_or_dims):
    """Replace dimensions in a Tensor or Shape.

    old_dim_or_dims consists of a single dimension or a list of dimensions
    that must occur consecutively in the input shape. They are replaced
    by the dimensions in new_dim_or_dims.

    Args:
        tensor_or_shape: a Tensor or a Shape
        old_dim_or_dims: a Dimension or a list of Dimensions
        new_dim_or_dims: a Dimension or a list of Dimensions

    Returns:
        a new Tensor or a Shape

    Raises:
        ValueError: if the arguments have the wrong type, or if
            old_dim_or_dims is not a consecutive subsequence of the
            input's dimensions.
    """
    if isinstance(tensor_or_shape, Tensor):
        # For a Tensor, compute the replacement shape and reshape to it.
        return reshape(tensor_or_shape, replace_dimensions(
            tensor_or_shape.shape, old_dim_or_dims, new_dim_or_dims))
    if not isinstance(tensor_or_shape, Shape):
        raise ValueError(
            "tensor_or_shape must be a Tensor or Shape got %s" % (tensor_or_shape,))
    in_dims = tensor_or_shape.dims
    # Normalize single Dimensions into one-element lists.
    if isinstance(old_dim_or_dims, Dimension):
        old_dim_or_dims = [old_dim_or_dims]
    if isinstance(new_dim_or_dims, Dimension):
        new_dim_or_dims = [new_dim_or_dims]
    if not isinstance(old_dim_or_dims, list) or not old_dim_or_dims:
        raise ValueError(
            "old_dim_or_dims must be a Dimension or a list of Dimension got %s"
            % (old_dim_or_dims,))
    if not isinstance(new_dim_or_dims, list) or not new_dim_or_dims:
        raise ValueError(
            "new_dim_or_dims must be a Dimension or a list of Dimension got %s"
            % (new_dim_or_dims,))
    try:
        # The old dims must appear as one consecutive run within in_dims.
        positions = [in_dims.index(d) for d in old_dim_or_dims]
        pos = positions[0]
        if positions != list(range(pos, pos + len(positions))):
            raise ValueError()
    except ValueError:
        raise ValueError(
            "old_dim_or_dims must be a subsequence of the input's dimensions"
            " old_dim_or_dims=%s input's dimensions=%s" %
            (old_dim_or_dims, in_dims))
    return Shape(in_dims[:pos] + new_dim_or_dims +
                 in_dims[pos + len(old_dim_or_dims):]) | Replace dimensions in a Tensor or Shape.
old_dim_or_dims consists of a single dimension or a list of dimensions
that must occur consecutively in the input shape. They are replaced
by the dimensions in new_dim_or_dims.
Args:
tensor_or_shape: a Tensor or a Shape
old_dim_or_dims: a Dimension or a list of Dimensions
new_dim_or_dims: a Dimensions or a list of Dimensions
Returns:
a new Tensor or a Shape | juraj-google-style |
def _process_single_batch(model, inputs, targets, output_loss_metrics=None, sample_weights=None, training=False):
    """Calculate the loss and gradient for one input batch.

    The model weights are updated if training is set to True.

    Args:
        model: Model whose loss has to be calculated.
        inputs: List of input arrays.
        targets: List of target arrays.
        output_loss_metrics: List of metrics that are used to aggregate
            output loss values.
        sample_weights: Optional list of sample weight arrays.
        training: Whether the weights of the model are updated. 'fit'
            methods will set this to True while 'evaluate' methods will
            set this to False.

    Returns:
        output of the model, total loss, and the loss and mask associated
        with each output.

    Raises:
        ValueError: If the model has no loss to optimize.
    """
    with backend.eager_learning_phase_scope(1 if training else 0), training_utils.RespectCompiledTrainableState(model):
        with GradientTape() as tape:
            outs, total_loss, output_losses, masks = _model_loss(model, inputs, targets, output_loss_metrics=output_loss_metrics, sample_weights=sample_weights, training=training)
            # Loss scaling avoids float16 gradient underflow; gradients
            # are unscaled again before being applied.
            if isinstance(model.optimizer, loss_scale_optimizer.LossScaleOptimizer):
                scaled_total_loss = model.optimizer.get_scaled_loss(total_loss)
            else:
                scaled_total_loss = total_loss
        if training:
            trainable_weights = model.trainable_weights
            if trainable_weights:
                if hasattr(model, '_backwards'):
                    # A custom backwards pass takes precedence.
                    model._backwards(tape, scaled_total_loss)
                else:
                    grads = tape.gradient(scaled_total_loss, trainable_weights)
                    if isinstance(model.optimizer, loss_scale_optimizer.LossScaleOptimizer):
                        grads = model.optimizer.get_unscaled_gradients(grads)
                    model.optimizer.apply_gradients(zip(grads, trainable_weights))
            else:
                logging.warning('The list of trainable weights is empty. Make sure that you are not setting model.trainable to False before compiling the model.')
        return (outs, total_loss, output_losses, masks) | Calculate the loss and gradient for one input batch.
The model weights are updated if training is set to True.
Args:
model: Model whose loss has to be calculated.
inputs: List of input arrays.
targets: List of target arrays.
output_loss_metrics: List of metrics that are used to aggregated output
loss values.
sample_weights: Optional list of sample weight arrays.
training: The boolean represents if the weights of the model are updated.
'fit' methods will set this to True while 'evaluate' methods will
set this to False.
Returns:
output of the model, total loss, the loss and the mask
associated with each output.
Raises:
ValueError: If the model has no loss to optimize. | github-repos |
def __init__(self, value=None, tag=enums.Tags.DEFAULT):
    """Create a DateTime.

    Args:
        value (int): The value of the DateTime in number of seconds since
            the Epoch. See the time package for additional information.
            Optional, defaults to the current time.
        tag (Tags): An enumeration defining the tag of the DateTime.
            Optional, defaults to Tags.DEFAULT.
    """
    if value is None:
        # Default to "now", truncated to whole seconds.
        value = int(time.time())
    super(DateTime, self).__init__(value, tag)
    self.type = enums.Types.DATE_TIME | Create a DateTime.
Args:
value (int): The value of the DateTime in number of seconds since
the Epoch. See the time package for additional information.
Optional, defaults to the current time.
tag (Tags): An enumeration defining the tag of the LongInteger.
Optional, defaults to Tags.DEFAULT. | juraj-google-style |
def UploadFile(self, fd, offset=0, amount=None):
    """Uploads chunks of a given file descriptor to the transfer store flow.

    Args:
        fd: A file descriptor to upload.
        offset: An integer offset at which the file upload should start on.
        amount: An upper bound on number of bytes to stream. If it is
            None then the whole file is uploaded.

    Returns:
        A `BlobImageDescriptor` object.
    """
    return self._UploadChunkStream(
        self._streamer.StreamFile(fd, offset=offset, amount=amount)) | Uploads chunks of a given file descriptor to the transfer store flow.
Args:
fd: A file descriptor to upload.
offset: An integer offset at which the file upload should start on.
amount: An upper bound on number of bytes to stream. If it is `None` then
the whole file is uploaded.
Returns:
A `BlobImageDescriptor` object. | juraj-google-style |
def __one_equals_true(value):
    """Test for ``1`` as a number or a string and return ``True`` if it is.

    Args:
        value: string or number or None.

    Returns:
        bool: ``True`` if 1 otherwise ``False``.
    """
    if isinstance(value, six.integer_types):
        return value == 1
    if isinstance(value, six.string_types):
        # Require an all-digit-prefixed string whose text form is '1'.
        is_digits = re.match('\\d+', value, flags=(re.IGNORECASE + re.UNICODE)) is not None
        return is_digits and six.text_type(value) == '1'
    return False
Args:
value: string or number or None.
Returns:
bool: ``True`` if 1 otherwise ``False``. | codesearchnet |
def get_module(dir_path: str, relative_to_dir: str) -> str:
    """Get module that corresponds to path relative to relative_to_dir.

    Args:
        dir_path: Path to directory.
        relative_to_dir: Get module relative to this directory.

    Returns:
        Name of module that corresponds to the given directory.
    """
    relative = dir_path[len(relative_to_dir):]
    # Normalize OS-specific separators, then turn path components into a
    # dotted module path with no leading/trailing dots.
    normalized = relative.replace(os.sep, '/')
    return normalized.replace('/', '.').strip('.')
Args:
dir_path: Path to directory.
relative_to_dir: Get module relative to this directory.
Returns:
Name of module that corresponds to the given directory. | github-repos |
def export_model(model, model_type, export_dir, model_column_fn):
    """Export to SavedModel format.

    Args:
        model: Estimator object
        model_type: string indicating model type. "wide", "deep" or
            "wide_deep"
        export_dir: directory to export the model.
        model_column_fn: Function to generate model feature columns.
    """
    wide_columns, deep_columns = model_column_fn()
    # Select the feature columns matching the requested model flavour.
    if model_type == 'wide':
        columns = wide_columns
    elif model_type == 'deep':
        columns = deep_columns
    else:
        columns = wide_columns + deep_columns
    feature_spec = tf.feature_column.make_parse_example_spec(columns)
    serving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    model.export_savedmodel(export_dir, serving_input_fn, strip_default_attrs=True)
Args:
model: Estimator object
model_type: string indicating model type. "wide", "deep" or "wide_deep"
export_dir: directory to export the model.
model_column_fn: Function to generate model feature columns. | codesearchnet |
def latents_to_observations(self, latent_means, latent_covs):
    """Push latent means and covariances forward through the observation model.

    Args:
        latent_means: float `Tensor` of shape
            `[..., num_timesteps, latent_size]`
        latent_covs: float `Tensor` of shape
            `[..., num_timesteps, latent_size, latent_size]`.

    Returns:
        observation_means: float `Tensor` of shape
            `[..., num_timesteps, observation_size]`
        observation_covs: float `Tensor` of shape
            `[..., num_timesteps, observation_size, observation_size]`
    """
    with tf.name_scope('latents_to_observations'):
        pushforward_latents_step = build_pushforward_latents_step(self.get_observation_matrix_for_timestep, self.get_observation_noise_for_timestep)
        # Move the time dimension to axis 0 so tf.scan can iterate over it.
        latent_means = distribution_util.move_dimension(latent_means, source_idx=(- 2), dest_idx=0)
        latent_means = latent_means[(..., tf.newaxis)]
        latent_covs = distribution_util.move_dimension(latent_covs, source_idx=(- 3), dest_idx=0)
        (initial_observation_mean, initial_observation_cov) = pushforward_latents_step(_=None, latent_t_mean_cov=(self.initial_step, latent_means[self.initial_step], latent_covs[self.initial_step]))
        timesteps = tf.range(self.initial_step, (self.initial_step + self.num_timesteps))
        (observation_means, observation_covs) = tf.scan(pushforward_latents_step, elems=(timesteps, latent_means, latent_covs), initializer=(initial_observation_mean, initial_observation_cov), parallel_iterations=10000)
        # Restore the time dimension to its original position.
        observation_means = distribution_util.move_dimension(observation_means[(..., 0)], source_idx=0, dest_idx=(- 2))
        observation_covs = distribution_util.move_dimension(observation_covs, source_idx=0, dest_idx=(- 3))
        return (observation_means, observation_covs) | Push latent means and covariances forward through the observation model.
Args:
latent_means: float `Tensor` of shape `[..., num_timesteps, latent_size]`
latent_covs: float `Tensor` of shape
`[..., num_timesteps, latent_size, latent_size]`.
Returns:
observation_means: float `Tensor` of shape
`[..., num_timesteps, observation_size]`
observation_covs: float `Tensor` of shape
`[..., num_timesteps, observation_size, observation_size]` | codesearchnet |
def _CreateWindowsPathResolver(self, file_system, mount_point, environment_variables):
    """Creates a Windows path resolver and sets selected environment variables.

    Only the 'systemroot' and 'userprofile' variables are forwarded to the
    resolver; all other environment variables are ignored.

    Args:
        file_system (dfvfs.FileSystem): file system.
        mount_point (dfvfs.PathSpec): mount point path specification.
        environment_variables (list[EnvironmentVariableArtifact]): environment
            variables, or None for none.

    Returns:
        dfvfs.WindowsPathResolver: Windows path resolver.
    """
    path_resolver = windows_path_resolver.WindowsPathResolver(file_system, mount_point)
    for variable in (environment_variables or []):
        if variable.name.lower() in ('systemroot', 'userprofile'):
            path_resolver.SetEnvironmentVariable(variable.name, variable.value)
    return path_resolver
Args:
file_system (dfvfs.FileSystem): file system.
mount_point (dfvfs.PathSpec): mount point path specification.
environment_variables (list[EnvironmentVariableArtifact]): environment
variables.
Returns:
dfvfs.WindowsPathResolver: Windows path resolver. | codesearchnet |
def flatten(vari):
    """Flatten a shapeable quantity to one dimension.

    Args:
        vari (chaospy.poly.base.Poly, numpy.ndarray): Shapeable input quantity.

    Returns:
        (chaospy.poly.base.Poly, numpy.ndarray): Same type as ``vari`` with
        ``len(Q.shape) == 1``.
    """
    if not isinstance(vari, Poly):
        # Plain arrays (or array-likes) flatten directly through numpy.
        return numpy.array(vari).flatten()
    flat_size = int(numpy.prod(vari.shape))
    return reshape(vari, (flat_size,))
Args:
vari (chaospy.poly.base.Poly, numpy.ndarray):
Shapeable input quantity.
Returns:
(chaospy.poly.base.Poly, numpy.ndarray):
Same type as ``vari`` with `len(Q.shape)==1`.
Examples:
>>> P = chaospy.reshape(chaospy.prange(4), (2,2))
>>> print(P)
[[1, q0], [q0^2, q0^3]]
>>> print(chaospy.flatten(P))
[1, q0, q0^2, q0^3] | juraj-google-style |
def subnet_range(ip_net, cidr):
    """Return subnet information for an IP address and CIDR pair.

    Args:
        ip_net: Unicast or multicast IPv4 address or subnet in the format
            192.168.1.1 or 239.1.1.1.
        cidr: CIDR value of 1 to 32.

    Returns:
        dict: keys 'IP', 'NET', 'CIDR', 'RANGE', 'BCAST', 'MASK',
        'INVMASK' and 'CIDRVAL'.
    """
    subnets_dict = dict()
    subnet = whole_subnet_maker(ip_net, cidr)
    subnets_dict['IP'] = ip_net
    subnets_dict['NET'] = subnet
    subnets_dict['CIDR'] = '%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr)
    # Each branch handles the octet in which the mask boundary falls, using
    # per-CIDR octet values from the module-level __mask_conversion table
    # (assumed to map CIDR -> OCT1..OCT4/MASK/INVMASK/CIDR -- TODO confirm).
    if int(cidr) >= 24:
        subnet_split = subnet.split('.')
        first_ip = int(subnet_split[3]) + 1
        # NOTE(review): this branch uses 253 where the other branches use
        # 254 -- verify the last-usable-host arithmetic is intended.
        last_ip = (int(subnet_split[3]) + 1) + (253 - int(__mask_conversion[int(cidr)]['OCT4']))
        bcast_ip = (int(subnet_split[3]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT4']))
        temp = '%s.%s.%s.' % (subnet_split[0], subnet_split[1], subnet_split[2])
        subnets_dict['RANGE'] = '%s%i to %s%i' % (temp, first_ip, temp, last_ip)
        subnets_dict['BCAST'] = '%s%i' % (temp, bcast_ip)
        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']
    elif int(cidr) >= 16:
        subnet_split = subnet.split('.')
        # NOTE(review): unlike the /24+ branch, first_ip is not incremented
        # here -- confirm whether using the network's own third octet is
        # intended.
        first_ip = int(subnet_split[2])
        last_ip = (int(subnet_split[2]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT3']))
        bcast_ip = (int(subnet_split[2]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT3']))
        temp = '%s.%s.' % (subnet_split[0], subnet_split[1])
        subnets_dict['RANGE'] = '%s%i.1 to %s%i.254' % (temp, first_ip, temp, last_ip)
        subnets_dict['BCAST'] = '%s%i.255' % (temp, bcast_ip)
        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']
    elif int(cidr) >= 8:
        subnet_split = subnet.split('.')
        first_ip = int(subnet_split[1])
        last_ip = (int(subnet_split[1]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT2']))
        bcast_ip = (int(subnet_split[1]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT2']))
        temp = '%s.' % (subnet_split[0],)
        subnets_dict['RANGE'] = '%s%i.0.1 to %s%i.255.254' % (temp, first_ip, temp, last_ip)
        subnets_dict['BCAST'] = '%s%i.255.255' % (temp, bcast_ip)
        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']
    elif int(cidr) >= 1:
        subnet_split = subnet.split('.')
        first_ip = int(subnet_split[0])
        last_ip = (int(subnet_split[0]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT1']))
        bcast_ip = (int(subnet_split[0]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT1']))
        subnets_dict['RANGE'] = '%i.0.0.1 to %i.255.255.254' % (first_ip, last_ip)
        subnets_dict['BCAST'] = '%i.255.255.255' % (bcast_ip,)
        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']
    return subnets_dict
Args:
ip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1
cidr: CIDR value of 1 to 32
Returns: returns a dictionary of info | juraj-google-style |
def wait_for(port_num, timeout):
    """Wait until a TCP server accepts connections on ``port_num``.

    Polls by attempting to connect to ``_host():port_num`` roughly every
    0.1 seconds until a connection succeeds or ``timeout`` seconds elapse.

    Args:
        port_num: TCP port number to probe.
        timeout: Maximum number of seconds to keep retrying.

    Returns:
        True if a connection was established before the deadline,
        False otherwise.
    """
    # Lazy %-style args instead of str.format(**locals()), which is fragile
    # under renames and formats eagerly even when DEBUG logging is disabled.
    logger.debug("wait for %s", port_num)
    deadline = time.time() + timeout
    poll_interval = 0.1
    while time.time() < deadline:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((_host(), port_num))
            return True
        except (IOError, socket.error):
            time.sleep(poll_interval)
        finally:
            # Close the socket whether the connect succeeded or failed.
            s.close()
    return False
Args:
port_num - port number
timeout - specify how long, in seconds, to keep retrying before the wait times out
return True if process started, return False if not | juraj-google-style |
def str_to_inet(address):
    """Convert a string IP address to a packed inet struct.

    Args:
        address (str): Dotted-quad IPv4 or colon-separated IPv6 text.

    Returns:
        inet: Packed network-byte-order address.
    """
    try:
        return socket.inet_pton(socket.AF_INET, address)
    except socket.error:
        pass
    # Not IPv4 -- let any IPv6 parse error propagate to the caller.
    return socket.inet_pton(socket.AF_INET6, address)
Args:
address (str): String representation of address
Returns:
inet: Inet network address | codesearchnet |
def add(x1, x2, output_shape=None, name=None):
    """Binary addition with broadcasting.

    Args:
        x1: a Tensor
        x2: a Tensor or a scalar
        output_shape: an optional Shape
        name: an optional string

    Returns:
        a Tensor
    """
    output_shape = convert_to_shape(output_shape)
    # A scalar second operand needs no broadcasting machinery.
    if not isinstance(x2, Tensor):
        return ScalarAddOperation(x1, x2).outputs[0]
    with tf.name_scope(name, default_name='add'):
        (x1, x2) = binary_arguments_to_tensors(x1, x2)
        broadcast_shape = _infer_binary_broadcast_shape(x1.shape, x2.shape, output_shape)
        return AddOperation(x1, x2, output_shape=broadcast_shape).outputs[0]
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor | codesearchnet |
def resize(self, image: 'torch.Tensor', size: SizeDict, size_divisor: int=32, interpolation: 'F.InterpolationMode'=None, antialias: bool=True, **kwargs) -> 'torch.Tensor':
    """Resize an image.

    Resizes the shorter side of the image to ``size.shortest_edge`` while
    preserving the aspect ratio; the longer side is capped at
    ``int(shortest_edge * 1333 / 800)``, and the result is snapped to a
    multiple of ``size_divisor``.

    Args:
        image (`torch.Tensor`): Image to resize.
        size (`SizeDict`): Must contain the key ``shortest_edge``.
        size_divisor (`int`, *optional*, defaults to 32): The image is
            resized to a size that is a multiple of this value.
        interpolation (`InterpolationMode`, *optional*): Filter to use when
            resizing; defaults to bilinear.
        antialias (`bool`, *optional*, defaults to `True`): Whether to
            apply antialiasing.

    Returns:
        `torch.Tensor`: The resized image.
    """
    interpolation = F.InterpolationMode.BILINEAR if interpolation is None else interpolation
    if not size.shortest_edge:
        raise ValueError(f'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}')
    shortest = size.shortest_edge
    # Cap the longer side at the 1333/800 ratio used by the original model.
    longest = int(1333 / 800 * shortest)
    target_size = get_resize_output_image_size(image, shorter=shortest, longer=longest, size_divisor=size_divisor)
    return F.resize(image, target_size, interpolation=interpolation, antialias=antialias)
Resizes the shorter side of the image to `size["shortest_edge"]` while preserving the aspect ratio. If the
longer side is larger than the max size `(int(`size["shortest_edge"]` * 1333 / 800))`, the longer side is then
resized to the max size while preserving the aspect ratio.
Args:
image (`torch.Tensor`):
Image to resize.
size (`SizeDict`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
size_divisor (`int`, *optional*, defaults to 32):
The image is resized to a size that is a multiple of this value.
resample (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
`InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.
Returns:
`torch.Tensor`: The resized image. | github-repos |
def construct_policy(app='coreforrest', env='dev', group='forrest', region='us-east-1', pipeline_settings=None):
    """Assemble a custom IAM Policy for ``app``.

    Args:
        app (str): Name of Spinnaker Application.
        env (str): Environment/Account in AWS.
        group (str): Application group/namespace.
        region (str): AWS region.
        pipeline_settings (dict): Settings from *pipeline.json*.

    Returns:
        json: Custom IAM Policy for ``app``, or None when no *services*
        have been defined in *pipeline.json*.
    """
    LOG.info('Create custom IAM Policy for %s.', app)
    requested = pipeline_settings.get('services', {})
    LOG.debug('Found requested services: %s', requested)
    requested = auto_service(pipeline_settings=pipeline_settings, services=requested)
    if not requested:
        return None
    account_number = get_env_credential(env=env)['accountId']
    statements = []
    for (service, value) in requested.items():
        # Normalize the service value: True means "no items", a bare string
        # is a single item, anything else is already a list of items.
        if value is True:
            items = []
        elif isinstance(value, str):
            items = [value]
        else:
            items = value
        statements.extend(render_policy_template(account_number=account_number, app=app, env=env, group=group, items=items, pipeline_settings=pipeline_settings, region=region, service=service))
    if not statements:
        LOG.info('No services defined for %s.', app)
        return None
    return get_template('infrastructure/iam/wrapper.json.j2', statements=json.dumps(statements))
Args:
app (str): Name of Spinnaker Application.
env (str): Environment/Account in AWS
group (str):A Application group/namespace
region (str): AWS region
pipeline_settings (dict): Settings from *pipeline.json*.
Returns:
json: Custom IAM Policy for _app_.
None: When no *services* have been defined in *pipeline.json*. | codesearchnet |
def is_packet_trace(path):
    """Determine if a file is a packet trace that is supported by this module.

    A trace is recognized by the magic number in its first four bytes.

    Args:
        path (str): path to the trace file.

    Returns:
        bool: True if the file is a valid packet trace.
    """
    path = os.path.abspath(path)
    if not os.path.isfile(path):
        return False
    # Only OSError (permissions, races, unreadable device) is expected here;
    # the previous bare `except` also hid programming errors. The context
    # manager guarantees the handle is closed even if read() fails.
    try:
        with open(path, 'rb') as trace_file:
            magic = trace_file.read(4)
    except OSError:
        return False
    return magic in FILE_TYPE_HANDLER
Args:
path (str): path to the trace file.
Returns:
bool: True if the file is a valid packet trace. | codesearchnet |
def blit(self, source, x=0, y=0, width=None, height=None, srcX=0, srcY=0, fg_alpha=1.0, bg_alpha=1.0):
    """Blit another console or Window onto the current console.

    By default it blits the entire source to the top-left corner.

    Args:
        source (Union[tdl.Console, tdl.Window]): The blitting source.
            A console can blit to itself without any problems.
        x (int): x-coordinate of this console to blit on.
        y (int): y-coordinate of this console to blit on.
        width (Optional[int]): Width of the rectangle, or None to extend as
            far as possible toward the bottom-right of the blit area.
        height (Optional[int]): Height of the rectangle.
        srcX (int): x-coordinate of the source region to blit.
        srcY (int): y-coordinate of the source region to blit.
        fg_alpha (float): The foreground alpha.
        bg_alpha (float): The background alpha.
    """
    # Fixed typo in the assertion message ('muse' -> 'must').
    assert isinstance(source, (Console, Window)), 'source must be a Window or Console instance'
    (x, y, width, height) = self._normalizeRect(x, y, width, height)
    (srcX, srcY, width, height) = source._normalizeRect(srcX, srcY, width, height)
    # Windows translate their local coordinates into parent-console space.
    (srcX, srcY) = source._translate(srcX, srcY)
    source = source.console
    (x, y) = self._translate(x, y)
    self = self.console
    if (self == source):
        # Self-blit: stage through a temporary console so the source region
        # is not overwritten while it is still being read.
        tmp = Console(width, height)
        _lib.TCOD_console_blit(source.console_c, srcX, srcY, width, height, tmp.console_c, 0, 0, fg_alpha, bg_alpha)
        _lib.TCOD_console_blit(tmp.console_c, 0, 0, width, height, self.console_c, x, y, fg_alpha, bg_alpha)
    else:
        _lib.TCOD_console_blit(source.console_c, srcX, srcY, width, height, self.console_c, x, y, fg_alpha, bg_alpha)
By default it blits the entire source to the topleft corner.
Args:
source (Union[tdl.Console, tdl.Window]): The blitting source.
A console can blit to itself without any problems.
x (int): x-coordinate of this console to blit on.
y (int): y-coordinate of this console to blit on.
width (Optional[int]): Width of the rectangle.
Can be None to extend as far as possible to the
bottom right corner of the blit area or can be a negative
number to be sized reltive to the total size of the
B{destination} console.
height (Optional[int]): Height of the rectangle.
srcX (int): x-coordinate of the source region to blit.
srcY (int): y-coordinate of the source region to blit.
fg_alpha (float): The foreground alpha. | codesearchnet |
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Write the data encoding the Credential struct to a stream.

    Args:
        output_stream (stream): A data stream in which to encode object
            data, supporting a write method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        ValueError: Raised if either the credential type or value are not
            defined.
    """
    if not self._credential_type:
        raise ValueError('Credential struct missing the credential type.')
    payload = BytearrayStream()
    self._credential_type.write(payload, kmip_version=kmip_version)
    if not self._credential_value:
        raise ValueError('Credential struct missing the credential value.')
    self._credential_value.write(payload, kmip_version=kmip_version)
    # The header length must reflect the encoded payload before the header
    # itself is written by the superclass.
    self.length = payload.length()
    super(Credential, self).write(output_stream, kmip_version=kmip_version)
    output_stream.write(payload.buffer)
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if either the credential type or value are not
defined. | codesearchnet |
def _get_string_match(self, key):
expression = '(?:\\s*)'.join(['^', 'define', '\\(', "'{}'".format(key), ',', "\\'(.*)\\'", '\\)', ';'])
pattern = re.compile(expression, re.MULTILINE)
return pattern.search(self._content) | Gets a MatchObject for the given key, assuming a string value.
Args:
key (str): Key of the property to look-up.
Return:
MatchObject: The discovered match. | codesearchnet |
def plan_scripts(self):
    """Gets the Plan Scripts API client, creating it lazily on first access.

    Returns:
        PlanScripts: the cached client instance.
    """
    client = self.__plan_scripts
    if not client:
        client = PlanScripts(self.__connection)
        self.__plan_scripts = client
    return client
Returns:
PlanScripts: | codesearchnet |
def on_test_batch_begin(self, batch, logs=None):
    """Called at the beginning of a batch in `evaluate` methods.

    Also called at the beginning of a validation batch in the `fit`
    methods, if validation data is provided.

    Subclasses should override for any actions to run. Note that if the
    `steps_per_execution` argument to `compile` in `Model` is set to `N`,
    this method will only be called every `N` batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Currently no data is passed to this argument for this
            method but that may change in the future.
    """
Also called at the beginning of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`Model` is set to `N`, this method will only be called every
`N` batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Currently no data is passed to this argument for this
method but that may change in the future. | github-repos |
def _find(self, index):
    """Attempt to find the next phone-number subsequence at or after ``index``.

    Args:
        index: The search index to start searching at.

    Returns:
        The phone number match found, or None if none can be found.
    """
    match = _PATTERN.search(self.text, index)
    # Bounded retry loop: each failed candidate consumes one of
    # self._max_tries, so pathological text cannot loop forever.
    while self._max_tries > 0 and match is not None:
        start = match.start()
        candidate = self.text[start:match.end()]
        # A candidate may contain two numbers run together; keep only the
        # text up to where a second number appears to start.
        candidate = self._trim_after_first_match(_SECOND_NUMBER_START_PATTERN,
                                                 candidate)
        match = self._extract_match(candidate, start)
        if match is not None:
            return match
        # Skip past the rejected candidate and search again.
        index = start + len(candidate)
        self._max_tries -= 1
        match = _PATTERN.search(self.text, index)
    return None
that represents a phone number. Returns the next match, None if none was found.
Arguments:
index -- The search index to start searching at.
Returns the phone number match found, None if none can be found. | juraj-google-style |
def get_tabular_stream(self, url, **kwargs):
    """Get Tabulator stream.

    Args:
        url (str): URL to download.
        **kwargs:
            headers (Union[int, List[int], List[str]]): Number of row(s)
                containing headers or list of headers.
            file_type (Optional[str]): Type of file. Defaults to inferring.
            delimiter (Optional[str]): Delimiter used for values in each
                row. Defaults to inferring.

    Returns:
        tabulator.Stream: Tabulator Stream object.
    """
    self.close_response()
    # Tabulator expects the file type under the 'format' keyword.
    file_type = kwargs.pop('file_type', None)
    if file_type is not None:
        kwargs['format'] = file_type
    try:
        self.response = tabulator.Stream(url, **kwargs)
        self.response.open()
        return self.response
    except TabulatorException as e:
        raisefrom(DownloadError, 'Getting tabular stream for %s failed!' % url, e)
Args:
url (str): URL to download
**kwargs:
headers (Union[int, List[int], List[str]]): Number of row(s) containing headers or list of headers
file_type (Optional[str]): Type of file. Defaults to inferring.
delimiter (Optional[str]): Delimiter used for values in each row. Defaults to inferring.
Returns:
tabulator.Stream: Tabulator Stream object | juraj-google-style |
def _GetTitleFromChromeWebStore(self, extension_identifier):
    """Retrieves the name of the extension from the Chrome store website.

    Results, including failed look-ups (stored as 'UNKNOWN'), are cached in
    self._extensions so the website is queried at most once per identifier.

    Args:
        extension_identifier (str): Chrome extension identifier.

    Returns:
        str: name of the extension or None.
    """
    if extension_identifier in self._extensions:
        return self._extensions.get(extension_identifier)
    page_content = self._GetChromeWebStorePage(extension_identifier)
    if not page_content:
        logger.warning(
            '[{0:s}] no data returned for extension identifier: {1:s}'.format(
                self.NAME, extension_identifier))
        return None
    # The page title is expected on the first line of the page content.
    first_line, _, _ = page_content.partition('\n')
    match = self._TITLE_RE.search(first_line)
    name = None
    if match:
        title = match.group(1)
        # Strip the web-store boilerplate around the extension name.
        if title.startswith('Chrome Web Store - '):
            name = title[19:]
        elif title.endswith('- Chrome Web Store'):
            # NOTE(review): drops 19 characters for an 18-character suffix,
            # presumably to also remove the space before the dash -- confirm.
            name = title[:-19]
    if not name:
        # Cache the failure so this identifier is not fetched again.
        self._extensions[extension_identifier] = 'UNKNOWN'
        return None
    self._extensions[extension_identifier] = name
    return name
Args:
extension_identifier (str): Chrome extension identifier.
Returns:
str: name of the extension or None. | juraj-google-style |
def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs):
    """Post-process the output of a VLM, decoding token ids to text.

    Args:
        generated_outputs (`torch.Tensor` or `np.ndarray`):
            The output of the model `generate` function, expected to be of
            shape `(batch_size, sequence_length)` or `(sequence_length,)`.
        skip_special_tokens (`bool`, *optional*, defaults to `True`):
            Whether or not to remove special tokens in the output; passed
            to the tokenizer's `batch_decode` method.
        **kwargs:
            Additional arguments forwarded to `batch_decode`.

    Returns:
        `List[str]`: The decoded text.
    """
    return self.tokenizer.batch_decode(generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs)
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
Additional arguments to be passed to the tokenizer's `batch_decode method`.
Returns:
`List[str]`: The decoded text. | github-repos |
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
    """Get ISO3 code for a country name, falling back to fuzzy matching.

    Returns a tuple whose first value is the ISO3 code and whose second
    value indicates whether the match was exact.

    Args:
        country (str): Country for which to get ISO3 code.
        use_live (bool): Try to use latest data from the web rather than
            the file in the package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if
            the country is not found. Defaults to None.

    Returns:
        Tuple[Optional[str], bool]: ISO3 code and whether the match is
        exact, or (None, False).
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # Fast path: an exact lookup wins outright.
    iso3 = cls.get_iso3_country_code(country, use_live=use_live)
    if (iso3 is not None):
        return (iso3, True)

    def remove_matching_from_list(wordlist, word_or_part):
        # Drop every word containing word_or_part as a substring.
        # NOTE(review): removes items from the list being iterated, which
        # can skip adjacent matches -- confirm this is acceptable here.
        for word in wordlist:
            if (word_or_part in word):
                wordlist.remove(word)
    expanded_country_candidates = cls.expand_countryname_abbrevs(country)
    # Score every known country name against every expanded candidate,
    # keeping the set of ISO3 codes tied at the best score.
    match_strength = 0
    matches = set()
    for countryname in sorted(countriesdata['countrynames2iso3']):
        for candidate in expanded_country_candidates:
            (simplified_country, removed_words) = cls.simplify_countryname(candidate)
            if (simplified_country in countryname):
                words = get_words_in_sentence(countryname)
                new_match_strength = 0
                # A simplified-name hit is the strongest signal (+32).
                if simplified_country:
                    remove_matching_from_list(words, simplified_country)
                    new_match_strength += 32
                # Words stripped during simplification score +4 when also
                # present; unmatched differentiator words penalize heavily.
                for word in removed_words:
                    if (word in countryname):
                        remove_matching_from_list(words, word)
                        new_match_strength += 4
                    elif (word in cls.major_differentiators):
                        new_match_strength -= 16
                    else:
                        new_match_strength -= 1
                # Penalize leftover words in the official name.
                for word in words:
                    if (word in cls.major_differentiators):
                        new_match_strength -= 16
                    else:
                        new_match_strength -= 1
                iso3 = countriesdata['countrynames2iso3'][countryname]
                if (new_match_strength > match_strength):
                    match_strength = new_match_strength
                    matches = set()
                if (new_match_strength == match_strength):
                    matches.add(iso3)
    # Accept only an unambiguous winner above the confidence threshold.
    if ((len(matches) == 1) and (match_strength > 16)):
        return (matches.pop(), False)
    # Last resort: per-country alias regexes.
    for (iso3, regex) in countriesdata['aliases'].items():
        index = re.search(regex, country.upper())
        if (index is not None):
            return (iso3, False)
    if (exception is not None):
        raise exception
    return (None, False)
showing if the match is exact or not.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False). | codesearchnet |
def convert_coco_poly_to_mask(segmentations, height: int, width: int, device: torch.device) -> torch.Tensor:
    """Convert COCO polygon annotations to a stacked mask tensor.

    Args:
        segmentations (`List[List[float]]`):
            List of polygons, each polygon represented by a list of x-y
            coordinates.
        height (`int`): Height of the mask.
        width (`int`): Width of the mask.
        device (`torch.device`): Device on which the masks are created.

    Returns:
        `torch.Tensor`: Tensor of shape `(num_instances, height, width)`;
        an empty `(0, height, width)` uint8 tensor when there are no
        segmentations.
    """
    try:
        from pycocotools import mask as coco_mask
    except ImportError:
        raise ImportError('Pycocotools is not installed in your environment.')

    def rasterize(polygons):
        # Encode the polygon(s) as RLE, decode to a (H, W, K) bitmap, then
        # collapse the per-polygon channel with a logical OR.
        rles = coco_mask.frPyObjects(polygons, height, width)
        decoded = coco_mask.decode(rles)
        if decoded.ndim < 3:
            decoded = decoded[..., None]
        bitmap = torch.as_tensor(decoded, dtype=torch.uint8, device=device)
        return torch.any(bitmap, axis=2)

    masks = [rasterize(polygons) for polygons in segmentations]
    if masks:
        return torch.stack(masks, axis=0)
    return torch.zeros((0, height, width), dtype=torch.uint8, device=device)
Args:
segmentations (`List[List[float]]`):
List of polygons, each polygon represented by a list of x-y coordinates.
height (`int`):
Height of the mask.
width (`int`):
Width of the mask. | github-repos |
def concat(input_layer, concat_dim, other_tensors=None):
    """Concatenates input_layer with other_tensors along the specified dim.

    The Pretty Tensor passed via input_layer is placed at the front of the
    list of tensors to concat.

    Args:
        input_layer: The input layer.
        concat_dim: The dimension along which to concat.
        other_tensors: The tensors to concatenate with as an iterable, or
            None if this is called on a sequence.

    Returns:
        A new PrettyTensor.

    Raises:
        ValueError: If other_tensors is None and this is not a sequence.
    """
    if input_layer.is_sequence():
        all_tensors = input_layer.sequence
        # NOTE(review): if `sequence` is returned by reference, this
        # extend() mutates the input layer's own sequence -- confirm
        # whether that side effect is intended.
        all_tensors.extend((other_tensors or []))
    else:
        all_tensors = [input_layer]
        if (other_tensors is None):
            raise ValueError('Other Tensors must be supplied.')
        all_tensors.extend(other_tensors)
    # An empty result can only arise from an empty sequence with no extras.
    if (not all_tensors):
        return prettytensor.wrap_sequence([])
    else:
        return tf.concat(all_tensors, concat_dim)
This adds the Pretty Tensor passed via input_layer to the front of the list of
tensors to concat.
Args:
input_layer: The input layer.
concat_dim: The dimension along which to concat.
other_tensors: The tensors to concatenate with as an iterable or None if
this is called on a sequence.
Returns:
A new PrettyTensor.
Raises:
ValueError: If other_tensors is None and this is not a sequence. | codesearchnet |
def add_oxidation_state_by_guess(self, **kwargs):
    """Decorate the structure with oxidation states guessed from composition.

    Uses ``Composition.oxi_state_guesses()``; when no guess is available,
    every element falls back to oxidation state 0.

    Args:
        **kwargs: parameters to pass into ``oxi_state_guesses()``.
    """
    guesses = self.composition.oxi_state_guesses(**kwargs)
    if not guesses:
        guesses = [{e.symbol: 0 for e in self.composition}]
    self.add_oxidation_state_by_element(guesses[0])
using Composition.oxi_state_guesses()
Args:
**kwargs: parameters to pass into oxi_state_guesses() | juraj-google-style |
def _acquire_given_subnet(self, uuid_path, subnet):
lease = self.create_lease_object_from_subnet(subnet)
self._take_lease(lease, uuid_path)
return lease.to_ip_network() | Try to create a lease for subnet
Args:
uuid_path (str): Path to the uuid file of a :class:`lago.Prefix`
subnet (str): dotted ipv4 subnet
(for example ```192.168.200.0```)
Returns:
netaddr.IPNetwork: Which represents the selected subnet
Raises:
LagoSubnetLeaseException: If the requested subnet is not in the
range of this store or its already been taken | codesearchnet |
def List(self, request, global_params=None):
    """Lists snapshots.

    Args:
        request: (DataflowProjectsLocationsSnapshotsListRequest) input message.
        global_params: (StandardQueryParameters, default: None) global arguments.

    Returns:
        (ListSnapshotsResponse) The response message.
    """
    return self._RunMethod(self.GetMethodConfig('List'), request, global_params=global_params)
Args:
request: (DataflowProjectsLocationsSnapshotsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListSnapshotsResponse) The response message. | github-repos |
def _convert_dict_inputs(inputs, tensor_info_map):
    """Convert ``inputs`` into a dict of tensors matching the signature.

    Handles wrapping inputs into a dict (per ``_prepare_dict_inputs``) and
    converting every value into a tensor compatible with the expected
    dtype/shape, including sparse/dense checks.

    Args:
        inputs: inputs fed to Module.__call__().
        tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo`
            describing the signature inputs.

    Returns:
        A dict of tensors to feed to the signature instantiation.

    Raises:
        TypeError: If the input values cannot be converted.
    """
    prepared = _prepare_dict_inputs(inputs, tensor_info_map)
    return tensor_info.convert_dict_to_compatible_tensor(prepared, tensor_info_map)
This handles:
- putting inputs into a dict, per _prepare_dict_inputs(),
- converting all input values into tensors compatible with the
expected input tensor (dtype, shape).
- check sparse/non-sparse tensor types.
Args:
inputs: inputs fed to Module.__call__().
tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo`
describing the signature inputs.
Returns:
A dict of tensors to feed to the signature instantiation.
Raises:
TypeError: If it fails to convert the input values into a dict of tensors
to feed to the signature instantiation. | codesearchnet |
def __init__(self, seed, salt):
    """Initializes a `SeedStream`.

    Args:
        seed: Any Python object convertible to string, supplying the
            initial entropy; a `SeedStream` instance is unwrapped to its
            original seed. If `None`, operations seeded with seeds drawn
            from this `SeedStream` follow TensorFlow semantics for not
            being seeded.
        salt: Any Python object convertible to string, supplying auxiliary
            entropy; must be unique across call sites.
    """
    if isinstance(seed, SeedStream):
        seed = seed.original_seed
    self._seed = seed
    self._salt = salt
    self._counter = 0
Args:
seed: Any Python object convertible to string, supplying the
initial entropy. If `None`, operations seeded with seeds
drawn from this `SeedStream` will follow TensorFlow semantics
for not being seeded.
salt: Any Python object convertible to string, supplying
auxiliary entropy. Must be unique across the Distributions
and TensorFlow Probability code base. See class docstring for
rationale. | juraj-google-style |
def CreateAdGroup(client, campaign_id):
    """Create an enabled ad group for a dynamic remarketing campaign.

    Args:
        client: an AdWordsClient instance.
        campaign_id: an int campaign ID.

    Returns:
        The ad group that was successfully created.
    """
    ad_group_service = client.GetService('AdGroupService', 'v201809')
    operation = {
        'operator': 'ADD',
        'operand': {
            'name': 'Dynamic remarketing ad group',
            'campaignId': campaign_id,
            'status': 'ENABLED',
        },
    }
    return ad_group_service.mutate([operation])['value'][0]
Args:
client: an AdWordsClient instance.
campaign_id: an int campaign ID.
Returns:
The ad group that was successfully created. | codesearchnet |
def get_rbounds(step):
    """Radial or vertical position of the domain boundaries.

    Args:
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.

    Returns:
        tuple of floats: radial or vertical positions of the bottom and top
        boundaries of the (unit-thickness) domain.
    """
    if step.geom is not None:
        bottom = step.geom.rcmb
    else:
        geom_par = step.sdat.par['geometry']
        bottom = geom_par['r_cmb']
        # Cartesian domains have no core-mantle boundary radius.
        if geom_par['shape'].lower() == 'cartesian':
            bottom = 0
    bottom = max(bottom, 0)
    return (bottom, bottom + 1)
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of floats: radial or vertical positions of boundaries of the
domain. | codesearchnet |
def create_reader_of_type(type_name):
    """Create an instance of the reader registered under ``type_name``.

    Args:
        type_name: The name of a reader.

    Returns:
        An instance of the reader with the given type.

    Raises:
        UnknownReaderException: If no reader is registered under the name.
    """
    registry = available_readers()
    try:
        reader_cls = registry[type_name]
    except KeyError:
        raise UnknownReaderException('Unknown reader: %s' % (type_name,))
    return reader_cls()
Args:
type_name: The name of a reader.
Returns:
An instance of the reader with the given type. | juraj-google-style |
def uninstalled(name):
    """Ensure an update is uninstalled from the minion.

    Args:
        name (str): Name of the Windows KB ("KB123456").

    Returns:
        dict: Standard state return dict with ``name``, ``changes``,
        ``result`` and ``comment`` keys.

    Example:

    .. code-block:: yaml

        KB123456:
          wusa.uninstalled
    """
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    # Nothing to do if the KB is not present.
    if not __salt__['wusa.is_installed'](name):
        ret['result'] = True
        ret['comment'] = '{0} already uninstalled'.format(name)
        return ret
    if __opts__['test'] is True:
        # `None` signals "changes would be made" in test mode. The original
        # code assigned it twice; once is enough.
        ret['result'] = None
        ret['comment'] = '{0} would be uninstalled'.format(name)
        return ret
    __salt__['wusa.uninstall'](name)
    # Verify the uninstall actually took effect.
    if not __salt__['wusa.is_installed'](name):
        ret['comment'] = '{0} was uninstalled'.format(name)
        ret['changes'] = {'old': True, 'new': False}
        ret['result'] = True
    else:
        ret['comment'] = '{0} failed to uninstall'.format(name)
    return ret
Args:
name(str):
Name of the Windows KB ("KB123456")
Example:
.. code-block:: yaml
KB123456:
wusa.uninstalled | juraj-google-style |
def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0):
    """KL divergence of diagonal gaussian N(mu, exp(log_var)) and N(mu_p, exp(log_var_p)).

    Args:
        mu: mu parameter of the posterior distribution.
        log_var: log(var) parameter of the posterior distribution.
        mu_p: optional mu from a learned prior distribution.
        log_var_p: optional log(var) from a learned prior distribution.

    Returns:
        The KL loss, summed over elements and divided by the batch size.
    """
    batch_size = shape_list(mu)[0]
    # Normal is parameterized by standard deviation, i.e. exp(log_var / 2).
    posterior = tfp.distributions.Normal(mu, tf.exp(tf.multiply(0.5, log_var)))
    prior = tfp.distributions.Normal(mu_p, tf.exp(tf.multiply(0.5, log_var_p)))
    kld = tfp.distributions.kl_divergence(posterior, prior)
    return tf.reduce_sum(kld) / to_float(batch_size)
Args:
mu: mu parameter of the distribution.
log_var: log(var) parameter of the distribution.
mu_p: optional mu from a learned prior distribution
log_var_p: optional log(var) from a learned prior distribution
Returns:
the KL loss. | juraj-google-style |
def __parameter_enum(self, param):
    """Return the enum descriptor (list of keys) of a parameter, if any.

    The keys are ordered by their enum number.

    Args:
        param: A simple field.

    Returns:
        The enum descriptor for the field if it is an enum field, else None.
    """
    if isinstance(param, messages.EnumField):
        number_by_name = param.type.to_dict()
        return [name for name, _ in sorted(number_by_name.items(), key=lambda kv: kv[1])]
    return None
An enum descriptor is a list of keys.
Args:
param: A simple field.
Returns:
The enum descriptor for the field, if it's an enum descriptor, else
returns None. | juraj-google-style |
def _get_qubit_index(self, qubit):
for i, bit in enumerate(self.qubit_list):
if qubit == bit:
qindex = i
break
else:
raise exceptions.VisualizationError("unable to find bit for operation")
return qindex | Get the index number for a quantum bit
Args:
qubit (tuple): The tuple of the bit of the form
(register_name, bit_number)
Returns:
int: The index in the bit list
Raises:
VisualizationError: If the bit isn't found | juraj-google-style |
def getMusicAlbumList(self, tagtype=0, startnum=0, pagingrow=100):
    """Fetch a page of the user's music album list.

    Args:
        tagtype: tag-type filter forwarded to the API (exact semantics
            unclear from here -- TODO confirm against the service docs).
        startnum: index of the first entry to return.
        pagingrow: page size.

    Returns:
        Parsed result from :func:`resultManager`; False on failure.
    """
    payload = {
        'userid': self.user_id,
        'useridx': self.useridx,
        'tagtype': tagtype,
        'startnum': startnum,
        'pagingrow': pagingrow,
    }
    response = self.session.post(url=nurls['setProperty'], data=payload)
    return resultManager(response.text)
Args:
tagtype = ???
startnum
pagingrow
Returns:
???
False: Failed to get property | juraj-google-style |
def edit_distance_filter(source_target_input, max_equal_to_diff_ratio=0):
    """Filter out examples whose diff/equal character ratio is too high.

    Args:
        source_target_input: a list of [source, target] pairs.
        max_equal_to_diff_ratio: cutoff for the ratio of differing chars to
            equal chars between source and target; a falsy value disables
            filtering entirely.

    Returns:
        source_target_output: filtered subset of [source, target] input pairs.
        thrown_out_count: number of examples filtered out.
    """
    if not max_equal_to_diff_ratio:
        # Filtering disabled: pass everything through untouched.
        return source_target_input, 0
    kept_pairs = []
    thrown_out_count = 0
    for pair in source_target_input:
        diff_chars = 0
        equal_chars = 0
        for tag, i1, i2, j1, j2 in fast_match_sequences(*pair):
            if tag == "diff":
                # A "diff" opcode may cover differently sized spans on each
                # side; count the larger one.
                diff_chars += max(i2 - i1, j2 - j1)
            else:
                equal_chars += i2 - i1
        if diff_chars <= max_equal_to_diff_ratio * equal_chars:
            kept_pairs.append(pair)
        else:
            thrown_out_count += 1
    return kept_pairs, thrown_out_count
Args:
source_target_input: a list of [source, target] pairs
max_equal_to_diff_ratio: cutoff for ratio of equal chars / diff chars
between source and target
Returns:
source_target_output: filtered subset of [source, target] input pairs
thrown_out_count: number of examples filtered out | juraj-google-style |
def open(self, filename):
    """Open an executable file for analysis.

    Loads the binary, captures its text section and initializes analysis
    for the binary's architecture mode. A falsy filename is ignored.

    Args:
        filename (str): Name of an executable file.
    """
    if not filename:
        return
    self.binary = BinaryFile(filename)
    self.text_section = self.binary.text_section
    self._load(arch_mode=self.binary.architecture_mode)
Args:
filename (str): Name of an executable file. | codesearchnet |
def _init_vocab_from_file(self, filename):
    """Load the vocabulary from a file, one token per line.

    Args:
        filename: The file to load vocabulary from.
    """
    with tf.gfile.Open(filename) as vocab_file:
        tokens = [line.strip() for line in vocab_file.readlines()]
    # Reserved tokens are assumed to already be present in the file.
    self._init_vocab((token for token in tokens), add_reserved_tokens=False)
Args:
filename: The file to load vocabulary from. | juraj-google-style |
def from_file_obj(cls, fp):
    """Init a new object from a file-like object (not for Outlook msg).

    Args:
        fp (file-like object): file-like object of a raw email.

    Returns:
        Instance of MailParser.
    """
    log.debug("Parsing email from file object")
    # Rewind when the object is seekable; some streams (e.g. pipes)
    # raise IOError on seek.
    try:
        fp.seek(0)
    except IOError:
        pass
    return cls.from_string(fp.read())
Not for Outlook msg.
Args:
fp (file-like object): file-like object of raw email
Returns:
Instance of MailParser | juraj-google-style |
def check_tweet(tweet, validation_checking=False):
    """Ensure a tweet payload is valid and determine its format.

    Args:
        tweet (dict/Tweet): the tweet payload.
        validation_checking (bool): check for valid key structure in a tweet.

    Returns:
        bool: True when the payload is in original format, False when it is
        in activity-streams format.

    Raises:
        NotATweetError: if the payload has no 'id' key.
    """
    if 'id' not in tweet:
        raise NotATweetError("This text has no 'id' key")
    original_format = is_original_format(tweet)
    # Dispatch to the structure checker matching the detected format.
    checker = _check_original_format_tweet if original_format else _check_activity_streams_tweet
    checker(tweet, validation_checking=validation_checking)
    return original_format
Args:
tweet (dict/Tweet): the tweet payload
validation_checking (bool): check for valid key structure in a tweet. | codesearchnet |
def not_modified(cls, errors=None):
    """Shortcut API for an HTTP 304 `Not Modified` response.

    Args:
        errors (list): Response key/value data.

    Returns:
        The JSON form of a WSResponse instance.
    """
    if cls.expose_status:
        # Surface the HTTP status on the underlying response object.
        cls.response.content_type = 'application/json'
        cls.response._status_line = '304 Not Modified'
    instance = cls(304, None, errors)
    return instance.to_json
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance. | juraj-google-style |
def add_candidate_peer_endpoints(self, peer_endpoints):
    """Add endpoints to the list of candidate peers, skipping duplicates.

    Args:
        peer_endpoints ([str]): A list of public uris which the validator
            can attempt to peer with.
    """
    with self._lock:
        known = self._candidate_peer_endpoints
        for endpoint in peer_endpoints:
            if endpoint not in known:
                known.append(endpoint)
attempt to peer with.
Args:
peer_endpoints ([str]): A list of public uri's which the
validator can attempt to peer with. | juraj-google-style |
def capture(self, payment_id, amount, data=None, **kwargs):
    """Capture a previously authorized payment.

    Args:
        payment_id: Id of the payment to capture.
        amount: Amount for which the payment has to be captured; added to
            the request body.
        data: Optional dict of extra request fields. A fresh dict per call
            replaces the original mutable-default argument, which shared
            (and mutated) one dict across all calls using the default.

    Returns:
        Payment dict after getting captured.
    """
    if data is None:
        data = {}
    url = "{}/{}/capture".format(self.base_url, payment_id)
    data['amount'] = amount
    return self.post_url(url, data, **kwargs)
Args:
payment_id : Id for which payment object has to be retrieved
Amount : Amount for which the payment has to be retrieved
Returns:
Payment dict after getting captured | juraj-google-style |
def writegroup(self, auth, entries, defer=False):
    """Write the given values for the respective resources, all with the
    same timestamp.

    Args:
        auth: cik for authentication.
        entries: List of key, value lists, e.g. [[key, value], [k, v], ...].
        defer: Whether to defer the RPC call.
    """
    arguments = [entries]
    return self._call('writegroup', auth, arguments, defer)
timestamp.
Args:
auth: cik for authentication.
entries: List of key, value lists. eg. [[key, value], [k,v],,,] | codesearchnet |
def mesh_element(script, sample_num=1000, element='VERT'):
    """Create a new layer populated with a point sampling of the current
    mesh; at most one sample for each element of the mesh is created.

    Samples are taken uniformly, one for each element (vertex/edge/face);
    all the elements have the same probability of being chosen.

    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        sample_num (int): The desired number of elements that must be
            chosen. Should not be larger than the number of elements of the
            original mesh.
        element (enum in ['VERT', 'EDGE', 'FACE']): Mesh element used for
            the subsampling.

    Layer stack:
        Creates new layer 'Sampled Mesh'. Current layer is changed to the
        new layer.

    Raises:
        ValueError: If `element` is not one of 'VERT', 'EDGE' or 'FACE'.

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    # Map the element name onto the enum index MeshLab expects. The original
    # if/elif chain left `element_num` unbound for unknown values, producing
    # a confusing NameError; fail with a clear message instead.
    element_ids = {'vert': 0, 'edge': 1, 'face': 2}
    try:
        element_num = element_ids[element.lower()]
    except KeyError:
        raise ValueError(
            "element must be one of 'VERT', 'EDGE' or 'FACE'; got {!r}".format(element))
    filter_xml = ''.join([' <filter name="Mesh Element Subsampling">\n', ' <Param name="Sampling" ', 'value="{:d}" '.format(element_num), 'description="Element to sample:" ', 'enum_val0="Vertex" ', 'enum_val1="Edge" ', 'enum_val2="Face" ', 'enum_cardinality="3" ', 'type="RichEnum" ', '/>\n', ' <Param name="SampleNum" ', 'value="{:d}" '.format(sample_num), 'description="Number of samples" ', 'type="RichInt" ', '/>\n', ' </filter>\n'])
    util.write_filter(script, filter_xml)
    if isinstance(script, FilterScript):
        script.add_layer('Sampled Mesh')
    return None
at most one sample for each element of the mesh is created.
Samples are taking in a uniform way, one for each element
(vertex/edge/face); all the elements have the same probabilty of being
choosen.
Args:
script: the FilterScript object or script filename to write
the filter to.
sample_num (int): The desired number of elements that must be chosen.
Being a subsampling of the original elements if this number should
not be larger than the number of elements of the original mesh.
element (enum in ['VERT', 'EDGE', 'FACE']): Choose what mesh element
will be used for the subsampling. At most one point sample will
be added for each one of the chosen elements
Layer stack:
Creates new layer 'Sampled Mesh'. Current layer is changed to the new
layer.
MeshLab versions:
2016.12
1.3.4BETA | codesearchnet |
def get_updates_for(self, inputs):
    """Retrieves updates relevant to a specific set of inputs.

    Args:
        inputs: Input tensor or list/tuple of input tensors.

    Returns:
        List of update ops of the layer that depend on `inputs`.
    """
    if inputs is None:
        # Requesting unconditional updates only.
        return [upd for upd in self.updates if upd._unconditional_update]
    conditional = [upd for upd in self.updates if not upd._unconditional_update]
    flat_inputs = nest.flatten(inputs)
    reachable = tf_utils.get_reachable_from_inputs(flat_inputs, conditional)
    return [upd for upd in conditional if upd in reachable]
Args:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of update ops of the layer that depend on `inputs`. | github-repos |
def all_tokens(self, delimiter=' '):
    """Return a set of all tokens occurring in the label-list.

    Args:
        delimiter (str): The delimiter used to split labels into tokens
            (see :meth:`audiomate.annotations.Label.tokenized`).

    Returns:
        :class:`set`: A set of distinct tokens.
    """
    tokens = set()
    for label in self:
        # update() extends the set in place; the original union()-and-rebind
        # allocated a brand-new set for every label.
        tokens.update(label.tokenized(delimiter=delimiter))
    return tokens
Args:
delimiter (str): The delimiter used to split labels into tokens
(see :meth:`audiomate.annotations.Label.tokenized`).
Returns:
:class:`set`: A set of distinct tokens. | juraj-google-style |
def remove_attribute(self, attribute: str) -> None:
    """Remove an attribute from the node.

    Use only if is_mapping() returns True.

    Args:
        attribute: The name of the attribute to remove.
    """
    index = self.__attr_index(attribute)
    if index is None:
        # Attribute not present: nothing to do.
        return
    self.yaml_node.value.pop(index)
Use only if is_mapping() returns True.
Args:
attribute: The name of the attribute to remove. | codesearchnet |
def get_distance(self, i, j, jimage=None):
    """Get distance between site i and j assuming periodic boundary
    conditions.

    If jimage is None, the jimage nearest to atom i is selected; otherwise
    the distance to the specified jimage of atom j is returned.

    Args:
        i (int): Index of first site
        j (int): Index of second site
        jimage: Number of lattice translations in each lattice direction.
            Default is None for nearest image.

    Returns:
        distance
    """
    site_i, site_j = self[i], self[j]
    return site_i.distance(site_j, jimage)
conditions. If the index jimage of two sites atom j is not specified it
selects the jimage nearest to the i atom and returns the distance and
jimage indices in terms of lattice vector translations if the index
jimage of atom j is specified it returns the distance between the i
atom and the specified jimage atom.
Args:
i (int): Index of first site
j (int): Index of second site
jimage: Number of lattice translations in each lattice direction.
Default is None for nearest image.
Returns:
distance | codesearchnet |
def _RegisterProcess(self, process):
if process is None:
raise ValueError('Missing process.')
if process.pid in self._processes_per_pid:
raise KeyError(
'Already managing process: {0!s} (PID: {1:d})'.format(
process.name, process.pid))
self._processes_per_pid[process.pid] = process | Registers a process with the engine.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is already registered with the engine.
ValueError: if the process is missing. | juraj-google-style |
def _initialize_tensor_name_to_ids(self):
tensor_name_to_ids = {}
for (i, operation) in enumerate(self._operations):
for (j, tensor) in enumerate(operation.outputs):
tensor_name_to_ids[tensor.name] = (i, j)
return tensor_name_to_ids | Initializer for _tensor_name_to_ids.
Returns:
a {string: (int, int)}, mapping the name of tensor T to the index of T's
operation in _operations and T's index in T's operation's outputs. | codesearchnet |
def Verify(self):
    """Verify block using the verification script.

    Returns:
        bool: True if valid. False otherwise.
    """
    # NOTE(review): the first check only passes when this block's hash equals
    # the genesis hash — presumably intentional for this header type; confirm.
    if not (self.Hash.ToBytes() == GetGenesis().Hash.ToBytes()):
        return False
    blockchain = GetBlockchain()
    if not blockchain.ContainsBlock(self.Index):
        return False
    if self.Index > 0:
        previous = GetBlockchain().GetHeader(self.PrevHash.ToBytes())
        if previous is None:
            return False
        # The parent must directly precede this block, both by index and
        # by timestamp.
        if previous.Index + 1 != self.Index:
            return False
        if previous.Timestamp >= self.Timestamp:
            return False
    return bool(Helper.VerifyScripts(self))
Returns:
bool: True if valid. False otherwise. | codesearchnet |
def usufyToTextExport(d, fPath=None):
    """Workaround to export to a .txt file or to show the information.

    Args:
        d: Data to export.
        fPath: File path for the output file. If None was provided, it will
            assume that it has to print it.

    Returns:
        unicode: It sometimes returns a unicode representation of the Sheet
            received.
    """
    # Short-circuit: nothing to export.
    if (d == []):
        return '+------------------+\n| No data found... |\n+------------------+'
    import pyexcel as pe
    import pyexcel.ext.text as text
    # NOTE(review): `isTerminal` is computed but never used below; when
    # fPath is None the open() call later will fail — confirm intended flow.
    if (fPath == None):
        isTerminal = True
    else:
        isTerminal = False
    # NOTE(review): `oldData` is loaded but never used; the bare except
    # swallows all errors from get_data — consider narrowing.
    try:
        oldData = get_data(fPath)
    except:
        oldData = {'OSRFramework': []}
    # Build the tabular representation and render it as a grid-formatted
    # pyexcel Sheet.
    tabularData = _generateTabularData(d, {'OSRFramework': [[]]}, True, canUnicode=False)
    sheet = pe.Sheet(tabularData['OSRFramework'])
    sheet.name = (('Profiles recovered (' + getCurrentStrDatetime()) + ').')
    sheet.name_columns_by_row(0)
    text.TABLEFMT = 'grid'
    # NOTE(review): on a successful write this returns None; `unicode` is
    # Python-2-only, so the fallback return breaks under Python 3 — confirm
    # target interpreter.
    try:
        with open(fPath, 'w') as oF:
            oF.write(str(sheet))
    except Exception as e:
        return unicode(sheet)
Args:
-----
d: Data to export.
fPath: File path for the output file. If None was provided, it will
assume that it has to print it.
Returns:
--------
unicode: It sometimes returns a unicode representation of the Sheet
received. | codesearchnet |
def _populate_from_repo(self, example: Example):
    """Populate fields of the example reading them from the repository.

    Args:
        example: beam example that should be verified
    """
    folder = Path(example.filepath).parent
    # Each optional artifact lives next to the example file; copy the file
    # contents onto the matching attribute when present.
    file_to_attr = (
        (self.LOGS_FILENAME, 'logs'),
        (self.GRAPH_FILENAME, 'graph'),
        (self.OUTPUT_FILENAME, 'output'),
        (self.COMPILE_OUTPUT_FILENAME, 'compile_output'),
    )
    for file_name, attr in file_to_attr:
        candidate = folder / file_name
        if candidate.exists():
            setattr(example, attr, candidate.read_text())
Args:
example: beam example that should be verified | github-repos |
def ParseFileDownloadedRow(
    self, parser_mediator, query, row, **unused_kwargs):
    """Parses a file downloaded row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
    """
    row_hash = hash(query)
    event_data = ChromeHistoryFileDownloadedEventData()
    event_data.full_path = self._GetRowValue(row_hash, row, 'target_path')
    event_data.offset = self._GetRowValue(row_hash, row, 'id')
    event_data.query = query
    event_data.received_bytes = self._GetRowValue(
        row_hash, row, 'received_bytes')
    event_data.total_bytes = self._GetRowValue(row_hash, row, 'total_bytes')
    event_data.url = self._GetRowValue(row_hash, row, 'url')
    # Chrome stores the download start time as a WebKit timestamp.
    start_time = self._GetRowValue(row_hash, row, 'start_time')
    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=start_time)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row. | juraj-google-style |
def _torch_extract_fbank_features(self, waveform: 'torch.FloatTensor', audio_lengths: 'torch.Tensor', device: str='cpu') -> 'torch.FloatTensor':
    """Compute the log mel-scaled spectrogram of batched waveforms using
    PyTorch's FFT implementation.

    Args:
        waveform (`torch.FloatTensor` of shape `(batch_size, max_audio_length)`):
            The batched waveforms.
        audio_lengths (`torch.Tensor` of shape `(batch_size,)`):
            The lengths of the waveforms along the max_audio_length dimension.
        device (`str`, *optional*, defaults to "cpu"):
            The device to run the computation on. (e.g., "cpu", "cuda")

    Returns:
        `torch.FloatTensor` of shape `(batch_size, max_feature_length, feature_size)`:
            The log mel-scaled spectrogram of the batched waveforms.
    """
    fft_window = torch.hamming_window(self.win_length, periodic=False, device=device, dtype=torch.float64)
    batch_size = waveform.shape[0]
    # Slice the waveform into overlapping frames of win_length, stepped by
    # hop_length.
    frames = waveform.unfold(-1, self.win_length, self.hop_length)
    if batch_size > 1:
        # For shorter-than-max items in the batch, zero out the frames that
        # fall beyond each item's true audio length so padding does not leak
        # into the spectrogram.
        frames = frames.clone()
        to_mask_batch_idxs = torch.arange(batch_size)[audio_lengths != audio_lengths.max()]
        if to_mask_batch_idxs.numel() > 0:
            batch_idxs_down = (audio_lengths[to_mask_batch_idxs] - self.win_length)
            batch_idxs_up = audio_lengths[to_mask_batch_idxs]
            offset_idx = batch_idxs_down.min()
            max_idx = batch_idxs_up.max()
            # Boolean mask over the [offset_idx, max_idx) window marking the
            # positions past each item's valid range.
            mask = torch.arange(max_idx - offset_idx, device=device).expand(to_mask_batch_idxs.shape[0], -1)
            mask = ((batch_idxs_down - offset_idx).unsqueeze(1) <= mask) & (mask < (batch_idxs_up - offset_idx).unsqueeze(1))
            mask = mask.unsqueeze(-1).expand(-1, -1, self.win_length)
            masked_frames = frames[to_mask_batch_idxs, offset_idx:max_idx].masked_fill_(mask, 0)
            frames[to_mask_batch_idxs, offset_idx:max_idx] = masked_frames
    # Pre-emphasis: subtract a scaled copy of the previous sample; the first
    # sample of each frame reuses the second to avoid an edge artifact.
    # The 32768 factor rescales normalized floats to 16-bit sample range.
    frames_prev = torch.roll(frames, 1, dims=-1)
    frames_prev[:, :, 0] = frames_prev[:, :, 1]
    frames = (frames - self.preemphasis * frames_prev) * 32768
    # Windowed real FFT per frame, then restore the (batch, frames, bins)
    # layout.
    S = torch.fft.rfft(fft_window * frames.view(-1, self.win_length), n=self.n_fft, dim=1)
    S = S.view(frames.shape[0], -1, S.shape[-1])
    S = S.to(torch.complex64)
    spec = torch.abs(S)
    spec_power = spec ** 2
    # Project the power spectrum onto the mel filter bank and take the log;
    # clamping at 1.0 keeps log() non-negative and finite.
    mel_filters = torch.from_numpy(self.mel_filters).to(device, torch.float32)
    log_spec = torch.clamp(spec_power @ mel_filters, min=1.0)
    log_spec = torch.log(log_spec)
    return log_spec
Args:
waveform (torch.FloatTensor` of shape `(batch_size, max_audio_length)`):
The batched waveforms.
audio_lengths (`torch.Tensor` of shape `(batch_size,)`):
The lengths of the waveforms along the max_audio_length dimension.
device (`str`, *optional*, defaults to "cpu"):
The device to run the computation on. (e.g., "cpu", "cuda")
Returns:
`torch.FloatTensor` of shape `(batch_size, max_feature_length, feature_size)`:
The log mel-scaled spectrogram of the batched waveforms. | github-repos |
def detect_alias_config_change(self):
    """Check whether the alias configuration has changed since the last run.

    Returns:
        False if the alias configuration file has not been changed since the
        last run (or if it cannot be parsed). Otherwise, return True.
    """
    if self.parse_error():
        # An unparsable config is treated as unchanged.
        return False
    current_hash = hashlib.sha1(self.alias_config_str.encode('utf-8')).hexdigest()
    if current_hash == self.alias_config_hash:
        return False
    # Remember the new digest so subsequent calls report "unchanged".
    self.alias_config_hash = current_hash
    return True
return False | Change if the alias configuration has changed since the last run.
Returns:
False if the alias configuration file has not been changed since the last run.
Otherwise, return True. | codesearchnet |
def while_loop(cond_fn, body_fn, inputs, num_loop_vars=None, has_accumulators=False, **kwargs):
    """While Loop.

    See comments above for WhileLoopOperation.

    num_loop_vars is a hack for the multi-gpu setup. In this case, loops
    are generally slow, as all loop variables are placed on device. By
    setting num_loop_vars=k, all of the loop variables except for the first
    k are handled as mtf Variables instead of loop variables, using explicit
    updates and control dependencies. Only the first num_loop_vars outputs
    are returned. Do not use this option on TPU, since it is unnecessary and
    also produces incorrect results, since xla does not respect control
    dependencies.

    Args:
        cond_fn: a function from n Tensors to scalar boolean Tensor
        body_fn: a function from n Tensors to list of n Tensors
        inputs: a list of n Tensors
        num_loop_vars: an optional integer.
        has_accumulators: a boolean
        **kwargs: additional kwargs passed to tf.while_loop

    Returns:
        a list of n Tensors.
    """
    if (num_loop_vars is None):
        # Simple case: every input is a true loop variable.
        return WhileLoopOperation(cond_fn, body_fn, inputs, tf_kwargs=kwargs, has_accumulators=has_accumulators).outputs
    assert (num_loop_vars > 0)
    # Inputs beyond num_loop_vars are mirrored into local variables instead
    # of being threaded through the loop.
    extra_inputs = inputs[num_loop_vars:]
    my_vars = []
    for (i, x) in enumerate(extra_inputs):
        my_vars.append(get_variable(x.mesh, ('loop_var_%d' % i), x.shape, initializer=tf.zeros_initializer(), dtype=x.dtype, collections=[tf.GraphKeys.LOCAL_VARIABLES]))
    my_vars = tuple(my_vars)
    # Force the initial assignments to happen before the loop starts by
    # attaching them as control dependencies of the first input.
    first_input = depend(inputs[0], [assign(var, x) for (var, x) in zip(my_vars, extra_inputs)])
    inputs = ([first_input] + inputs[1:num_loop_vars])
    def my_cond_fn(*inputs):
        # Re-attach the variable-backed values when evaluating the condition.
        return cond_fn(*(inputs + my_vars))
    def my_body_fn(*inputs):
        outputs = tuple(body_fn(*(inputs + my_vars)))
        extra_outputs = outputs[num_loop_vars:]
        # Write the extra outputs back into their variables before the next
        # iteration, again via control dependencies on the first output.
        first_output = depend(outputs[0], [assign(var, x) for (var, x) in zip(my_vars, extra_outputs)])
        outputs = ((first_output,) + outputs[1:num_loop_vars])
        return outputs
    return WhileLoopOperation(my_cond_fn, my_body_fn, inputs, tf_kwargs=kwargs, has_accumulators=has_accumulators).outputs
See comments above for WhileLoopOperation
num_loop_vars is a hack for the multi-gpu setup. In this case, loops
are generally slow, as all loop variables are placed on device. By setting
num_loop_vars=k, then all of the loop variables except for the first k
are handled as mtf Variables instead of loop variables, using explicit
updates and control dependencies. In this case, we only return the
first num_loop_vars outputs. Do not use this option on TPU, since it
is unnecessary and also produces incorrect results, since xla does not
respect control dependencies.
Args:
cond_fn: a function from n Tensors to scalar boolean Tensor
body_fn: a function from n Tensors to list of n Tensors
inputs: a list of n Tensors
num_loop_vars: an optional integer.
has_accumulators: a boolean
**kwargs: additional kwargs passed to tf.while_loop
Returns:
a list of n Tensors. | codesearchnet |
def is_distributed(partition_column, lower_bound, upper_bound):
    """Check if it is possible to distribute a query given these args.

    Args:
        partition_column: column used to share the data between the workers
        lower_bound: the minimum value to be requested from the
            partition_column
        upper_bound: the maximum value to be requested from the
            partition_column

    Returns:
        True for distributed or False if not
    """
    params = (partition_column, lower_bound, upper_bound)
    if all(p is not None for p in params):
        if upper_bound <= lower_bound:
            raise InvalidArguments("upper_bound must be greater than lower_bound.")
        return True
    if all(p is None for p in params):
        return False
    # A partial set of arguments is ambiguous: refuse to guess.
    raise InvalidArguments(
        "Invalid combination of partition_column, lower_bound, upper_bound."
        "All these arguments should be passed (distributed) or none of them (standard pandas)."
    )
Args:
partition_column: column used to share the data between the workers
lower_bound: the minimum value to be requested from the partition_column
upper_bound: the maximum value to be requested from the partition_column
Returns:
True for distributed or False if not | juraj-google-style |
def experimental_write_bytecode(filename, mlir_txt):
    """Writes an MLIR module out as bytecode.

    Args:
        filename: The filename to write to.
        mlir_txt: The MLIR module in textual format.
    """
    # Thin wrapper delegating to the pywrap_mlir native binding.
    pywrap_mlir.experimental_write_bytecode(filename, mlir_txt)
Args:
filename: The filename to write to.
mlir_txt: The MLIR module in textual format. | github-repos |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.