code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def get(self, node_id):
return self.prepare_model(self.client.api.inspect_node(node_id)) | Get a node.
Args:
node_id (string): ID of the node to be inspected.
Returns:
A :py:class:`Node` object.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | codesearchnet |
def retrieve_pwd_from_config(msg, cfg):
msg_type = msg.__class__.__name__.lower()
key_fmt = msg.profile + "_" + msg_type
pwd = cfg.pwd[key_fmt].split(" :: ")
if len(pwd) == 1:
msg.auth = pwd[0]
else:
msg.auth = tuple(pwd) | Retrieve auth from profile configuration and set in msg.auth attr.
Args:
:msg: (Message class) an instance of a message class.
:cfg: (jsonconfig.Config) config instance. | juraj-google-style |
def __init__(self, value=0):
super(ExtensionTag, self).__init__(value, Tags.EXTENSION_TAG) | Construct an ExtensionTag object.
Args:
value (int): A number representing the extension tag. Often
displayed in hex format. Optional, defaults to 0. | juraj-google-style |
def _mean_of_runs(stats, key='runs'):
num_runs = len(stats[key])
first = stats[key][0]
mean = {}
for stat_key in first:
if isinstance(first[stat_key], numbers.Number):
mean[stat_key] = (sum((run[stat_key] for run in stats[key])) / float(num_runs))
return mean | Obtain the mean of stats.
Args:
stats: dict; A set of stats, structured as above.
key: str; Optional key to determine where list of runs is found in stats | codesearchnet |
def compile_file(source, globals_=None):
if isinstance(source, gast.AST):
source = quoting.to_source(source)
tempdir = tempfile.mkdtemp()
uuid = str(uuid4().hex[:4])
tmpname = os.path.join(tempdir, 'tangent_%s.py' % uuid)
with open(tmpname, 'w') as f:
f.write(source)
module_name = 'tangent_%s' % uuid
if six.PY3:
spec = util.spec_from_file_location(module_name, tmpname)
m = util.module_from_spec(spec)
spec.loader.exec_module(m)
else:
m = imp.load_source(module_name, tmpname)
if globals_:
m.__dict__.update(globals_)
return m | Compile by saving to file and importing that.
Compiling the AST/source code this way ensures that the source code is
readable by e.g. `pdb` or `inspect`.
Args:
source: The code to compile, either as a string or as an AST.
globals_: A dictionary of variables that should be available as globals in
the compiled module. They will be monkey patched after importing the
module.
Returns:
A module object containing the compiled source code. | juraj-google-style |
def pack_x_y_sample_weight(x, y=None, sample_weight=None):
if y is None:
if not nest.is_nested(x):
return x
else:
return (x,)
elif sample_weight is None:
return (x, y)
else:
return (x, y, sample_weight) | Packs user-provided data into a tuple.
This is a convenience utility for packing data into the tuple formats
that `Model.fit` uses.
Standalone usage:
>>> x = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x)
>>> isinstance(data, tf.Tensor)
True
>>> y = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x, y)
>>> isinstance(data, tuple)
True
>>> x, y = data
Args:
x: Features to pass to `Model`.
y: Ground-truth targets to pass to `Model`.
sample_weight: Sample weight for each element.
Returns:
Tuple in the format used in `Model.fit`. | github-repos |
def execute_work_items(work_items, config):
return celery.group(
worker_task.s(work_item, config)
for work_item in work_items
) | Execute a suite of tests for a given set of work items.
Args:
work_items: An iterable of `work_db.WorkItem`s.
config: The configuration to use for the test execution.
Returns: An iterable of WorkItems. | juraj-google-style |
def audio_bottom(x, model_hparams, vocab_size):
del vocab_size
inputs = x
with tf.variable_scope('audio_modality'):
def xnet_resblock(x, filters, res_relu, name):
'Xception block.'
with tf.variable_scope(name):
y = common_layers.separable_conv_block(x, filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], first_relu=True, padding='SAME', force2d=True, name='sep_conv_block')
y = common_layers.pool(y, (3, 3), 'MAX', 'SAME', strides=(2, 2))
return (y + common_layers.conv_block(x, filters, [((1, 1), (1, 1))], padding='SAME', strides=(2, 2), first_relu=res_relu, force2d=True, name='res_conv0'))
x = (tf.to_float(inputs) / 255.0)
x.set_shape([None, None, None, 1])
for i in range(model_hparams.audio_compression):
x = xnet_resblock(x, (2 ** (i + 1)), True, ('compress_block_%d' % i))
return xnet_resblock(x, model_hparams.hidden_size, False, 'compress_block_final') | Transform input from data space to model space.
Args:
x: A Tensor with shape [batch, ...]
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
Returns:
body_input: A Tensor with shape [batch, ?, ?,
model_hparams.hidden_size]. | codesearchnet |
def CollectFromFileSystem(
cls, artifacts_registry, knowledge_base, searcher, file_system):
for preprocess_plugin in cls._file_system_plugins.values():
artifact_definition = artifacts_registry.GetDefinitionByName(
preprocess_plugin.ARTIFACT_DEFINITION_NAME)
if not artifact_definition:
logger.warning('Missing artifact definition: {0:s}'.format(
preprocess_plugin.ARTIFACT_DEFINITION_NAME))
continue
logger.debug('Running file system preprocessor plugin: {0:s}'.format(
preprocess_plugin.ARTIFACT_DEFINITION_NAME))
try:
preprocess_plugin.Collect(
knowledge_base, artifact_definition, searcher, file_system)
except (IOError, errors.PreProcessFail) as exception:
logger.warning((
'Unable to collect value from artifact definition: {0:s} '
'with error: {1!s}').format(
preprocess_plugin.ARTIFACT_DEFINITION_NAME, exception)) | Collects values from Windows Registry values.
Args:
artifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifacts
definitions registry.
knowledge_base (KnowledgeBase): to fill with preprocessing information.
searcher (dfvfs.FileSystemSearcher): file system searcher to preprocess
the file system.
file_system (dfvfs.FileSystem): file system to be preprocessed. | juraj-google-style |
def ToVM(self):
if (self.Type == ContractParameterType.String):
return str(self.Value).encode('utf-8').hex()
elif ((self.Type == ContractParameterType.Integer) and isinstance(self.Value, int)):
return BigInteger(self.Value)
return self.Value | Used for turning a ContractParameter item into somethnig consumable by the VM
Returns: | codesearchnet |
def set_lacp_timeout(self, name, value=None):
commands = [('interface %s' % name)]
string = 'port-channel lacp fallback timeout'
commands.append(self.command_builder(string, value=value))
return self.configure(commands) | Configures the Port-Channel LACP fallback timeout
The fallback timeout configures the period an interface in
fallback mode remains in LACP mode without receiving a PDU.
Args:
name(str): The Port-Channel interface name
value(int): port-channel lacp fallback timeout in seconds
Returns:
True if the operation succeeds otherwise False is returned | codesearchnet |
def dispatch_pure(request: str, methods: Methods, *, context: Any, convert_camel_case: bool, debug: bool) -> Response:
try:
deserialized = validate(deserialize(request), schema)
except JSONDecodeError as exc:
return InvalidJSONResponse(data=str(exc), debug=debug)
except ValidationError as exc:
return InvalidJSONRPCResponse(data=None, debug=debug)
return call_requests(create_requests(deserialized, context=context, convert_camel_case=convert_camel_case), methods, debug=debug) | Pure version of dispatch - no logging, no optional parameters.
Does two things:
1. Deserializes and validates the string.
2. Calls each request.
Args:
request: The incoming request string.
methods: Collection of methods that can be called.
context: If specified, will be the first positional argument in all requests.
convert_camel_case: Will convert the method name/any named params to snake case.
debug: Include more information in error responses.
Returns:
A Response. | codesearchnet |
def _get_mpr_table(self, connection, partition):
logger.debug(
'Looking for materialized view of the partition.\n partition: {}'.format(partition.name))
foreign_table = partition.vid
view_table = '{}_v'.format(foreign_table)
view_exists = self._relation_exists(connection, view_table)
if view_exists:
logger.debug(
'Materialized view of the partition found.\n partition: {}, view: {}'
.format(partition.name, view_table))
return view_table
logger.debug(
'Looking for foreign table of the partition.\n partition: {}'.format(partition.name))
foreign_exists = self._relation_exists(connection, foreign_table)
if foreign_exists:
logger.debug(
'Foreign table of the partition found.\n partition: {}, foreign table: {}'
.format(partition.name, foreign_table))
return foreign_table
raise MissingTableError('postgres database does not have table for {} partition.'
.format(partition.vid)) | Returns name of the postgres table who stores mpr data.
Args:
connection: connection to postgres db who stores mpr data.
partition (orm.Partition):
Returns:
str:
Raises:
MissingTableError: if partition table not found in the db. | juraj-google-style |
def attribute_label(self, attribute_id, label, action='GET', params=None):
if (params is None):
params = {}
if (not self.can_update()):
self._tcex.handle_error(910, [self.type])
if (action == 'GET'):
return self.tc_requests.get_attribute_label(self.api_type, self.api_sub_type, self.unique_id, attribute_id, label, owner=self.owner, params=params)
if (action == 'DELETE'):
return self.tc_requests.delete_attribute_label(self.api_type, self.api_sub_type, self.unique_id, attribute_id, label, owner=self.owner)
self._tcex.handle_error(925, ['action', 'attribute_label', 'action', 'action', action])
return None | Gets a security labels from a attribute
Args:
attribute_id:
label:
action:
params:
Returns: Security label json | codesearchnet |
def norm(value, dims, order=None):
if dims == 0:
return tf.math.abs(value)
elif dims == 1:
axis = -1
elif dims == 2:
axis = [-1, -2]
else:
ValueError(dims)
if order is None:
order = np.inf
return tf.norm(tensor=value, axis=axis, ord=order) | Compute the norm of the given (possibly batched) value.
Args:
value: A `Tensor` of real dtype.
dims: An Python integer with the number of non-batching dimensions in the
value, i.e. `dims=0` (scalars), `dims=1` (vectors), `dims=2` (matrices).
order: Order of the norm, defaults to `np.inf`. | juraj-google-style |
def __init__(self, source_urn=None, args=None, token=None):
self.source_urn = source_urn
self.args = args
self.token = token
self.lock = threading.RLock() | OutputPlugin constructor.
Constructor should be overridden to maintain instance-local state - i.e.
state that gets accumulated during the single output plugin run and that
should be used to update the global state via UpdateState method.
Args:
source_urn: URN of the data source to process the results from.
args: This plugin's arguments.
token: Security token. | juraj-google-style |
def is_metal(self, efermi_tol=0.0001):
for (spin, values) in self.bands.items():
for i in range(self.nb_bands):
if (np.any(((values[(i, :)] - self.efermi) < (- efermi_tol))) and np.any(((values[(i, :)] - self.efermi) > efermi_tol))):
return True
return False | Check if the band structure indicates a metal by looking if the fermi
level crosses a band.
Returns:
True if a metal, False if not | codesearchnet |
def compare_python_to_reference_murmur3_32(data: Any, seed: int = 0) -> None:
assert mmh3, "Need mmh3 module"
c_data = to_str(data)
c_signed = mmh3.hash(c_data, seed=seed)
py_data = to_bytes(c_data)
py_unsigned = murmur3_x86_32(py_data, seed=seed)
py_signed = twos_comp_to_signed(py_unsigned, n_bits=32)
preamble = "Hashing {data} with MurmurHash3/32-bit/seed={seed}".format(
data=repr(data), seed=seed)
if c_signed == py_signed:
print(preamble + " -> {result}: OK".format(result=c_signed))
else:
raise AssertionError(
preamble + "; mmh3 says "
"{c_data} -> {c_signed}, Python version says {py_data} -> "
"{py_unsigned} = {py_signed}".format(
c_data=repr(c_data),
c_signed=c_signed,
py_data=repr(py_data),
py_unsigned=py_unsigned,
py_signed=py_signed)) | Checks the pure Python implementation of 32-bit murmur3 against the
``mmh3`` C-based module.
Args:
data: data to hash
seed: seed
Raises:
AssertionError: if the two calculations don't match | juraj-google-style |
def iterator_zip(variables: VarType, parent: str=None) -> Iterable[VarMatrix]:
logger.debug('Yielding from zip iterator')
if isinstance(variables, list):
for item in variables:
(yield list(variable_matrix(item, parent, 'zip')))
else:
(yield list(variable_matrix(variables, parent, 'zip'))) | Apply the zip operator to a set of variables.
This uses the python zip iterator to combine multiple lists of variables such that
the nth variable in each list is aligned.
Args:
variables: The variables object
parent: Unused | codesearchnet |
def _get_metric_object(self, metric, y_t, y_p):
if metric is None:
return None
if str(metric).lower() not in ['accuracy', 'acc', 'crossentropy', 'ce']:
metric_obj = metrics_mod.get(metric)
else:
y_t_rank = len(y_t.shape.as_list())
y_p_rank = len(y_p.shape.as_list())
y_t_last_dim = y_t.shape.as_list()[-1]
y_p_last_dim = y_p.shape.as_list()[-1]
is_binary = y_p_last_dim == 1
is_sparse_categorical = y_t_rank < y_p_rank or (y_t_last_dim == 1 and y_p_last_dim > 1)
if str(metric).lower() in ['accuracy', 'acc']:
if is_binary:
metric_obj = metrics_mod.binary_accuracy
elif is_sparse_categorical:
metric_obj = metrics_mod.sparse_categorical_accuracy
else:
metric_obj = metrics_mod.categorical_accuracy
elif is_binary:
metric_obj = metrics_mod.binary_crossentropy
elif is_sparse_categorical:
metric_obj = metrics_mod.sparse_categorical_crossentropy
else:
metric_obj = metrics_mod.categorical_crossentropy
if isinstance(metric_obj, losses_mod.Loss):
metric_obj._allow_sum_over_batch_size = True
if not isinstance(metric_obj, metrics_mod.Metric):
if isinstance(metric, str):
metric_name = metric
else:
metric_name = get_custom_object_name(metric)
if metric_name is None:
raise ValueError('Metric should be a callable, found: {}'.format(metric))
metric_obj = metrics_mod.MeanMetricWrapper(metric_obj, name=metric_name)
return metric_obj | Converts user-supplied metric to a `Metric` object.
Args:
metric: A string, function, or `Metric` object.
y_t: Sample of label.
y_p: Sample of output.
Returns:
A `Metric` object. | github-repos |
def _hash_outputs(self, index, sighash_type):
if sighash_type == shared.SIGHASH_ALL:
outputs = ByteData()
for tx_out in self.tx_outs:
outputs += tx_out.to_bytes()
return utils.hash256(outputs.to_bytes())
elif (sighash_type == shared.SIGHASH_SINGLE
and index < len(self.tx_outs)):
return utils.hash256(self.tx_outs[index].to_bytes())
else:
raise NotImplementedError(
'I refuse to implement the SIGHASH_SINGLE bug.') | BIP143 hashOutputs implementation
Args:
index (int): index of input being signed
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
Returns:
(bytes): the hashOutputs, a 32 byte hash | juraj-google-style |
def get_model(self, model, model_id):
return self._store.find_record(self._get_model_class(model), int(model_id)) | Get a single model from the server.
Args:
model (string): The class as a string.
model_id (string): The integer ID as a string.
Returns:
:class:`cinder_data.model.CinderModel`: A instance of the model. | codesearchnet |
def ParseMessageRow(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = TangoAndroidMessageEventData()
event_data.message_identifier = self._GetRowValue(query_hash, row, 'msg_id')
event_data.direction = self._GetRowValue(query_hash, row, 'direction')
timestamp = self._GetRowValue(query_hash, row, 'create_time')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'send_time')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_SENT)
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a message row from the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query. | codesearchnet |
def validate_callbacks(input_callbacks, optimizer):
if input_callbacks:
for callback in input_callbacks:
if isinstance(callback, (callbacks.LearningRateScheduler, callbacks.ReduceLROnPlateau)):
if not isinstance(optimizer, optimizer_v2.OptimizerV2):
raise ValueError('You must specify a Keras Optimizer V2 when using %s callback with DistributionStrategy.' % callback)
if isinstance(callback, callbacks.TensorBoard):
if getattr(callback, 'write_grads', False):
logging.warning(UserWarning('`write_grads` in the TensorBoard callback is not supported when using DistributionStrategy. Setting `write_grads` to `False`.'))
callback.write_grads = False | Validate whether given callbacks are supported by DistributionStrategy.
Args:
input_callbacks: List of callbacks passed by the user to fit.
optimizer: Optimizer instance used to train the model.
Raises:
ValueError: If `LearningRateScheduler` or `ReduceLROnPlateau` is one of the
callbacks passed.
ValueError: If `write_grads` is one of the parameters passed as part of the
TensorBoard callback. | github-repos |
def retrieve_api_token(self):
payload = self.oauth2_manager.get_access_token_params(refresh_token=self.refresh_token)
response = requests.post(self.oauth2_manager.access_token_url, json=payload)
response.raise_for_status()
response_json = json.loads(response.text)
return response_json['access_token'] | Retrieve the access token from AVS.
This function is memoized, so the
value returned by the function will be remembered and returned by
subsequent calls until the memo expires. This is because the access
token lasts for one hour, then a new token needs to be requested.
Decorators:
helpers.expiring_memo
Returns:
str -- The access token for communicating with AVS | codesearchnet |
def sleep(self, seconds):
until = time.time() + seconds
try:
while True:
self._service_futures([], until)
except TimeoutError:
return | Services all futures while waiting
Args:
seconds (float): Time to wait | juraj-google-style |
def get_filename(self, **kwargs):
if self.filename_parser is None:
raise RuntimeError("No filename pattern or specific filename provided")
output_filename = self.filename_parser.compose(kwargs)
dirname = os.path.dirname(output_filename)
if dirname and not os.path.isdir(dirname):
LOG.info("Creating output directory: {}".format(dirname))
os.makedirs(dirname)
return output_filename | Create a filename where output data will be saved.
Args:
kwargs (dict): Attributes and other metadata to use for formatting
the previously provided `filename`. | juraj-google-style |
def rpc(self, address, rpc_id):
if ((address in self.mock_rpcs) and (rpc_id in self.mock_rpcs[address])):
value = self.mock_rpcs[address][rpc_id]
return value
result = self._call_rpc(address, rpc_id, bytes())
if (len(result) != 4):
self.warn((u'RPC 0x%X on address %d: response had invalid length %d not equal to 4' % (rpc_id, address, len(result))))
if (len(result) < 4):
raise HardwareError('Response from RPC was not long enough to parse as an integer', rpc_id=rpc_id, address=address, response_length=len(result))
if (len(result) > 4):
result = result[:4]
(res,) = struct.unpack('<L', result)
return res | Call an RPC and receive the result as an integer.
If the RPC does not properly return a 32 bit integer, raise a warning
unless it cannot be converted into an integer at all, in which case
a HardwareError is thrown.
Args:
address (int): The address of the tile we want to call the RPC
on
rpc_id (int): The id of the RPC that we want to call
Returns:
int: The result of the RPC call. If the rpc did not succeed
an error is thrown instead. | codesearchnet |
def width(self):
return sum((reg.size for reg in (self.qregs + self.cregs))) | Return number of qubits plus clbits in circuit.
Returns:
int: Width of circuit. | codesearchnet |
def __init__(self, pubsub_source_descriptors: List[PubSubSourceDescriptor], with_attributes: bool=False):
self.pubsub_source_descriptors = pubsub_source_descriptors
self.with_attributes = with_attributes
for descriptor in self.pubsub_source_descriptors:
match_descriptor = re.match(PUBSUB_DESCRIPTOR_REGEXP, descriptor.source)
if not match_descriptor:
raise ValueError('PubSub source descriptor must be in the form "projects/<project>/topics/<topic>" or "projects/<project>/subscription/<subscription>" (got %r).' % descriptor.source) | Initializes ``PubSubMultipleReader``.
Args:
pubsub_source_descriptors: List of Cloud Pub/Sub topics or subscriptions
of type `~PubSubSourceDescriptor`.
with_attributes:
True - input elements will be :class:`~PubsubMessage` objects.
False - input elements will be of type ``bytes`` (message data only). | github-repos |
async def get_records_for_zone(self, dns_zone, params=None):
managed_zone = self.get_managed_zone(dns_zone)
url = f'{self._base_url}/managedZones/{managed_zone}/rrsets'
if (not params):
params = {}
if ('fields' not in params):
params['fields'] = 'rrsets/name,rrsets/kind,rrsets/rrdatas,rrsets/type,rrsets/ttl,nextPageToken'
next_page_token = None
records = []
while True:
if next_page_token:
params['pageToken'] = next_page_token
response = (await self.get_json(url, params=params))
records.extend(response['rrsets'])
next_page_token = response.get('nextPageToken')
if (not next_page_token):
break
logging.info(f'Found {len(records)} rrsets for zone "{dns_zone}".')
return records | Get all resource record sets for a managed zone, using the DNS zone.
Args:
dns_zone (str): Desired DNS zone to query.
params (dict): (optional) Additional query parameters for HTTP
requests to the GDNS API.
Returns:
list of dicts representing rrsets. | codesearchnet |
def __init__(self, column_names=None, column_sizes=None):
super(CLITabularTableView, self).__init__()
self._columns = column_names or []
self._column_sizes = column_sizes or []
self._number_of_columns = len(self._columns)
self._rows = [] | Initializes a command line interface tabular table view.
Args:
column_names (Optional[list[str]]): column names.
column_sizes (Optional[list[int]]): minimum column sizes, in number of
characters. If a column name or row value is larger than the
minimum column size the column will be enlarged. Note that the
minimum columns size will be rounded up to the number of spaces
of the next tab. | juraj-google-style |
def _create_request(self, verb, url, query_params=None, data=None, send_as_file=False):
kwargs = {
'headers': self._default_headers,
'params': query_params,
'timeout': self._req_timeout,
}
if MultiRequest._VERB_POST == verb:
if send_as_file:
kwargs['files'] = {'file': data}
else:
kwargs['data'] = data
return PreparedRequest(partial(self._session.post, url, **kwargs), url)
elif MultiRequest._VERB_GET == verb:
return PreparedRequest(partial(self._session.get, url, **kwargs), url)
else:
raise InvalidRequestError('Invalid verb {0}'.format(verb)) | Helper method to create a single post/get requests.
Args:
verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET
url - A string URL
query_params - None or a dict
data - None or a string or a dict
send_as_file - A boolean, should the data be sent as a file.
Returns:
requests.PreparedRequest
Raises:
InvalidRequestError - if an invalid verb is passed in. | juraj-google-style |
def set_state_tree(self, state_tree):
for k, v in state_tree.items():
path_value_dict = self._flatten_nested_dict(v)
if k == 'trainable_variables':
self._assign_variable_values(self.trainable_variables, path_value_dict)
elif k == 'non_trainable_variables':
self._assign_variable_values(self.non_trainable_variables, path_value_dict)
elif k == 'optimizer_variables':
self._assign_variable_values(self.optimizer.variables, path_value_dict)
elif k == 'metrics_variables':
self._assign_variable_values(self.metrics_variables, path_value_dict)
else:
raise ValueError(f'Unknown variable name: {k}') | Assigns values to variables of the model.
This method takes a dictionary of nested variable values, which
represents the state tree of the model, and assigns them to the
corresponding variables of the model. The dictionary keys represent the
variable names (e.g., `'trainable_variables'`, `'optimizer_variables'`),
and the values are nested dictionaries containing the variable
paths and their corresponding values.
Args:
state_tree: A dictionary representing the state tree of the model.
The keys are the variable names, and the values are nested
dictionaries representing the variable paths and their values. | github-repos |
def download_file(self, path, target_path):
self.__validate_storage_path(path)
entity = self.api_client.get_entity_by_query(path=path)
if (entity['entity_type'] != 'file'):
raise StorageArgumentException('Only file entities can be downloaded')
signed_url = self.api_client.get_signed_url(entity['uuid'])
response = self.api_client.download_signed_url(signed_url)
with open(target_path, 'wb') as output:
for chunk in response.iter_content(chunk_size=1024):
output.write(chunk) | Download a file from storage service to local disk.
Existing files on the target path will be overwritten.
The download is not recursive, as it only works on files.
Args:
path (str): The path of the entity to be downloaded. Must start with a '/'.
Returns:
None
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes | codesearchnet |
def diff_main(self, text1, text2, checklines=True, deadline=None):
if (deadline == None):
if (self.Diff_Timeout <= 0):
deadline = sys.maxsize
else:
deadline = (time.time() + self.Diff_Timeout)
if ((text1 == None) or (text2 == None)):
raise ValueError('Null inputs. (diff_main)')
if (text1 == text2):
if text1:
return [(self.DIFF_EQUAL, text1)]
return []
commonlength = self.diff_commonPrefix(text1, text2)
commonprefix = text1[:commonlength]
text1 = text1[commonlength:]
text2 = text2[commonlength:]
commonlength = self.diff_commonSuffix(text1, text2)
if (commonlength == 0):
commonsuffix = ''
else:
commonsuffix = text1[(- commonlength):]
text1 = text1[:(- commonlength)]
text2 = text2[:(- commonlength)]
diffs = self.diff_compute(text1, text2, checklines, deadline)
if commonprefix:
diffs[:0] = [(self.DIFF_EQUAL, commonprefix)]
if commonsuffix:
diffs.append((self.DIFF_EQUAL, commonsuffix))
self.diff_cleanupMerge(diffs)
return diffs | Find the differences between two texts. Simplifies the problem by
stripping any common prefix or suffix off the texts before diffing.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Optional speedup flag. If present and false, then don't run
a line-level diff first to identify the changed areas.
Defaults to true, which does a faster, slightly less optimal diff.
deadline: Optional time when the diff should be complete by. Used
internally for recursive calls. Users should set DiffTimeout instead.
Returns:
Array of changes. | codesearchnet |
def GetMessages(self, files):
result = {}
for file_name in files:
file_desc = self.pool.FindFileByName(file_name)
for desc in file_desc.message_types_by_name.values():
result[desc.full_name] = self.GetPrototype(desc)
for extension in file_desc.extensions_by_name.values():
if (extension.containing_type.full_name not in self._classes):
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type.full_name]
extended_class.RegisterExtension(extension)
return result | Gets all the messages from a specified file.
This will find and resolve dependencies, failing if the descriptor
pool cannot satisfy them.
Args:
files: The file names to extract messages from.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message. | codesearchnet |
def hottestmonth(self, value=None):
if value is not None:
try:
value = int(value)
except ValueError:
raise ValueError('value {} need to be of type int '
'for field `hottestmonth`'.format(value))
if value < 1:
raise ValueError('value need to be greater or equal 1 '
'for field `hottestmonth`')
if value > 12:
raise ValueError('value need to be smaller 12 '
'for field `hottestmonth`')
self._hottestmonth = value | Corresponds to IDD Field `hottestmonth`
Args:
value (int): value for IDD Field `hottestmonth`
value >= 1
value <= 12
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | juraj-google-style |
def get_event_position(voevent, index=0):
od = voevent.WhereWhen.ObsDataLocation[index]
ac = od.ObservationLocation.AstroCoords
ac_sys = voevent.WhereWhen.ObsDataLocation.ObservationLocation.AstroCoordSystem
sys = ac_sys.attrib['id']
if hasattr(ac.Position2D, 'Name1'):
assert ((ac.Position2D.Name1 == 'RA') and (ac.Position2D.Name2 == 'Dec'))
posn = Position2D(ra=float(ac.Position2D.Value2.C1), dec=float(ac.Position2D.Value2.C2), err=float(ac.Position2D.Error2Radius), units=ac.Position2D.attrib['unit'], system=sys)
return posn | Extracts the `AstroCoords` from a given `WhereWhen.ObsDataLocation`.
Note that a packet may include multiple 'ObsDataLocation' entries
under the 'WhereWhen' section, for example giving locations of an object
moving over time. Most packets will have only one, however, so the
default is to just return co-ords extracted from the first.
Args:
voevent (:class:`voeventparse.voevent.Voevent`): Root node of the
VOEvent etree.
index (int): Index of the ObsDataLocation to extract AstroCoords from.
Returns:
Position (:py:class:`.Position2D`): The sky position defined in the
ObsDataLocation. | codesearchnet |
def load_pdb(self, pdb_id, mapped_chains=None, pdb_file=None, file_type=None, is_experimental=True, set_as_representative=False, representative_chain=None, force_rerun=False):
if self.structures.has_id(pdb_id):
if force_rerun:
existing = self.structures.get_by_id(pdb_id)
self.structures.remove(existing)
else:
log.debug('{}: PDB ID already present in list of structures'.format(pdb_id))
pdb = self.structures.get_by_id(pdb_id)
if pdb_file:
pdb.load_structure_path(pdb_file, file_type)
if mapped_chains:
pdb.add_mapped_chain_ids(mapped_chains)
if (not self.structures.has_id(pdb_id)):
if is_experimental:
pdb = PDBProp(ident=pdb_id, mapped_chains=mapped_chains, structure_path=pdb_file, file_type=file_type)
else:
pdb = StructProp(ident=pdb_id, mapped_chains=mapped_chains, structure_path=pdb_file, file_type=file_type)
self.structures.append(pdb)
if set_as_representative:
pdb.parse_structure()
self._representative_structure_setter(structprop=pdb, keep_chain=representative_chain, force_rerun=force_rerun)
return self.structures.get_by_id(pdb_id) | Load a structure ID and optional structure file into the structures attribute.
Args:
pdb_id (str): PDB ID
mapped_chains (str, list): Chain ID or list of IDs which you are interested in
pdb_file (str): Path to PDB file
file_type (str): Type of PDB file
is_experimental (bool): If this structure file is experimental
set_as_representative (bool): If this structure should be set as the representative structure
representative_chain (str): If ``set_as_representative`` is ``True``, provide the representative chain ID
force_rerun (bool): If the PDB should be reloaded if it is already in the list of structures
Returns:
PDBProp: The object that is now contained in the structures attribute | codesearchnet |
def flatten_with_tuple_paths(structure, expand_composites=False):
return list(zip(yield_flat_paths(structure, expand_composites=expand_composites), flatten(structure, expand_composites=expand_composites))) | Returns a list of `(tuple_path, atom)` tuples.
The order of pairs produced matches that of `nest.flatten`. This allows you
to flatten a nested structure while keeping information about where in the
structure each atom was located. See `nest.yield_flat_paths`
for more information about tuple paths.
Args:
structure: the nested structure to flatten.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A list of `(tuple_path, atom)` tuples. Each `tuple_path` is a tuple
of indices and/or dictionary keys that uniquely specify the path to
`atom` within `structure`. | github-repos |
def find_causal_link(self, direction, mechanism, purviews=False, allow_neg=False):
purviews = self.potential_purviews(direction, mechanism, purviews)
if (not purviews):
max_ria = _null_ac_ria(self.mechanism_state(direction), direction, mechanism, None)
else:
max_ria = max((self.find_mip(direction, mechanism, purview, allow_neg) for purview in purviews))
return CausalLink(max_ria) | Return the maximally irreducible cause or effect ratio for a
mechanism.
Args:
direction (str): The temporal direction, specifying cause or
effect.
mechanism (tuple[int]): The mechanism to be tested for
irreducibility.
Keyword Args:
purviews (tuple[int]): Optionally restrict the possible purviews
to a subset of the subsystem. This may be useful for _e.g._
finding only concepts that are "about" a certain subset of
nodes.
Returns:
CausalLink: The maximally-irreducible actual cause or effect. | codesearchnet |
def capitalcase(string):
string = str(string)
if not string:
return string
return uppercase(string[0]) + string[1:] | Convert string into capital case.
First letters will be uppercase.
Args:
string: String to convert.
Returns:
string: Capital case string. | juraj-google-style |
def _save_and_write_assets(self, assets_collection_to_add=None):
asset_filename_map = _maybe_save_assets(_add_asset_to_collection, assets_collection_to_add)
if not asset_filename_map:
tf_logging.info('No assets to write.')
return
copy_assets_to_destination_dir(asset_filename_map, self._export_dir, self._saved_asset_files) | Saves asset to the meta graph and writes asset files to disk.
Args:
assets_collection_to_add: The collection where the asset paths are setup. | github-repos |
def SetUsername(self, username):
self._username = username
logger.debug('Elasticsearch username: {0!s}'.format(username)) | Sets the username.
Args:
username (str): username to authenticate with. | juraj-google-style |
def _extract_mnist_labels(filename, num_labels):
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(num_labels)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels | Extract labels from an MNIST file into integers.
Args:
filename: The path to an MNIST labels file.
num_labels: The number of labels in the file.
Returns:
A int64 numpy array of shape [num_labels] | codesearchnet |
def gzip_dir(path, compresslevel=6):
for f in os.listdir(path):
full_f = os.path.join(path, f)
if (not f.lower().endswith('gz')):
with open(full_f, 'rb') as f_in, GzipFile('{}.gz'.format(full_f), 'wb', compresslevel=compresslevel) as f_out:
shutil.copyfileobj(f_in, f_out)
shutil.copystat(full_f, '{}.gz'.format(full_f))
os.remove(full_f) | Gzips all files in a directory. Note that this is different from
shutil.make_archive, which creates a tar archive. The aim of this method
is to create gzipped files that can still be read using common Unix-style
commands like zless or zcat.
Args:
path (str): Path to directory.
compresslevel (int): Level of compression, 1-9. 9 is default for
GzipFile, 6 is default for gzip. | codesearchnet |
def FromString(cls, desc):
parse_exp = (Literal(u'run_time').suppress() + time_interval(u'interval'))
try:
data = parse_exp.parseString(desc)
return TimeBasedStopCondition(data[u'interval'][0])
except ParseException:
raise ArgumentError(u'Could not parse time based stop condition') | Parse this stop condition from a string representation.
The string needs to match:
run_time number [seconds|minutes|hours|days|months|years]
Args:
desc (str): The description
Returns:
TimeBasedStopCondition | codesearchnet |
def get_package(name, version, paths=None):
if isinstance(version, basestring):
range_ = VersionRange(('==%s' % version))
else:
range_ = VersionRange.from_version(version, '==')
it = iter_packages(name, range_, paths)
try:
return it.next()
except StopIteration:
return None | Get an exact version of a package.
Args:
name (str): Name of the package, eg 'maya'.
version (Version or str): Version of the package, eg '1.0.0'
paths (list of str, optional): paths to search for package, defaults
to `config.packages_path`.
Returns:
`Package` object, or None if the package was not found. | codesearchnet |
def search_features(self, search):
if isinstance(search, string_types):
search = [search]
search = [s.replace('*', '.*') for s in search]
cols = list(self.data.columns)
results = []
for s in search:
results.extend([f for f in cols if re.match((s + '$'), f)])
return list(set(results)) | Returns all features that match any of the elements in the input
list.
Args:
search (str, list): A string or list of strings defining the query.
Returns:
A list of matching feature names. | codesearchnet |
def remove(self, word):
self._dictionary.pop(word.lower())
self._update_dictionary() | Remove a word from the word frequency list
Args:
word (str): The word to remove | juraj-google-style |
def add_it(workbench, file_list, labels):
md5s = []
for filename in file_list:
if filename != '.DS_Store':
with open(filename, 'rb') as pe_file:
base_name = os.path.basename(filename)
md5 = workbench.store_sample(pe_file.read(), base_name, 'exe')
workbench.add_node(md5, md5[:6], labels)
md5s.append(md5)
return md5s | Add the given file_list to workbench as samples, also add them as nodes.
Args:
workbench: Instance of Workbench Client.
file_list: list of files.
labels: labels for the nodes.
Returns:
A list of md5s. | juraj-google-style |
def generate_contour_data(pid):
if isinstance(pid, GenInput):
pid = pid.return_dict()
begin_time = time.time()
WORKING_DIRECTORY = '.'
if ('WORKING_DIRECTORY' not in pid['general'].keys()):
pid['general']['WORKING_DIRECTORY'] = WORKING_DIRECTORY
running_process = GenProcess(**{**pid, **pid['generate_info']})
running_process.set_parameters()
running_process.run_snr()
file_out = FileReadOut(running_process.xvals, running_process.yvals, running_process.final_dict, **{**pid['general'], **pid['generate_info'], **pid['output_info']})
print('outputing file:', ((pid['general']['WORKING_DIRECTORY'] + '/') + pid['output_info']['output_file_name']))
getattr(file_out, (file_out.output_file_type + '_read_out'))()
print((time.time() - begin_time))
return | Main function for this program.
This will read in sensitivity_curves and binary parameters; calculate snrs
with a matched filtering approach; and then read the contour data out to a file.
Args:
pid (obj or dict): GenInput class or dictionary containing all of the input information for
the generation. See BOWIE documentation and example notebooks for usage of
this class. | codesearchnet |
def get_pipeline(self, name):
check.str_param(name, 'name')
if name in self._pipeline_cache:
return self._pipeline_cache[name]
try:
pipeline = self.pipeline_dict[name]()
except KeyError:
raise DagsterInvariantViolationError(
'Could not find pipeline "{name}". Found: {pipeline_names}.'.format(
name=name,
pipeline_names=', '.join(
[
'"{pipeline_name}"'.format(pipeline_name=pipeline_name)
for pipeline_name in self.pipeline_dict.keys()
]
),
)
)
check.invariant(
pipeline.name == name,
'Name does not match. Name in dict {name}. Name in pipeline {pipeline.name}'.format(
name=name, pipeline=pipeline
),
)
self._pipeline_cache[name] = check.inst(
pipeline,
PipelineDefinition,
(
'Function passed into pipeline_dict with key {key} must return a '
'PipelineDefinition'
).format(key=name),
)
return pipeline | Get a pipeline by name. Only constructs that pipeline and caches it.
Args:
name (str): Name of the pipeline to retriever
Returns:
PipelineDefinition: Instance of PipelineDefinition with that name. | juraj-google-style |
def call(self, input_ids: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, token_type_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None, past_key_values_length=0, training: bool=False) -> tf.Tensor:
if input_ids is None and inputs_embeds is None:
raise ValueError('Need to provide either `input_ids` or `input_embeds`.')
if input_ids is not None:
check_embeddings_within_bounds(input_ids, self.config.vocab_size)
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
position_ids = tf.expand_dims(tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = inputs_embeds + position_embeds + token_type_embeds
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings | Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor. | github-repos |
def invoke_one(self, line):
funname = line.pop(0)
context = self.contexts[(- 1)]
func = self.find_function(context, funname)
if isinstance(func, dict):
self.contexts.append(func)
self._check_initialize_context()
return (None, line, False)
if (func.takes_cmdline is True):
val = func(line)
line = []
else:
(posargs, kwargs, line) = self.process_arguments(func, line)
if (inspect.isclass(func) and (not func.metadata.spec_filled(posargs, kwargs))):
raise ValidationError('Not enough parameters specified to call function', function=func.metadata.name, signature=func.metadata.signature())
val = func(*posargs, **kwargs)
finished = True
if (func.finalizer is True):
self.contexts.pop()
elif (val is not None):
if func.metadata.returns_data():
val = func.metadata.format_returnvalue(val)
else:
self.contexts.append(val)
self._check_initialize_context()
finished = False
val = None
return (val, line, finished) | Invoke a function given a list of arguments with the function listed first.
The function is searched for using the current context on the context stack
and its annotated type information is used to convert all of the string parameters
passed in line to appropriate python types.
Args:
line (list): The list of command line arguments.
Returns:
(object, list, bool): A tuple containing the return value of the function, if any,
a boolean specifying if the function created a new context (False if a new context
was created) and a list with the remainder of the command line if this function
did not consume all arguments. | codesearchnet |
def has_all_nonzero_segment_lengths(neuron, threshold=0.0):
bad_ids = []
for sec in _nf.iter_sections(neuron):
p = sec.points
for (i, s) in enumerate(zip(p[:(- 1)], p[1:])):
if (segment_length(s) <= threshold):
bad_ids.append((sec.id, i))
return CheckResult((len(bad_ids) == 0), bad_ids) | Check presence of neuron segments with length not above threshold
Arguments:
neuron(Neuron): The neuron object to test
threshold(float): value above which a segment length is considered to
be non-zero
Returns:
CheckResult with result including list of (section_id, segment_id)
of zero length segments | codesearchnet |
def file(self, md5=None, sha1=None, sha256=None, **kwargs):
indicator_obj = File(md5, sha1, sha256, **kwargs)
return self._indicator(indicator_obj) | Add File data to Batch object.
.. note:: A least one file hash value must be specified.
Args:
md5 (str, optional): The md5 value for this Indicator.
sha1 (str, optional): The sha1 value for this Indicator.
sha256 (str, optional): The sha256 value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
size (str, kwargs): The file size for this Indicator.
xid (str, kwargs): The external id for this Indicator.
Returns:
obj: An instance of File. | codesearchnet |
def device(name):
ensure_initialized()
return context().device(name) | Context-manager to force placement of operations and Tensors on a device.
Example:
```python
with tf.device('gpu:0'):
with tf.device('cpu:0'):
shape = tf.constant([], dtype=tf.int32)
x = tf.random.truncated_normal(shape, tf.float32)
```
will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
operation runs on GPU 0.
Args:
name: Name of the device (see context().devices()), or None to perform
automatic placement.
Returns:
Context manager for setting the device. | github-repos |
def namespace_for_prefix(self, prefix):
try:
ni = self.__lookup_prefix(prefix)
except PrefixNotFoundError:
return None
else:
return ni.uri | Get the namespace the given prefix maps to.
Args:
prefix (str): The prefix
Returns:
str: The namespace, or None if the prefix isn't mapped to
anything in this set. | codesearchnet |
def parse_query(query_str):
def _generate_match_all_fields_query():
stripped_query_str = ' '.join(query_str.replace(':', ' ').split())
return {'multi_match': {'query': stripped_query_str, 'fields': ['_all'], 'zero_terms_query': 'all'}}
if (not isinstance(query_str, six.text_type)):
query_str = six.text_type(query_str.decode('utf-8'))
logger.info((('Parsing: "' + query_str) + '".'))
parser = StatefulParser()
rst_visitor = RestructuringVisitor()
es_visitor = ElasticSearchVisitor()
try:
(unrecognized_text, parse_tree) = parser.parse(query_str, Query)
if unrecognized_text:
msg = (((('Parser returned unrecognized text: "' + unrecognized_text) + '" for query: "') + query_str) + '".')
if ((query_str == unrecognized_text) and (parse_tree is None)):
logger.warn(msg)
return _generate_match_all_fields_query()
else:
msg += 'Continuing with recognized parse tree.'
logger.warn(msg)
except SyntaxError as e:
logger.warn((((('Parser syntax error (' + six.text_type(e)) + ') with query: "') + query_str) + '". Continuing with a match_all with the given query.'))
return _generate_match_all_fields_query()
try:
restructured_parse_tree = parse_tree.accept(rst_visitor)
logger.debug(('Parse tree: \n' + emit_tree_format(restructured_parse_tree)))
except Exception as e:
logger.exception((((RestructuringVisitor.__name__ + ' crashed') + ((': ' + six.text_type(e)) + '.')) if six.text_type(e) else '.'))
return _generate_match_all_fields_query()
try:
es_query = restructured_parse_tree.accept(es_visitor)
except Exception as e:
logger.exception((((ElasticSearchVisitor.__name__ + ' crashed') + ((': ' + six.text_type(e)) + '.')) if six.text_type(e) else '.'))
return _generate_match_all_fields_query()
if (not es_query):
return _generate_match_all_fields_query()
return es_query | Drives the whole logic, by parsing, restructuring and finally, generating an ElasticSearch query.
Args:
query_str (six.text_types): the given query to be translated to an ElasticSearch query
Returns:
six.text_types: Return an ElasticSearch query.
Notes:
In case there's an error, an ElasticSearch `multi_match` query is generated with its `query` value, being the
query_str argument. | codesearchnet |
def _model_to_dict(model, ignore):
return {attr: value for attr, value in model.__dict__.items()
if not attr.startswith('_') and attr not in ignore} | Convert OSS model to dict.
Args:
model (oss2.models.RequestResult): Model.
ignore (tuple of str): Keys to not insert to dict.
Returns:
dict: Model dict version. | juraj-google-style |
def __init__(self, addr, raw_addr, name=None, rssi=0):
self.addr = addr
self.raw_addr = raw_addr
self.name = name
self.rssi = rssi
self._age = time.time() | Initialise a new ScanResult.
Args:
addr (str): Device hardware address in xx:xx:xx:xx:xx:xx format.
raw_addr (bytearray): Device hardware address as raw bytes.
name (str): Device name (if available) as ASCII text.
rssi (float): Latest RSSI from the scan result for the device, if any. | juraj-google-style |
def _parse(json_str: str, primitive_cls: Type[Decimal]) -> Decimal:
decimal_value = json.loads(json_str, parse_float=decimal.Decimal, parse_int=decimal.Decimal)
if not isinstance(decimal_value, decimal.Decimal):
raise ValueError('Invalid Decimal format')
if not decimal_value.is_finite():
raise ValueError('Decimal out of range.')
return cast(Any, primitive_cls)(value=json_str) | Parses the json_str into a Decimal FHIR primitive protobuf message.
Args:
json_str: The raw JSON string to parse.
primitive_cls: The type of FHIR primitive to parse into.
Returns:
A FHIR primitive Decimal protobuf message. | github-repos |
def run_instrumentation_test(self, device, package, options=None, prefix=None, runner=None):
instrumentation_block = [_InstrumentationBlock(prefix=prefix)]
def parse_instrumentation(raw_line):
line = raw_line.rstrip().decode('utf-8')
logging.info(line)
instrumentation_block[0] = self._parse_line(instrumentation_block[0], line)
device.adb.instrument(package=package, options=options, runner=runner, handler=parse_instrumentation)
return self._finish_parsing(instrumentation_block[0]) | Runs instrumentation tests on a device and creates test records.
Args:
device: AndroidDevice, the device to run instrumentation tests on.
package: string, the package name of the instrumentation tests.
options: dict, Instrumentation options for the instrumentation
tests.
prefix: string, an optional prefix for parser output for
distinguishing between instrumentation test runs.
runner: string, the runner to use for the instrumentation package,
default to DEFAULT_INSTRUMENTATION_RUNNER.
Returns:
A boolean indicating whether or not all the instrumentation test
methods passed.
Raises:
TestError if the instrumentation run crashed or if parsing the
output failed. | github-repos |
def handle(self, message):
logger.debug(message)
if Utilities.isNotEmpty(message['metadata']['opts']):
target = message['metadata']['opts']['target']
thread = message['metadata']['opts'].get('thread')
pattern = re.compile('^@([a-zA-Z0-9._-]+)|\s@([a-zA-Z0-9._-]+)')
matches = re.findall(pattern, message['text'])
matches = set(matches)
logger.debug('MATCHES!!!! {}'.format(matches))
for match in matches:
if isinstance(match, tuple):
if match[0] != '':
match = match[0]
else:
match = match[1]
if not match.startswith('@'):
match = '@' + match
message['text'] = message['text'].replace(
match,
'<{}>'.format(match)
)
pattern = re.compile('
matches = re.findall(pattern, message['text'])
matches = set(matches)
for match in matches:
channel_id = self.botThread.get_channel_id_by_name(match)
if channel_id:
message['text'] = message['text'].replace(
'
'<
channel_id,
match
)
)
if (message['text'].find('<<@') != -1
or message['text'].find('<<
message['text'] = message['text'].replace('<<', '<')
message['text'] = message['text'].replace('>>', '>')
if target.startswith('U'):
target = self.botThread.get_dm_channel(target)
attachment = message['metadata']['opts'].get('attachment')
if attachment:
text = message['metadata']['opts'].get('fallback')
attachment = self.build_attachment(
text, target, attachment, thread)
self.botThread.post_attachment(attachment)
else:
self.botThread.slack_client.rtm_send_message(
target, message['text'], thread=thread) | Attempts to send a message to the specified destination in Slack.
Extends Legobot.Lego.handle()
Args:
message (Legobot.Message): message w/ metadata to send. | juraj-google-style |
def size(self, st_size):
self._check_positive_int(st_size)
current_size = self.st_size or 0
self.filesystem.change_disk_usage(
st_size - current_size, self.name, self.st_dev)
if self._byte_contents:
if st_size < current_size:
self._byte_contents = self._byte_contents[:st_size]
else:
if IS_PY2:
self._byte_contents = '%s%s' % (
self._byte_contents, '\0' * (st_size - current_size))
else:
self._byte_contents += b'\0' * (st_size - current_size)
self.st_size = st_size
self.epoch += 1 | Resizes file content, padding with nulls if new size exceeds the
old size.
Args:
st_size: The desired size for the file.
Raises:
IOError: if the st_size arg is not a non-negative integer
or if st_size exceeds the available file system space | juraj-google-style |
def __init__(self, channel):
self.BatchAnnotateImages = channel.unary_unary(
"/google.cloud.vision.v1p1beta1.ImageAnnotator/BatchAnnotateImages",
request_serializer=google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_image__annotator__pb2.BatchAnnotateImagesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_image__annotator__pb2.BatchAnnotateImagesResponse.FromString,
) | Constructor.
Args:
channel: A grpc.Channel. | juraj-google-style |
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
try:
executor = None
parser = build_args()
args = parser.parse_args(args=argv)
model = DeviceModel()
parser = SensorGraphFileParser()
parser.parse_file(args.sensor_graph)
parser.compile(model)
if not args.disable_optimizer:
opt = SensorGraphOptimizer()
opt.optimize(parser.sensor_graph, model=model)
graph = parser.sensor_graph
sim = SensorGraphSimulator(graph)
for stop in args.stop:
sim.stop_condition(stop)
for watch in args.watch:
watch_sel = DataStreamSelector.FromString(watch)
graph.sensor_log.watch(watch_sel, watch_printer)
if args.semihost_device is not None:
executor = SemihostedRPCExecutor(args.port, args.semihost_device)
sim.rpc_executor = executor
for mock in args.mock_rpc:
slot, rpc_id, value = process_mock_rpc(mock)
sim.rpc_executor.mock(slot, rpc_id, value)
for stim in args.stimulus:
sim.stimulus(stim)
graph.load_constants()
if args.trace is not None:
sim.record_trace()
try:
if args.connected:
sim.step(user_connected, 8)
sim.run(accelerated=not args.realtime)
except KeyboardInterrupt:
pass
if args.trace is not None:
sim.trace.save(args.trace)
finally:
if executor is not None:
executor.hw.close()
return 0 | Main entry point for iotile sensorgraph simulator.
This is the iotile-sgrun command line program. It takes
an optional set of command line parameters to allow for
testing.
Args:
argv (list of str): An optional set of command line
parameters. If not passed, these are taken from
sys.argv. | juraj-google-style |
def files_comments_add(self, *, comment: str, file: str, **kwargs) -> SlackResponse:
kwargs.update({'comment': comment, 'file': file})
return self.api_call('files.comments.add', json=kwargs) | Add a comment to an existing file.
Args:
comment (str): The body of the comment.
e.g. 'Everyone should take a moment to read this file.'
file (str): The file id. e.g. 'F1234467890' | codesearchnet |
def fail_all_requests(self, error):
for state in self.scheduler.active_requests.values():
self._handle_request_error(error, state)
self.scheduler.finish_request(state.request_id)
for req_id in list(self.scheduler.waiting_requests.keys()):
state = self.scheduler.waiting_requests.pop(req_id)
self._handle_request_error(error, state)
self.scheduler.waiting_requests_order.clear() | Fail all active requests with the given error.
Args:
error: The error to report in the failure message | github-repos |
def post_cutout(self, token, channel,
x_start,
y_start,
z_start,
data,
resolution=0):
datatype = self.get_proj_info(token)['channels'][channel]['datatype']
if data.dtype.name != datatype:
data = data.astype(datatype)
data = numpy.rollaxis(data, 1)
data = numpy.rollaxis(data, 2)
if six.PY3 or data.nbytes > 1.5e9:
ul_func = self._post_cutout_no_chunking_npz
else:
ul_func = self._post_cutout_no_chunking_blosc
if data.size < self._chunk_threshold:
return ul_func(token, channel, x_start,
y_start, z_start, data,
resolution)
return self._post_cutout_with_chunking(token, channel,
x_start, y_start, z_start, data,
resolution, ul_func) | Post a cutout to the server.
Arguments:
token (str)
channel (str)
x_start (int)
y_start (int)
z_start (int)
data (numpy.ndarray): A numpy array of data. Pass in (x, y, z)
resolution (int : 0): Resolution at which to insert the data
Returns:
bool: True on success
Raises:
RemoteDataUploadError: if there's an issue during upload. | juraj-google-style |
def validate_yaml(self, properties):
validator = OurValidator(schema)
if not validator.validate(properties):
for key, value in validator.errors.items():
if any(['unallowed value' in v for v in value]):
print(('{key} has an illegal value. Allowed values are {values} and are case '
'sensitive.').format(key=key, values=schema[key]['allowed']))
raise ValueError(validator.errors) | Validate the parsed YAML file for adherance to the ChemKED format.
Arguments:
properties (`dict`): Dictionary created from the parsed YAML file
Raises:
`ValueError`: If the YAML file cannot be validated, a `ValueError` is raised whose
string contains the errors that are present. | juraj-google-style |
def GetSOAPHeaders(self, create_method):
header = create_method(self._SOAP_HEADER_CLASS % self._version)
header.clientCustomerId = self._adwords_client.client_customer_id
header.developerToken = self._adwords_client.developer_token
header.userAgent = ''.join([
self._adwords_client.user_agent,
googleads.common.GenerateLibSig(self._PRODUCT_SIG)])
header.validateOnly = self._adwords_client.validate_only
header.partialFailure = self._adwords_client.partial_failure
return header | Returns the SOAP headers required for request authorization.
Args:
create_method: The SOAP library specific method used to instantiate SOAP
objects.
Returns:
A SOAP object containing the headers. | juraj-google-style |
def __init__(self,
datastore_client,
work_type_entity_id):
self._datastore_client = datastore_client
self._work_type_entity_id = work_type_entity_id
self._work = {} | Initializes WorkPiecesBase class.
Args:
datastore_client: instance of CompetitionDatastoreClient.
work_type_entity_id: ID of the WorkType parent entity | juraj-google-style |
def isregex(value):
if not value:
return False
return any((isregex_expr(value), isinstance(value, retype))) | Returns ``True`` if the input argument object is a native
regular expression object, otherwise ``False``.
Arguments:
value (mixed): input value to test.
Returns:
bool | juraj-google-style |
def _do_parse(inp, fmt, encoding, force_types):
res = {}
_check_lib_installed(fmt, 'parse')
if (fmt == 'ini'):
cfg = configobj.ConfigObj(inp, encoding=encoding)
res = cfg.dict()
elif (fmt == 'json'):
if six.PY3:
inp = io.TextIOWrapper(inp, encoding=encoding)
res = json.load(inp, encoding=encoding)
elif (fmt == 'json5'):
if six.PY3:
inp = io.TextIOWrapper(inp, encoding=encoding)
res = json5.load(inp, encoding=encoding)
elif (fmt == 'toml'):
if (not _is_utf8(encoding)):
raise AnyMarkupError('toml is always utf-8 encoded according to specification')
if six.PY3:
inp = io.TextIOWrapper(inp, encoding=encoding)
res = toml.load(inp)
elif (fmt == 'xml'):
res = xmltodict.parse(inp, encoding=encoding)
elif (fmt == 'yaml'):
res = yaml.safe_load(inp)
else:
raise
return _ensure_proper_types(res, encoding, force_types) | Actually parse input.
Args:
inp: bytes yielding file-like object
fmt: format to use for parsing
encoding: encoding of `inp`
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, backend return value is used
Returns:
parsed `inp` (dict or list) containing unicode values
Raises:
various sorts of errors raised by used libraries while parsing | codesearchnet |
def batch(self, spec, batch_size):
raise NotImplementedError(f'{type(self).__name__}.batch') | Returns the TypeSpec representing a batch of values described by `spec`.
Args:
spec: The `TypeSpec` for an individual value.
batch_size: An `int` indicating the number of values that are batched
together, or `None` if the batch size is not known.
Returns:
A `TypeSpec` for a batch of values. | github-repos |
def get_params_from_sqlalchemy_url(db_url):
result = urlsplit(db_url)
return {'database': result.path[1:], 'host': result.hostname, 'port': result.port,
'username': result.username, 'password': result.password, 'driver': result.scheme} | Gets PostgreSQL database connection parameters from SQLAlchemy url
Args:
db_url (str): SQLAlchemy url
Returns:
Dict[str,Any]: Dictionary of database connection parameters | juraj-google-style |
def easeOutBounce(n):
_checkRange(n)
if n < (1/2.75):
return 7.5625 * n * n
elif n < (2/2.75):
n -= (1.5/2.75)
return 7.5625 * n * n + 0.75
elif n < (2.5/2.75):
n -= (2.25/2.75)
return 7.5625 * n * n + 0.9375
else:
n -= (2.65/2.75)
return 7.5625 * n * n + 0.984375 | A bouncing tween function that hits the destination and then bounces to rest.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine(). | juraj-google-style |
def _bbox(nodes):
left, bottom = np.min(nodes, axis=1)
right, top = np.max(nodes, axis=1)
return left, right, bottom, top | Get the bounding box for set of points.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): A set of points.
Returns:
Tuple[float, float, float, float]: The left, right,
bottom and top bounds for the box. | juraj-google-style |
def execute_managed_notebook(cls, nb_man, kernel_name, log_output=False, start_timeout=60, execution_timeout=None, **kwargs):
preprocessor = PapermillExecutePreprocessor(timeout=execution_timeout, startup_timeout=start_timeout, kernel_name=kernel_name, log=logger)
preprocessor.log_output = log_output
preprocessor.preprocess(nb_man, kwargs) | Performs the actual execution of the parameterized notebook locally.
Args:
nb (NotebookNode): Executable notebook object.
kernel_name (str): Name of kernel to execute the notebook against.
log_output (bool): Flag for whether or not to write notebook output to stderr.
start_timeout (int): Duration to wait for kernel start-up.
execution_timeout (int): Duration to wait before failing execution (default: never).
Note: The preprocessor concept in this method is similar to what is used
by `nbconvert`, and it is somewhat misleading here. The preprocesser
represents a notebook processor, not a preparation object. | codesearchnet |
def AddPathSegment(self, path_segment, scan_object):
if path_segment in self._path_segments:
raise ValueError('Path segment already set.')
if isinstance(scan_object, PathFilterScanTreeNode):
scan_object.parent = self
self._path_segments[path_segment] = scan_object | Adds a path segment.
Args:
path_segment: a string containing the path segment.
scan_object: a scan object, either a scan tree sub node (instance of
PathFilterScanTreeNode) or a string containing a path.
Raises:
ValueError: if the node already contains a scan object for
the path segment. | juraj-google-style |
def filter_by_analysis_period(self, analysis_period):
self._check_analysis_period(analysis_period)
_filtered_data = self.filter_by_moys(analysis_period.moys)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data | Filter a Data Collection based on an analysis period.
Args:
analysis period: A Ladybug analysis period
Return:
A new Data Collection with filtered data | juraj-google-style |
def alpha_blend(self, other):
fa = self.__a + other.__a - (self.__a * other.__a)
if fa==0: sa = 0
else: sa = min(1.0, self.__a/other.__a)
da = 1.0 - sa
sr, sg, sb = [v * sa for v in self.__rgb]
dr, dg, db = [v * da for v in other.__rgb]
return Color((sr+dr, sg+dg, sb+db), 'rgb', fa, self.__wref) | Alpha-blend this color on the other one.
Args:
:other:
The grapefruit.Color to alpha-blend with this one.
Returns:
A grapefruit.Color instance which is the result of alpha-blending
this color on the other one.
>>> c1 = Color.from_rgb(1, 0.5, 0, 0.2)
>>> c2 = Color.from_rgb(1, 1, 1, 0.8)
>>> c3 = c1.alpha_blend(c2)
>>> c3
Color(1.0, 0.875, 0.75, 0.84) | juraj-google-style |
def do_keygen(args):
if (args.key_name is not None):
key_name = args.key_name
else:
key_name = 'validator'
key_dir = get_key_dir()
if (not os.path.exists(key_dir)):
raise CliException('Key directory does not exist: {}'.format(key_dir))
priv_filename = os.path.join(key_dir, (key_name + '.priv'))
pub_filename = os.path.join(key_dir, (key_name + '.pub'))
if (not args.force):
file_exists = False
for filename in [priv_filename, pub_filename]:
if os.path.exists(filename):
file_exists = True
print('file exists: {}'.format(filename), file=sys.stderr)
if file_exists:
raise CliException('files exist, rerun with --force to overwrite existing files')
context = create_context('secp256k1')
private_key = context.new_random_private_key()
public_key = context.get_public_key(private_key)
try:
priv_exists = os.path.exists(priv_filename)
with open(priv_filename, 'w') as priv_fd:
if (not args.quiet):
if priv_exists:
print('overwriting file: {}'.format(priv_filename))
else:
print('writing file: {}'.format(priv_filename))
priv_fd.write(private_key.as_hex())
priv_fd.write('\n')
keydir_info = os.stat(key_dir)
keydir_gid = keydir_info.st_gid
keydir_uid = keydir_info.st_uid
os.chown(priv_filename, keydir_uid, keydir_gid)
os.chmod(priv_filename, 416)
pub_exists = os.path.exists(pub_filename)
with open(pub_filename, 'w') as pub_fd:
if (not args.quiet):
if pub_exists:
print('overwriting file: {}'.format(pub_filename))
else:
print('writing file: {}'.format(pub_filename))
pub_fd.write(public_key.as_hex())
pub_fd.write('\n')
os.chown(pub_filename, keydir_uid, keydir_gid)
os.chmod(pub_filename, 420)
except IOError as ioe:
raise CliException('IOError: {}'.format(str(ioe))) | Executes the key generation operation, given the parsed arguments.
Args:
args (:obj:`Namespace`): The parsed args. | codesearchnet |
def __init__(self, model, ncats, alpha_lambda=1.0, beta_lambda=2.0,
freeparams=['alpha_lambda', 'beta_lambda']):
super(GammaDistributedOmegaModel, self).__init__(model, "omega",
ncats, alpha_lambda=1.0, beta_lambda=2.0,
freeparams=['alpha_lambda', 'beta_lambda']) | Initialize an `GammaDistributedModel` object.
The `lambda_param` is set to "omega".
Args:
`model` `ncats`,`alpha_lambda`, `beta_lambda`, `freeparams`
Meaning described in main class doc string for
`GammaDistributedModel`. | juraj-google-style |
def get_graphs(self, run_key, debug=False):
graph_dict = (self._run_key_to_debug_graphs if debug else
self._run_key_to_original_graphs)
graph_wrappers = graph_dict.get(run_key, {})
graph_defs = dict()
for device_name, wrapper in graph_wrappers.items():
graph_defs[device_name] = wrapper.graph_def
return graph_defs | Get the runtime GraphDef protos associated with a run key.
Args:
run_key: A Session.run kay.
debug: Whether the debugger-decoratedgraph is to be retrieved.
Returns:
A `dict` mapping device name to `GraphDef` protos. | juraj-google-style |
def build_or_reuse_placeholder(tensor_spec):
g = tfv1.get_default_graph()
name = tensor_spec.name
try:
tensor = g.get_tensor_by_name(name + ':0')
assert "Placeholder" in tensor.op.type, "Tensor {} exists but is not a placeholder!".format(name)
assert tensor_spec.is_compatible_with(tensor), \
"Tensor {} exists but is not compatible with the signature!".format(tensor)
return tensor
except KeyError:
with tfv1.name_scope(None):
ret = tfv1.placeholder(
tensor_spec.dtype, shape=tensor_spec.shape, name=tensor_spec.name)
return ret | Build a tf.placeholder from the metadata in the given tensor spec, or return an existing one.
Args:
tensor_spec (tf.TensorSpec):
Returns:
tf.Tensor: | juraj-google-style |
def __create_and_save_state(cls, job_config, mapreduce_spec):
state = model.MapreduceState.create_new(job_config.job_id)
state.mapreduce_spec = mapreduce_spec
state.active = True
state.active_shards = 0
state.app_id = job_config._app
config = datastore_rpc.Configuration(force_writes=job_config._force_writes)
state.put(config=config)
return state | Save map job state to datastore.
Save state to datastore so that UI can see it immediately.
Args:
job_config: map_job.JobConfig.
mapreduce_spec: model.MapreduceSpec.
Returns:
model.MapreduceState for this job. | codesearchnet |
def load_bmp(path):
surface = object.__new__(Surface)
surface._ptr = check_ptr_err(lib.SDL_LoadBMP_RW(lib.SDL_RWFromFile(path, "rb"), 1))
return surface | Load a surface from a file.
Args:
path (str): Path to the BMP file to load.
Returns:
Surface: A surface containing the pixels loaded from the file.
Raises:
SDLError: If the file cannot be loaded. | juraj-google-style |
def OnCreateAccount(self, account):
pubkey = account.PublicKey.encode_point(False)
pubkeyunhex = binascii.unhexlify(pubkey)
pub = pubkeyunhex[1:65]
priv = bytearray(account.PrivateKey)
decrypted = pub + priv
encrypted_pk = self.EncryptPrivateKey(bytes(decrypted))
db_account, created = Account.get_or_create(
PrivateKeyEncrypted=encrypted_pk, PublicKeyHash=account.PublicKeyHash.ToBytes())
db_account.save()
self.__dbaccount = db_account | Save a KeyPair in encrypted form into the database.
Args:
account (KeyPair): | juraj-google-style |
def get_arrive_stop(self, **kwargs):
params = {
'idStop': kwargs.get('stop_number'),
'cultureInfo': util.language_code(kwargs.get('lang'))
}
result = self.make_request('geo', 'get_arrive_stop', **params)
if not util.check_result(result, 'arrives'):
return False, 'UNKNOWN ERROR'
values = util.response_list(result, 'arrives')
return True, [emtype.Arrival(**a) for a in values] | Obtain bus arrival info in target stop.
Args:
stop_number (int): Stop number to query.
lang (str): Language code (*es* or *en*).
Returns:
Status boolean and parsed response (list[Arrival]), or message string
in case of error. | juraj-google-style |
def replace_variables(self, text):
variables = {
'python-executable': str(self._venv_path / 'bin' / 'python')
}
return text.format(**variables) | Replace variable placeholders in `text` with values from the virtual env.
The variables are:
- {python-executable}
Args:
text: The text to do replacment int.
Returns: The text after replacement. | juraj-google-style |
def is_common(schema):
if isinstance(schema, StreamSchema):
return (schema.schema() in _SCHEMA_COMMON)
if isinstance(schema, CommonSchema):
return True
if isinstance(schema, basestring):
return is_common(StreamSchema(schema))
return False | Is `schema` an common schema.
Args:
schema: Scheme to test.
Returns:
bool: ``True`` if schema is a common schema, otherwise ``False``. | codesearchnet |
def get_absl_log_prefix(record):
created_tuple = time.localtime(record.created)
created_microsecond = int(record.created % 1.0 * 1e6)
critical_prefix = ''
level = record.levelno
if _is_non_absl_fatal_record(record):
level = logging.ERROR
critical_prefix = _CRITICAL_PREFIX
severity = converter.get_initial_for_level(level)
return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (
severity,
created_tuple.tm_mon,
created_tuple.tm_mday,
created_tuple.tm_hour,
created_tuple.tm_min,
created_tuple.tm_sec,
created_microsecond,
_get_thread_id(),
record.filename,
record.lineno,
critical_prefix) | Returns the absl log prefix for the log record.
Args:
record: logging.LogRecord, the record to get prefix for. | juraj-google-style |
def handle_error(self, error, download_request):
if (hasattr(error, 'errno') and (error.errno == errno.EACCES)):
self.handle_certificate_problem(str(error))
else:
self.handle_general_download_error(str(error), download_request) | Checks what error occured and looks for an appropriate solution.
Args:
error: Exception
The error that has occured.
download_request:
The request which resulted in the error. | codesearchnet |
class Mask2FormerPixelDecoderOutput(ModelOutput):
multi_scale_features: Tuple[torch.FloatTensor] = None
mask_features: Optional[torch.FloatTensor] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None | Mask2Former's pixel decoder module output, practically a Multi-Scale Deformable Attention based decoder. It returns
the mask features and the multiscale features.
Args:
multi_scale_features (`tuple(torch.FloatTensor)`):
Tuple of multi-scale features of scales [1/8, 1/16, 1/32] and shape `(batch_size, num_channels, height,
width)`from the Multi-Scale Deformable Attenntion based Pixel Decoder.
mask_features (`torch.FloatTensor`):
Tensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel Decoder
Layer.
attentions (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights from pixel decoder. Returned when `output_attentions=True` is passed
or when `config.output_attentions=True` | github-repos |
def local_get_state(self, device, id_override=None, type_override=None):
if ALLOW_LOCAL_CONTROL:
if (device.local_id() is not None):
hub = HUBS.get(device.hub_id())
if ((hub is not None) and (hub['token'] is not None)):
ip = hub['ip']
access_token = hub['token']
else:
return self.get_device_state(device, id_override, type_override)
else:
return self.get_device_state(device, id_override, type_override)
_LOGGER.info('Getting local state')
local_id = (id_override or device.local_id())
object_type = (type_override or device.object_type())
LOCAL_API_HEADERS['Authorization'] = ('Bearer ' + access_token)
url_string = 'https:
try:
arequest = requests.get(url_string, headers=LOCAL_API_HEADERS, verify=False, timeout=3)
except requests.exceptions.RequestException:
_LOGGER.error('Error sending local control request. Sending request online')
return self.get_device_state(device, id_override, type_override)
response_json = arequest.json()
_LOGGER.debug('%s', response_json)
temp_state = device.json_state
for (key, value) in response_json['data']['last_reading'].items():
temp_state['last_reading'][key] = value
return temp_state
else:
return self.get_device_state(device, id_override, type_override) | Get device state via local API, and fall back to online API.
Args:
device (WinkDevice): The device the change is being requested for.
id_override (String, optional): A device ID used to override the
passed in device's ID. Used to make changes on sub-devices.
i.e. Outlet in a Powerstrip. The Parent device's ID.
type_override (String, optional): Used to override the device type
when a device inherits from a device other than WinkDevice.
Returns:
response_json (Dict): The API's response in dictionary format | codesearchnet |
def color_lerp(c1: Tuple[(int, int, int)], c2: Tuple[(int, int, int)], a: float) -> Color:
return Color._new_from_cdata(lib.TCOD_color_lerp(c1, c2, a)) | Return the linear interpolation between two colors.
``a`` is the interpolation value, with 0 returing ``c1``,
1 returning ``c2``, and 0.5 returing a color halfway between both.
Args:
c1 (Union[Tuple[int, int, int], Sequence[int]]):
The first color. At a=0.
c2 (Union[Tuple[int, int, int], Sequence[int]]):
The second color. At a=1.
a (float): The interpolation value,
Returns:
Color: The interpolated Color. | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.