code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def set(self, key, value):
    """Add or update a PHP define() entry for ``key`` in the loaded content.

    Returns True if the content string was changed, False if the key already
    holds ``value``.
    """
    match = self._get_match(key=key)
    if (not match):
        # Key not present: insert a new define() right after the opening <?php tag.
        self._log.info('"%s" does not exist, so it will be added.', key)
        if isinstance(value, str):
            self._log.info('"%s" will be added as a PHP string value.', key)
            value_str = "'{}'".format(value)
        else:
            # Non-strings (e.g. booleans) are rendered as lowercase PHP literals.
            self._log.info('"%s" will be added as a PHP object value.', key)
            value_str = str(value).lower()
        new = "define('{key}', {value});".format(key=key, value=value_str)
        self._log.info('"%s" will be added as: %s', key, new)
        replace_this = '<?php\n'
        replace_with = (('<?php\n' + new) + '\n')
        self._content = self._content.replace(replace_this, replace_with)
        self._log.info('Content string has been updated.')
        return True
    if (self._get_value_from_match(key=key, match=match) == value):
        self._log.info('"%s" is already up-to-date.', key)
        return False
    # Key exists with a different value: splice the new value over match group 1.
    self._log.info('"%s" exists and will be updated.', key)
    start_index = match.start(1)
    end_index = match.end(1)
    if isinstance(value, bool):
        value = str(value).lower()
        self._log.info('"%s" will be updated with boolean value: %s', key, value)
    else:
        self._log.info('"%s" will be updated with string value: %s', key, value)
    start = self._content[:start_index]
    end = self._content[end_index:]
    self._content = ((start + value) + end)
    return True | Updates the value of the given key in the loaded content.
Args:
key (str): Key of the property to update.
        value (str or bool): New value of the property.
Return:
bool: Indicates whether or not a change was made. | codesearchnet |
def authenticate_search_bind(self, username, password):
    """Authenticate a user via search-then-bind.

    Binds with the configured service account (or anonymously), searches for
    the user's DN via LDAP_USER_LOGIN_ATTR, then attempts a direct bind with
    the found DN and the supplied password.

    Args:
        username (str): Value of the login attribute to search for.
        password (str): Password to bind with once the DN is found.

    Returns:
        AuthenticationResponse: Status plus, on success, user info/groups.
    """
    connection = self._make_connection(bind_user=self.config.get('LDAP_BIND_USER_DN'), bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'))
    try:
        connection.bind()
        log.debug("Successfully bound to LDAP as '{0}' for search_bind method".format((self.config.get('LDAP_BIND_USER_DN') or 'Anonymous')))
    except Exception as e:
        # Could not bind the search account at all -- fail closed.
        self.destroy_connection(connection)
        log.error(e)
        return AuthenticationResponse()
    user_filter = '({search_attr}={username})'.format(search_attr=self.config.get('LDAP_USER_LOGIN_ATTR'), username=username)
    search_filter = '(&{0}{1})'.format(self.config.get('LDAP_USER_OBJECT_FILTER'), user_filter)
    log.debug("Performing an LDAP Search using filter '{0}', base '{1}', and scope '{2}'".format(search_filter, self.full_user_search_dn, self.config.get('LDAP_USER_SEARCH_SCOPE')))
    connection.search(search_base=self.full_user_search_dn, search_filter=search_filter, search_scope=getattr(ldap3, self.config.get('LDAP_USER_SEARCH_SCOPE')), attributes=self.config.get('LDAP_GET_USER_ATTRIBUTES'))
    response = AuthenticationResponse()
    # Optionally treat multiple matches as a failure (ambiguous identity).
    if ((len(connection.response) == 0) or (self.config.get('LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND') and (len(connection.response) > 1))):
        log.debug("Authentication was not successful for user '{0}'".format(username))
    else:
        for user in connection.response:
            # Skip referrals and other non-entry results.
            if (('type' not in user) or (user.get('type') != 'searchResEntry')):
                continue
            user_connection = self._make_connection(bind_user=user['dn'], bind_password=password)
            log.debug("Directly binding a connection to a server with user:'{0}'".format(user['dn']))
            try:
                user_connection.bind()
                log.debug("Authentication was successful for user '{0}'".format(username))
                response.status = AuthenticationResponseStatus.success
                # Carry the DN inside the attribute dict for downstream consumers.
                user['attributes']['dn'] = user['dn']
                response.user_info = user['attributes']
                response.user_id = username
                response.user_dn = user['dn']
                if self.config.get('LDAP_SEARCH_FOR_GROUPS'):
                    response.user_groups = self.get_user_groups(dn=user['dn'], _connection=connection)
                self.destroy_connection(user_connection)
                break
            except ldap3.core.exceptions.LDAPInvalidCredentialsResult:
                log.debug("Authentication was not successful for user '{0}'".format(username))
                response.status = AuthenticationResponseStatus.fail
            except Exception as e:
                log.error(e)
                response.status = AuthenticationResponseStatus.fail
            self.destroy_connection(user_connection)
    self.destroy_connection(connection)
    return response | Performs a search bind to authenticate a user. This is
required when the login attribute is not the same
as the RDN, since we cannot string together their DN on
the fly, instead we have to find it in the LDAP, then attempt
to bind with their credentials.
Args:
username (str): Username of the user to bind (the field specified
as LDAP_BIND_LOGIN_ATTR)
password (str): User's password to bind with when we find their dn.
Returns:
AuthenticationResponse | codesearchnet |
def delete_template(self, template_id):
    """Delete the template with the given id.

    Args:
        template_id (str): Id of the template to delete.

    Returns:
        The raw HTTP response (``get_json=False``), carrying the status code.
    """
    url = self.TEMPLATE_DELETE_URL
    request = self._get_request()
    # The template id is appended directly to the delete endpoint URL.
    response = request.post(url + template_id, get_json=False)
    return response | Deletes the specified template
Args:
template_id (str): The id of the template to delete
Returns:
A status code | juraj-google-style |
def exists_function(function: _evaluation.ExistsFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:
    """Generate Spark SQL for the FHIRPath exists() function.

    Produces a cardinality-1 BOOL expression: TRUE when the operand has any
    non-null elements, FALSE otherwise.

    Args:
        function: The FHIRPath AST ExistsFunction node.
        operand_result: SQL for the expression being tested.
        params_result: Parameters passed to exists(); must be empty.

    Returns:
        A compiled Spark SQL Select expression.

    Raises:
        ValueError: If there is no operand or a `criteria` parameter is given.
    """
    if operand_result is None:
        raise ValueError('exists() cannot be called without an operand.')
    if params_result:
        raise ValueError('Unsupported FHIRPath expression: `criteria` parameter for exists() is not currently supported.')
    sql_alias = 'exists_'
    sql_data_type = _sql_data_types.Boolean
    # Scalar operands without filters reduce to a simple IS NOT NULL check.
    if not _fhir_path_data_types.returns_collection(function.parent_node.return_type) and (not operand_result.where_part):
        return dataclasses.replace(operand_result, select_part=operand_result.select_part.is_not_null(_sql_alias=sql_alias))
    else:
        return _sql_data_types.Select(select_part=_sql_data_types.RawExpression('CASE WHEN COUNT(*) = 0 THEN FALSE ELSE TRUE END', _sql_data_type=sql_data_type, _sql_alias=sql_alias), from_part=str(operand_result.to_subquery()), where_part=f'{operand_result.sql_alias} IS NOT NULL', sql_dialect=_sql_data_types.SqlDialect.SPARK) | Generates Spark SQL representing the FHIRPath empty() function.
Returns `TRUE` if the operand has any elements, and `FALSE` otherwise.
This is the opposite of `_EmptyFunction`. If the operand is empty, then the
result is `FALSE`.
The returned SQL expression is a table of cardinality 1, whose value is of
`BOOL` type. By default, `_ExistsFunction` will return `FALSE` if given no
operand.
Args:
function: The FHIRPath AST `ExistsFunction` node
operand_result: The expression which is being evaluated
params_result: The parameter passed in to function
Returns:
A compiled Spark SQL expression.
Raises:
ValueError: When the function is called without an operand | github-repos |
def reflect_runtime_member(self, name):
    """Resolve ``name`` via runtime reflection only, innermost scope first.

    Returns:
        The reflected type of ``name``, or protocol.AnyType if no scope
        can resolve it.
    """
    # Walk scopes inner-to-outer; fall through on any reflection failure.
    for scope in reversed(self.scopes):
        try:
            return structured.reflect_runtime_member(scope, name)
        except (NotImplementedError, KeyError, AttributeError):
            continue
    return protocol.AnyType | Reflect 'name' using ONLY runtime reflection.
You most likely want to use ScopeStack.reflect instead.
Returns:
Type of 'name', or protocol.AnyType. | codesearchnet |
def get_info_by_tail_number(self, tail_number, page=1, limit=100):
    """Fetch aircraft details for a given tail number.

    Args:
        tail_number (str): The tail number, e.g. VT-ANL.
        page (int): Optional page number for paginated plans.
        limit (int): Optional cap on the number of records returned.

    Returns:
        The aircraft data parsed from the API response.
    """
    url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
    return self._fr24.get_aircraft_data(url) | Fetch the details of a particular aircraft by its tail number.
This method can be used to get the details of a particular aircraft by its tail number.
Details include the serial number, age etc along with links to the images of the aircraft.
It checks the user authentication and returns the data accordingly.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_info_by_flight_number('VT-ANL')
f.get_info_by_flight_number('VT-ANL',page=1,limit=10) | codesearchnet |
def get_results_as_xarray(self, parameter_space, result_parsing_function, output_labels, runs):
    """Export results for a parameter space as an xarray.DataArray.

    Args:
        parameter_space (dict): Parameter name -> list of values to export.
        result_parsing_function (callable): Maps a result dict to value(s).
        output_labels (list or str): Label(s) for the metrics dimension; a
            list adds a 'metrics' axis.
        runs (int): Number of runs per parameter combination.

    Returns:
        xr.DataArray: Results indexed by parameters, runs and optionally metrics.
    """
    np_array = np.array(self.get_space(self.db.get_complete_results(), {}, collections.OrderedDict([(k, v) for (k, v) in parameter_space.items()]), runs, result_parsing_function))
    # Rebuild the coordinate dict, adding the implicit 'runs' (and 'metrics') axes.
    clean_parameter_space = collections.OrderedDict([(k, v) for (k, v) in parameter_space.items()])
    clean_parameter_space['runs'] = range(runs)
    if isinstance(output_labels, list):
        clean_parameter_space['metrics'] = output_labels
    xr_array = xr.DataArray(np_array, coords=clean_parameter_space, dims=list(clean_parameter_space.keys()))
    return xr_array | Return the results relative to the desired parameter space in the form
of an xarray data structure.
Args:
parameter_space (dict): The space of parameters to export.
result_parsing_function (function): user-defined function, taking a
result dictionary as argument, that can be used to parse the
result files and return a list of values.
output_labels (list): a list of labels to apply to the results
dimensions, output by the result_parsing_function.
runs (int): the number of runs to export for each parameter
combination. | codesearchnet |
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10000000.0)
return random_ops.truncated_normal(shape, mean, stddev, dtype=dtype, seed=seed) | Returns a tensor with truncated random normal distribution of values.
The generated values follow a normal distribution
with specified mean and standard deviation,
except that values whose magnitude is more than
two standard deviations from the mean are dropped and re-picked.
Args:
shape: A tuple of integers, the shape of tensor to create.
mean: Mean of the values.
stddev: Standard deviation of the values.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor. | github-repos |
def freeze(self):
    """Freeze the InfeedQueue so its configuration can no longer change.

    Raises:
        ValueError: If tuple types or shapes have not been fully set.
    """
    self._frozen = True
    if self._tuple_types is None:
        raise ValueError("Can't freeze an InfeedQueue without setting all tuple types.")
    if self._tuple_shapes is None:
        raise ValueError("Can't freeze an InfeedQueue without setting all tuple shapes.")
    for shape in self._tuple_shapes:
        # A shape with unknown dims counts as "not set".
        if shape.dims is None:
            raise ValueError("Can't freeze an InfeedQueue without setting all tuple shapes.")
    for policy in self._sharding_policies:
        policy.freeze()
    self._validate() | Freezes the InfeedQueue so it can no longer be modified.
The configuration is implicitly frozen before any host-side or
device-side Ops are generated. The configuration cannot be frozen
until the types and shapes of the tuple elements have been set.
Raises:
ValueError: if the types or shapes of the tuple elements have not been
set. | github-repos |
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel used to create the unary-unary RPC stubs below.
    """
    # One stub per JobController RPC; (de)serializers come from the generated protos.
    self.SubmitJob = channel.unary_unary(
        "/google.cloud.dataproc.v1.JobController/SubmitJob",
        request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.SubmitJobRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString,
    )
    self.GetJob = channel.unary_unary(
        "/google.cloud.dataproc.v1.JobController/GetJob",
        request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.GetJobRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString,
    )
    self.ListJobs = channel.unary_unary(
        "/google.cloud.dataproc.v1.JobController/ListJobs",
        request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsResponse.FromString,
    )
    self.UpdateJob = channel.unary_unary(
        "/google.cloud.dataproc.v1.JobController/UpdateJob",
        request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.UpdateJobRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString,
    )
    self.CancelJob = channel.unary_unary(
        "/google.cloud.dataproc.v1.JobController/CancelJob",
        request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.CancelJobRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString,
    )
    # DeleteJob returns google.protobuf.Empty rather than a Job.
    self.DeleteJob = channel.unary_unary(
        "/google.cloud.dataproc.v1.JobController/DeleteJob",
        request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.DeleteJobRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
    ) | Constructor.
Args:
channel: A grpc.Channel. | juraj-google-style |
def slicewise(self, fn, *inputs):
    """Apply `fn` in parallel across all slices.

    Args:
        fn: Function from tf.Tensors to a tf.Tensor or a tuple of tf.Tensors.
        *inputs: LaidOutTensors or values convertible to tf.Tensor.

    Returns:
        A LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.
    """
    # Special-case tf.add so a LazyAllreduceSum can keep deferring its allreduce.
    if fn == tf.add:
        assert len(inputs) == 2
        if isinstance(inputs[0], mtf.LazyAllreduceSum):
            return inputs[0] + inputs[1]
    inputs = mtf.convert_args_to_laid_out_tensors(inputs)
    # Unwrap laid-out tensors to their single slice before applying fn.
    ret = fn(*[
        x.one_slice if isinstance(x, self.LaidOutTensor) else x
        for x in inputs])
    if isinstance(ret, tuple):
        return tuple([self.LaidOutTensor([t]) for t in ret])
    else:
        return self.LaidOutTensor([ret]) | Execute a function in parallel on all slices.
Args:
fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.
*inputs: a list of inputs. Each input is either a LaidOutTensor or
is convertible to a tf.Tensor.
Returns:
a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple. | juraj-google-style |
def atomic_swap(alias_name, new_index_name, index_client):
    """Atomically repoint `alias_name` at `new_index_name`, deleting any old index.

    Uses a single update_aliases call (remove+add) for a zero-downtime swap.

    Args:
        alias_name (str): Name of the alias.
        new_index_name (str): Index the alias should point to.
        index_client (Elasticsearch.IndicesClient): Elasticsearch index client.
    """
    logging.info('Performing atomic index alias swap')
    if index_client.exists_alias(name=alias_name):
        old_index_name = get_index_from_alias(alias_name, index_client)
        logging.info('Removing old as well as adding new')
        # remove+add in one actions payload is what makes the swap atomic.
        actions = {'actions': [
            {'remove': {'index': old_index_name, 'alias': alias_name}},
            {'add': {'index': new_index_name, 'alias': alias_name}}
        ]}
        index_client.update_aliases(body=actions)
        index_client.delete(index=old_index_name)
    else:
        logging.info('Old alias not found, only adding new')
        actions = {'actions': [
            {'add': {'index': new_index_name, 'alias': alias_name}}
        ]}
        index_client.update_aliases(body=actions) | Points an alias to a new index, then delete the old index if needed
Uses client.update_aliases to perform this with zero downtime
Args:
alias_name (str) Name of the alias
new_index_name (str) The new index that the alias should point to
index_client (Elasticsearch.IndicesClient) Elasticsearch index client | juraj-google-style |
def _get_fitnesses(self, problem, population, cache_encoded=True, cache_solution=False, pool=None):
    """Decode and evaluate a population, using caches to skip repeated work.

    Args:
        problem: Problem; Defines decoding and fitness evaluation.
        population: list; Encoded potential solutions.
        cache_encoded: bool; Cache fitness keyed on the encoded solution.
        cache_solution: bool; Cache fitness keyed on the decoded solution.
        pool: None/multiprocessing.Pool; Optional pool for parallel work.

    Returns:
        tuple: (solutions, fitnesses, finished) where `finished` signals that
        an evaluation requested early termination.
    """
    fitnesses = ([None] * len(population))
    # Pass 1: satisfy what we can from the encoded-representation cache.
    if cache_encoded:
        try:
            encoded_keys = map(self._get_encoded_key, population)
            to_decode_indices = []
            for (i, encoded_key) in enumerate(encoded_keys):
                try:
                    fitnesses[i] = self.__encoded_cache[encoded_key]
                except KeyError:
                    to_decode_indices.append(i)
        except UnhashableError:
            # Encodings cannot be hashed: decode everything.
            encoded_keys = None
            to_decode_indices = range(len(population))
    else:
        encoded_keys = None
        to_decode_indices = range(len(population))
    if (encoded_keys is None):
        to_decode_keys = None
    else:
        to_decode_keys = [encoded_keys[i] for i in to_decode_indices]
    # Decode only the cache misses (possibly in parallel).
    solutions = ([None] * len(population))
    for (i, solution) in zip(to_decode_indices, self._pmap(problem.decode_solution, [population[i] for i in to_decode_indices], to_decode_keys, pool)):
        solutions[i] = solution
    # Pass 2: satisfy what we can from the decoded-solution cache.
    if cache_solution:
        try:
            if problem.hash_solution:
                hash_solution_func = problem.hash_solution
            else:
                hash_solution_func = self._get_solution_key
            solution_keys = [(hash_solution_func(solution) if (solution is not None) else None) for solution in solutions]
            to_eval_indices = []
            for (i, solution_key) in enumerate(solution_keys):
                if (solution_key is not None):
                    try:
                        fitnesses[i] = self.__solution_cache[solution_key]
                    except KeyError:
                        to_eval_indices.append(i)
        except UnhashableError:
            solution_keys = None
            to_eval_indices = to_decode_indices[:]
    else:
        solution_keys = None
        to_eval_indices = to_decode_indices[:]
    if (solution_keys is None):
        if (encoded_keys is None):
            to_eval_keys = None
        else:
            to_eval_keys = [encoded_keys[i] for i in to_eval_indices]
    else:
        to_eval_keys = [solution_keys[i] for i in to_eval_indices]
    # Evaluate the remaining solutions; get_fitness may return
    # (fitness, finished) or a bare fitness value.
    finished = False
    eval_bookkeeping = {}
    for (i, fitness_finished) in zip(to_eval_indices, self._pmap(problem.get_fitness, [solutions[i] for i in to_eval_indices], to_eval_keys, pool, bookkeeping_dict=eval_bookkeeping)):
        try:
            (fitness, maybe_finished) = fitness_finished
            if maybe_finished:
                finished = True
        except TypeError:
            fitness = fitness_finished
        fitnesses[i] = fitness
    # Only evaluations that actually ran count toward fitness_runs.
    self.fitness_runs += len(eval_bookkeeping['key_indices'])
    # Back-fill both caches with the freshly computed fitnesses.
    if (cache_encoded and (encoded_keys is not None)):
        for i in to_decode_indices:
            self.__encoded_cache[encoded_keys[i]] = fitnesses[i]
    if (cache_solution and (solution_keys is not None)):
        for i in to_eval_indices:
            self.__solution_cache[solution_keys[i]] = fitnesses[i]
    return (solutions, fitnesses, finished) | Get the fitness for every solution in a population.
Args:
problem: Problem; The problem that defines fitness.
population: list; List of potential solutions.
pool: None/multiprocessing.Pool; Pool of processes for parallel
decoding and evaluation. | codesearchnet |
async def _get_popular_people_page(self, page=1):
    """Fetch one page of popular-person data from the API.

    Args:
        page (int): 1-based page number to request.

    Returns:
        dict: Decoded page payload.
    """
    # OrderedDict keeps the generated query string deterministic.
    return await self.get_data(self.url_builder(
        'person/popular',
        url_params=OrderedDict(page=page),
    )) | Get a specific page of popular person data.
Arguments:
page (:py:class:`int`, optional): The page to get.
Returns:
:py:class:`dict`: The page data. | juraj-google-style |
def report(self, name, owner=None, **kwargs):
    """Build and return a Report TI object.

    Args:
        name: Report name.
        owner: Optional owner of the report.
        **kwargs: Additional Report keyword arguments.

    Returns:
        Report: The constructed Report object.
    """
    return Report(self.tcex, name, owner=owner, **kwargs) | Create the Report TI object.
Args:
owner:
name:
**kwargs:
Return: | codesearchnet |
def _process_image_files(name, filenames, texts, labels, num_shards):
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
coord = tf.train.Coordinator()
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush() | Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set. | juraj-google-style |
def url_fetch(config, task) -> Iterator[dict]:
    """Fetch each URL in the task and yield status and/or contents per row.

    Args:
        config: Runtime configuration (verbose flag, auth context).
        task: Recipe task dict; reads 'auth', 'urls', optional 'data',
            'status', and 'read' keys.

    Yields:
        dict: One record per URL matching URL_SCHEMA (URL, URI, and
        optionally Status/Read).
    """
    for url, uri in get_rows(config, task['auth'], task['urls']):
        if config.verbose:
            print('URL/URI', url, uri)
        record = {'URL': url, 'URI': None if uri is None else str(uri)}
        url_request = request.Request(url, data=task.get('data'))
        try:
            url_response = request.urlopen(url_request)
            if task.get('status', False):
                record['Status'] = url_response.status
            if task.get('read', False):
                record['Read'] = url_response.read()
        except InvalidURL as error:
            # Malformed URL is reported as a client error.
            if task.get('status', False):
                record['Status'] = 400
        except HTTPError as error:
            if task.get('status', False):
                record['Status'] = error.status
        except Exception as error:
            # Any other failure is collapsed to a generic server error.
            if task.get('status', False):
                record['Status'] = 500
        yield record | Fetch URL list and return both status code and/or contents.
Takes no parameters, it operates on recipe JSON directly. Core
function is to call urlopen on each passed in URL.
Returns:
Produces a dictionary generator with record matching URL_SCHEMA. | github-repos |
def sites_at_edges( self ):
    """Collect the sites lying on each of the six bounding faces of the cell.

    Returns:
        (List(List)): Six site lists, documented as the order
        [ +x, -x, +y, -y, +z, -z ].
    """
    min_x = min( [ s.r[0] for s in self.sites ] )
    max_x = max( [ s.r[0] for s in self.sites ] )
    min_y = min( [ s.r[1] for s in self.sites ] )
    max_y = max( [ s.r[1] for s in self.sites ] )
    min_z = min( [ s.r[2] for s in self.sites ] )
    max_z = max( [ s.r[2] for s in self.sites ] )
    # NOTE(review): x_max collects sites at *min_x* (and vice versa, likewise
    # for y and z). This may be intentional (e.g. periodic-boundary neighbours
    # across the +x face) or an inversion bug -- confirm against callers
    # before changing.
    x_max = [ s for s in self.sites if s.r[0] == min_x ]
    x_min = [ s for s in self.sites if s.r[0] == max_x ]
    y_max = [ s for s in self.sites if s.r[1] == min_y ]
    y_min = [ s for s in self.sites if s.r[1] == max_y ]
    z_max = [ s for s in self.sites if s.r[2] == min_z ]
    z_min = [ s for s in self.sites if s.r[2] == max_z ]
    return ( x_max, x_min, y_max, y_min, z_max, z_min ) | Finds the six sites with the maximum and minimum coordinates along x, y, and z.
Args:
None
Returns:
(List(List)): In the order [ +x, -x, +y, -y, +z, -z ] | juraj-google-style |
def extractDates(inp, tz=None, now=None):
    """Extract datetime objects from free text.

    Convenience wrapper that builds a throwaway DateService.

    Args:
        inp (str): Input text to parse.
        tz: Optional pytz timezone applied to the returned datetimes.
        now: Reference time for relative expressions; defaults to the
            service's notion of "now" when omitted.

    Returns:
        list: datetime objects found in the input.
    """
    service = DateService(tz=tz, now=now)
    return service.extractDates(inp) | Extract semantic date information from an input string.
This is a convenience method which would only be used if
you'd rather not initialize a DateService object.
Args:
inp (str): The input string to be parsed.
tz: An optional Pytz timezone. All datetime objects returned will
be relative to the supplied timezone, or timezone-less if none
is supplied.
now: The time to which all returned datetime objects should be
relative. For example, if the text is "In 5 hours", the
datetime returned will be now + datetime.timedelta(hours=5).
Uses datetime.datetime.now() if none is supplied.
Returns:
A list of datetime objects extracted from input. | codesearchnet |
def smear(self, sigma):
diff = [(self.x[(i + 1)] - self.x[i]) for i in range((len(self.x) - 1))]
avg_x_per_step = (np.sum(diff) / len(diff))
if (len(self.ydim) == 1):
self.y = gaussian_filter1d(self.y, (sigma / avg_x_per_step))
else:
self.y = np.array([gaussian_filter1d(self.y[(:, k)], (sigma / avg_x_per_step)) for k in range(self.ydim[1])]).T | Apply Gaussian smearing to spectrum y value.
Args:
sigma: Std dev for Gaussian smear function | codesearchnet |
def update_utxoset(self, transaction):
    """Apply ``transaction`` to the UTXO set.

    Removes the outputs the transaction spends and stores the outputs it
    creates.

    Args:
        transaction: Transaction whose spent/unspent outputs update the set.
    """
    spent_outputs = [
        spent_output for spent_output in transaction.spent_outputs
    ]
    # Only issue a delete when there is something to remove.
    if spent_outputs:
        self.delete_unspent_outputs(*spent_outputs)
    self.store_unspent_outputs(
        *[utxo._asdict() for utxo in transaction.unspent_outputs]
    ) | Update the UTXO set given ``transaction``. That is, remove
the outputs that the given ``transaction`` spends, and add the
outputs that the given ``transaction`` creates.
Args:
transaction (:obj:`~bigchaindb.models.Transaction`): A new
transaction incoming into the system for which the UTXO
set needs to be updated. | juraj-google-style |
def _create_and_save_tf1_gather_model(self, saved_model_path: str, signature_key: str, tags: Collection[str], input_key: str, output_key: str, input_type: dtypes.DType, use_variable=False) -> core.Tensor:
    """Create a simple TF1 (graph-mode) gather model and save it as a SavedModel.

    Args:
        saved_model_path: Directory to save the model to.
        signature_key: Key of the SignatureDef the inputs/outputs belong to.
        tags: Tags associated with the saved MetaGraph.
        input_key: Signature key for the input tensor.
        output_key: Signature key for the output tensor.
        input_type: Dtype of the gather-index input tensor.
        use_variable: If True, back the gather filter with a tf.Variable.

    Returns:
        The input placeholder tensor of the model.
    """
    with ops.Graph().as_default(), session.Session() as sess:
        in_placeholder, output_tensor = self._create_simple_tf1_gather_model(input_type=input_type, use_variable_for_filter=use_variable)
        if use_variable:
            # Variables must be initialized before the graph can be saved.
            sess.run(variables.global_variables_initializer())
        self._save_tf1_model(sess, saved_model_path, signature_key, tags, inputs={input_key: in_placeholder}, outputs={output_key: output_tensor})
        return in_placeholder | Creates and saves a simple gather model.
This is intended to be used for TF1 (graph mode) tests.
Args:
saved_model_path: Directory to save the model.
signature_key: The key to the SignatureDef that inputs & outputs
correspond to.
tags: Set of tags associated with the model.
input_key: The key to the input tensor.
output_key: The key to the output tensor.
input_type: type of the input index tensor for gather operation.
use_variable: Setting this to `True` makes the filter for the gather
operation a `tf.Variable`.
Returns:
in_placeholder: The placeholder tensor used as an input to the model. | github-repos |
def smiles_to_compound(smiles, assign_descriptors=True):
    """Parse a SMILES string into a compound object.

    Args:
        smiles (str): SMILES text to parse.
        assign_descriptors (bool): If True, compute molecule descriptors.

    Returns:
        The parsed compound object.

    Raises:
        ValueError: If the SMILES contains an unsupported symbol.
    """
    it = iter(smiles)
    mol = molecule()
    try:
        # Feed tokens one at a time; a trailing None finalizes the parse.
        for token in it:
            mol(token)
        (result, _) = mol(None)
    except KeyError as err:
        raise ValueError('Unsupported Symbol: {}'.format(err))
    # Node 0 appears to be a parser placeholder rather than an atom; it is
    # removed before returning -- confirm against the molecule() implementation.
    result.graph.remove_node(0)
    logger.debug(result)
    if assign_descriptors:
        molutil.assign_descriptors(result)
    return result | Convert SMILES text to compound object
Raises:
ValueError: SMILES with unsupported format | codesearchnet |
def deserialize(config, custom_objects=None):
    """Deserialize a metric config into a Keras metric instance or function.

    Args:
        config: Serialized metric configuration.
        custom_objects: Optional dict mapping names (strings) to custom
            classes/functions consulted during deserialization.

    Returns:
        A Keras `Metric` instance or a metric function.
    """
    # module_objects=globals() lets metrics defined in this module be resolved by name.
    return deserialize_keras_object(config, module_objects=globals(), custom_objects=custom_objects, printable_module_name='metric function') | Deserializes a serialized metric class/function instance.
Args:
config: Metric configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during deserialization.
Returns:
A Keras `Metric` instance or a metric function. | github-repos |
def _delete_minibatch(self, bucket, keys):
    """Delete a batch of keys within a single S3 bucket.

    Args:
        bucket: String bucket name.
        keys: List of keys to delete from the bucket.

    Returns:
        dict: {(bucket, key): error}, where error is None on success.
    """
    request = messages.DeleteBatchRequest(bucket, keys)
    results = {}
    try:
        response = self.client.delete_batch(request)
        for key in response.deleted:
            results[bucket, key] = None
        # Failed keys are paired positionally with their errors.
        for key, error in zip(response.failed, response.errors):
            results[bucket, key] = error
    except messages.S3ClientError as e:
        # Whole-batch failure: report the same error for every key.
        for key in keys:
            results[bucket, key] = e
    return results | A helper method. Boto3 allows batch deletions
for files within the same bucket.
Args:
bucket: String bucket name
keys: List of keys to be deleted in the bucket
Returns: dict of the form {(bucket, key): error}, where error is None if the
operation succeeded | github-repos |
def _check_callback(callback):
    """Normalize a callback into a callable object.

    Classes are instantiated (their __call__ is then used); plain callables
    pass through unchanged.

    Args:
        callback: A class implementing __call__, or a function/callable.

    Returns:
        callable: Object suitable for use as the consumer callback.

    Raises:
        ValueError: If the result is not callable.
        TypeError: If the class requires constructor arguments.
    """
    if inspect.isclass(callback):
        callback_object = callback()
        if (not callable(callback_object)):
            raise ValueError('Callback must be a class that implements __call__ or a function.')
    elif callable(callback):
        callback_object = callback
    else:
        raise ValueError('Callback must be a class that implements __call__ or a function.')
    return callback_object | Turns a callback that is potentially a class into a callable object.
Args:
callback (object): An object that might be a class, method, or function.
if the object is a class, this creates an instance of it.
Raises:
ValueError: If an instance can't be created or it isn't a callable object.
TypeError: If the class requires arguments to be instantiated.
Returns:
callable: A callable object suitable for use as the consumer callback. | codesearchnet |
def filter(self, scored_list):
    """Keep the top-n items by score, returned in original-key order.

    Args:
        scored_list: Iterable of (key, score) pairs.

    Returns:
        list: The self.top_n highest-scoring pairs, sorted by key.
    """
    # Sort ascending by score and slice off the last top_n entries.
    top_n_key = -1 * self.top_n
    top_n_list = sorted(scored_list, key=lambda x: x[1])[top_n_key:]
    result_list = sorted(top_n_list, key=lambda x: x[0])
    return result_list | Filtering with top-n ranking.
Args:
scored_list: The list of scoring.
Retruns:
The list of filtered result. | juraj-google-style |
def _unbind_topics(self, topics):
    """Unsubscribe from all MQTT topics used to talk to this device.

    Args:
        topics (MQTTTopicValidator): Topic validator for the device we
            are connected to.
    """
    self.client.unsubscribe(topics.status)
    self.client.unsubscribe(topics.tracing)
    self.client.unsubscribe(topics.streaming)
    self.client.unsubscribe(topics.response) | Unsubscribe to all of the topics we needed for communication with device
Args:
topics (MQTTTopicValidator): The topic validator for this device that
we have connected to. | codesearchnet |
def write_journal(self, journal_file_path):
    """Write the accumulated journal contents to the given file.

    Args:
        journal_file_path (str): Full path to the output journal file.
    """
    # NOTE(review): opens in text mode with the platform default encoding -- confirm.
    with open(journal_file_path, 'w') as jrn_file:
        jrn_file.write(self._journal_contents) | Write the constructed journal in to the provided file.
Args:
journal_file_path (str): full path to output journal file | codesearchnet |
def publishItems(self, items_info):
    """Publish a list of item configurations.

    Args:
        items_info (list): JSON configuration dicts, one per item to publish.

    Returns:
        list: One result dict per successfully published item, each with
        'ReplaceTag' and 'ItemInfo' keys; None when no security handler is set.
    """
    if self.securityhandler is None:
        print ("Security handler required")
        return
    itemInfo = None
    item_results = None
    item_info = None
    admin = None
    try:
        admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
        item_results = []
        for item_info in items_info:
            # Default replace tag when the config does not provide one.
            if 'ReplaceTag' in item_info:
                itemInfo = {"ReplaceTag":item_info['ReplaceTag'] }
            else:
                itemInfo = {"ReplaceTag":"{FeatureService}" }
            itemInfo['ItemInfo'] = self._publishItems(config=item_info)
            if itemInfo['ItemInfo'] is not None and 'name' in itemInfo['ItemInfo']:
                print ("%s created" % itemInfo['ItemInfo']['name'])
                item_results.append(itemInfo)
            else:
                # Publish failed or returned no name: report, do not collect.
                print (str(itemInfo['ItemInfo']))
        return item_results
    except common.ArcRestHelperError as e:
        raise e
    except Exception as e:
        # Wrap unexpected failures with traceback details for the caller.
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "publishItems",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        itemInfo = None
        item_results = None
        item_info = None
        admin = None
        del itemInfo
        del item_results
        del item_info
        del admin
        gc.collect() | Publishes a list of items.
Args:
items_info (list): A list of JSON configuration items to publish.
Returns:
list: A list of results from :py:meth:`arcrest.manageorg._content.User.addItem`. | juraj-google-style |
def check_arg_in_support(f):
    """Decorator asserting the first argument lies in the distribution's support.

    When the distribution's `validate_args` is True, wraps `f` with an
    assertion that x >= dist.loc (the half-Cauchy support is [loc, infinity)).

    Args:
        f: Method to decorate; invoked as f(dist, x, ...).

    Returns:
        The decorated method.
    """
    @functools.wraps(f)
    def _check_arg_and_apply_f(*args, **kwargs):
        dist = args[0]
        x = args[1]
        # The assertion list is empty (no-op) unless validate_args is set.
        with tf.control_dependencies(([assert_util.assert_greater_equal(x, dist.loc, message='x is not in the support of the distribution')] if dist.validate_args else [])):
            return f(*args, **kwargs)
    return _check_arg_and_apply_f | Decorator function for argument bounds checking.
This decorator is meant to be used with methods that require the first
argument to be in the support of the distribution. If `validate_args` is
`True`, the method is wrapped with an assertion that the first argument is
greater than or equal to `loc`, since the support of the half-Cauchy
distribution is given by `[loc, infinity)`.
Args:
f: method to be decorated.
Returns:
Returns a decorated method that, when `validate_args` attribute of the class
is `True`, will assert that all elements in the first argument are within
the support of the distribution before executing the original method. | codesearchnet |
def period(self, value: float):
    """Set the period from a number of seconds.

    Args:
        value (float): Period in seconds; must be >= 0.

    Raises:
        ValueError: If value is negative.
    """
    if (value < 0):
        raise ValueError('Period must be greater or equal than zero.')
    self._period = timedelta(seconds=value) | Set the period.
Args:
value (float): seconds | codesearchnet |
def validate(self, value, model_instance):
    """Validate that ``value`` is a legal state for this workflow field.

    Args:
        value (base.StateWrapper): Wrapped state returned by to_python.
        model_instance: The WorkflowEnabled instance being validated.

    Raises:
        exceptions.ValidationError: On wrong type, wrong workflow, or a
            state not belonging to the workflow.
    """
    if not isinstance(value, base.StateWrapper):
        raise exceptions.ValidationError(self.error_messages['wrong_type'] % value)
    elif not value.workflow == self.workflow:
        raise exceptions.ValidationError(self.error_messages['wrong_workflow'] % value.workflow)
    elif value.state not in self.workflow.states:
        raise exceptions.ValidationError(self.error_messages['invalid_state'] % value.state) | Validate that a given value is a valid option for a given model instance.
Args:
value (xworkflows.base.StateWrapper): The base.StateWrapper returned by to_python.
model_instance: A WorkflowEnabled instance | juraj-google-style |
def _from_config(cls, config, **kwargs):
    """Instantiate the model from a config under the required context managers.

    Args:
        config: Model configuration; deep-copied before mutation.
        torch_dtype (`torch.dtype`, *optional*, via kwargs): Overrides the
            default dtype the model is constructed under.

    Returns:
        The constructed model instance.
    """
    torch_dtype = kwargs.pop('torch_dtype', config.torch_dtype)
    if isinstance(torch_dtype, str):
        torch_dtype = getattr(torch, torch_dtype)
    dtype_orig = None
    if torch_dtype is not None:
        # Remember the previous default dtype so it can be restored afterwards.
        dtype_orig = cls._set_default_torch_dtype(torch_dtype)
    config = copy.deepcopy(config)
    if config._attn_implementation_internal is not None:
        attn_implementation = config._attn_implementation_internal
    else:
        attn_implementation = None
    # An explicit kwarg wins over whatever the config carried.
    config._attn_implementation = kwargs.pop('attn_implementation', attn_implementation)
    if not getattr(config, '_attn_implementation_autoset', False):
        config = cls._autoset_attn_implementation(config, check_device_map=False, torch_dtype=torch_dtype)
    if is_deepspeed_zero3_enabled() and (not _is_quantized) and (not _is_ds_init_called):
        # ZeRO-3 partitions parameters at construction time via zero.init().
        logger.info('Detected DeepSpeed ZeRO-3: activating zero.init() for this model')
        import deepspeed
        init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]
        with ContextManagers(init_contexts):
            model = cls(config, **kwargs)
    else:
        model = cls(config, **kwargs)
    if dtype_orig is not None:
        torch.set_default_dtype(dtype_orig)
    return model | All context managers that the model should be initialized under go here.
Args:
torch_dtype (`torch.dtype`, *optional*):
Override the default `torch.dtype` and load the model under this dtype. | github-repos |
def additive_coupling(name, x, mid_channels=512, reverse=False, activation='relu', dropout=0.0):
    # Reversible additive coupling layer (NICE/RealNVP style): the channels are
    # split in half, the first half passes through unchanged and the second half
    # is shifted by a learned function of the first. Log-det-jacobian is 0.
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        # BUG FIX: the original line had an unbalanced parenthesis and used the
        # full channel count. conv_stack's output must match x2's channel count,
        # which is half of the input channels after the 2-way split below.
        output_channels = common_layers.shape_list(x)[(- 1)] // 2
        (x1, x2) = tf.split(x, num_or_size_splits=2, axis=(- 1))
        z1 = x1
        # shift = NN(x1); only x1 feeds the network, which keeps the op invertible.
        shift = conv_stack('nn', x1, mid_channels, output_channels=output_channels, activation=activation, dropout=dropout)
        if (not reverse):
            z2 = (x2 + shift)
        else:
            # Inverse pass: undo the forward shift.
            z2 = (x2 - shift)
return (tf.concat([z1, z2], axis=3), 0.0) | Reversible additive coupling layer.
Args:
name: variable scope.
x: 4-D Tensor, shape=(NHWC).
mid_channels: number of channels in the coupling layer.
reverse: Forward or reverse operation.
activation: "relu" or "gatu"
dropout: default, 0.0
Returns:
output: 4-D Tensor, shape=(NHWC)
objective: 0.0 | codesearchnet |
def max(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':
    # Element-wise maximum; _binary_op applies tf.maximum and casts to float32.
    return cls._binary_op(x, y, tf.maximum, tf.float32) | Returns a TensorFluent for the maximum function.
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the maximum function. | juraj-google-style |
def exit_code_from_run_infos(run_infos: t.List[RunInfo]) -> int:
    # Fold a collection of RunInfo objects into one representative exit code.
    assert run_infos is not None
    # A bare RunInfo (not iterable) simply reports its own return code.
    if not hasattr(run_infos, '__iter__'):
        return run_infos.retcode
    codes = [info.retcode for info in run_infos]
    max_rc = max(codes)
    # Every run finished at or below zero: surface the most negative code.
    if max_rc == 0:
        return min(codes)
return max_rc | Generate a single exit code from a list of RunInfo objects.
Takes a list of RunInfos and returns the exit code that is furthest away
from 0.
Args:
run_infos (t.List[RunInfo]): the run results to aggregate, or a single RunInfo.
Returns:
int: the highest return code, or the most negative one when the highest is 0.
def __init__(self, x, y=None, **kwargs):
    # Fail fast: each adapter subclass declares, via can_handle(), which
    # input types it supports.
    if not self.can_handle(x, y):
raise ValueError('{} Cannot handle input {}, {}'.format(self.__class__, x, y)) | Create a DataAdapter based on data inputs.
The caller must make sure to call `can_handle()` first before invoking this
method. Provide unsupported data type will result into unexpected behavior.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
**kwargs: Other keyword arguments for DataAdapter during the construction
of the tf.dataset.Dataset. For example:
- Numpy data might have `sample_weights` which will be used for
weighting the loss function during training.
- Numpy data might need to have `batch_size` parameter when constructing
the dataset and iterator.
- Certain input might need to be distribution strategy aware. When
`distribution_strategy` is passed, the created dataset need to respect
the strategy.
DataAdapter might choose to ignore any keyword argument if it doesn't
use it, or raise exception if any required argument is not provide. | github-repos |
def set_installed_version(vcs, version):
    # Overwrite the project's version marker file with the new version string.
    version_path = _get_version_path(vcs)
    with open(version_path, 'w') as f:
f.write(version) | Set the installed version for this project.
Args:
vcs (easyci.vcs.base.Vcs)
version (str) | juraj-google-style |
def detect_palette_support(basic_palette=None):
    result = col_init = win_enabled = None
    TERM = (env.TERM or '')
    if (os_name == 'nt'):
        from .windows import is_ansi_capable, enable_vt_processing, is_colorama_initialized
        if is_ansi_capable():
            # VT processing must be enabled on every console handle for ANSI.
            win_enabled = all(enable_vt_processing())
            col_init = is_colorama_initialized()
    # Checks run weakest to strongest so a later hit overwrites an earlier one.
    if (TERM.startswith('xterm') or (TERM == 'linux') or col_init):
        result = 'basic'
    if (('256color' in TERM) or (TERM == 'fbterm') or env.ANSICON):
        result = 'extended'
    if ((env.COLORTERM in ('truecolor', '24bit')) or win_enabled):
        result = 'truecolor'
    pal_name = 'Unknown'
    if (result and (not basic_palette)):
        # No palette supplied by the caller -- derive one from the detected level.
        (result, pal_name, basic_palette) = _find_basic_palette(result)
    try:
        import webcolors
    except ImportError:
        # Optional dependency; only reported in the debug line below.
        webcolors = None
    log.debug(f"{result!r} ({os_name}, TERM={(env.TERM or '')}, COLORTERM={(env.COLORTERM or '')}, ANSICON={env.ANSICON}, webcolors={bool(webcolors)}, basic_palette={pal_name})")
return (result, basic_palette) | Returns whether we think the terminal supports basic, extended, or
truecolor. None if not able to tell.
Returns:
None or str: 'basic', 'extended', 'truecolor' | codesearchnet |
def _MergeEntities(self, a, b):
    def _MergeAgencyId(a_agency_id, b_agency_id):
        # Treat empty string and None alike before requiring identical ids.
        a_agency_id = a_agency_id or None
        b_agency_id = b_agency_id or None
        return self._MergeIdentical(a_agency_id, b_agency_id)
    # Every field in the scheme must merge cleanly for the agencies to merge.
    scheme = {'agency_id': _MergeAgencyId,
              'agency_name': self._MergeIdentical,
              'agency_url': self._MergeIdentical,
              'agency_timezone': self._MergeIdentical}
return self._SchemedMerge(scheme, a, b) | Merges two agencies.
To be merged, they are required to have the same id, name, url and
timezone. The remaining language attribute is taken from the new agency.
Args:
a: The first agency.
b: The second agency.
Returns:
The merged agency.
Raises:
MergeError: The agencies could not be merged. | juraj-google-style |
def _ConstructAndTestGradientForConfig(self, pool_func, input_sizes, output_sizes, window, strides, padding, data_format, data_type, use_gpu):
    jacob_a, jacob_n = self._getJacobians(pool_func, input_sizes, output_sizes, window, strides, padding, data_format, use_gpu, dtype=data_type.as_numpy_dtype)
    if data_type == dtypes.bfloat16:
        # bfloat16 is too coarse for a trustworthy numeric jacobian; recompute
        # only the numeric side in float32 and keep the analytic bfloat16 one.
        _, jacob_n = self._getJacobians(pool_func, input_sizes, output_sizes, window, strides, padding, data_format, use_gpu, dtype=np.float32)
    input_jacob_a, grad_jacob_a = jacob_a
    input_jacob_n, grad_jacob_n = jacob_n
    # Analytic and numeric jacobians must agree for the inputs...
    self.assertAllClose(input_jacob_a, input_jacob_n, rtol=0.001, atol=0.001)
self.assertAllClose(grad_jacob_a, grad_jacob_n, rtol=0.001, atol=0.001) | Verifies the gradients of a pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window: Tuple of kernel dims: planes, rows, cols.
strides: Tuple of strides for dims: planes, rows, cols.
padding: Padding type.
data_format: Data format string.
data_type: The data type to use to run the pooling operation.
use_gpu: Whether to run on GPU. | github-repos |
def tf_step(self, x, iteration, conjugate, residual, squared_residual):
    (x, next_iteration, conjugate, residual, squared_residual) = super(ConjugateGradient, self).tf_step(x, iteration, conjugate, residual, squared_residual)
    # A @ c_t: the matrix-vector product is supplied by the caller via fn_x.
    A_conjugate = self.fn_x(conjugate)
    if (self.damping > 0.0):
        # Damping adds damping * c_t, i.e. effectively uses (A + damping * I).
        A_conjugate = [(A_conj + (self.damping * conj)) for (A_conj, conj) in zip(A_conjugate, conjugate)]
    conjugate_A_conjugate = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(conj * A_conj)) for (conj, A_conj) in zip(conjugate, A_conjugate)])
    # Step size alpha; epsilon guards against division by ~0.
    alpha = (squared_residual / tf.maximum(x=conjugate_A_conjugate, y=util.epsilon))
    next_x = [(t + (alpha * conj)) for (t, conj) in zip(x, conjugate)]
    next_residual = [(res - (alpha * A_conj)) for (res, A_conj) in zip(residual, A_conjugate)]
    next_squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in next_residual])
    # beta mixes the new residual into the next search direction.
    beta = (next_squared_residual / tf.maximum(x=squared_residual, y=util.epsilon))
    next_conjugate = [(res + (beta * conj)) for (res, conj) in zip(next_residual, conjugate)]
return (next_x, next_iteration, next_conjugate, next_residual, next_squared_residual) | Iteration loop body of the conjugate gradient algorithm.
Args:
x: Current solution estimate $x_t$.
iteration: Current iteration counter $t$.
conjugate: Current conjugate $c_t$.
residual: Current residual $r_t$.
squared_residual: Current squared residual $r_t^2$.
Returns:
Updated arguments for next iteration. | codesearchnet |
def push(self, stream_id, timestamp, value):
    stream = DataStream.FromEncoded(stream_id)
    reading = IOTileReading(stream_id, timestamp, value)
    try:
        self.storage.push(stream, reading)
        return Error.NO_ERROR
    # Translate a full ring buffer into a packed subsystem error code.
    except StorageFullError:
return pack_error(ControllerSubsystem.SENSOR_LOG, SensorLogError.RING_BUFFER_FULL) | Push a value to a stream.
Args:
stream_id (int): The stream we want to push to.
timestamp (int): The raw timestamp of the value we want to
store.
value (int): The 32-bit integer value we want to push.
Returns:
int: Packed 32-bit error code. | juraj-google-style |
def _checkString(inputstring, description, minlength=0, maxlength=None):
    # Validate the validator's own arguments first, so raised messages
    # always refer to well-formed description/limits.
    if not isinstance(description, str):
        raise TypeError('The description should be a string. Given: {0!r}'.format(description))
    if not isinstance(inputstring, str):
        raise TypeError('The {0} should be a string. Given: {1!r}'.format(description, inputstring))
    if not isinstance(maxlength, (int, type(None))):
        raise TypeError('The maxlength must be an integer or None. Given: {0!r}'.format(maxlength))
    _checkInt(minlength, minvalue=0, maxvalue=None, description='minlength')
    if len(inputstring) < minlength:
        raise ValueError('The {0} is too short: {1}, but minimum value is {2}. Given: {3!r}'.format( \
            description, len(inputstring), minlength, inputstring))
    # maxlength=None means "no upper bound".
    if not maxlength is None:
        if maxlength < 0:
            raise ValueError('The maxlength must be positive. Given: {0}'.format(maxlength))
        if maxlength < minlength:
            raise ValueError('The maxlength must not be smaller than minlength. Given: {0} and {1}'.format( \
                maxlength, minlength))
        if len(inputstring) > maxlength:
            raise ValueError('The {0} is too long: {1}, but maximum value is {2}. Given: {3!r}'.format( \
description, len(inputstring), maxlength, inputstring)) | Check that the given string is valid.
Args:
* inputstring (string): The string to be checked
* description (string): Used in error messages for the checked inputstring
* minlength (int): Minimum length of the string
* maxlength (int or None): Maximum length of the string
Raises:
TypeError, ValueError
Uses the function :func:`_checkInt` internally. | juraj-google-style |
def authorize(self, http):
    # Wrap the given httplib2.Http so requests carry (and can refresh)
    # the stored google-auth credentials.
return google_auth_httplib2.AuthorizedHttp(self._google_auth_credentials, http=http) | Return an http client authorized with the google-auth credentials.
Args:
http: httplib2.Http, an http object to be used to make the refresh
request.
Returns:
google_auth_httplib2.AuthorizedHttp: An authorized http client. | github-repos |
def reset(self):
    # Never reset while tasks are still in flight.
    if self.running:
        raise RuntimeError('paco: executor is still running')
    self.pool.clear()
    self.observer.clear()
self.semaphore = asyncio.Semaphore(self.limit, loop=self.loop) | Resets the executer scheduler internal state.
Raises:
RuntimeError: is the executor is still running. | codesearchnet |
def fail(msg, extras=None):
    # Raising TestFailure is the framework's mechanism for marking the
    # currently running test as failed.
raise signals.TestFailure(msg, extras) | Explicitly fail a test.
Args:
msg: A string explaining the details of the failure.
extras: An optional field for extra information to be included in
test result.
Raises:
signals.TestFailure: Mark a test as failed. | github-repos |
def cos(cls, x: 'TensorFluent') -> 'TensorFluent':
    # Element-wise cosine; _unary_op applies tf.cos and casts to float32.
return cls._unary_op(x, tf.cos, tf.float32) | Returns a TensorFluent for the cos function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the cos function. | juraj-google-style |
def write(self, value):
    if (not isinstance(value, bool)):
        raise TypeError('Invalid value type, should be bool.')
    try:
        # The sysfs GPIO value file takes '1' (high) or '0' (low).
        if value:
            os.write(self._fd, b'1\n')
        else:
            os.write(self._fd, b'0\n')
    except OSError as e:
        raise GPIOError(e.errno, ('Writing GPIO: ' + e.strerror))
    try:
        # Rewind so the next read/write starts at the beginning of the file.
        os.lseek(self._fd, 0, os.SEEK_SET)
    except OSError as e:
raise GPIOError(e.errno, ('Rewinding GPIO: ' + e.strerror)) | Set the state of the GPIO to `value`.
Args:
value (bool): ``True`` for high state, ``False`` for low state.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `value` type is not bool. | codesearchnet |
def consume(self, source):
    # Preserve rule order from the stylesheet.
    manifest = OrderedDict()
    rules = parse_stylesheet(source, skip_comments=True, skip_whitespace=True)
    for rule in rules:
        name = self.digest_prelude(rule)
        # Only rules whose selector carries the expected prefix are kept.
        if (not name.startswith(RULE_BASE_PREFIX)):
            continue
        properties = self.digest_content(rule)
        manifest[name] = properties
return manifest | Parse source and consume tokens from tinycss2.
Arguments:
source (string): Source content to parse.
Returns:
dict: Retrieved rules. | codesearchnet |
def unexpected_disconnect(self, conn_or_internal_id):
    data = {'id': conn_or_internal_id}
    # sync=False: fire-and-forget; the action queue worker does the cleanup.
    action = ConnectionAction('force_disconnect', data, sync=False)
self._actions.put(action) | Notify that there was an unexpected disconnection of the device.
Any in progress operations are canceled cleanly and the device is transitioned
to a disconnected state.
Args:
conn_or_internal_id (string, int): Either an integer connection id or a string
internal_id | codesearchnet |
def __init__(self, paths=None, separator='/'):
    # paths is effectively mandatory despite the keyword default.
    if not paths:
        raise errors.FormatError('Missing paths value.')
    super(FileSourceType, self).__init__()
    self.paths = paths
self.separator = separator | Initializes a source type.
Args:
paths (Optional[str]): paths relative to the root of the file system.
separator (Optional[str]): path segment separator.
Raises:
FormatError: when paths is not set. | juraj-google-style |
def view(self, vleaf, fpath=None, cleanup=True, format=None):
    # Build a graphviz digraph of everything reachable from vleaf, then render it.
    graph = self.create_graphviz_digraph(vleaf, format=format)
graph.view(fpath, cleanup=cleanup) | View the graph.
Args:
vleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the reuslt.
fpath (`str`): The file path used to save.
cleanup (`bool`): Clean up the source file after rendering. Default is True.
format (str):
Force overwrite ``format`` (``'pdf', 'png', ...)``) configuration. | juraj-google-style |
def __init__(self, srcstate_id, nextstate_id, ilabel=None):
    # ilabel: the transition's input symbol; None when not specified.
    self.srcstate = srcstate_id
    self.nextstate = nextstate_id
self.ilabel = ilabel | The initialization function
Args:
srcstate_id (int): The source state identifier
nextstate_id (int): The destination state identifier
ilabel (str): The symbol corresponding to character for the transition | juraj-google-style |
def release_port(upnp, external_port):
    # Only UDP mappings are managed here.
    mapping = upnp.getspecificportmapping(external_port, 'UDP')
    if mapping is None:
        log.error('could not find a port mapping', external=external_port)
        return False
    else:
        log.debug('found existing port mapping', mapping=mapping)
    if upnp.deleteportmapping(external_port, 'UDP'):
        log.info('successfully released port mapping', external=external_port)
        return True
    # Deletion failed; the router may be holding a stale mapping.
    log.warning(
        'could not release port mapping, check your router for stale mappings',
return False | Try to release the port mapping for `external_port`.
Args:
external_port (int): the port that was previously forwarded to.
Returns:
success (boolean): if the release was successful. | juraj-google-style |
def _GetMostSignificantPathSegmentIndex(self, paths, similarity_weights, occurrence_weights, value_weights):
    if (not paths):
        raise ValueError('Missing paths.')
    number_of_paths = len(paths)
    path_segment_index = None
    # Pick the weighting strategy by how many paths must be compared:
    # 1 path -> value weights only; 2 -> add occurrence; 3+ -> add similarity.
    if (number_of_paths == 1):
        path_segment_index = self._GetPathSegmentIndexForValueWeights(value_weights)
    elif (number_of_paths == 2):
        path_segment_index = self._GetPathSegmentIndexForOccurrenceWeights(occurrence_weights, value_weights)
    elif (number_of_paths > 2):
        path_segment_index = self._GetPathSegmentIndexForSimilarityWeights(similarity_weights, occurrence_weights, value_weights)
return path_segment_index | Retrieves the index of the most significant path segment.
Args:
paths: a list of strings containing the paths.
similarity_weights: the similarity weights object (instance of
_PathSegmentWeights).
occurrence_weights: the occurrence weights object (instance of
_PathSegmentWeights).
value_weights: the value weights object (instance of _PathSegmentWeights).
Returns:
An integer containing the path segment index.
Raises:
ValueError: when paths is an empty list. | codesearchnet |
def standardize_weights(y, sample_weight=None, class_weight=None, sample_weight_mode=None):
    # Normalize sample_weight / class_weight into a single per-sample (or
    # per-timestep) weight array; when both are given they are multiplied.
    if isinstance(sample_weight, tuple):
        sample_weight = sample_weight[0]
    if sample_weight_mode is not None and sample_weight_mode != 'samplewise':
        if sample_weight_mode != 'temporal':
            raise ValueError('"sample_weight_mode should be None or "temporal". Found: ' + str(sample_weight_mode))
        # Temporal weighting needs a time dimension: targets must be >= 3D and
        # the weights must be a 2D (samples x timesteps) array.
        if len(y.shape) < 3:
            raise ValueError('Found a sample_weight array for an input with shape ' + str(y.shape) + '. Timestep-wise sample weighting (use of sample_weight_mode="temporal") is restricted to outputs that are at least 3D, i.e. that have a time dimension.')
        if sample_weight is not None and len(sample_weight.shape) != 2:
            raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) + '. In order to use timestep-wise sample weighting, you should pass a 2D sample_weight array.')
    # BUG FIX: the error message below said 'founssd'; corrected to 'found'.
    elif sample_weight is not None and len(sample_weight.shape) != 1:
        raise ValueError('Found a sample_weight array with shape {}. In order to use timestep-wise sample weights, you should specify sample_weight_mode="temporal" in compile(); found "{}" instead. If you just mean to use sample-wise weights, make sure your sample_weight array is 1D.'.format(sample_weight.shape, sample_weight_mode))
    if sample_weight is not None:
        if len(sample_weight.shape) > len(y.shape):
            raise ValueError('Found a sample_weight with shape' + str(sample_weight.shape) + '.Expected sample_weight with rank less than or equal to ' + str(len(y.shape)))
        # Static shape check only applies to non-symbolic weights.
        if not tensor_util.is_tf_type(sample_weight) and y.shape[:sample_weight.ndim] != sample_weight.shape:
            raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) + ' for an input with shape ' + str(y.shape) + '. sample_weight cannot be broadcast.')
    class_sample_weight = None
    if isinstance(class_weight, dict):
        if len(y.shape) > 2:
            raise ValueError('`class_weight` not supported for 3+ dimensional targets.')
        if tensor_util.is_tf_type(y):
            # Symbolic path: build a dense class -> weight lookup vector and
            # gather per-sample weights inside the graph.
            keys = np.array(sorted(class_weight.keys()))
            values = np.array([class_weight[i] for i in keys])
            weight_vector = np.zeros(np.max(keys) + 1)
            # NaN marks classes with no configured weight so check_numerics
            # below can surface them as an error at runtime.
            weight_vector[:] = np.nan
            weight_vector[keys] = values
            # One-hot 2D targets use argmax; everything else is treated as a
            # flat vector of integer class ids.
            y_classes = smart_cond.smart_cond(len(y.shape.as_list()) == 2 and backend.shape(y)[1] > 1, lambda: backend.argmax(y, axis=1), lambda: math_ops.cast(backend.reshape(y, (-1,)), dtypes.int64))
            class_sample_weight = array_ops.gather(weight_vector, y_classes)
            gen_array_ops.check_numerics(class_sample_weight, 'Invalid classes or class weights detected. NaN values indicate that an appropriate class weight could not be determined.')
            class_sample_weight = math_ops.cast(class_sample_weight, backend.floatx())
            if sample_weight is not None:
                sample_weight = math_ops.cast(tensor_conversion.convert_to_tensor_v2_with_dispatch(sample_weight), backend.floatx())
        else:
            # Eager/numpy path: derive class ids from the targets directly.
            y_classes = y
            if len(y.shape) == 2:
                if y.shape[1] > 1:
                    y_classes = np.argmax(y, axis=1)
                elif y.shape[1] == 1:
                    y_classes = np.reshape(y, y.shape[0])
            class_sample_weight = numpy_compat.np_asarray([class_weight[cls] for cls in y_classes if cls in class_weight])
            # A length mismatch means some classes had no configured weight.
            if len(class_sample_weight) != len(y_classes):
                existing_classes = set(y_classes)
                existing_class_weight = set(class_weight.keys())
                raise ValueError('`class_weight` must contain all classes in the data. The classes %s exist in the data but not in `class_weight`.' % (existing_classes - existing_class_weight))
    # Combine: both -> product; otherwise whichever one is present.
    if class_sample_weight is not None and sample_weight is not None:
        return class_sample_weight * sample_weight
    if sample_weight is not None:
        return sample_weight
    if class_sample_weight is not None:
        return class_sample_weight
return None | Performs sample weight validation and standardization.
Everything gets normalized to a single sample-wise (or timestep-wise)
weight array. If both `sample_weight` and `class_weight` are provided,
the weights are multiplied.
Args:
y: Numpy array or Tensor of model targets to be weighted.
sample_weight: User-provided `sample_weight` argument.
class_weight: User-provided `class_weight` argument.
sample_weight_mode: One of `None` or `"temporal"`. `"temporal"` indicated
that we expect 2D weight data that will be applied to the last 2
dimensions of the targets (i.e. we are weighting timesteps, not
samples).
Returns:
A numpy array of target weights, one entry per sample to weight.
Raises:
ValueError: In case of invalid user-provided arguments. | github-repos |
def from_known_inputs(cls, logs=None, metric_names=None, label_names=None):
    if (not metric_names):
        metric_names = ()
    if (not label_names):
        label_names = ()
    known_labels = []
    known_metrics = []
    # Keep only names that map to a known label/metric AND have an update
    # function; anything else is silently ignored.
    for l in label_descriptor.KnownLabels.__members__.values():
        if (l.update_label_func and (l.label_name in label_names)):
            known_labels.append(l)
    for m in metric_descriptor.KnownMetrics.__members__.values():
        if (m.update_op_func and (m.metric_name in metric_names)):
            known_metrics.append(m)
return cls(logs=logs, metrics=known_metrics, labels=known_labels) | An alternate constructor that assumes known metrics and labels.
This differs from the default constructor in that the metrics and labels
are iterables of names of 'known' metrics and labels respectively. The
names are used to obtain the metrics and labels from
:class:`endpoints_management.control.metric_descriptor.KnownMetrics` and
:class:`endpoints_management.control.label_descriptor.KnownLabels` respectively.
Names that don't correspond to a known metric or label are ignored; as
are metrics or labels that don't yet have a way of updating the
`ReportRequest` operation.
Args:
logs (iterable[string]): the name of logs to be included in the
`ReportRequest`
metric_names (iterable[string]): the name of a known metric to be
added to the `ReportRequest`
label_names (iterable[string]): the name of a known label to be added
to the `ReportRequest` | codesearchnet |
def starts_with_prefix_in_list(text, prefixes):
    # Short-circuit scan: any() stops at the first matching prefix.
    if any(text.startswith(prefix) for prefix in prefixes):
        return True
return False | Return True if the given string starts with one of the prefixes in the given list, otherwise
return False.
Arguments:
text (str): Text to check for prefixes.
prefixes (list): List of prefixes to check for.
Returns:
bool: True if the given text starts with any of the given prefixes, otherwise False. | juraj-google-style |
def select_best_resolution(original_size: tuple, possible_resolutions: list) -> tuple:
original_height, original_width = original_size
best_fit = None
max_effective_resolution = 0
min_wasted_resolution = float('inf')
for height, width in possible_resolutions:
scale = min(width / original_width, height / original_height)
downscaled_width, downscaled_height = (int(original_width * scale), int(original_height * scale))
effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
wasted_resolution = width * height - effective_resolution
if effective_resolution > max_effective_resolution or (effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution):
max_effective_resolution = effective_resolution
min_wasted_resolution = wasted_resolution
best_fit = (height, width)
return best_fit | Selects the best resolution from a list of possible resolutions based on the original size.
This is done by calculating the effective and wasted resolution for each possible resolution.
The best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution.
Args:
original_size (tuple):
The original size of the image in the format (height, width).
possible_resolutions (list):
A list of possible resolutions in the format [(height1, width1), (height2, width2), ...].
Returns:
tuple: The best fit resolution in the format (height, width). | github-repos |
def _checkString(inputstring, description, minlength=0, maxlength=None):
    # Validate our own arguments first so raised messages are well-formed.
    if (not isinstance(description, str)):
        raise TypeError('The description should be a string. Given: {0!r}'.format(description))
    if (not isinstance(inputstring, str)):
        raise TypeError('The {0} should be a string. Given: {1!r}'.format(description, inputstring))
    if (not isinstance(maxlength, (int, type(None)))):
        raise TypeError('The maxlength must be an integer or None. Given: {0!r}'.format(maxlength))
    _checkInt(minlength, minvalue=0, maxvalue=None, description='minlength')
    if (len(inputstring) < minlength):
        raise ValueError('The {0} is too short: {1}, but minimum value is {2}. Given: {3!r}'.format(description, len(inputstring), minlength, inputstring))
    # maxlength=None disables the upper bound entirely.
    if (not (maxlength is None)):
        if (maxlength < 0):
            raise ValueError('The maxlength must be positive. Given: {0}'.format(maxlength))
        if (maxlength < minlength):
            raise ValueError('The maxlength must not be smaller than minlength. Given: {0} and {1}'.format(maxlength, minlength))
raise ValueError('The {0} is too long: {1}, but maximum value is {2}. Given: {3!r}'.format(description, len(inputstring), maxlength, inputstring)) | Check that the given string is valid.
Args:
* inputstring (string): The string to be checked
* description (string): Used in error messages for the checked inputstring
* minlength (int): Minimum length of the string
* maxlength (int or None): Maximum length of the string
Raises:
TypeError, ValueError
Uses the function :func:`_checkInt` internally. | codesearchnet |
def parse_kegg_gene_metadata(infile):
    # Missing keys default to '' so callers can index without KeyError.
    metadata = defaultdict(str)
    with open(infile) as mf:
        kegg_parsed = bs_kegg.parse(mf.read())
    if 'DBLINKS' in kegg_parsed.keys():
        if 'UniProt' in kegg_parsed['DBLINKS']:
            # Accessions may be space-separated; keep only the first one.
            unis = str(kegg_parsed['DBLINKS']['UniProt']).split(' ')
            if isinstance(unis, list):
                metadata['uniprot'] = unis[0]
            else:
                metadata['uniprot'] = unis
        if 'NCBI-ProteinID' in kegg_parsed['DBLINKS']:
            metadata['refseq'] = str(kegg_parsed['DBLINKS']['NCBI-ProteinID'])
    # pdbs is explicitly None (not '') when no structure section exists.
    if 'STRUCTURE' in kegg_parsed.keys():
        metadata['pdbs'] = str(kegg_parsed['STRUCTURE']['PDB']).split(' ')
    else:
        metadata['pdbs'] = None
    if 'ORGANISM' in kegg_parsed.keys():
        metadata['taxonomy'] = str(kegg_parsed['ORGANISM'])
return metadata | Parse the KEGG flatfile and return a dictionary of metadata.
Dictionary keys are:
refseq
uniprot
pdbs
taxonomy
Args:
infile: Path to KEGG flatfile
Returns:
dict: Dictionary of metadata | juraj-google-style |
def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int=0) -> nn.Linear:
    index = index.to(layer.weight.device)
    # detach().clone(): copy the surviving rows/cols free of autograd history.
    W = layer.weight.index_select(dim, index).detach().clone()
    if layer.bias is not None:
        if dim == 1:
            # Pruning input features (dim=1) leaves the bias untouched.
            b = layer.bias.detach().clone()
        else:
            b = layer.bias[index].detach().clone()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
    # Temporarily disable grad so copy_ is not recorded by autograd.
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    if layer.bias is not None:
        new_layer.bias.requires_grad = False
        new_layer.bias.copy_(b.contiguous())
        new_layer.bias.requires_grad = True
return new_layer | Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (`torch.nn.Linear`): The layer to prune.
index (`torch.LongTensor`): The indices to keep in the layer.
dim (`int`, *optional*, defaults to 0): The dimension on which to keep the indices.
Returns:
`torch.nn.Linear`: The pruned layer as a new layer with `requires_grad=True`. | github-repos |
def hwvtep_add_ve_interface(self, **kwargs):
    name = kwargs.pop('name')
    ve_id = kwargs.pop('ve_id')
    vrrp_id = kwargs.pop('vrrp_id')
    # First request: attach the ve interface to the overlay gateway.
    ve_args = dict(name=name, ve_id=ve_id)
    method_name = 'overlay_gateway_ip_interface_ve_ve_id'
    method_class = self._brocade_tunnels
    ve_attr = getattr(method_class, method_name)
    config = ve_attr(**ve_args)
    output = self._callback(config)
    # Second request: bind the VRRP-E extended group to the same gateway.
    method_name = 'overlay_gateway_ip_interface_ve_vrrp_extended_group'
    vrrp_attr = getattr(method_class, method_name)
    vrrp_args = dict(name=name, vrrp_extended_group=vrrp_id)
    config = vrrp_attr(**vrrp_args)
    output = self._callback(config)
return output | Add virtual ethernet (ve) interface to the overlay-gateway
Args:
name (str): gateway-name
int_id (int): ve id
vrrp_id (int): VRPP-E group ID
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
None | juraj-google-style |
def list_group_maintainers(self, name):
    # Thin delegation to the service layer; this client only supplies its
    # connection context (url prefix, auth, session, send options).
    return self.service.list_group_maintainers(
        name, self.url_prefix, self.auth, self.session,
self.session_send_opts) | Get the maintainers of a group.
Args:
name (string): Name of group to query.
Returns:
(list[string]): List of maintainer names. | juraj-google-style |
def write_contents(self, filename, contents, directory=None):
    # Normalize "dir/ + name" into a zip-internal path (zip paths use '/').
    filepath = "{}/{}".format(directory.rstrip("/"), filename) if directory else filename
    self._write_to_zipfile(filepath, contents)
return filepath | write_contents: Write contents to filename in zip
Args:
contents: (str) contents of file
filename: (str) name of file in zip
directory: (str) directory in zipfile to write file to (optional)
Returns: path to file in zip | juraj-google-style |
def to_dict(self):
    output = copy.deepcopy(self.__dict__)
    # Nested config objects must be serialized recursively.
    if output['backbone_config'] is not None:
        output['backbone_config'] = self.backbone_config.to_dict()
    output['model_type'] = self.__class__.model_type
output['model_type'] = self.__class__.model_type
return output | Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, | github-repos |
def remove(self, *l):
    # Accept single items, several args, or nested lists -- flatten() turns
    # them into one uniform list of elements to match against.
    removeList = list(flatten(l))
self._remove(removeList, self.value) | remove elements from self.value by matching.
Create the exactly same single you want to delete and pass it(them) in.
Normally this method needs to be overridden by a subclass. It only looks inside the current instance's value, not recursively. There is no need for a recursive one anyway.
Args:
*l: a single element, a bunch of element seperated by comma, or a list of elements, or any combination. Element is what you match with. | juraj-google-style |
def bulk_lookup(self, api_name, keys):
    # Only cache hits are returned; keys with no cached value are absent.
    cached_data = {}
    for key in keys:
        value = self.lookup_value(api_name, key)
        if value is not None:
            cached_data[key] = value
return cached_data | Perform lookup on an enumerable of keys.
Args:
api_name: a string name of the API. Keys and values are segmented by api_name.
keys: an enumerable of string keys. | juraj-google-style |
def transform_to_length(nndata, length):
    # length=None means "leave the fingerprint as-is".
    if length is None:
        return nndata
    if length:
        # Pad missing coordination numbers with zero weight / empty info.
        for cn in range(length):
            if cn not in nndata.cn_weights:
                nndata.cn_weights[cn] = 0
                nndata.cn_nninfo[cn] = []
return nndata | Given NNData, transforms data to the specified fingerprint length
Args:
nndata: (NNData)
length: (int) desired length of NNData | juraj-google-style |
def register_codec(x):
    # Later encode/decode passes consult _codecs in registration order.
_codecs.append(x) | Registers a codec to use for encoding/decoding.
Args:
x: The codec object to register. The object must implement can_encode,
do_encode, can_decode, and do_decode. See the various _*Codec classes for
examples. | github-repos |
def _parse_email(self, val):
    ret = {
        'type': None,
        'value': None
    }
    # The type qualifier is optional and may be missing or malformed.
    try:
        ret['type'] = val[1]['type']
    except (KeyError, ValueError, TypeError):
        pass
    ret['value'] = val[3].strip()
    # Lazily create the email list on first use.
    try:
        self.vars['email'].append(ret)
    except AttributeError:
        self.vars['email'] = []
self.vars['email'].append(ret) | The function for parsing the vcard email addresses.
Args:
val (:obj:`list`): The value to parse. | juraj-google-style |
def get_volume_details(self, volume_name: str) -> dict:
    # self.volumes tracks names known to this object; unknown names are
    # rejected before hitting the docker client.
    if volume_name not in self.volumes:
        raise RuntimeError('No such volume found: ', volume_name)
    volume = self._client.volumes.get(volume_name)
return volume.attrs | Get details of the volume.
Args:
volume_name (str): Name of the volume
Returns:
dict, details of the volume | juraj-google-style |
def first_timestamp(self, event_key=None):
    if (event_key is None):
        # Negative timestamps mean "no event recorded"; those are skipped.
        timestamps = [self._trackers[key].first_timestamp for key in self._trackers]
        return min((timestamp for timestamp in timestamps if (timestamp >= 0)))
    else:
return self._trackers[event_key].first_timestamp | Obtain the first timestamp.
Args:
event_key: the type key of the sought events (e.g., constants.NAN_KEY).
If None, includes all event type keys.
Returns:
First (earliest) timestamp of all the events of the given type (or all
event types if event_key is None). | codesearchnet |
def input_fn(is_training, data_dir, batch_size, num_epochs=1, num_gpus=None,
             dtype=tf.float32):
  """Input function which provides batches for train or eval.

  Args:
    is_training: A boolean denoting whether the input is for training.
    data_dir: The directory containing the input data.
    batch_size: The number of samples per batch.
    num_epochs: The number of epochs to repeat the dataset.
    num_gpus: The number of gpus used for training.
    dtype: Data type to use for images/features.

  Returns:
    A dataset that can be used for iteration.
  """
  mlperf_log.resnet_print(key=mlperf_log.INPUT_ORDER)
  filenames = get_filenames(is_training, data_dir)
  dataset = tf.data.Dataset.from_tensor_slices(filenames)

  if is_training:
    # Shuffle the input shards so training does not see them in a fixed
    # order; _NUM_TRAIN_FILES covers all shards, giving a full shuffle.
    dataset = dataset.shuffle(buffer_size=_NUM_TRAIN_FILES)

  # Convert the dataset of file names into a dataset of serialized records.
  dataset = dataset.flat_map(tf.data.TFRecordDataset)

  return resnet_run_loop.process_record_dataset(
      dataset=dataset,
      is_training=is_training,
      batch_size=batch_size,
      shuffle_buffer=_SHUFFLE_BUFFER,
      parse_record_fn=parse_record,
      num_epochs=num_epochs,
      num_gpus=num_gpus,
      examples_per_epoch=_NUM_IMAGES['train'] if is_training else None,
      dtype=dtype
  )
Args:
is_training: A boolean denoting whether the input is for training.
data_dir: The directory containing the input data.
batch_size: The number of samples per batch.
num_epochs: The number of epochs to repeat the dataset.
num_gpus: The number of gpus used for training.
dtype: Data type to use for images/features
Returns:
A dataset that can be used for iteration. | juraj-google-style |
def get_user_info_for_username(self, username, _connection=None):
    """Look up a single user by username in the Users DN.

    The attribute compared against ``username`` is the configured
    LDAP_USER_LOGIN_ATTR.

    Args:
        username (str): Username of the user to search for.
        _connection (ldap3.Connection): A connection object to use when
            searching. If not given, a temporary connection will be
            created, and destroyed after use.

    Returns:
        dict: A dictionary of the user info from LDAP.
    """
    login_attr = self.config.get('LDAP_USER_LOGIN_ATTR')
    object_filter = self.config.get('LDAP_USER_OBJECT_FILTER')
    ldap_filter = '(&({0}={1}){2})'.format(login_attr, username, object_filter)
    return self.get_object(
        dn=self.full_user_search_dn,
        filter=ldap_filter,
        attributes=self.config.get('LDAP_GET_USER_ATTRIBUTES'),
        _connection=_connection,
    )
Users DN. Username attribute is the same as specified as
LDAP_USER_LOGIN_ATTR.
Args:
username (str): Username of the user to search for.
_connection (ldap3.Connection): A connection object to use when
searching. If not given, a temporary connection will be
created, and destroyed after use.
Returns:
dict: A dictionary of the user info from LDAP | codesearchnet |
def _routing_enabled():
    """Return whether metadata routing is enabled.

    Returns:
        enabled : bool
            Whether metadata routing is enabled. If the config is not set,
            it defaults to False.

    TODO: remove when the config key is no longer available in
    scikit-learn.
    """
    config = sklearn.get_config()
    return config.get('enable_metadata_routing', False)
Returns:
enabled : bool
Whether metadata routing is enabled. If the config is not set, it
defaults to False.
TODO: remove when the config key is no longer available in scikit-learn | github-repos |
def _CalculateElementsDataSize(self, context):
elements_data_size = None
if self._HasElementsDataSize():
elements_data_size = self._EvaluateElementsDataSize(context)
elif self._HasNumberOfElements():
element_byte_size = self._element_data_type_definition.GetByteSize()
if (element_byte_size is not None):
number_of_elements = self._EvaluateNumberOfElements(context)
elements_data_size = (number_of_elements * element_byte_size)
return elements_data_size | Calculates the elements data size.
Args:
context (Optional[DataTypeMapContext]): data type map context, used to
determine the size hint.
Returns:
int: the elements data size or None if not available. | codesearchnet |
def append_with_data(url, data):
    """Append the given URL with the given data OrderedDict.

    Args:
        url (str): The URL to append.
        data (obj): The key/value OrderedDict to append to the URL; when
            None the URL is returned untouched.

    Returns:
        str: The new URL.
    """
    if data is None:
        return url

    parts = list(urlparse(url))
    # Merge the new data into the existing query string, preserving order
    # and keeping blank values.
    existing = OrderedDict(parse_qsl(parts[4], keep_blank_values=True))
    existing.update(data)
    parts[4] = URLHelper.query_dict_to_string(existing)

    return urlunparse(parts)
Args:
url (str): The URL to append.
data (obj): The key value OrderedDict to append to the URL.
Returns:
str: The new URL. | codesearchnet |
def GetDisplayNameForPathSpec(cls, path_spec, mount_path=None, text_prepend=None):
    """Retrieves the display name of a path specification.

    Args:
        path_spec (dfvfs.PathSpec): path specification.
        mount_path (Optional[str]): path where the file system that is used
            by the path specification is mounted, such as "/mnt/image". The
            mount path will be stripped from the absolute path defined by
            the path specification.
        text_prepend (Optional[str]): text to prepend.

    Returns:
        str: human readable version of the path specification or None.
    """
    if (not path_spec):
        return None
    relative_path = cls.GetRelativePathForPathSpec(path_spec, mount_path=mount_path)
    if (not relative_path):
        # Without a relative path, fall back to the type indicator alone.
        return path_spec.type_indicator
    if text_prepend:
        relative_path = '{0:s}{1:s}'.format(text_prepend, relative_path)
    parent_path_spec = path_spec.parent
    # bzip2 and gzip are single-file compression layers; skip past them so
    # the VSS check below inspects the path spec of the containing volume.
    if (parent_path_spec and (path_spec.type_indicator in (dfvfs_definitions.TYPE_INDICATOR_BZIP2, dfvfs_definitions.TYPE_INDICATOR_GZIP))):
        parent_path_spec = parent_path_spec.parent
    if (parent_path_spec and (parent_path_spec.type_indicator == dfvfs_definitions.TYPE_INDICATOR_VSHADOW)):
        # Volume shadow snapshot: prefix the display name with the 1-based
        # store number, e.g. "VSS2:TSK:/path".
        store_index = getattr(path_spec.parent, 'store_index', None)
        if (store_index is not None):
            return 'VSS{0:d}:{1:s}:{2:s}'.format((store_index + 1), path_spec.type_indicator, relative_path)
    return '{0:s}:{1:s}'.format(path_spec.type_indicator, relative_path)
Args:
path_spec (dfvfs.PathSpec): path specification.
mount_path (Optional[str]): path where the file system that is used
by the path specification is mounted, such as "/mnt/image". The
mount path will be stripped from the absolute path defined by
the path specification.
text_prepend (Optional[str]): text to prepend.
Returns:
str: human readable version of the path specification or None. | codesearchnet |
def get_cv_idxs(n, cv_idx=0, val_pct=0.2, seed=42):
    """Get a list of index values for a validation set from a dataset.

    Args:
        n (int): total number of elements in the data set.
        cv_idx (int): which fold to take; the slice starts at
            ``cv_idx * int(val_pct * n)``.
        val_pct (int, float): validation set percentage.
        seed: seed value for numpy's RandomState.

    Returns:
        Array of indexes for the validation fold.
    """
    np.random.seed(seed)
    fold_size = int(val_pct * n)
    start = cv_idx * fold_size
    shuffled = np.random.permutation(n)
    return shuffled[start:start + fold_size]
Arguments:
n : int, Total number of elements in the data set.
cv_idx : int, starting index [idx_start = cv_idx*int(val_pct*n)]
val_pct : (int, float), validation set percentage
seed : seed value for RandomState
Returns:
list of indexes | juraj-google-style |
def ParseUserEngagedRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a timeline row that describes a user interacting with an app.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
    """
    query_hash = hash(query)
    event_data = WindowsTimelineUserEngagedEventData()
    event_data.package_identifier = self._GetRowValue(query_hash, row, 'PackageName')
    # The Payload column holds a UTF-8 encoded JSON blob; decode and parse
    # it before pulling out the optional fields below.
    payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload'))
    payload_json_string = payload_json_bytes.decode('utf-8')
    payload = json.loads(payload_json_string)
    if ('reportingApp' in payload):
        event_data.reporting_app = payload['reportingApp']
    if ('activeDurationSeconds' in payload):
        event_data.active_duration_seconds = int(payload['activeDurationSeconds'])
    # StartTime is stored as a POSIX timestamp.
    timestamp = self._GetRowValue(query_hash, row, 'StartTime')
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_START)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row. | codesearchnet |
def container_type_mismatch(self, stack, cls, mutations, name):
    """Report an invalid combination of container annotation and mutation.

    Args:
        stack: the frame stack.
        cls: the container type.
        mutations: a dict of {parameter name: (annotated types, new types)}.
        name: the variable name (or None).
    """
    details = f'Container: {self._pp.print_generic_type(cls)}\n'
    allowed_contained = ''
    new_contained = ''
    for formal in cls.formal_type_parameters.keys():
        if formal in mutations:
            params, values, _ = mutations[formal]
            allowed_content = self._pp.print_type_of_instance(cls.get_formal_type_parameter(formal))
            # Only types present in the new values but absent from the
            # annotated parameters count as "new" contained types.
            new_content = self._pp.join_printed_types(sorted((self._pp.print_type(v) for v in set(values.data) - set(params.data))))
            allowed_contained += f' {formal}: {allowed_content}\n'
            new_contained += f' {formal}: {new_content}\n'
    annotation = self._pp.print_type_of_instance(cls)
    details += 'Allowed contained types (from annotation %s):\n%sNew contained types:\n%s' % (annotation, allowed_contained, new_contained)
    # The error message only carries the variable name when one is known.
    suffix = '' if name is None else ' for ' + name
    err_msg = f'New container type{suffix} does not match type annotation'
    self.error(stack, err_msg, details=details)
Args:
stack: the frame stack
cls: the container type
mutations: a dict of {parameter name: (annotated types, new types)}
name: the variable name (or None) | github-repos |
def remove_app(name, site):
    """Remove an IIS application.

    Args:
        name (str): The application name.
        site (str): The IIS site name.

    Returns:
        bool: True if successful, otherwise False.

    Raises:
        CommandExecutionError: If the PowerShell command fails.

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.remove_app name='app0' site='site0'
    """
    # Nothing to do when the application is already gone.
    if name not in list_apps(site):
        log.debug('Application already absent: %s', name)
        return True

    ps_cmd = [
        'Remove-WebApplication',
        '-Name', "'{0}'".format(name),
        '-Site', "'{0}'".format(site),
    ]

    cmd_ret = _srvmgr(ps_cmd)
    if cmd_ret['retcode'] != 0:
        raise CommandExecutionError(
            'Unable to remove application: {0}\nError: {1}'.format(
                name, cmd_ret['stderr']))

    # Verify the removal actually took effect.
    if name in list_apps(site):
        log.error('Unable to remove application: %s', name)
        return False

    log.debug('Application removed successfully: %s', name)
    return True
Args:
name (str): The application name.
site (str): The IIS site name.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.remove_app name='app0' site='site0' | juraj-google-style |
def get_temp_dir():
    """Returns a temporary directory for use during tests.

    There is no need to delete the directory after the test.

    Returns:
        The temporary directory.
    """
    return _googletest.GetTempDir()
There is no need to delete the directory after the test.
@compatibility(TF2)
This function is removed in TF2. Please use `TestCase.get_temp_dir` instead
in a test case.
Outside of a unit test, obtain a temporary directory through Python's
`tempfile` module.
@end_compatibility
Returns:
The temporary directory. | github-repos |
def create_as(access_token, subscription_id, resource_group, as_name, update_domains, fault_domains, location):
    """Create availability set.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        as_name (str): Name of the new availability set.
        update_domains (int): Number of update domains.
        fault_domains (int): Number of fault domains.
        location (str): Azure data center location. E.g. westus.

    Returns:
        HTTP response. JSON body of the availability set properties.
    """
    endpoint = (
        '{0}/subscriptions/{1}/resourceGroups/{2}'
        '/providers/Microsoft.Compute/availabilitySets/{3}'
        '?api-version={4}'.format(get_rm_endpoint(), subscription_id,
                                  resource_group, as_name, COMP_API))
    as_body = {
        'location': location,
        'properties': {
            'platformUpdateDomainCount': update_domains,
            'platformFaultDomainCount': fault_domains,
        },
    }
    return do_put(endpoint, json.dumps(as_body), access_token)
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
as_name (str): Name of the new availability set.
update_domains (int): Number of update domains.
fault_domains (int): Number of fault domains.
location (str): Azure data center location. E.g. westus.
Returns:
HTTP response. JSON body of the availability set properties. | codesearchnet |
def _handle_join_dags(self, request):
if (request.payload['names'] is None):
send_response = (len(self._dags_running) <= 1)
else:
send_response = all([(name not in self._dags_running.keys()) for name in request.payload['names']])
if send_response:
return Response(success=True, uid=request.uid)
else:
return None | The handler for the join_dags request.
If dag names are given in the payload only return a valid Response if none of
the dags specified by the names are running anymore. If no dag names are given,
wait for all dags except one, which by design is the one that issued the request,
to be finished.
Args:
request (Request): Reference to a request object containing the
incoming request.
Returns:
Response: A response object containing the following fields:
- success: True if all dags the request was waiting for have
completed. | codesearchnet |
def from_file(cls, fp, is_outlook=False):
    """Init a new object from a file path.

    Args:
        fp (string): file path of raw email
        is_outlook (boolean): if True is an Outlook email

    Returns:
        Instance of MailParser
    """
    log.debug("Parsing email from file {!r}".format(fp))
    with ported_open(fp) as f:
        message = email.message_from_file(f)
    if is_outlook:
        # Outlook emails are first converted to a temporary plain file;
        # remove that temp file once it has been parsed.
        log.debug("Removing temp converted Outlook email {!r}".format(fp))
        os.remove(fp)
    return cls(message)
Args:
fp (string): file path of raw email
is_outlook (boolean): if True is an Outlook email
Returns:
Instance of MailParser | juraj-google-style |
def _select_in_voltage_range(self, min_voltage=None, max_voltage=None):
min_voltage = min_voltage if min_voltage is not None \
else self.min_voltage
max_voltage = max_voltage if max_voltage is not None \
else self.max_voltage
return list(filter(lambda p: min_voltage <= p.voltage <= max_voltage,
self.voltage_pairs)) | Selects VoltagePairs within a certain voltage range.
Args:
min_voltage (float): The minimum allowable voltage for a given
step.
max_voltage (float): The maximum allowable voltage allowable for a
given step.
Returns:
A list of VoltagePair objects | juraj-google-style |
def LR_predict(w, b, X):
    """Predict labels (0 or 1) using learned logistic regression parameters.

    Arguments:
        w -- weights, a numpy array of size (num_px * num_px * 3, 1)
        b -- bias, a scalar
        X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
        Y_prediction -- a numpy array (vector) of shape (1, m) containing
            all 0/1 predictions for the examples in X
    """
    m = X.shape[1]
    w = w.reshape(X.shape[0], 1)

    # Probability that each example's label is 1.
    A = sigmoid(np.dot(w.T, X) + b)

    # Threshold at 0.5 in a single vectorized step instead of a Python
    # loop over columns; yields the same float 0.0/1.0 values as before.
    Y_prediction = (A > 0.5).astype(np.float64)

    assert Y_prediction.shape == (1, m)
    return Y_prediction
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X | juraj-google-style |
def parse_hpo_diseases(hpo_lines):
    """Parse hpo disease phenotypes.

    Args:
        hpo_lines(iterable(str)): lines of the HPO disease file; the first
            line is a header and is skipped.

    Returns:
        diseases(dict): A dictionary with "SOURCE:disease_nr" ids as keys
            and dicts holding the disease number, source, and sets of
            associated HGNC symbols and HPO terms as values.
    """
    diseases = {}
    LOG.info("Parsing hpo diseases...")
    for index, line in enumerate(hpo_lines):
        # Skip the header line.
        if index == 0:
            continue
        # Skip empty/too-short lines.
        if not len(line) > 3:
            continue
        disease_info = parse_hpo_disease(line)
        if not disease_info:
            continue
        disease_nr = disease_info['disease_nr']
        hgnc_symbol = disease_info['hgnc_symbol']
        hpo_term = disease_info['hpo_term']
        source = disease_info['source']
        disease_id = "{0}:{1}".format(source, disease_nr)
        # First occurrence of a disease creates its aggregate entry; later
        # lines only add to the symbol/term sets.
        if disease_id not in diseases:
            diseases[disease_id] = {
                'disease_nr': disease_nr,
                'source': source,
                'hgnc_symbols': set(),
                'hpo_terms': set(),
            }
        if hgnc_symbol:
            diseases[disease_id]['hgnc_symbols'].add(hgnc_symbol)
        if hpo_term:
            diseases[disease_id]['hpo_terms'].add(hpo_term)
    LOG.info("Parsing done.")
    return diseases
Args:
hpo_lines(iterable(str))
Returns:
diseases(dict): A dictionary with mim numbers as keys | juraj-google-style |
def rotation_matrix(self):
    """Get the 3x3 rotation matrix equivalent of the quaternion rotation.

    Returns:
        A 3x3 orthogonal rotation matrix as a 3x3 Numpy array.

    Note:
        This feature only makes sense when referring to a unit quaternion;
        the quaternion is implicitly normalised first.
    """
    self._normalise()
    product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())
    # Drop the first row and first column of the 4x4 product to obtain the
    # 3x3 rotation matrix. (The source line was garbled to
    # "[1:][(:, 1:)]", which is not valid Python.)
    return product_matrix[1:][:, 1:]
Returns:
A 3x3 orthogonal rotation matrix as a 3x3 Numpy array
Note:
This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one. | codesearchnet |
def on_predict_begin(self, logs=None):
    """Called at the beginning of prediction.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently no data is passed to this argument for this
            method but that may change in the future.
    """
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently no data is passed to this argument for this
method but that may change in the future. | github-repos |
def validate_and_slice_inputs(names_to_saveables):
    """Returns the variables and names that will be used for a Saver.

    Args:
        names_to_saveables: A dict (k, v) where k is the name of an
            operation and v is an operation to save or a
            BaseSaverBuilder.Saver.

    Returns:
        A list of SaveableObjects.

    Raises:
        TypeError: If any of the keys are not strings or any of the values
            are not one of Tensor or Variable or a trackable operation.
        ValueError: If the same operation is given in more than one value
            (this also applies to slices of SlicedVariables).
    """
    saveables = []
    # Ops already converted are tracked by identity so duplicates can be
    # detected by _add_saveable.
    seen_ops = object_identity.ObjectIdentitySet()
    # Sort by name for a deterministic processing order.
    for name, op in sorted(names_to_saveables.items(), key=lambda x: x[0]):
        for converted_saveable_object in saveable_objects_for_op(op, name):
            _add_saveable(saveables, seen_ops, converted_saveable_object)
    return saveables
Args:
names_to_saveables: A dict (k, v) where k is the name of an operation and
v is an operation to save or a BaseSaverBuilder.Saver.
Returns:
A list of SaveableObjects.
Raises:
TypeError: If any of the keys are not strings or any of the
values are not one of Tensor or Variable or a trackable operation.
ValueError: If the same operation is given in more than one value
(this also applies to slices of SlicedVariables). | github-repos |
def read_infile(infile: Union[Path, str], from_words=False,
                word_column: int = WORD_COLUMN, pos_column: int = POS_COLUMN,
                tag_column: int = TAG_COLUMN, max_sents: int = -1,
                read_only_words: bool = False) -> List[Tuple[List, Union[List, None]]]:
    """Reads an input file in CONLL-U format.

    Args:
        infile: a path to a file.
        from_words: whether the file contains one word per line with no
            CONLL-U indexing (implies ``read_only_words``).
        word_column: column containing words (default=1).
        pos_column: column containing part-of-speech labels (default=3).
        tag_column: column containing fine-grained tags (default=5).
        max_sents: maximal number of sents to read (-1 for no limit).
        read_only_words: whether to read only words.

    Returns:
        a list of sentences. Each item contains a word sequence and a tag
        sequence, which is ``None`` in case ``read_only_words = True``.
    """
    answer, curr_word_sent, curr_tag_sent = [], [], []
    if from_words:
        word_column, read_only_words = 0, True
    with open(infile, 'r', encoding='utf8') as fin:
        for line in fin:
            line = line.strip()
            # Skip comment lines. NOTE(review): this condition was
            # truncated in the source copy ("line.startswith('"); '#' is
            # the CONLL-U comment marker -- confirm against upstream.
            if line.startswith('#'):
                continue
            if line == '':
                # Sentence boundary: flush the accumulated sentence.
                if len(curr_word_sent) > 0:
                    if read_only_words:
                        curr_tag_sent = None
                    answer.append((curr_word_sent, curr_tag_sent))
                    curr_tag_sent, curr_word_sent = [], []
                    if len(answer) == max_sents:
                        break
                continue
            splitted = line.split('\t')
            index = splitted[0]
            # In CONLL-U mode, skip multiword/empty-node rows whose index
            # is not a plain integer.
            if (not from_words) and (not index.isdigit()):
                continue
            curr_word_sent.append(splitted[word_column])
            if not read_only_words:
                pos, tag = splitted[pos_column], splitted[tag_column]
                # '_' means no fine-grained features: use the POS alone.
                tag = pos if tag == '_' else '{},{}'.format(pos, tag)
                curr_tag_sent.append(tag)
    # Flush a trailing sentence that was not followed by a blank line.
    if len(curr_word_sent) > 0:
        if read_only_words:
            curr_tag_sent = None
        answer.append((curr_word_sent, curr_tag_sent))
    return answer
Args:
infile: a path to a file
word_column: column containing words (default=1)
pos_column: column containing part-of-speech labels (default=3)
tag_column: column containing fine-grained tags (default=5)
max_sents: maximal number of sents to read
read_only_words: whether to read only words
Returns:
a list of sentences. Each item contains a word sequence and a tag sequence, which is ``None``
in case ``read_only_words = True`` | codesearchnet |
def remove(self, dic):
    """Remove stored pairs by passing an identical dict.

    Args:
        dic (dict): key and value pairs to remove.
    """
    # Build a Pair per (key, value) entry and delegate removal to the
    # internal _remove helper, one pair at a time.
    for kw in dic:
        removePair = Pair(kw, dic[kw])
        self._remove([removePair])
def get_vulnerability_chains(
    current_node,
    sink,
    def_use,
    chain=None
):
    """Traverse the def-use graph to find all paths from source to sink.

    Args:
        current_node: node to continue the traversal from.
        sink: the node at which a chain terminates.
        def_use (dict): mapping of node -> list of nodes that use it.
        chain (list(Node)): path of nodes accumulated so far between source
            and sink; defaults to an empty path.

    Yields:
        list(Node): each complete path between source and sink (the sink
        itself is not included in the yielded path).
    """
    # Avoid the mutable default argument ``chain=[]``: a shared default
    # list is a latent bug should any caller (or future edit) mutate it.
    if chain is None:
        chain = []
    for use in def_use[current_node]:
        if use == sink:
            yield chain
        else:
            vuln_chain = list(chain)
            vuln_chain.append(use)
            yield from get_vulnerability_chains(
                use,
                sink,
                def_use,
                vuln_chain
            )
def check(self, orb):
    """Check whether the listener is triggered.

    Args:
        orb (Orbit): the current state to evaluate.

    Return:
        bool: True if there is a zero-crossing of the watched parameter
        between the previously seen state (``self.prev``) and ``orb``.
    """
    if self.prev is None:
        # No previous state yet: a crossing cannot be detected.
        return False
    return np.sign(self(orb)) != np.sign(self(self.prev))
Args:
orb (Orbit):
Return:
bool: True if there is a zero-crossing for the parameter watched by the listener | juraj-google-style |
def _ParseBinaryDataAsString(self, parser_mediator, binary_data_value):
if (not binary_data_value):
return None
try:
return binary_data_value.decode('utf-8')
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning('invalid binary data string value: {0:s}'.format(repr(binary_data_value)))
return None | Parses a binary data value as string
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
binary_data_value (bytes): binary data value
(CSSM_DB_ATTRIBUTE_FORMAT_BLOB)
Returns:
str: binary data value formatted as a string or None if no string could
be extracted or binary data value is None (NULL). | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.